
firewire: allow explicit flushing of iso packet completions

Extend the kernel and userspace APIs to allow reporting all currently
completed isochronous packets, even if the next interrupt packet has not
yet been reached.  This is required to determine the status of the
packets at the end of a paused or stopped stream, and useful for more
precise synchronization of audio streams.

Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Clemens Ladisch authored 2012-03-18 19:06:39 +01:00; committed by Stefan Richter
parent 18d627113b
commit d1bbd20972
7 changed files with 127 additions and 13 deletions
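
A hypothetical userspace sketch (not part of this commit) of how the new ioctl can be used to obtain the final packet status of a paused stream; "fd" is assumed to be an open /dev/fw* character device with O_NONBLOCK set, on which an isochronous context was already created with FW_CDEV_IOC_CREATE_ISO_CONTEXT, and report_completed_packets() is an illustrative name:

#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

static int report_completed_packets(int fd)
{
	/* the cdev ABI supports one iso context per fd, addressed as handle 0 */
	struct fw_cdev_flush_iso flush = { .handle = 0 };
	char buf[16384];
	ssize_t len;

	if (ioctl(fd, FW_CDEV_IOC_FLUSH_ISO, &flush) < 0)
		return -1;

	/*
	 * Events generated by the flush are delivered synchronously, so they
	 * are already readable when the ioctl returns; with O_NONBLOCK the
	 * loop ends as soon as the event queue is drained.
	 */
	while ((len = read(fd, buf, sizeof(buf))) > 0) {
		const struct fw_cdev_event_common *e = (const void *)buf;

		if (e->type == FW_CDEV_EVENT_ISO_INTERRUPT)
			/* parse struct fw_cdev_event_iso_interrupt from buf */;
	}

	return 0;
}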

drivers/firewire/core-card.c

@@ -650,6 +650,11 @@ static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
 {
 }
 
+static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
+{
+	return -ENODEV;
+}
+
 static const struct fw_card_driver dummy_driver_template = {
 	.read_phy_reg		= dummy_read_phy_reg,
 	.update_phy_reg		= dummy_update_phy_reg,
@@ -662,6 +667,7 @@ static const struct fw_card_driver dummy_driver_template = {
 	.set_iso_channels	= dummy_set_iso_channels,
 	.queue_iso		= dummy_queue_iso,
 	.flush_queue_iso	= dummy_flush_queue_iso,
+	.flush_iso_completions	= dummy_flush_iso_completions,
 };
 
 void fw_card_release(struct kref *kref)

drivers/firewire/core-cdev.c

@@ -438,6 +438,7 @@ union ioctl_arg {
 	struct fw_cdev_send_phy_packet		send_phy_packet;
 	struct fw_cdev_receive_phy_packets	receive_phy_packets;
 	struct fw_cdev_set_iso_channels		set_iso_channels;
+	struct fw_cdev_flush_iso		flush_iso;
 };
 
 static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
@@ -1168,6 +1169,16 @@ static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
 	return fw_iso_context_stop(client->iso_context);
 }
 
+static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
+{
+	struct fw_cdev_flush_iso *a = &arg->flush_iso;
+
+	if (client->iso_context == NULL || a->handle != 0)
+		return -EINVAL;
+
+	return fw_iso_context_flush_completions(client->iso_context);
+}
+
 static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
 {
 	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
@@ -1589,6 +1600,7 @@ static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
 	[0x15] = ioctl_send_phy_packet,
 	[0x16] = ioctl_receive_phy_packets,
 	[0x17] = ioctl_set_iso_channels,
+	[0x18] = ioctl_flush_iso,
 };
 
 static int dispatch_ioctl(struct client *client,

drivers/firewire/core-iso.c

@@ -191,6 +191,12 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
 }
 EXPORT_SYMBOL(fw_iso_context_queue_flush);
 
+int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
+{
+	return ctx->card->driver->flush_iso_completions(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_flush_completions);
+
 int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
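
A hedged sketch of a hypothetical in-kernel caller of the new export (not part of this commit; my_stream_pause() and the warning text are illustrative), e.g. an audio driver that wants the final completion status when pausing a stream:

#include <linux/firewire.h>
#include <linux/printk.h>

static void my_stream_pause(struct fw_iso_context *ctx)
{
	int err;

	fw_iso_context_stop(ctx);

	/* report packets that completed before the context was stopped */
	err = fw_iso_context_flush_completions(ctx);
	if (err < 0)
		pr_warn("flushing iso completions failed: %d\n", err);
}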

drivers/firewire/core.h

@@ -106,6 +106,8 @@ struct fw_card_driver {
 	void (*flush_queue_iso)(struct fw_iso_context *ctx);
 
+	int (*flush_iso_completions)(struct fw_iso_context *ctx);
+
 	int (*stop_iso)(struct fw_iso_context *ctx);
 };

drivers/firewire/ohci.c

@@ -172,6 +172,9 @@ struct iso_context {
 	struct context context;
 	void *header;
 	size_t header_length;
+	unsigned long flushing_completions;
+	u32 mc_buffer_bus;
+	u16 mc_completed;
 	u16 last_timestamp;
 	u8 sync;
 	u8 tags;
@@ -2749,28 +2752,51 @@ static int handle_ir_buffer_fill(struct context *context,
 {
 	struct iso_context *ctx =
 		container_of(context, struct iso_context, context);
+	unsigned int req_count, res_count, completed;
 	u32 buffer_dma;
 
-	if (last->res_count != 0)
+	req_count = le16_to_cpu(last->req_count);
+	res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+	completed = req_count - res_count;
+	buffer_dma = le32_to_cpu(last->data_address);
+
+	if (completed > 0) {
+		ctx->mc_buffer_bus = buffer_dma;
+		ctx->mc_completed = completed;
+	}
+
+	if (res_count != 0)
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
-	buffer_dma = le32_to_cpu(last->data_address);
 	dma_sync_single_range_for_cpu(context->ohci->card.device,
 				      buffer_dma & PAGE_MASK,
 				      buffer_dma & ~PAGE_MASK,
-				      le16_to_cpu(last->req_count),
-				      DMA_FROM_DEVICE);
+				      completed, DMA_FROM_DEVICE);
 
-	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
+	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
 		ctx->base.callback.mc(&ctx->base,
-				      le32_to_cpu(last->data_address) +
-				      le16_to_cpu(last->req_count),
+				      buffer_dma + completed,
 				      ctx->base.callback_data);
+		ctx->mc_completed = 0;
+	}
 
 	return 1;
 }
 
+static void flush_ir_buffer_fill(struct iso_context *ctx)
+{
+	dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
+				      ctx->mc_buffer_bus & PAGE_MASK,
+				      ctx->mc_buffer_bus & ~PAGE_MASK,
+				      ctx->mc_completed, DMA_FROM_DEVICE);
+
+	ctx->base.callback.mc(&ctx->base,
+			      ctx->mc_buffer_bus + ctx->mc_completed,
+			      ctx->base.callback_data);
+	ctx->mc_completed = 0;
+}
+
 static inline void sync_it_packet_for_cpu(struct context *context,
 					  struct descriptor *pd)
 {
@@ -2925,8 +2951,10 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	if (ret < 0)
 		goto out_with_header;
 
-	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
 		set_multichannel_mask(ohci, 0);
+		ctx->mc_completed = 0;
+	}
 
 	return &ctx->base;
@@ -3388,6 +3416,39 @@ static void ohci_flush_queue_iso(struct fw_iso_context *base)
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 }
 
+static int ohci_flush_iso_completions(struct fw_iso_context *base)
+{
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	int ret = 0;
+
+	tasklet_disable(&ctx->context.tasklet);
+
+	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
+		context_tasklet((unsigned long)&ctx->context);
+
+		switch (base->type) {
+		case FW_ISO_CONTEXT_TRANSMIT:
+		case FW_ISO_CONTEXT_RECEIVE:
+			if (ctx->header_length != 0)
+				flush_iso_completions(ctx);
+			break;
+		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+			if (ctx->mc_completed != 0)
+				flush_ir_buffer_fill(ctx);
+			break;
+		default:
+			ret = -ENOSYS;
+		}
+
+		clear_bit_unlock(0, &ctx->flushing_completions);
+		smp_mb__after_clear_bit();
+	}
+
+	tasklet_enable(&ctx->context.tasklet);
+
+	return ret;
+}
+
 static const struct fw_card_driver ohci_driver = {
 	.enable			= ohci_enable,
 	.read_phy_reg		= ohci_read_phy_reg,
@@ -3405,6 +3466,7 @@ static const struct fw_card_driver ohci_driver = {
 	.set_iso_channels	= ohci_set_iso_channels,
 	.queue_iso		= ohci_queue_iso,
 	.flush_queue_iso	= ohci_flush_queue_iso,
+	.flush_iso_completions	= ohci_flush_iso_completions,
 	.start_iso		= ohci_start_iso,
 	.stop_iso		= ohci_stop_iso,
 };
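
A note on the design visible above: tasklet_disable() keeps the context tasklet from running concurrently while context_tasklet() performs the completion processing synchronously; the flushing_completions bit lock makes the flush non-reentrant, so a client callback that itself triggers another flush returns immediately instead of recursing, and smp_mb__after_clear_bit() pairs the lock release with a full barrier so a subsequent flush attempt observes it.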

include/linux/firewire-cdev.h

@@ -212,10 +212,11 @@ struct fw_cdev_event_request2 {
  * @header: Stripped headers, if any
  *
  * This event is sent when the controller has completed an &fw_cdev_iso_packet
- * with the %FW_CDEV_ISO_INTERRUPT bit set, or when there have been so many
- * completed packets without the interrupt bit set that the kernel's internal
- * buffer for @header is about to overflow. (In the latter case, kernels with
- * ABI version < 5 drop header data up to the next interrupt packet.)
+ * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
+ * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
+ * without the interrupt bit set that the kernel's internal buffer for @header
+ * is about to overflow. (In the last case, kernels with ABI version < 5 drop
+ * header data up to the next interrupt packet.)
  *
  * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
  *
@@ -271,7 +272,8 @@ struct fw_cdev_event_iso_interrupt {
  * This event is sent in multichannel contexts (context type
  * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
  * chunks that have been completely filled and that have the
- * %FW_CDEV_ISO_INTERRUPT bit set.
+ * %FW_CDEV_ISO_INTERRUPT bit set, or when explicitly requested with
+ * %FW_CDEV_IOC_FLUSH_ISO.
  *
  * The buffer is continuously filled with the following data, per packet:
  * - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt,
@@ -421,6 +423,9 @@ union fw_cdev_event {
 #define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets)
 #define FW_CDEV_IOC_SET_ISO_CHANNELS    _IOW('#', 0x17, struct fw_cdev_set_iso_channels)
 
+/* available since kernel version 3.4 */
+#define FW_CDEV_IOC_FLUSH_ISO           _IOW('#', 0x18, struct fw_cdev_flush_iso)
+
 /*
  * ABI version history
  * 1 (2.6.22) - initial version
@@ -445,6 +450,7 @@ union fw_cdev_event {
  *              %FW_CDEV_IOC_SET_ISO_CHANNELS
  * 5 (3.4)    - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to
  *              avoid dropping data
+ *            - added %FW_CDEV_IOC_FLUSH_ISO
  */
 
 /**
@@ -854,6 +860,25 @@ struct fw_cdev_stop_iso {
 	__u32 handle;
 };
 
+/**
+ * struct fw_cdev_flush_iso - flush completed iso packets
+ * @handle: handle of isochronous context to flush
+ *
+ * For %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE contexts,
+ * report any completed packets.
+ *
+ * For %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL contexts, report the current
+ * offset in the receive buffer, if it has changed; this is typically in the
+ * middle of some buffer chunk.
+ *
+ * Any %FW_CDEV_EVENT_ISO_INTERRUPT or %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
+ * events generated by this ioctl are sent synchronously, i.e., are available
+ * for reading from the file descriptor when this ioctl returns.
+ */
+struct fw_cdev_flush_iso {
+	__u32 handle;
+};
+
 /**
  * struct fw_cdev_get_cycle_timer - read cycle timer register
  * @local_time: system time, in microseconds since the Epoch

include/linux/firewire.h

@@ -426,6 +426,7 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
 			 struct fw_iso_buffer *buffer,
 			 unsigned long payload);
 void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
+int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
 int fw_iso_context_start(struct fw_iso_context *ctx,
 			 int cycle, int sync, int tags);
 int fw_iso_context_stop(struct fw_iso_context *ctx);