firewire: optimize iso queueing by setting wake only after the last packet
When queueing iso packets, the run time is dominated by the two MMIO accesses
that set the DMA context's wake bit.  Because most drivers submit packets in
batches, we can save much time by removing all but the last wakeup.

The internal kernel API is changed to require a call to
fw_iso_context_queue_flush() after a batch of queued packets.  The user space
API does not change, so one call to FW_CDEV_IOC_QUEUE_ISO must specify
multiple packets to take advantage of this optimization.

In my measurements, this patch reduces the time needed to queue fifty skip
packets from userspace to one sixth on a 2.5 GHz CPU, or to one third at
800 MHz.

Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
parent f30e6d3e41
commit 13882a82ee
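For illustration only (not part of the patch): a rough sketch of how an in-kernel client would use the changed API, assuming an isochronous context and DMA-mapped buffer that were set up elsewhere with fw_iso_context_create() and fw_iso_buffer_init(). The helper name queue_skip_packets() and its parameters are hypothetical.

#include <linux/firewire.h>

/*
 * Hypothetical helper: queue a batch of skip packets, then wake the
 * DMA context once.  Before this patch, every fw_iso_context_queue()
 * call performed the wake MMIO writes itself; now only the single
 * fw_iso_context_queue_flush() at the end does.
 */
static int queue_skip_packets(struct fw_iso_context *ctx,
			      struct fw_iso_buffer *buffer,
			      unsigned int count)
{
	struct fw_iso_packet packet = {
		.skip = 1,	/* transmit nothing in this cycle */
	};
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		/* ask for an interrupt only on the last packet of the batch */
		packet.interrupt = (i == count - 1);

		err = fw_iso_context_queue(ctx, &packet, buffer, 0);
		if (err < 0)
			return err;
	}

	/* one wakeup per batch instead of one per packet */
	fw_iso_context_queue_flush(ctx);

	return 0;
}

Userspace gets the same effect without an interface change by passing all packets of a batch in a single FW_CDEV_IOC_QUEUE_ISO request; as the ioctl_queue_iso() hunk below shows, the kernel now flushes once after the whole packet list has been queued.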
@@ -630,6 +630,10 @@ static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
 	return -ENODEV;
 }
 
+static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
+{
+}
+
 static const struct fw_card_driver dummy_driver_template = {
 	.read_phy_reg = dummy_read_phy_reg,
 	.update_phy_reg = dummy_update_phy_reg,
@@ -641,6 +645,7 @@ static const struct fw_card_driver dummy_driver_template = {
 	.start_iso = dummy_start_iso,
 	.set_iso_channels = dummy_set_iso_channels,
 	.queue_iso = dummy_queue_iso,
+	.flush_queue_iso = dummy_flush_queue_iso,
 };
 
 void fw_card_release(struct kref *kref)
@@ -1107,6 +1107,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 		payload += u.packet.payload_length;
 		count++;
 	}
+	fw_iso_context_queue_flush(ctx);
 
 	a->size -= uptr_to_u64(p) - a->packets;
 	a->packets = uptr_to_u64(p);
@@ -185,6 +185,12 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
 }
 EXPORT_SYMBOL(fw_iso_context_queue);
 
+void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
+{
+	ctx->card->driver->flush_queue_iso(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_queue_flush);
+
 int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
@@ -97,6 +97,8 @@ struct fw_card_driver {
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);
 
+	void (*flush_queue_iso)(struct fw_iso_context *ctx);
+
 	int (*stop_iso)(struct fw_iso_context *ctx);
 };
 
@@ -881,7 +881,9 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 
 	spin_unlock_irqrestore(&dev->lock, flags);
 
-	if (retval < 0)
+	if (retval >= 0)
+		fw_iso_context_queue_flush(dev->broadcast_rcv_context);
+	else
 		fw_error("requeue failed\n");
 }
 
@@ -1192,9 +1192,6 @@ static void context_append(struct context *ctx,
 	wmb(); /* finish init of new descriptors before branch_address update */
 	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
 	ctx->prev = find_branch_descriptor(d, z);
-
-	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
-	flush_writes(ctx->ohci);
 }
 
 static void context_stop(struct context *ctx)
@@ -1348,8 +1345,12 @@ static int at_context_queue_packet(struct context *ctx,
 
 	context_append(ctx, d, z, 4 - z);
 
-	if (!ctx->running)
+	if (ctx->running) {
+		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+		flush_writes(ohci);
+	} else {
 		context_run(ctx, 0);
+	}
 
 	return 0;
 }
@@ -3121,6 +3122,15 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 	return ret;
 }
 
+static void ohci_flush_queue_iso(struct fw_iso_context *base)
+{
+	struct context *ctx =
+			&container_of(base, struct iso_context, base)->context;
+
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+	flush_writes(ctx->ohci);
+}
+
 static const struct fw_card_driver ohci_driver = {
 	.enable = ohci_enable,
 	.read_phy_reg = ohci_read_phy_reg,
@@ -3137,6 +3147,7 @@ static const struct fw_card_driver ohci_driver = {
 	.free_iso_context = ohci_free_iso_context,
 	.set_iso_channels = ohci_set_iso_channels,
 	.queue_iso = ohci_queue_iso,
+	.flush_queue_iso = ohci_flush_queue_iso,
 	.start_iso = ohci_start_iso,
 	.stop_iso = ohci_stop_iso,
 };
@@ -125,6 +125,7 @@ static void handle_iso(struct fw_iso_context *context, u32 cycle,
 
 		i = (i + 1) & (N_PACKETS - 1);
 	}
+	fw_iso_context_queue_flush(ctx->context);
 	ctx->current_packet = i;
 }
 
@@ -440,6 +440,7 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);
+void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
 int fw_iso_context_start(struct fw_iso_context *ctx,
			 int cycle, int sync, int tags);
 int fw_iso_context_stop(struct fw_iso_context *ctx);
@@ -396,6 +396,7 @@ static void out_packet_callback(struct fw_iso_context *context, u32 cycle,
 
 	for (i = 0; i < packets; ++i)
 		queue_out_packet(s, ++cycle);
+	fw_iso_context_queue_flush(s->context);
 }
 
 static int queue_initial_skip_packets(struct amdtp_out_stream *s)