USB: xhci: Represent 64-bit addresses with one u64.
There are several xHCI data structures that use two 32-bit fields to represent a 64-bit address. Since some architectures don't support 64-bit PCI writes, the fields need to be written in two 32-bit writes. The xHCI specification says that if a platform is incapable of generating 64-bit writes, software must write the low 32 bits first, then the high 32 bits. Hardware that supports 64-bit addressing will wait for the high 32-bit write before reading the revised value, and hardware that only supports 32-bit writes will ignore the high 32-bit write.

Previous xHCI code represented 64-bit addresses with two u32 values. This led to buggy code that would write the 32 bits in the wrong order, or forget to write the upper 32 bits. Change the two u32s to one u64 and create a function call to write all 64-bit addresses in the proper order. This new function could be modified in the future if all platforms support 64-bit writes.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit 8e595a5d30 (parent b11069f5f6)
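Before the diff itself: the ordering rule described in the commit message boils down to "write the low dword, then the high dword" as two 32-bit MMIO accesses. The sketch below is a minimal illustration of that pattern, not the commit's code; it assumes the kernel's readl()/writel() accessors from <linux/io.h> and the lower_32_bits()/upper_32_bits() helpers from <linux/kernel.h>. The commit's real helpers, xhci_read_64() and xhci_write_64(), appear in the xhci.h hunk at the end of the diff.

#include <linux/io.h>      /* readl(), writel() */
#include <linux/kernel.h>  /* lower_32_bits(), upper_32_bits() */

/* Illustrative only: program a 64-bit register as two 32-bit writes,
 * low dword first.  Controllers with 64-bit support latch the full value
 * on the high-dword write; 32-bit-only controllers ignore the high dword.
 */
static inline void write_64_lo_hi(u64 val, __u32 __iomem *reg)
{
	writel(lower_32_bits(val), reg);
	writel(upper_32_bits(val), reg + 1);
}

With a helper like this, a caller programs a 64-bit register (the command ring pointer, DCBAA pointer, ERST base, ERST dequeue) with one u64 value instead of two separately ordered u32 writes, which is exactly the class of ordering bug the commit removes.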
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 {
 	void *addr;
 	u32 temp;
+	u64 temp_64;

 	addr = &ir_set->irq_pending;
 	temp = xhci_readl(xhci, addr);
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 	xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
 			addr, (unsigned int)temp);

-	addr = &ir_set->erst_base[0];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_base[0] = 0x%x\n",
-			addr, (unsigned int) temp);
+	addr = &ir_set->erst_base;
+	temp_64 = xhci_read_64(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
+			addr, temp_64);

-	addr = &ir_set->erst_base[1];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_base[1] = 0x%x\n",
-			addr, (unsigned int) temp);
-
-	addr = &ir_set->erst_dequeue[0];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[0] = 0x%x\n",
-			addr, (unsigned int) temp);
-
-	addr = &ir_set->erst_dequeue[1];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[1] = 0x%x\n",
-			addr, (unsigned int) temp);
+	addr = &ir_set->erst_dequeue;
+	temp_64 = xhci_read_64(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
+			addr, temp_64);
 }

 void xhci_print_run_regs(struct xhci_hcd *xhci)
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "Link TRB:\n");
 		xhci_print_trb_offsets(xhci, trb);

-		address = trb->link.segment_ptr[0] +
-			(((u64) trb->link.segment_ptr[1]) << 32);
+		address = trb->link.segment_ptr;
 		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);

 		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		address = trb->trans_event.buffer[0] +
-			(((u64) trb->trans_event.buffer[1]) << 32);
+		address = trb->trans_event.buffer;
 		/*
 		 * FIXME: look at flags to figure out if it's an address or if
 		 * the data is directly in the buffer field.
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
 		break;
 	case TRB_TYPE(TRB_COMPLETION):
-		address = trb->event_cmd.cmd_trb[0] +
-			(((u64) trb->event_cmd.cmd_trb[1]) << 32);
+		address = trb->event_cmd.cmd_trb;
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
 				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-				(unsigned int) trb->link.segment_ptr[0],
-				(unsigned int) trb->link.segment_ptr[1],
+				lower_32_bits(trb->link.segment_ptr),
+				upper_32_bits(trb->link.segment_ptr),
 				(unsigned int) trb->link.intr_target,
 				(unsigned int) trb->link.control);
 		addr += sizeof(*trb);
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 		entry = &erst->entries[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
 				(unsigned int) addr,
-				(unsigned int) entry->seg_addr[0],
-				(unsigned int) entry->seg_addr[1],
+				lower_32_bits(entry->seg_addr),
+				upper_32_bits(entry->seg_addr),
 				(unsigned int) entry->seg_size,
 				(unsigned int) entry->rsvd);
 		addr += sizeof(*entry);
@@ -396,12 +384,13 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)

 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
 {
-	u32 val;
+	u64 val;

-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
-	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
+			lower_32_bits(val));
+	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
+			upper_32_bits(val));
 }

 void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
@@ -462,14 +451,10 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_ad
 				&ctx->ep[i].ep_info2,
 				(unsigned long long)dma, ctx->ep[i].ep_info2);
 		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
-				&ctx->ep[i].deq[0],
-				(unsigned long long)dma, ctx->ep[i].deq[0]);
-		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
-				&ctx->ep[i].deq[1],
-				(unsigned long long)dma, ctx->ep[i].deq[1]);
-		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+				&ctx->ep[i].deq,
+				(unsigned long long)dma, ctx->ep[i].deq);
+		dma += 2*field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
 				&ctx->ep[i].tx_info,
 				(unsigned long long)dma, ctx->ep[i].tx_info);
@@ -226,6 +226,7 @@ int xhci_init(struct usb_hcd *hcd)
 static void xhci_work(struct xhci_hcd *xhci)
 {
 	u32 temp;
+	u64 temp_64;

 	/*
 	 * Clear the op reg interrupt status first,
@@ -249,8 +250,8 @@ static void xhci_work(struct xhci_hcd *xhci)
 	xhci_handle_event(xhci);

 	/* Clear the event handler busy flag; the event ring should be empty. */
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	xhci_write_64(xhci, temp_64 & ~ERST_EHB, &xhci->ir_set->erst_dequeue);
 	/* Flush posted writes -- FIXME is this necessary? */
 	xhci_readl(xhci, &xhci->ir_set->irq_pending);
 }
@@ -295,6 +296,7 @@ void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
+	u64 temp_64;
 	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
 	int i, j;

@@ -311,9 +313,9 @@ void xhci_event_ring_work(unsigned long arg)
 	xhci_dbg(xhci, "Event ring:\n");
 	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	temp &= ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp_64 &= ~ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
 	xhci_dbg(xhci, "Command ring:\n");
 	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -356,6 +358,7 @@ void xhci_event_ring_work(unsigned long arg)
 int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
+	u64 temp_64;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	void (*doorbell)(struct xhci_hcd *) = NULL;

@@ -416,11 +419,9 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_dbg(xhci, "Event ring:\n");
 	xhci_debug_ring(xhci, xhci->event_ring);
 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	temp &= ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
-	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp_64 &= ~ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
@@ -888,8 +889,7 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
 		ep_ctx = &virt_dev->in_ctx->ep[i];
 		ep_ctx->ep_info = 0;
 		ep_ctx->ep_info2 = 0;
-		ep_ctx->deq[0] = 0;
-		ep_ctx->deq[1] = 0;
+		ep_ctx->deq = 0;
 		ep_ctx->tx_info = 0;
 	}
 }
@@ -1165,7 +1165,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	struct xhci_virt_device *virt_dev;
 	int ret = 0;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	u32 temp;
+	u64 temp_64;

 	if (!udev->slot_id) {
 		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -1227,18 +1227,13 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	if (ret) {
 		return ret;
 	}
-	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
-	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
-	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
 			udev->slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
-			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
-			udev->slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
-			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+			(unsigned long long)
+			xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
 	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
 			(unsigned long long)virt_dev->out_ctx_dma);
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -200,8 +200,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		return;

 	dev = xhci->devs[slot_id];
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 	if (!dev)
 		return;

@@ -265,13 +264,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	 * Point to output device context in dcbaa; skip the output control
 	 * context, which is eight 32 bit fields (or 32 bytes long)
 	 */
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+	xhci->dcbaa->dev_context_ptrs[slot_id] =
 		(u32) dev->out_ctx_dma + (32);
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
+			&xhci->dcbaa->dev_context_ptrs[slot_id],
 			(unsigned long long)dev->out_ctx_dma);
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;

 	return 1;
 fail:
@@ -360,10 +358,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	ep0_ctx->ep_info2 |= MAX_BURST(0);
 	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

-	ep0_ctx->deq[0] =
+	ep0_ctx->deq =
 		dev->ep_rings[0]->first_seg->dma;
-	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
-	ep0_ctx->deq[1] = 0;
+	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;

 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

@@ -477,8 +474,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	if (!virt_dev->new_ep_rings[ep_index])
 		return -ENOMEM;
 	ep_ring = virt_dev->new_ep_rings[ep_index];
-	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

 	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

@@ -535,8 +531,7 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,

 	ep_ctx->ep_info = 0;
 	ep_ctx->ep_info2 = 0;
-	ep_ctx->deq[0] = 0;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = 0;
 	ep_ctx->tx_info = 0;
 	/* Don't free the endpoint ring until the set interface or configuration
 	 * request succeeds.
@@ -551,10 +546,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)

 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		pci_free_consistent(pdev, size,
@@ -566,8 +559,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->event_ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");

-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
@@ -586,8 +578,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->device_pool = NULL;
 	xhci_dbg(xhci, "Freed device context pool\n");

-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
 		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
@@ -602,6 +593,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	dma_addr_t dma;
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int val, val2;
+	u64 val_64;
 	struct xhci_segment *seg;
 	u32 page_size;
 	int i;
@@ -647,8 +639,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dma = dma;
 	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

 	/*
 	 * Initialize the ring segment pool.  The ring must be a contiguous
@@ -675,14 +666,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);

 	/* Set the address in the Command Ring Control register */
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	val = (val & ~CMD_RING_ADDR_MASK) |
-		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
-	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);

 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -722,8 +711,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr[0] = seg->dma;
-		entry->seg_addr[1] = 0;
+		entry->seg_addr = seg->dma;
 		entry->seg_size = TRBS_PER_SEGMENT;
 		entry->rsvd = 0;
 		seg = seg->next;
@@ -741,11 +729,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set the segment table base address */
 	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
 			(unsigned long long)xhci->erst.erst_dma_addr);
-	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
-	val &= ERST_PTR_MASK;
-	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
-	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
@@ -237,7 +237,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,

 void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 {
-	u32 temp;
+	u64 temp;
 	dma_addr_t deq;

 	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -246,13 +246,12 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 		xhci_warn(xhci, "WARN something wrong with SW event ring "
 				"dequeue ptr.\n");
 	/* Update HC event ring dequeue pointer */
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	temp &= ERST_PTR_MASK;
 	if (!in_interrupt())
 		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
-	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
-			&xhci->ir_set->erst_dequeue[0]);
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+			&xhci->ir_set->erst_dequeue);
 }

 /* Ring the host controller doorbell after placing a command on the ring */
@@ -352,7 +351,7 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
 	if (!state->new_deq_seg)
 		BUG();
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
-	state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+	state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq;

 	state->new_deq_ptr = cur_td->last_trb;
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
@@ -594,10 +593,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		 * cancelling URBs, which might not be an error...
 		 */
 	} else {
-		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
-				"deq[1] = 0x%x.\n",
-				dev->out_ctx->ep[ep_index].deq[0],
-				dev->out_ctx->ep[ep_index].deq[1]);
+		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+				dev->out_ctx->ep[ep_index].deq);
 	}

 	ep_ring->state &= ~SET_DEQ_PENDING;
@@ -631,7 +628,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;

-	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+	cmd_dma = event->cmd_trb;
 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			xhci->cmd_ring->dequeue);
 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -794,10 +791,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		return -ENODEV;
 	}

-	event_dma = event->buffer[0];
-	if (event->buffer[1] != 0)
-		xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
-
+	event_dma = event->buffer;
 	/* This TRB should be in the TD at the head of this ring's TD list */
 	if (list_empty(&ep_ring->td_list)) {
 		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
@@ -821,10 +815,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
 	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
 			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
-	xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
-			(unsigned int) event->buffer[0]);
-	xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n",
-			(unsigned int) event->buffer[1]);
+	xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
+			lower_32_bits(event->buffer));
+	xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
+			upper_32_bits(event->buffer));
 	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
 			(unsigned int) event->transfer_len);
 	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
@@ -1343,8 +1337,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				TD_REMAINDER(urb->transfer_buffer_length - running_total) |
 				TRB_INTR_TARGET(0);
 		queue_trb(xhci, ep_ring, false,
-				(u32) addr,
-				(u32) ((u64) addr >> 32),
+				lower_32_bits(addr),
+				upper_32_bits(addr),
 				length_field,
 				/* We always want to know if the TRB was short,
 				 * or we won't get an event when it completes.
@@ -1475,8 +1469,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				TD_REMAINDER(urb->transfer_buffer_length - running_total) |
 				TRB_INTR_TARGET(0);
 		queue_trb(xhci, ep_ring, false,
-				(u32) addr,
-				(u32) ((u64) addr >> 32),
+				lower_32_bits(addr),
+				upper_32_bits(addr),
 				length_field,
 				/* We always want to know if the TRB was short,
 				 * or we won't get an event when it completes.
@@ -1637,7 +1631,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id)
 {
-	return queue_command(xhci, in_ctx_ptr, 0, 0,
+	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
 }

@@ -1645,7 +1640,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id)
 {
-	return queue_command(xhci, in_ctx_ptr, 0, 0,
+	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
 }

@@ -1677,7 +1673,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
 		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
 				deq_seg, deq_ptr);
-	return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+			upper_32_bits(addr), 0,
 			trb_slot_id | trb_ep_index | type);
 }

@@ -25,6 +25,7 @@

 #include <linux/usb.h>
 #include <linux/timer.h>
+#include <linux/kernel.h>

 #include "../core/hcd.h"
 /* Code sharing between pci-quirks and xhci hcd */
@@ -42,14 +43,6 @@
 * xHCI register interface.
 * This corresponds to the eXtensible Host Controller Interface (xHCI)
 * Revision 0.95 specification
- *
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers.  Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
 */

 /**
@@ -166,10 +159,10 @@ struct xhci_op_regs {
 	u32 reserved1;
 	u32 reserved2;
 	u32 dev_notification;
-	u32 cmd_ring[2];
+	u64 cmd_ring;
 	/* rsvd: offset 0x20-2F */
 	u32 reserved3[4];
-	u32 dcbaa_ptr[2];
+	u64 dcbaa_ptr;
 	u32 config_reg;
 	/* rsvd: offset 0x3C-3FF */
 	u32 reserved4[241];
@@ -254,7 +247,7 @@ struct xhci_op_regs {
 #define CMD_RING_RUNNING (1 << 3)
 /* bits 4:5 reserved and should be preserved */
 /* Command Ring pointer - bit mask for the lower 32 bits. */
-#define CMD_RING_ADDR_MASK (0xffffffc0)
+#define CMD_RING_RSVD_BITS (0x3f)

 /* CONFIG - Configure Register - config_reg bitmasks */
 /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
@@ -382,8 +375,8 @@ struct xhci_intr_reg {
 	u32 irq_control;
 	u32 erst_size;
 	u32 rsvd;
-	u32 erst_base[2];
-	u32 erst_dequeue[2];
+	u64 erst_base;
+	u64 erst_dequeue;
 };

 /* irq_pending bitmasks */
@@ -538,7 +531,7 @@ struct xhci_slot_ctx {
 struct xhci_ep_ctx {
 	u32 ep_info;
 	u32 ep_info2;
-	u32 deq[2];
+	u64 deq;
 	u32 tx_info;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
 	u32 reserved[3];
@@ -641,7 +634,7 @@ struct xhci_virt_device {
 */
 struct xhci_device_context_array {
 	/* 64-bit device addresses; we only write 32-bit addresses */
-	u32 dev_context_ptrs[2*MAX_HC_SLOTS];
+	u64 dev_context_ptrs[MAX_HC_SLOTS];
 	/* private xHCD pointers */
 	dma_addr_t dma;
 };
@@ -654,7 +647,7 @@ struct xhci_device_context_array {

 struct xhci_stream_ctx {
 	/* 64-bit stream ring address, cycle state, and stream type */
-	u32 stream_ring[2];
+	u64 stream_ring;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
 	u32 reserved[2];
 };
@@ -662,7 +655,7 @@ struct xhci_stream_ctx {

 struct xhci_transfer_event {
 	/* 64-bit buffer address, or immediate data */
-	u32 buffer[2];
+	u64 buffer;
 	u32 transfer_len;
 	/* This field is interpreted differently based on the type of TRB */
 	u32 flags;
@@ -744,7 +737,7 @@ struct xhci_transfer_event {

 struct xhci_link_trb {
 	/* 64-bit segment pointer*/
-	u32 segment_ptr[2];
+	u64 segment_ptr;
 	u32 intr_target;
 	u32 control;
 };
@@ -755,7 +748,7 @@ struct xhci_link_trb {
 /* Command completion event TRB */
 struct xhci_event_cmd {
 	/* Pointer to command TRB, or the value passed by the event data trb */
-	u32 cmd_trb[2];
+	u64 cmd_trb;
 	u32 status;
 	u32 flags;
 };
@@ -943,7 +936,7 @@ struct xhci_ring {

 struct xhci_erst_entry {
 	/* 64-bit event ring segment address */
-	u32 seg_addr[2];
+	u64 seg_addr;
 	u32 seg_size;
 	/* Set to zero */
 	u32 rsvd;
@@ -1079,6 +1072,38 @@ static inline void xhci_writel(struct xhci_hcd *xhci,
 	writel(val, regs);
 }

+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers.  Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+		__u64 __iomem *regs)
+{
+	__u32 __iomem *ptr = (__u32 __iomem *) regs;
+	u64 val_lo = readl(ptr);
+	u64 val_hi = readl(ptr + 1);
+	return val_lo + (val_hi << 32);
+}
+
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+		const u64 val, __u64 __iomem *regs)
+{
+	__u32 __iomem *ptr = (__u32 __iomem *) regs;
+	u32 val_lo = lower_32_bits(val);
+	u32 val_hi = upper_32_bits(val);
+
+	if (!in_interrupt())
+		xhci_dbg(xhci,
+			"`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
+			regs, (long unsigned int) val);
+	writel(val_lo, ptr);
+	writel(val_hi, ptr + 1);
+}
+
 /* xHCI debugging */
 void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);