
xHCI: store ring's type

When allocating a ring, store its type: one of the four endpoint transfer
types for endpoint rings, TYPE_STREAM for stream transfer rings, and
TYPE_COMMAND/TYPE_EVENT for the xHCI host's command and event rings.

This helps to get rid of three bool function parameters: link_trbs, isoc
and consumer.

Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Tested-by: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
commit 3b72fca09d (parent 8d3709f3dd)
Authored by Andiry Xu on 2012-03-05 17:49:32 +08:00; committed by Sarah Sharp
3 changed files with 92 additions and 75 deletions
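
In outline, the single stored type subsumes all three bools. The helper
predicates below are an illustration only; the patch open-codes these tests
at each call site, and none of these function names exist in the tree:

    /* Illustration only -- not helpers added by this patch. */
    static inline bool ring_has_link_trbs(enum xhci_ring_type type)
    {
        return type != TYPE_EVENT;  /* replaces "bool link_trbs" */
    }

    static inline bool ring_is_isoc(enum xhci_ring_type type)
    {
        return type == TYPE_ISOC;   /* replaces "bool isoc" */
    }

    static inline bool sw_is_consumer(enum xhci_ring_type type)
    {
        return type == TYPE_EVENT;  /* replaces "bool consumer" */
    }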

drivers/usb/host/xhci-mem.c

@@ -73,14 +73,14 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-        struct xhci_segment *next, bool link_trbs, bool isoc)
+        struct xhci_segment *next, enum xhci_ring_type type)
 {
     u32 val;

     if (!prev || !next)
         return;
     prev->next = next;
-    if (link_trbs) {
+    if (type != TYPE_EVENT) {
         prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
             cpu_to_le64(next->dma);

@@ -91,7 +91,8 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
         /* Always set the chain bit with 0.95 hardware */
         /* Set chain bit for isoc rings on AMD 0.96 host */
         if (xhci_link_trb_quirk(xhci) ||
-                (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
+                (type == TYPE_ISOC &&
+                 (xhci->quirks & XHCI_AMD_0x96_HOST)))
             val |= TRB_CHAIN;
         prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
     }
@@ -144,7 +145,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-        unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
+        unsigned int num_segs, enum xhci_ring_type type, gfp_t flags)
 {
     struct xhci_ring *ring;
     struct xhci_segment *prev;
@@ -154,6 +155,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
         return NULL;

     INIT_LIST_HEAD(&ring->td_list);
+    ring->type = type;
     if (num_segs == 0)
         return ring;

@@ -169,14 +171,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
         next = xhci_segment_alloc(xhci, flags);
         if (!next)
             goto fail;
-        xhci_link_segments(xhci, prev, next, link_trbs, isoc);
+        xhci_link_segments(xhci, prev, next, type);

         prev = next;
         num_segs--;
     }
-    xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
+    xhci_link_segments(xhci, prev, ring->first_seg, type);

-    if (link_trbs) {
+    /* Only event ring does not use link TRB */
+    if (type != TYPE_EVENT) {
         /* See section 4.9.2.1 and 6.4.4.1 */
         prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
             cpu_to_le32(LINK_TOGGLE);
@@ -217,16 +220,17 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-        struct xhci_ring *ring, bool isoc)
+        struct xhci_ring *ring, enum xhci_ring_type type)
 {
     struct xhci_segment *seg = ring->first_seg;
     do {
         memset(seg->trbs, 0,
                 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
         /* All endpoint rings have link TRBs */
-        xhci_link_segments(xhci, seg, seg->next, 1, isoc);
+        xhci_link_segments(xhci, seg, seg->next, type);
         seg = seg->next;
     } while (seg != ring->first_seg);
+    ring->type = type;
     xhci_initialize_ring_info(ring);
     /* td list should be empty since all URBs have been cancelled,
      * but just in case...
@@ -528,7 +532,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
      */
     for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
         stream_info->stream_rings[cur_stream] =
-            xhci_ring_alloc(xhci, 1, true, false, mem_flags);
+            xhci_ring_alloc(xhci, 1, TYPE_STREAM, mem_flags);
         cur_ring = stream_info->stream_rings[cur_stream];
         if (!cur_ring)
             goto cleanup_rings;
@@ -862,7 +866,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
     }

     /* Allocate endpoint 0 ring */
-    dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+    dev->eps[0].ring = xhci_ring_alloc(xhci, 1, TYPE_CTRL, flags);
     if (!dev->eps[0].ring)
         goto fail;

@@ -1300,11 +1304,13 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
     struct xhci_ring *ep_ring;
     unsigned int max_packet;
     unsigned int max_burst;
+    enum xhci_ring_type type;
     u32 max_esit_payload;

     ep_index = xhci_get_endpoint_index(&ep->desc);
     ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

+    type = usb_endpoint_type(&ep->desc);
     /* Set up the endpoint ring */
     /*
      * Isochronous endpoint ring needs bigger size because one isoc URB
@@ -1314,10 +1320,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
      */
     if (usb_endpoint_xfer_isoc(&ep->desc))
         virt_dev->eps[ep_index].new_ring =
-            xhci_ring_alloc(xhci, 8, true, true, mem_flags);
+            xhci_ring_alloc(xhci, 8, type, mem_flags);
     else
         virt_dev->eps[ep_index].new_ring =
-            xhci_ring_alloc(xhci, 1, true, false, mem_flags);
+            xhci_ring_alloc(xhci, 1, type, mem_flags);
     if (!virt_dev->eps[ep_index].new_ring) {
         /* Attempt to use the ring cache */
         if (virt_dev->num_rings_cached == 0)
@@ -1327,7 +1333,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
         virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
         virt_dev->num_rings_cached--;
         xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-            usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
+            type);
     }
     virt_dev->eps[ep_index].skip = false;
     ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2235,7 +2241,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         goto fail;

     /* Set up the command ring to have one segments for now. */
-    xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+    xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, flags);
     if (!xhci->cmd_ring)
         goto fail;
     xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2266,7 +2272,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
      * the event ring segment table (ERST). Section 4.9.3.
      */
     xhci_dbg(xhci, "// Allocating event ring\n");
-    xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+    xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, TYPE_EVENT,
                         flags);
     if (!xhci->event_ring)
         goto fail;
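
A subtlety in the xhci_endpoint_init() hunks above: a ring taken from the
per-device ring cache may last have served an endpoint of a different
transfer type, so xhci_reinit_cached_ring() is now told the new type and
stores it fresh. A minimal sketch of that reuse path, assuming the invented
wrapper name reuse_cached_ring():

    /* Sketch only; reuse_cached_ring() is not a function in this patch. */
    static struct xhci_ring *reuse_cached_ring(struct xhci_hcd *xhci,
            struct xhci_virt_device *virt_dev,
            enum xhci_ring_type type)
    {
        struct xhci_ring *ring;

        ring = virt_dev->ring_cache[virt_dev->num_rings_cached];
        virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
        virt_dev->num_rings_cached--;
        /* Re-links the segments, zeroes the TRBs, and overwrites
         * ring->type with the new endpoint's type. */
        xhci_reinit_cached_ring(xhci, ring, type);
        return ring;
    }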

drivers/usb/host/xhci-ring.c

@@ -143,7 +143,7 @@ static void next_trb(struct xhci_hcd *xhci,
  * See Cycle bit rules. SW is the consumer for the event ring only.
  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  */
-static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
     union xhci_trb *next = ++(ring->dequeue);
     unsigned long long addr;
@@ -153,7 +153,8 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
      * the end of an event ring segment (which doesn't have link TRBS)
      */
     while (last_trb(xhci, ring, ring->deq_seg, next)) {
-        if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+        if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
+                ring, ring->deq_seg, next)) {
             ring->cycle_state = (ring->cycle_state ? 0 : 1);
         }
         ring->deq_seg = ring->deq_seg->next;
@@ -181,7 +182,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  *          prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-        bool consumer, bool more_trbs_coming, bool isoc)
+            bool more_trbs_coming)
 {
     u32 chain;
     union xhci_trb *next;
@@ -195,35 +196,35 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
      * the end of an event ring segment (which doesn't have link TRBS)
      */
     while (last_trb(xhci, ring, ring->enq_seg, next)) {
-        if (!consumer) {
-            if (ring != xhci->event_ring) {
-                /*
-                 * If the caller doesn't plan on enqueueing more
-                 * TDs before ringing the doorbell, then we
-                 * don't want to give the link TRB to the
-                 * hardware just yet. We'll give the link TRB
-                 * back in prepare_ring() just before we enqueue
-                 * the TD at the top of the ring.
-                 */
-                if (!chain && !more_trbs_coming)
-                    break;
+        if (ring->type != TYPE_EVENT) {
+            /*
+             * If the caller doesn't plan on enqueueing more
+             * TDs before ringing the doorbell, then we
+             * don't want to give the link TRB to the
+             * hardware just yet. We'll give the link TRB
+             * back in prepare_ring() just before we enqueue
+             * the TD at the top of the ring.
+             */
+            if (!chain && !more_trbs_coming)
+                break;

-                /* If we're not dealing with 0.95 hardware or
-                 * isoc rings on AMD 0.96 host,
-                 * carry over the chain bit of the previous TRB
-                 * (which may mean the chain bit is cleared).
-                 */
-                if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+            /* If we're not dealing with 0.95 hardware or
+             * isoc rings on AMD 0.96 host,
+             * carry over the chain bit of the previous TRB
+             * (which may mean the chain bit is cleared).
+             */
+            if (!(ring->type == TYPE_ISOC &&
+                    (xhci->quirks & XHCI_AMD_0x96_HOST))
                     && !xhci_link_trb_quirk(xhci)) {
-                    next->link.control &=
-                        cpu_to_le32(~TRB_CHAIN);
-                    next->link.control |=
-                        cpu_to_le32(chain);
-                }
-                /* Give this link TRB to the hardware */
-                wmb();
-                next->link.control ^= cpu_to_le32(TRB_CYCLE);
+                next->link.control &=
+                    cpu_to_le32(~TRB_CHAIN);
+                next->link.control |=
+                    cpu_to_le32(chain);
             }
+            /* Give this link TRB to the hardware */
+            wmb();
+            next->link.control ^= cpu_to_le32(TRB_CYCLE);
             /* Toggle the cycle bit after the last ring segment. */
             if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
                 ring->cycle_state = (ring->cycle_state ? 0 : 1);
@@ -1185,7 +1186,7 @@ bandwidth_change:
         xhci->error_bitmask |= 1 << 6;
         break;
     }
-    inc_deq(xhci, xhci->cmd_ring, false);
+    inc_deq(xhci, xhci->cmd_ring);
 }

 static void handle_vendor_event(struct xhci_hcd *xhci,
@@ -1398,7 +1399,7 @@ static void handle_port_status(struct xhci_hcd *xhci,

 cleanup:
     /* Update event ring dequeue pointer before dropping the lock */
-    inc_deq(xhci, xhci->event_ring, true);
+    inc_deq(xhci, xhci->event_ring);

     /* Don't make the USB core poll the roothub if we got a bad port status
      * change event. Besides, at that point we can't tell which roothub
@@ -1593,8 +1594,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
     } else {
         /* Update ring dequeue pointer */
         while (ep_ring->dequeue != td->last_trb)
-            inc_deq(xhci, ep_ring, false);
-        inc_deq(xhci, ep_ring, false);
+            inc_deq(xhci, ep_ring);
+        inc_deq(xhci, ep_ring);
     }

 td_cleanup:
@@ -1842,8 +1843,8 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,

     /* Update ring dequeue pointer */
     while (ep_ring->dequeue != td->last_trb)
-        inc_deq(xhci, ep_ring, false);
-    inc_deq(xhci, ep_ring, false);
+        inc_deq(xhci, ep_ring);
+    inc_deq(xhci, ep_ring);

     return finish_td(xhci, td, NULL, event, ep, status, true);
 }
@@ -2230,7 +2231,7 @@ cleanup:
          * Will roll back to continue process missed tds.
          */
         if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
-            inc_deq(xhci, xhci->event_ring, true);
+            inc_deq(xhci, xhci->event_ring);
         }

         if (ret) {
@@ -2345,7 +2346,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)

     if (update_ptrs)
         /* Update SW event ring dequeue pointer */
-        inc_deq(xhci, xhci->event_ring, true);
+        inc_deq(xhci, xhci->event_ring);

     /* Are there more items on the event ring? Caller will call us again to
      * check.
@@ -2461,7 +2462,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  *          prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-        bool consumer, bool more_trbs_coming, bool isoc,
+        bool more_trbs_coming,
         u32 field1, u32 field2, u32 field3, u32 field4)
 {
     struct xhci_generic_trb *trb;
@@ -2471,7 +2472,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
     trb->field[1] = cpu_to_le32(field2);
     trb->field[2] = cpu_to_le32(field3);
     trb->field[3] = cpu_to_le32(field4);
-    inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
+    inc_enq(xhci, ring, more_trbs_coming);
 }

 /*
@@ -2479,7 +2480,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-        u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
+        u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
 {
     /* Make sure the endpoint has been added to xHC schedule */
     switch (ep_state) {
@@ -2524,8 +2525,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
         /* If we're not dealing with 0.95 hardware or isoc rings
          * on AMD 0.96 host, clear the chain bit.
          */
-        if (!xhci_link_trb_quirk(xhci) && !(isoc &&
-                    (xhci->quirks & XHCI_AMD_0x96_HOST)))
+        if (!xhci_link_trb_quirk(xhci) &&
+                !(ring->type == TYPE_ISOC &&
+                 (xhci->quirks & XHCI_AMD_0x96_HOST)))
             next->link.control &= cpu_to_le32(~TRB_CHAIN);
         else
             next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2553,7 +2555,6 @@ static int prepare_transfer(struct xhci_hcd *xhci,
         unsigned int num_trbs,
         struct urb *urb,
         unsigned int td_index,
-        bool isoc,
         gfp_t mem_flags)
 {
     int ret;
@@ -2571,7 +2572,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,

     ret = prepare_ring(xhci, ep_ring,
                le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-               num_trbs, isoc, mem_flags);
+               num_trbs, mem_flags);
     if (ret)
         return ret;
@@ -2781,7 +2782,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,

     trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
             ep_index, urb->stream_id,
-            num_trbs, urb, 0, false, mem_flags);
+            num_trbs, urb, 0, mem_flags);
     if (trb_buff_len < 0)
         return trb_buff_len;
@@ -2869,7 +2870,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
             more_trbs_coming = true;
         else
             more_trbs_coming = false;
-        queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
+        queue_trb(xhci, ep_ring, more_trbs_coming,
                 lower_32_bits(addr),
                 upper_32_bits(addr),
                 length_field,
@@ -2951,7 +2952,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,

     ret = prepare_transfer(xhci, xhci->devs[slot_id],
             ep_index, urb->stream_id,
-            num_trbs, urb, 0, false, mem_flags);
+            num_trbs, urb, 0, mem_flags);
     if (ret < 0)
         return ret;
@@ -3023,7 +3024,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
             more_trbs_coming = true;
         else
             more_trbs_coming = false;
-        queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
+        queue_trb(xhci, ep_ring, more_trbs_coming,
                 lower_32_bits(addr),
                 upper_32_bits(addr),
                 length_field,
@@ -3080,7 +3081,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         num_trbs++;
     ret = prepare_transfer(xhci, xhci->devs[slot_id],
             ep_index, urb->stream_id,
-            num_trbs, urb, 0, false, mem_flags);
+            num_trbs, urb, 0, mem_flags);
     if (ret < 0)
         return ret;
@@ -3113,7 +3114,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         }
     }

-    queue_trb(xhci, ep_ring, false, true, false,
+    queue_trb(xhci, ep_ring, true,
           setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
           le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
           TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3133,7 +3134,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     if (urb->transfer_buffer_length > 0) {
         if (setup->bRequestType & USB_DIR_IN)
             field |= TRB_DIR_IN;
-        queue_trb(xhci, ep_ring, false, true, false,
+        queue_trb(xhci, ep_ring, true,
                 lower_32_bits(urb->transfer_dma),
                 upper_32_bits(urb->transfer_dma),
                 length_field,
@@ -3149,7 +3150,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         field = 0;
     else
         field = TRB_DIR_IN;
-    queue_trb(xhci, ep_ring, false, false, false,
+    queue_trb(xhci, ep_ring, false,
             0,
             0,
             TRB_INTR_TARGET(0),
@@ -3289,8 +3290,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

         ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-                urb->stream_id, trbs_per_td, urb, i, true,
-                mem_flags);
+                urb->stream_id, trbs_per_td, urb, i, mem_flags);
         if (ret < 0) {
             if (i == 0)
                 return ret;
@@ -3360,7 +3360,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 remainder |
                 TRB_INTR_TARGET(0);

-        queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
+        queue_trb(xhci, ep_ring, more_trbs_coming,
                 lower_32_bits(addr),
                 upper_32_bits(addr),
                 length_field,
@@ -3443,7 +3443,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
      * Do not insert any td of the urb to the ring if the check failed.
      */
     ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-               num_trbs, true, mem_flags);
+               num_trbs, mem_flags);
     if (ret)
         return ret;
@@ -3502,7 +3502,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
         reserved_trbs++;

     ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-            reserved_trbs, false, GFP_ATOMIC);
+            reserved_trbs, GFP_ATOMIC);
     if (ret < 0) {
         xhci_err(xhci, "ERR: No room for command on command ring\n");
         if (command_must_succeed)
@@ -3510,8 +3510,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                 "unfailable commands failed.\n");
         return ret;
     }
-    queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
-            field3, field4 | xhci->cmd_ring->cycle_state);
+    queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+            field4 | xhci->cmd_ring->cycle_state);
     return 0;
 }
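
Both inc_enq() and prepare_ring() above apply the same rule when handing a
link TRB to the hardware: 0.95 hosts always want the chain bit set on link
TRBs, and AMD 0.96 hosts want it on isoc rings only. Distilled into a single
predicate for illustration (the patch keeps the test open-coded in both
places; this helper name is invented):

    /* Illustration only -- not a helper introduced by this patch. */
    static bool link_trb_keeps_chain(struct xhci_hcd *xhci,
            struct xhci_ring *ring)
    {
        return xhci_link_trb_quirk(xhci) ||
            (ring->type == TYPE_ISOC &&
             (xhci->quirks & XHCI_AMD_0x96_HOST));
    }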

drivers/usb/host/xhci.h

@@ -1250,6 +1250,16 @@ struct xhci_dequeue_state {
     int new_cycle_state;
 };

+enum xhci_ring_type {
+    TYPE_CTRL = 0,
+    TYPE_ISOC,
+    TYPE_BULK,
+    TYPE_INTR,
+    TYPE_STREAM,
+    TYPE_COMMAND,
+    TYPE_EVENT,
+};
+
 struct xhci_ring {
     struct xhci_segment *first_seg;
     union xhci_trb      *enqueue;
@@ -1266,6 +1276,7 @@ struct xhci_ring {
      */
     u32                 cycle_state;
     unsigned int        stream_id;
+    enum xhci_ring_type type;
     bool                last_td_was_short;
 };
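
Note the ordering of the first four enumerators: TYPE_CTRL through TYPE_INTR
line up with the USB_ENDPOINT_XFER_* constants (0 through 3) from
include/linux/usb/ch9.h, which is what lets xhci_endpoint_init() assign
"type = usb_endpoint_type(&ep->desc)" directly. A compile-time check of that
assumption could look like this (a sketch; the patch itself adds no such
check):

    /* Sketch only: assert the enum mirrors usb_endpoint_type()'s values. */
    static inline void xhci_check_ring_type_mapping(void)
    {
        BUILD_BUG_ON(TYPE_CTRL != USB_ENDPOINT_XFER_CONTROL);   /* 0 */
        BUILD_BUG_ON(TYPE_ISOC != USB_ENDPOINT_XFER_ISOC);      /* 1 */
        BUILD_BUG_ON(TYPE_BULK != USB_ENDPOINT_XFER_BULK);      /* 2 */
        BUILD_BUG_ON(TYPE_INTR != USB_ENDPOINT_XFER_INT);       /* 3 */
    }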