Commit 3b72fca0 authored by Andiry Xu, committed by Sarah Sharp

xHCI: store ring's type

When allocating a ring, store its type: one of the four endpoint transfer
types for endpoint rings, TYPE_STREAM for stream transfers, and
TYPE_COMMAND/TYPE_EVENT for the xHCI host's command and event rings.

Storing the type lets us get rid of three bool function parameters:
link_trbs, isoc and consumer.
Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Tested-by: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
parent 8d3709f3
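
The change is mechanical: every (link_trbs, isoc, consumer) combination the
old callers passed can be recomputed from the one stored type. A minimal
standalone sketch of that equivalence, with hypothetical helper names (the
driver itself open-codes these tests, as the diff below shows):

	#include <assert.h>
	#include <stdbool.h>

	/* Mirrors the enum this commit adds to xhci.h. */
	enum xhci_ring_type {
		TYPE_CTRL = 0,
		TYPE_ISOC,
		TYPE_BULK,
		TYPE_INTR,
		TYPE_STREAM,
		TYPE_COMMAND,
		TYPE_EVENT,
	};

	/* Hypothetical helpers, one per removed bool: only the event ring
	 * has no link TRBs, only the event ring is consumed by software,
	 * and only isoc rings need the AMD 0.96 chain-bit quirk. */
	static bool ring_has_link_trbs(enum xhci_ring_type t) { return t != TYPE_EVENT; }
	static bool ring_is_consumer(enum xhci_ring_type t)   { return t == TYPE_EVENT; }
	static bool ring_is_isoc(enum xhci_ring_type t)       { return t == TYPE_ISOC; }

	int main(void)
	{
		/* The old per-call-site flag triples fall out of the type. */
		assert( ring_has_link_trbs(TYPE_COMMAND) && !ring_is_isoc(TYPE_COMMAND));
		assert(!ring_has_link_trbs(TYPE_EVENT)   &&  ring_is_consumer(TYPE_EVENT));
		assert( ring_has_link_trbs(TYPE_ISOC)    &&  ring_is_isoc(TYPE_ISOC));
		return 0;
	}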
@@ -73,14 +73,14 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-		struct xhci_segment *next, bool link_trbs, bool isoc)
+		struct xhci_segment *next, enum xhci_ring_type type)
 {
 	u32 val;
 
 	if (!prev || !next)
 		return;
 	prev->next = next;
-	if (link_trbs) {
+	if (type != TYPE_EVENT) {
 		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
 			cpu_to_le64(next->dma);
@@ -91,7 +91,8 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		/* Always set the chain bit with 0.95 hardware */
 		/* Set chain bit for isoc rings on AMD 0.96 host */
 		if (xhci_link_trb_quirk(xhci) ||
-				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
+				(type == TYPE_ISOC &&
+				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
 			val |= TRB_CHAIN;
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
@@ -144,7 +145,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
+		unsigned int num_segs, enum xhci_ring_type type, gfp_t flags)
 {
 	struct xhci_ring	*ring;
 	struct xhci_segment	*prev;
@@ -154,6 +155,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return NULL;
 
 	INIT_LIST_HEAD(&ring->td_list);
+	ring->type = type;
 	if (num_segs == 0)
 		return ring;
@@ -169,14 +171,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		next = xhci_segment_alloc(xhci, flags);
 		if (!next)
 			goto fail;
-		xhci_link_segments(xhci, prev, next, link_trbs, isoc);
+		xhci_link_segments(xhci, prev, next, type);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
+	xhci_link_segments(xhci, prev, ring->first_seg, type);
 
-	if (link_trbs) {
+	/* Only event ring does not use link TRB */
+	if (type != TYPE_EVENT) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
 			cpu_to_le32(LINK_TOGGLE);
@@ -217,16 +220,17 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-			struct xhci_ring *ring, bool isoc)
+			struct xhci_ring *ring, enum xhci_ring_type type)
 {
 	struct xhci_segment	*seg = ring->first_seg;
 	do {
 		memset(seg->trbs, 0,
 				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
 		/* All endpoint rings have link TRBs */
-		xhci_link_segments(xhci, seg, seg->next, 1, isoc);
+		xhci_link_segments(xhci, seg, seg->next, type);
 		seg = seg->next;
 	} while (seg != ring->first_seg);
+	ring->type = type;
 	xhci_initialize_ring_info(ring);
 	/* td list should be empty since all URBs have been cancelled,
 	 * but just in case...
@@ -528,7 +532,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 */
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
+			xhci_ring_alloc(xhci, 1, TYPE_STREAM, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -862,7 +866,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, TYPE_CTRL, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
@@ -1300,11 +1304,13 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	struct xhci_ring *ep_ring;
 	unsigned int max_packet;
 	unsigned int max_burst;
+	enum xhci_ring_type type;
 	u32 max_esit_payload;
 
 	ep_index = xhci_get_endpoint_index(&ep->desc);
 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
+	type = usb_endpoint_type(&ep->desc);
 	/* Set up the endpoint ring */
 	/*
 	 * Isochronous endpoint ring needs bigger size because one isoc URB
@@ -1314,10 +1320,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 */
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 8, true, true, mem_flags);
+			xhci_ring_alloc(xhci, 8, type, mem_flags);
 	else
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
+			xhci_ring_alloc(xhci, 1, type, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring) {
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
@@ -1327,7 +1333,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
 		virt_dev->num_rings_cached--;
 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
+			type);
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2235,7 +2241,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2266,7 +2272,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
 	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, TYPE_EVENT,
 			flags);
 	if (!xhci->event_ring)
 		goto fail;
......
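
A note on the xhci_endpoint_init() hunk above: type is assigned straight from
usb_endpoint_type(&ep->desc), which returns the descriptor's
USB_ENDPOINT_XFER_* value (0 through 3) from bmAttributes. That only works
because the first four enumerators of xhci_ring_type are declared in the same
order. A small sketch of the correspondence (the USB_ENDPOINT_XFER_* values
below are copied from ch9.h; the assertions are illustrative, not in the
driver):

	#include <assert.h>

	/* From ch9.h: bmAttributes transfer-type field values. */
	#define USB_ENDPOINT_XFER_CONTROL	0
	#define USB_ENDPOINT_XFER_ISOC		1
	#define USB_ENDPOINT_XFER_BULK		2
	#define USB_ENDPOINT_XFER_INT		3

	enum xhci_ring_type {
		TYPE_CTRL = 0,	/* == USB_ENDPOINT_XFER_CONTROL */
		TYPE_ISOC,	/* == USB_ENDPOINT_XFER_ISOC */
		TYPE_BULK,	/* == USB_ENDPOINT_XFER_BULK */
		TYPE_INTR,	/* == USB_ENDPOINT_XFER_INT */
		TYPE_STREAM,
		TYPE_COMMAND,
		TYPE_EVENT,
	};

	int main(void)
	{
		/* "type = usb_endpoint_type(&ep->desc)" is only valid
		 * while this ordering holds. */
		assert(TYPE_CTRL == USB_ENDPOINT_XFER_CONTROL);
		assert(TYPE_ISOC == USB_ENDPOINT_XFER_ISOC);
		assert(TYPE_BULK == USB_ENDPOINT_XFER_BULK);
		assert(TYPE_INTR == USB_ENDPOINT_XFER_INT);
		return 0;
	}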
@@ -143,7 +143,7 @@ static void next_trb(struct xhci_hcd *xhci,
  * See Cycle bit rules. SW is the consumer for the event ring only.
  * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
  */
-static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
 	union xhci_trb *next = ++(ring->dequeue);
 	unsigned long long addr;
@@ -153,7 +153,8 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 	 * the end of an event ring segment (which doesn't have link TRBS)
 	 */
 	while (last_trb(xhci, ring, ring->deq_seg, next)) {
-		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+		if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
+				ring, ring->deq_seg, next)) {
 			ring->cycle_state = (ring->cycle_state ? 0 : 1);
 		}
 		ring->deq_seg = ring->deq_seg->next;
@@ -181,7 +182,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
  * prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming, bool isoc)
+		bool more_trbs_coming)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -195,8 +196,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	 * the end of an event ring segment (which doesn't have link TRBS)
 	 */
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
-		if (!consumer) {
-			if (ring != xhci->event_ring) {
+		if (ring->type != TYPE_EVENT) {
 			/*
 			 * If the caller doesn't plan on enqueueing more
 			 * TDs before ringing the doorbell, then we
@@ -213,7 +213,8 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			 * carry over the chain bit of the previous TRB
 			 * (which may mean the chain bit is cleared).
 			 */
-			if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+			if (!(ring->type == TYPE_ISOC &&
+				(xhci->quirks & XHCI_AMD_0x96_HOST))
 					&& !xhci_link_trb_quirk(xhci)) {
 				next->link.control &=
 					cpu_to_le32(~TRB_CHAIN);
@@ -223,7 +224,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			/* Give this link TRB to the hardware */
 			wmb();
 			next->link.control ^= cpu_to_le32(TRB_CYCLE);
-		}
 		/* Toggle the cycle bit after the last ring segment. */
 		if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
 			ring->cycle_state = (ring->cycle_state ? 0 : 1);
@@ -1185,7 +1186,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		xhci->error_bitmask |= 1 << 6;
 		break;
 	}
-	inc_deq(xhci, xhci->cmd_ring, false);
+	inc_deq(xhci, xhci->cmd_ring);
 }
 
 static void handle_vendor_event(struct xhci_hcd *xhci,
@@ -1398,7 +1399,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 cleanup:
 	/* Update event ring dequeue pointer before dropping the lock */
-	inc_deq(xhci, xhci->event_ring, true);
+	inc_deq(xhci, xhci->event_ring);
 
 	/* Don't make the USB core poll the roothub if we got a bad port status
 	 * change event.  Besides, at that point we can't tell which roothub
@@ -1593,8 +1594,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	} else {
 		/* Update ring dequeue pointer */
 		while (ep_ring->dequeue != td->last_trb)
-			inc_deq(xhci, ep_ring, false);
-		inc_deq(xhci, ep_ring, false);
+			inc_deq(xhci, ep_ring);
+		inc_deq(xhci, ep_ring);
 	}
 
 td_cleanup:
@@ -1842,8 +1843,8 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	/* Update ring dequeue pointer */
 	while (ep_ring->dequeue != td->last_trb)
-		inc_deq(xhci, ep_ring, false);
-	inc_deq(xhci, ep_ring, false);
+		inc_deq(xhci, ep_ring);
+	inc_deq(xhci, ep_ring);
 
 	return finish_td(xhci, td, NULL, event, ep, status, true);
 }
@@ -2230,7 +2231,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		 * Will roll back to continue process missed tds.
 		 */
 		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
-			inc_deq(xhci, xhci->event_ring, true);
+			inc_deq(xhci, xhci->event_ring);
 		}
 
 		if (ret) {
@@ -2345,7 +2346,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	if (update_ptrs)
 		/* Update SW event ring dequeue pointer */
-		inc_deq(xhci, xhci->event_ring, true);
+		inc_deq(xhci, xhci->event_ring);
 
 	/* Are there more items on the event ring?  Caller will call us again to
 	 * check.
@@ -2461,7 +2462,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  * prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming, bool isoc,
+		bool more_trbs_coming,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -2471,7 +2472,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = cpu_to_le32(field2);
 	trb->field[2] = cpu_to_le32(field3);
 	trb->field[3] = cpu_to_le32(field4);
-	inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
+	inc_enq(xhci, ring, more_trbs_coming);
 }
 
 /*
@@ -2479,7 +2480,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
+		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
 	switch (ep_state) {
@@ -2524,7 +2525,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		/* If we're not dealing with 0.95 hardware or isoc rings
 		 * on AMD 0.96 host, clear the chain bit.
 		 */
-		if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+		if (!xhci_link_trb_quirk(xhci) &&
+				!(ring->type == TYPE_ISOC &&
 					(xhci->quirks & XHCI_AMD_0x96_HOST)))
 			next->link.control &= cpu_to_le32(~TRB_CHAIN);
 		else
@@ -2553,7 +2555,6 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int num_trbs,
 		struct urb *urb,
 		unsigned int td_index,
-		bool isoc,
 		gfp_t mem_flags)
 {
 	int ret;
@@ -2571,7 +2572,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	ret = prepare_ring(xhci, ep_ring,
 			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			num_trbs, isoc, mem_flags);
+			num_trbs, mem_flags);
 	if (ret)
 		return ret;
@@ -2781,7 +2782,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, false, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
@@ -2869,7 +2870,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
+		queue_trb(xhci, ep_ring, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -2951,7 +2952,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, false, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
@@ -3023,7 +3024,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
+		queue_trb(xhci, ep_ring, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3080,7 +3081,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, false, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
@@ -3113,7 +3114,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		}
 	}
 
-	queue_trb(xhci, ep_ring, false, true, false,
+	queue_trb(xhci, ep_ring, true,
 		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
 		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
 		  TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3133,7 +3134,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false, true, false,
+		queue_trb(xhci, ep_ring, true,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -3149,7 +3150,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false, false, false,
+	queue_trb(xhci, ep_ring, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
@@ -3289,8 +3290,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-				urb->stream_id, trbs_per_td, urb, i, true,
-				mem_flags);
+				urb->stream_id, trbs_per_td, urb, i, mem_flags);
 		if (ret < 0) {
 			if (i == 0)
 				return ret;
@@ -3360,7 +3360,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				remainder |
 				TRB_INTR_TARGET(0);
 
-		queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
+		queue_trb(xhci, ep_ring, more_trbs_coming,
 			lower_32_bits(addr),
 			upper_32_bits(addr),
 			length_field,
@@ -3443,7 +3443,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
 	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, true, mem_flags);
+			   num_trbs, mem_flags);
 	if (ret)
 		return ret;
@@ -3502,7 +3502,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 		reserved_trbs++;
 
 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-			reserved_trbs, false, GFP_ATOMIC);
+			reserved_trbs, GFP_ATOMIC);
 	if (ret < 0) {
 		xhci_err(xhci, "ERR: No room for command on command ring\n");
 		if (command_must_succeed)
@@ -3510,8 +3510,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 				"unfailable commands failed.\n");
 		return ret;
 	}
-	queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
-			field3, field4 | xhci->cmd_ring->cycle_state);
+	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+			field4 | xhci->cmd_ring->cycle_state);
 	return 0;
 }
......
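
A note on the inc_deq()/inc_enq() hunks above: software is the consumer only
of the event ring, so the old consumer flag is exactly
(ring->type == TYPE_EVENT), and the consumer-side cycle-state toggle on
wrap-around now keys off the stored type. A toy consumer-side sketch of that
wrap-and-toggle behaviour, assuming a single flat segment rather than the
driver's linked segments (all names here are hypothetical):

	#include <stdio.h>

	#define TRBS 4	/* toy segment size, not TRBS_PER_SEGMENT */

	struct toy_ring {
		unsigned int dequeue;		/* index into the TRB array */
		unsigned int cycle_state;	/* cycle bit SW expects next */
	};

	/* When the dequeue pointer wraps past the last TRB, the consumer
	 * flips its expected cycle bit so it can tell freshly written
	 * TRBs from stale ones left over from the previous pass. */
	static void toy_inc_deq(struct toy_ring *ring)
	{
		if (++ring->dequeue == TRBS) {
			ring->dequeue = 0;
			ring->cycle_state ^= 1;
		}
	}

	int main(void)
	{
		struct toy_ring ring = { .dequeue = 0, .cycle_state = 1 };

		for (int i = 0; i < 5; i++)
			toy_inc_deq(&ring);
		/* One full wrap plus one step: deq=1, cycle flipped to 0. */
		printf("deq=%u cycle=%u\n", ring.dequeue, ring.cycle_state);
		return 0;
	}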
@@ -1250,6 +1250,16 @@ struct xhci_dequeue_state {
 	int new_cycle_state;
 };
 
+enum xhci_ring_type {
+	TYPE_CTRL = 0,
+	TYPE_ISOC,
+	TYPE_BULK,
+	TYPE_INTR,
+	TYPE_STREAM,
+	TYPE_COMMAND,
+	TYPE_EVENT,
+};
+
 struct xhci_ring {
 	struct xhci_segment	*first_seg;
 	union xhci_trb		*enqueue;
@@ -1266,6 +1276,7 @@ struct xhci_ring {
 	 */
 	u32			cycle_state;
 	unsigned int		stream_id;
+	enum xhci_ring_type	type;
 	bool			last_td_was_short;
 };
......