Commit 55f6153d authored by Mathias Nyman, committed by Greg Kroah-Hartman

xhci: remove extra loop in interrupt context

When finishing a TD we walk the endpoint's dequeue TRB pointer
until it matches the last TRB of the TD.

TDs can contain over 100 TRBs, meaning we call a function over 100 times,
doing a few comparisons and incrementing a couple of values on each of these
calls, all in interrupt context.

This can all be avoided by adding a pointer to the last TRB segment and
the number of TRBs in the TD. So instead of walking through each TRB, just
set the new dequeue segment, dequeue pointer, and number of free TRBs directly.

Getting rid of the while loop also reduces the risk of getting stuck in an
infinite loop in the interrupt handler, as the loop relied on valid, matching
dequeue and last_trb values to break.
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20210129130044.206855-12-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 296fcdab
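
For illustration, a minimal self-contained C sketch of the idea, using hypothetical stand-in types rather than the kernel's real xhci structures and assuming a single unsegmented ring (the real patch also records the last TRB's segment in td->last_trb_seg so that ep_ring->deq_seg can be set in the same step):

/*
 * Illustrative sketch only; hypothetical types, not the kernel's xhci ones.
 * A single flat ring is assumed, so segment and wrap-around handling is
 * left out.
 */
struct trb { unsigned int field[4]; };

struct ring {
        struct trb      *dequeue;       /* consumer position */
        unsigned int    num_trbs_free;  /* free-slot accounting */
};

struct td {
        struct trb      *last_trb;      /* final TRB of the transfer */
        unsigned int    num_trbs;       /* TRB count recorded at enqueue time */
};

/* stand-in for inc_deq(): advance one TRB and count it as freed */
static void inc_deq(struct ring *r)
{
        r->dequeue++;
        r->num_trbs_free++;
}

/* old approach: walk TRB by TRB until the TD's last TRB is reached */
static void finish_td_old(struct ring *r, struct td *td)
{
        while (r->dequeue != td->last_trb)
                inc_deq(r);
        inc_deq(r);             /* step past the last TRB */
}

/* new approach: jump straight to the last TRB, then fix up the counter */
static void finish_td_new(struct ring *r, struct td *td)
{
        r->dequeue = td->last_trb;
        r->num_trbs_free += td->num_trbs - 1;
        inc_deq(r);             /* step past the last TRB */
}

Both paths leave the dequeue pointer just past td->last_trb and grow num_trbs_free by the same amount; the new one does it in constant time instead of one inc_deq() call per TRB.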
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2041,8 +2041,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                                             EP_HARD_RESET);
         } else {
                 /* Update ring dequeue pointer */
-                while (ep_ring->dequeue != td->last_trb)
-                        inc_deq(xhci, ep_ring);
+                ep_ring->dequeue = td->last_trb;
+                ep_ring->deq_seg = td->last_trb_seg;
+                ep_ring->num_trbs_free += td->num_trbs - 1;
                 inc_deq(xhci, ep_ring);
         }
 
@@ -2263,8 +2264,9 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
         frame->actual_length = 0;
 
         /* Update ring dequeue pointer */
-        while (ep->ring->dequeue != td->last_trb)
-                inc_deq(xhci, ep->ring);
+        ep->ring->dequeue = td->last_trb;
+        ep->ring->deq_seg = td->last_trb_seg;
+        ep->ring->num_trbs_free += td->num_trbs - 1;
         inc_deq(xhci, ep->ring);
 
         return xhci_td_cleanup(xhci, td, ep->ring, status);
@@ -3420,7 +3422,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         field |= TRB_IOC;
                         more_trbs_coming = false;
                         td->last_trb = ring->enqueue;
-
+                        td->last_trb_seg = ring->enq_seg;
                         if (xhci_urb_suitable_for_idt(urb)) {
                                 memcpy(&send_addr, urb->transfer_buffer,
                                        trb_buff_len);
@@ -3446,7 +3448,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                 upper_32_bits(send_addr),
                                 length_field,
                                 field);
-
+                td->num_trbs++;
                 addr += trb_buff_len;
                 sent_len = trb_buff_len;
 
@@ -3470,8 +3472,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                        ep_index, urb->stream_id,
                                        1, urb, 1, mem_flags);
                 urb_priv->td[1].last_trb = ring->enqueue;
+                urb_priv->td[1].last_trb_seg = ring->enq_seg;
                 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
                 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
+                urb_priv->td[1].num_trbs++;
         }
 
         check_trb_math(urb, enqd_len);
@@ -3522,6 +3526,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
         urb_priv = urb->hcpriv;
         td = &urb_priv->td[0];
+        td->num_trbs = num_trbs;
 
         /*
          * Don't give the first TRB to the hardware (by toggling the cycle bit)
@@ -3594,6 +3599,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
         /* Save the DMA address of the last TRB in the TD */
         td->last_trb = ep_ring->enqueue;
+        td->last_trb_seg = ep_ring->enq_seg;
 
         /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
         /* If the device sent data, the status stage is an OUT transfer */
@@ -3838,7 +3844,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         goto cleanup;
                 }
                 td = &urb_priv->td[i];
-
+                td->num_trbs = trbs_per_td;
                 /* use SIA as default, if frame id is used overwrite it */
                 sia_frame_id = TRB_SIA;
                 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
@@ -3881,6 +3887,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 } else {
                         more_trbs_coming = false;
                         td->last_trb = ep_ring->enqueue;
+                        td->last_trb_seg = ep_ring->enq_seg;
                         field |= TRB_IOC;
                         if (trb_block_event_intr(xhci, num_tds, i))
                                 field |= TRB_BEI;
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1546,9 +1546,11 @@ struct xhci_td {
         struct xhci_segment     *start_seg;
         union xhci_trb          *first_trb;
         union xhci_trb          *last_trb;
+        struct xhci_segment     *last_trb_seg;
         struct xhci_segment     *bounce_seg;
         /* actual_length of the URB has already been set */
         bool                    urb_length_set;
+        unsigned int            num_trbs;
 };
 
 /* xHCI command default timeout value */
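
Taken together, the new xhci_td fields are written once on the enqueue side and consumed once on the completion side. A rough summary of the lifecycle shown in the hunks above (variable names simplified):

/*
 * Enqueue side (xhci_queue_bulk_tx / xhci_queue_ctrl_tx / xhci_queue_isoc_tx):
 *   td->num_trbs      - how many TRBs were queued for this TD
 *   td->last_trb      - last TRB of the TD (already existed)
 *   td->last_trb_seg  - segment holding that last TRB (new)
 *
 * Completion side (finish_td / skip_isoc_td):
 *   ring->dequeue        = td->last_trb;
 *   ring->deq_seg        = td->last_trb_seg;
 *   ring->num_trbs_free += td->num_trbs - 1;
 *   inc_deq(xhci, ring);     which steps past the last TRB
 */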