Commit 86065c27 authored by Mathias Nyman, committed by Greg Kroah-Hartman

xhci: don't rely on precalculated value of needed trbs in the enqueue loop

Queue TRBs until all payload data in the URB is transferred.

The actual number of TRBs may need to differ from the pre-calculated
number once the packet alignment restrictions for TD fragments in
xHCI specification section 4.11.7.1 are taken into account.
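
As background, a normal TRB must not cross a 64KB boundary, so the length of
every queued TRB depends on the current DMA address and on how much of the
contiguous block remains. Below is a minimal user-space C sketch of that split;
TRB_MAX_BUFF_SIZE and TRB_BUFF_LEN_UP_TO_BOUNDARY mirror the driver's macros,
while the address and length are made-up illustration values, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Illustrative copies of the driver's limits: a normal TRB carries at
 * most 64KB and must not cross a 64KB address boundary. */
#define TRB_MAX_BUFF_SIZE		(1u << 16)
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(a)	\
	(TRB_MAX_BUFF_SIZE - ((uint32_t)(a) & (TRB_MAX_BUFF_SIZE - 1)))

int main(void)
{
	/* Made-up example: a 64KB block that starts 0x100 bytes past a
	 * boundary needs two TRBs, because the first must stop at the
	 * boundary. */
	uint64_t addr = 0x10000100ULL;
	uint32_t block_len = 0x10000;
	unsigned int trbs = 0;

	while (block_len) {
		uint32_t len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);

		if (len > block_len)
			len = block_len;
		printf("TRB %u: addr=%#llx len=%#x\n", ++trbs,
		       (unsigned long long)addr, (unsigned int)len);
		addr += len;
		block_len -= len;
	}
	return 0;
}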

The long-term plan is to get rid of calculating the needed TRBs in advance
altogether; it is an unnecessary extra walk through the scatterlist.

This change also allows some bulk queue function simplifications.
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 124c3937
@@ -3127,9 +3127,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct scatterlist *sg = NULL;
 	bool more_trbs_coming = true;
 	bool need_zero_pkt = false;
-	unsigned int num_trbs, last_trb_num, i;
+	bool first_trb = true;
+	unsigned int num_trbs;
 	unsigned int start_cycle, num_sgs = 0;
-	unsigned int running_total, block_len, trb_buff_len, full_len;
+	unsigned int enqd_len, block_len, trb_buff_len, full_len;
 	int ret;
 	u32 field, length_field, remainder;
 	u64 addr;
@@ -3138,14 +3139,19 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (!ring)
 		return -EINVAL;
 
+	full_len = urb->transfer_buffer_length;
 	/* If we have scatter/gather list, we use it. */
 	if (urb->num_sgs) {
 		num_sgs = urb->num_mapped_sgs;
 		sg = urb->sg;
+		addr = (u64) sg_dma_address(sg);
+		block_len = sg_dma_len(sg);
 		num_trbs = count_sg_trbs_needed(urb);
-	} else
+	} else {
 		num_trbs = count_trbs_needed(urb);
-
+		addr = (u64) urb->transfer_dma;
+		block_len = full_len;
+	}
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
 			num_trbs, urb, 0, mem_flags);
@@ -3154,8 +3160,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	urb_priv = urb->hcpriv;
 
-	last_trb_num = num_trbs - 1;
-
 	/* Deal with URB_ZERO_PACKET - need one more td/trb */
 	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
 		need_zero_pkt = true;
@@ -3170,40 +3174,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	start_trb = &ring->enqueue->generic;
 	start_cycle = ring->cycle_state;
 
-	full_len = urb->transfer_buffer_length;
-	running_total = 0;
-	block_len = 0;
-
 	/* Queue the TRBs, even if they are zero-length */
-	for (i = 0; i < num_trbs; i++) {
+	for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
 		field = TRB_TYPE(TRB_NORMAL);
 
-		if (block_len == 0) {
-			/* A new contiguous block. */
-			if (sg) {
-				addr = (u64) sg_dma_address(sg);
-				block_len = sg_dma_len(sg);
-			} else {
-				addr = (u64) urb->transfer_dma;
-				block_len = full_len;
-			}
-			/* TRB buffer should not cross 64KB boundaries */
-			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
-			trb_buff_len = min_t(unsigned int,
-					trb_buff_len,
-					block_len);
-		} else {
-			/* Further through the contiguous block. */
-			trb_buff_len = block_len;
-			if (trb_buff_len > TRB_MAX_BUFF_SIZE)
-				trb_buff_len = TRB_MAX_BUFF_SIZE;
-		}
+		/* TRB buffer should not cross 64KB boundaries */
+		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
 
-		if (running_total + trb_buff_len > full_len)
-			trb_buff_len = full_len - running_total;
+		if (enqd_len + trb_buff_len > full_len)
+			trb_buff_len = full_len - enqd_len;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (i == 0) {
+		if (first_trb) {
+			first_trb = false;
 			if (start_cycle == 0)
 				field |= TRB_CYCLE;
 		} else
@@ -3212,7 +3196,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		/* Chain all the TRBs together; clear the chain bit in the last
 		 * TRB to indicate it's the last TRB in the chain.
 		 */
-		if (i < last_trb_num) {
+		if (enqd_len + trb_buff_len < full_len) {
 			field |= TRB_CHAIN;
 		} else {
 			field |= TRB_IOC;
@@ -3225,9 +3209,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			field |= TRB_ISP;
 
 		/* Set the TRB length, TD size, and interrupter fields. */
-		remainder = xhci_td_remainder(xhci, running_total,
-					trb_buff_len, full_len,
-					urb, more_trbs_coming);
+		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
+					      full_len, urb, more_trbs_coming);
+
 		length_field = TRB_LEN(trb_buff_len) |
 			TRB_TD_SIZE(remainder) |
 			TRB_INTR_TARGET(0);
@@ -3238,17 +3222,16 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				length_field,
 				field);
 
-		running_total += trb_buff_len;
 		addr += trb_buff_len;
 		block_len -= trb_buff_len;
 
-		if (sg) {
-			if (block_len == 0) {
-				/* New sg entry */
-				--num_sgs;
-				if (num_sgs == 0)
-					break;
+		if (sg && block_len == 0) {
+			/* New sg entry */
+			--num_sgs;
+			if (num_sgs != 0) {
 				sg = sg_next(sg);
+				block_len = sg_dma_len(sg);
+				addr = (u64) sg_dma_address(sg);
 			}
 		}
 	}
@@ -3262,7 +3245,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
 	}
 
-	check_trb_math(urb, running_total);
+	check_trb_math(urb, enqd_len);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
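
For illustration, a stripped-down user-space C sketch of the resulting loop
shape (scatterlist handling, cycle bits, and real TRB fields are omitted; the
payload size, address, and helper name are made up). The point is that both
the chain/IOC decision and loop termination are driven by the enqueued length,
so no TRB count has to be known up front:

#include <stdio.h>

#define TRB_MAX_BUFF_SIZE	(1u << 16)	/* 64KB, as in the driver */

/* Toy stand-in for queueing one TRB: just print its length and whether
 * it is chained to a following TRB or completes the TD (IOC). */
static void queue_trb_stub(unsigned int len, int chained)
{
	printf("TRB len=%#x %s\n", len, chained ? "CHAIN" : "IOC");
}

int main(void)
{
	unsigned int full_len = 200000;		/* made-up URB payload size  */
	unsigned long long addr = 0x1000;	/* made-up start DMA address */
	unsigned int enqd_len, trb_buff_len;

	for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
		/* Each TRB stops at the next 64KB boundary. */
		trb_buff_len = TRB_MAX_BUFF_SIZE -
			       (unsigned int)(addr & (TRB_MAX_BUFF_SIZE - 1));
		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Chain every TRB except the one that completes the payload. */
		queue_trb_stub(trb_buff_len, enqd_len + trb_buff_len < full_len);
		addr += trb_buff_len;
	}
	return 0;
}

With this loop shape, any extra TRB forced by boundary or TD-fragment
alignment is simply absorbed by another iteration instead of invalidating a
pre-computed count.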