Commit c92bcfa7 authored by Sarah Sharp, committed by Greg Kroah-Hartman

USB: xhci: Stall handling bug fixes.

Correct the xHCI code to handle stalls on USB endpoints.  We need to move
the endpoint ring's dequeue pointer past the stalled transfer, or the HW
will try to restart the transfer the next time the doorbell is rung.

Don't attempt to clear a halt on an endpoint if we haven't seen a stalled
transfer for it.  The USB core will attempt to clear a halt on all
endpoints when it selects a new configuration.
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent d115b048
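For orientation, the recovery path this patch wires up can be summarized by the sketch below. It is a simplified outline, not the literal xhci_endpoint_reset() body: locking, the hcpriv checks, and error handling are omitted, and the wrapper name stall_recovery_sketch is invented for illustration. The xhci_* calls and ring fields it uses are the ones added or touched by this patch.

/* Illustrative sketch of the stall-recovery flow introduced by this patch.
 * Names follow the driver, but this is not the literal driver code.
 */
static void stall_recovery_sketch(struct xhci_hcd *xhci,
        struct usb_device *udev, struct usb_host_endpoint *ep)
{
    unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
    struct xhci_ring *ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
    struct xhci_dequeue_state deq_state;

    /* Only reset an endpoint that actually stalled a transfer; the USB
     * core tries to clear halts on every endpoint when it selects a
     * new configuration.
     */
    if (!ep_ring->stopped_td)
        return;

    /* Ask the xHC to reset the endpoint, clearing the halted state. */
    if (xhci_queue_reset_ep(xhci, udev->slot_id, ep_index))
        return;

    /* Compute a dequeue pointer just past the stalled TD ... */
    xhci_find_new_dequeue_state(xhci, udev->slot_id, ep_index,
            ep_ring->stopped_td, &deq_state);

    /* ... and queue a Set TR Dequeue Pointer command so the hardware
     * does not retry the stalled transfer on the next doorbell ring.
     */
    xhci_queue_new_dequeue_state(xhci, ep_ring, udev->slot_id,
            ep_index, &deq_state);

    kfree(ep_ring->stopped_td);
    xhci_ring_cmd_db(xhci);
}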
@@ -1089,6 +1089,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
     unsigned int ep_index;
     unsigned long flags;
     int ret;
+    struct xhci_dequeue_state deq_state;
+    struct xhci_ring *ep_ring;
 
     xhci = hcd_to_xhci(hcd);
     udev = (struct usb_device *) ep->hcpriv;
@@ -1098,11 +1100,33 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
     if (!ep->hcpriv)
         return;
     ep_index = xhci_get_endpoint_index(&ep->desc);
+    ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
+    if (!ep_ring->stopped_td) {
+        xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
+                ep->desc.bEndpointAddress);
+        return;
+    }
 
     xhci_dbg(xhci, "Queueing reset endpoint command\n");
     spin_lock_irqsave(&xhci->lock, flags);
     ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+    /*
+     * Can't change the ring dequeue pointer until it's transitioned to the
+     * stopped state, which is only upon a successful reset endpoint
+     * command.  Better hope that last command worked!
+     */
     if (!ret) {
+        xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+        /* We need to move the HW's dequeue pointer past this TD,
+         * or it will attempt to resend it on the next doorbell ring.
+         */
+        xhci_find_new_dequeue_state(xhci, udev->slot_id,
+                ep_index, ep_ring->stopped_td, &deq_state);
+        xhci_dbg(xhci, "Queueing new dequeue state\n");
+        xhci_queue_new_dequeue_state(xhci, ep_ring,
+                udev->slot_id,
+                ep_index, &deq_state);
+        kfree(ep_ring->stopped_td);
         xhci_ring_cmd_db(xhci);
     }
     spin_unlock_irqrestore(&xhci->lock, flags);
...
@@ -335,12 +335,6 @@ static struct xhci_segment *find_trb_seg(
     return cur_seg;
 }
 
-struct dequeue_state {
-    struct xhci_segment *new_deq_seg;
-    union xhci_trb *new_deq_ptr;
-    int new_cycle_state;
-};
-
 /*
  * Move the xHC's endpoint ring dequeue pointer past cur_td.
  * Record the new state of the xHC's endpoint ring dequeue segment,
@@ -355,26 +349,30 @@ struct dequeue_state {
  * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  *   if we've moved it past a link TRB with the toggle cycle bit set.
  */
-static void find_new_dequeue_state(struct xhci_hcd *xhci,
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         unsigned int slot_id, unsigned int ep_index,
-        struct xhci_td *cur_td, struct dequeue_state *state)
+        struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
     struct xhci_virt_device *dev = xhci->devs[slot_id];
     struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
     struct xhci_generic_trb *trb;
     struct xhci_ep_ctx *ep_ctx;
+    dma_addr_t addr;
 
     state->new_cycle_state = 0;
+    xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
     state->new_deq_seg = find_trb_seg(cur_td->start_seg,
             ep_ring->stopped_trb,
             &state->new_cycle_state);
     if (!state->new_deq_seg)
         BUG();
     /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+    xhci_dbg(xhci, "Finding endpoint context\n");
     ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
     state->new_cycle_state = 0x1 & ep_ctx->deq;
 
     state->new_deq_ptr = cur_td->last_trb;
+    xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
     state->new_deq_seg = find_trb_seg(state->new_deq_seg,
             state->new_deq_ptr,
             &state->new_cycle_state);
@@ -388,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
     next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
     /* Don't update the ring cycle state for the producer (us). */
+    xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+            state->new_deq_seg);
+    addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+    xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+            (unsigned long long) addr);
+    xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
     ep_ring->dequeue = state->new_deq_ptr;
     ep_ring->deq_seg = state->new_deq_seg;
 }
@@ -437,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
         unsigned int ep_index, struct xhci_segment *deq_seg,
         union xhci_trb *deq_ptr, u32 cycle_state);
 
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+        struct xhci_ring *ep_ring, unsigned int slot_id,
+        unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+{
+    xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+            "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+            deq_state->new_deq_seg,
+            (unsigned long long)deq_state->new_deq_seg->dma,
+            deq_state->new_deq_ptr,
+            (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
+            deq_state->new_cycle_state);
+    queue_set_tr_deq(xhci, slot_id, ep_index,
+            deq_state->new_deq_seg,
+            deq_state->new_deq_ptr,
+            (u32) deq_state->new_cycle_state);
+    /* Stop the TD queueing code from ringing the doorbell until
+     * this command completes.  The HC won't set the dequeue pointer
+     * if the ring is running, and ringing the doorbell starts the
+     * ring running.
+     */
+    ep_ring->state |= SET_DEQ_PENDING;
+    xhci_ring_cmd_db(xhci);
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
@@ -457,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
     struct xhci_td *cur_td = 0;
     struct xhci_td *last_unlinked_td;
-    struct dequeue_state deq_state;
+    struct xhci_dequeue_state deq_state;
 #ifdef CONFIG_USB_HCD_STAT
     ktime_t stop_time = ktime_get();
 #endif
@@ -485,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
          * move the xHC endpoint ring dequeue pointer past this TD.
          */
         if (cur_td == ep_ring->stopped_td)
-            find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+            xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
                     &deq_state);
         else
             td_to_noop(xhci, ep_ring, cur_td);
@@ -501,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
     /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
     if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-        xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
-                "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
-                deq_state.new_deq_seg,
-                (unsigned long long)deq_state.new_deq_seg->dma,
-                deq_state.new_deq_ptr,
-                (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
-                deq_state.new_cycle_state);
-        queue_set_tr_deq(xhci, slot_id, ep_index,
-                deq_state.new_deq_seg,
-                deq_state.new_deq_ptr,
-                (u32) deq_state.new_cycle_state);
-        /* Stop the TD queueing code from ringing the doorbell until
-         * this command completes.  The HC won't set the dequeue pointer
-         * if the ring is running, and ringing the doorbell starts the
-         * ring running.
-         */
-        ep_ring->state |= SET_DEQ_PENDING;
-        xhci_ring_cmd_db(xhci);
+        xhci_queue_new_dequeue_state(xhci, ep_ring,
+                slot_id, ep_index, &deq_state);
     } else {
         /* Otherwise just ring the doorbell to restart the ring */
         ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -929,12 +941,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         if (event_trb != ep_ring->dequeue) {
             /* The event was for the status stage */
             if (event_trb == td->last_trb) {
+                if (td->urb->actual_length != 0) {
+                    /* Don't overwrite a previously set error code */
+                    if (status == -EINPROGRESS || status == 0)
                         /* Did we already see a short data stage? */
-                if (td->urb->actual_length != 0)
                         status = -EREMOTEIO;
-                else
+                } else {
                     td->urb->actual_length =
                         td->urb->transfer_buffer_length;
+                }
             } else {
                 /* Maybe the event was for the data stage? */
                 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
@@ -992,15 +1007,19 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         TRB_LEN(event->transfer_len));
                 td->urb->actual_length = 0;
             }
+            /* Don't overwrite a previously set error code */
+            if (status == -EINPROGRESS) {
                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                     status = -EREMOTEIO;
                 else
                     status = 0;
+            }
         } else {
             td->urb->actual_length = td->urb->transfer_buffer_length;
             /* Ignore a short packet completion if the
              * untransferred length was zero.
              */
+            if (status == -EREMOTEIO)
                 status = 0;
         }
     } else {
@@ -1028,12 +1047,22 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                     TRB_LEN(event->transfer_len);
         }
     }
-    /* The Endpoint Stop Command completion will take care of
-     * any stopped TDs.  A stopped TD may be restarted, so don't update the
-     * ring dequeue pointer or take this TD off any lists yet.
-     */
     if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
             GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+        /* The Endpoint Stop Command completion will take care of any
+         * stopped TDs.  A stopped TD may be restarted, so don't update
+         * the ring dequeue pointer or take this TD off any lists yet.
+         */
+        ep_ring->stopped_td = td;
+        ep_ring->stopped_trb = event_trb;
+    } else {
+        if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
+            /* The transfer is completed from the driver's
+             * perspective, but we need to issue a set dequeue
+             * command for this stalled endpoint to move the dequeue
+             * pointer past the TD.  We can't do that here because
+             * the halt condition must be cleared first.
+             */
             ep_ring->stopped_td = td;
             ep_ring->stopped_trb = event_trb;
         } else {
@@ -1041,6 +1070,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
             while (ep_ring->dequeue != td->last_trb)
                 inc_deq(xhci, ep_ring, false);
             inc_deq(xhci, ep_ring, false);
+        }
 
         /* Clean up the endpoint's TD list */
         urb = td->urb;
@@ -1050,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
             list_del(&td->cancelled_td_list);
             ep_ring->cancels_pending--;
         }
+        /* Leave the TD around for the reset endpoint function to use */
+        if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
             kfree(td);
+        }
         urb->hcpriv = NULL;
     }
 cleanup:
@@ -1166,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
          */
         xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
         return -ENOENT;
-    case EP_STATE_HALTED:
     case EP_STATE_ERROR:
-        xhci_warn(xhci, "WARN waiting for halt or error on ep "
-                "to be cleared\n");
+        xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
         /* FIXME event handling code for error needs to clear it */
         /* XXX not sure if this should be -ENOENT or not */
         return -EINVAL;
+    case EP_STATE_HALTED:
+        xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
     case EP_STATE_STOPPED:
     case EP_STATE_RUNNING:
         break;
@@ -1724,10 +1757,12 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
     u32 type = TRB_TYPE(TRB_SET_DEQ);
 
     addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
-    if (addr == 0)
+    if (addr == 0) {
         xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
         xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
                 deq_seg, deq_ptr);
+        return 0;
+    }
     return queue_command(xhci, lower_32_bits(addr) | cycle_state,
             upper_32_bits(addr), 0,
             trb_slot_id | trb_ep_index | type);
...
@@ -952,6 +952,12 @@ struct xhci_ring {
     u32 cycle_state;
 };
 
+struct xhci_dequeue_state {
+    struct xhci_segment *new_deq_seg;
+    union xhci_trb *new_deq_ptr;
+    int new_cycle_state;
+};
+
 struct xhci_erst_entry {
     /* 64-bit event ring segment address */
     u64 seg_addr;
@@ -1203,6 +1209,12 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
         u32 slot_id);
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
         unsigned int ep_index);
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+        unsigned int slot_id, unsigned int ep_index,
+        struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+        struct xhci_ring *ep_ring, unsigned int slot_id,
+        unsigned int ep_index, struct xhci_dequeue_state *deq_state);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
...