Commit ae636747 authored by Sarah Sharp, committed by Greg Kroah-Hartman

USB: xhci: URB cancellation support.

Add URB cancellation support to the xHCI host controller driver.  This
currently supports cancellation for endpoints that do not have streams
enabled.

An URB is represented by a number of Transfer Request Blocks (TRBs) that are
chained together to form one (or more) Transfer Descriptors (TDs) on an
endpoint ring.  The ring is made up of contiguous segments, linked together
with Link TRBs (which may or may not be chained into a TD).
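
As a concrete picture of that layout, here is a minimal sketch of walking the
TRBs of one TD, using the xhci_td and xhci_segment fields introduced later in
this patch.  The segment 'next' pointer and the TRBS_PER_SEGMENT constant are
existing driver definitions that do not appear in the hunks below, so treat
their use here as an assumption; the helper name is hypothetical.

/* Sketch only: count the TRBs that make up one TD by walking from
 * first_trb to last_trb, hopping over the Link TRB at the end of each
 * segment.  Assumes each segment holds TRBS_PER_SEGMENT entries, the
 * last entry being a Link TRB to the next segment.
 */
static unsigned int count_td_trbs(struct xhci_td *td)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;
	unsigned int count = 1;

	while (trb != td->last_trb) {
		trb++;
		if (trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
			/* Segment ends in a Link TRB; follow it */
			seg = seg->next;
			trb = seg->trbs;
		}
		count++;
	}
	return count;
}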

To cancel an URB, we must stop the endpoint ring, make the hardware skip
over the TDs in the URB (either by turning them into No-op TDs, or by
moving the hardware's ring dequeue pointer past the last TRB in the last
TD), and then restart the ring.
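
A hedged sketch of the no-op approach follows.  The TRB field macros used here
(TRB_TYPE(), TRB_TR_NOOP, TRB_TYPE_BITMASK, TRB_CHAIN) mirror the bit layout
used elsewhere in xhci.h but are not part of the hunks shown in this commit,
so their exact names are an assumption; the function name is hypothetical.

/* Sketch of the no-op strategy (not the driver's final code).  The cycle
 * bit in field[3] is left untouched so TRB ownership does not change
 * under the hardware.
 */
static void td_to_noop_sketch(struct xhci_td *td)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;

	for (;;) {
		/* Clear the chain bit and old type, then mark as a no-op */
		trb->generic.field[3] &= ~(u32) (TRB_CHAIN | TRB_TYPE_BITMASK);
		trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
		if (trb == td->last_trb)
			break;
		if (++trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
			/* Unchain the Link TRB so each no-op is its own TD */
			trb->link.control &= ~TRB_CHAIN;
			seg = seg->next;
			trb = seg->trbs;
		}
	}
}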

There are times when we must drop the xHCI lock during this process, such as
when we need to complete cancelled URBs.  We must ensure that additional URBs
can be marked as cancelled, and that new URBs can be enqueued (since the URB
completion handlers can do either).  The new endpoint ring variables
cancels_pending and state (which can only be modified while holding the xHCI
lock) ensure that future cancellations and enqueues do not interrupt any
pending cancellation code.
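
A hedged sketch of that lock-drop pattern is below; the bulk of the
cancellation handling lives in the collapsed portion of the diff, so the
function name here is hypothetical and only the locking shape is the point.
usb_hcd_giveback_urb() and xhci_to_hcd() are existing USB core / driver APIs.

/* Sketch: complete one cancelled TD.  cancels_pending and the cancelled
 * TD list may only be touched under xhci->lock, so the lock is dropped
 * only around the giveback, where the completion handler may enqueue or
 * cancel more URBs.
 */
static void complete_cancelled_td_sketch(struct xhci_hcd *xhci,
		struct xhci_ring *ep_ring, struct xhci_td *td, int status)
	__releases(xhci->lock) __acquires(xhci->lock)
{
	struct urb *urb = td->urb;

	list_del(&td->cancelled_td_list);
	ep_ring->cancels_pending--;

	spin_unlock(&xhci->lock);
	usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
	spin_lock(&xhci->lock);
	/* Re-check ep_ring->state and cancels_pending before proceeding */
}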

To facilitate cancellation, we must keep track of the starting ring
segment, first TRB, and last TRB for each URB.  We also need to keep track
of the list of TDs that have been marked as cancelled, separate from the
list of TDs that are queued for this endpoint.  The new variables and
cancellation list are stored in the xhci_td structure.
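
A sketch of how the enqueue path might record that span, using the xhci_td
fields added by this patch; ep_ring->enq_seg and ep_ring->enqueue are existing
ring bookkeeping fields, and the helper name is hypothetical.

/* Sketch: capture the ring position before the TD's TRBs are written,
 * so cancellation can later find the span of TRBs to skip.
 */
static void record_td_span_sketch(struct xhci_ring *ep_ring,
		struct xhci_td *td, struct urb *urb)
{
	td->urb = urb;
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;
	INIT_LIST_HEAD(&td->cancelled_td_list);
	list_add_tail(&td->td_list, &ep_ring->td_list);
	/* After all TRBs are queued, the caller records the final one:
	 * td->last_trb = <pointer to the last TRB written>;
	 */
}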
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 8a96c052
@@ -613,12 +613,70 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	return ret;
 }
 
-/* Remove from hardware lists
- * completions normally happen asynchronously
+/*
+ * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
+ * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
+ * should pick up where it left off in the TD, unless a Set Transfer Ring
+ * Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled URB will be "removed" from
+ * the ring.  Since the ring is a contiguous structure, they can't be physically
+ * removed.  Instead, there are several cases to consider:
+ *
+ *  1) If the HC is in the middle of processing the URB to be canceled, we
+ *     simply move the ring's dequeue pointer past those TRBs using the Set
+ *     Transfer Ring Dequeue Pointer command.  This will be the common case,
+ *     when drivers time out on the last submitted URB and attempt to cancel.
+ *
+ *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
+ *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
+ *     HC will need to invalidate any TRBs it has cached after the stop
+ *     endpoint command, as noted in the xHCI 0.95 errata.
+ *
+ *  3) The TD may have completed by the time the Stop Endpoint Command
+ *     completes, so software needs to handle that case too.
+ *
+ * This function should protect against the TD enqueueing code ringing the
+ * doorbell while this code is waiting for a Stop Endpoint command to complete.
+ * It also needs to account for multiple cancellations happening at the same
+ * time for the same endpoint.
+ *
+ * Note that this function can be called in any context, or so says
+ * usb_hcd_unlink_urb().
+ */
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
-	return -ENOSYS;
+	unsigned long flags;
+	int ret;
+	struct xhci_hcd *xhci;
+	struct xhci_td *td;
+	unsigned int ep_index;
+	struct xhci_ring *ep_ring;
+
+	xhci = hcd_to_xhci(hcd);
+	spin_lock_irqsave(&xhci->lock, flags);
+	/* Make sure the URB hasn't completed or been unlinked already */
+	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (ret || !urb->hcpriv)
+		goto done;
+
+	xhci_dbg(xhci, "Cancel URB 0x%x\n", (unsigned int) urb);
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+	td = (struct xhci_td *) urb->hcpriv;
+
+	ep_ring->cancels_pending++;
+	list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
+	/* Queue a stop endpoint command, but only if this is
+	 * the first cancellation to be handled.
+	 */
+	if (ep_ring->cancels_pending == 1) {
+		queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+		ring_cmd_db(xhci);
+	}
+done:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return ret;
 }
 
 /* Drop an endpoint from a new bandwidth configuration for this device.
...
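
For context, this is how the dequeue path above is typically reached from a
USB device driver: usb_unlink_urb() and usb_kill_urb() are existing USB core
APIs, and the core dispatches to xhci_urb_dequeue() through the hc_driver's
urb_dequeue hook.  The wrapper names below are hypothetical.

static void cancel_my_urb_async(struct urb *urb)
{
	/* Asynchronous: returns immediately; the completion handler runs
	 * later with urb->status == -ECONNRESET.
	 */
	usb_unlink_urb(urb);
}

static void cancel_my_urb_sync(struct urb *urb)
{
	/* Synchronous, process context only: waits until the URB has been
	 * fully given back (completion sees -ENOENT).
	 */
	usb_kill_urb(urb);
}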
@@ -142,6 +142,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return 0;
 
 	INIT_LIST_HEAD(&ring->td_list);
+	INIT_LIST_HEAD(&ring->cancelled_td_list);
 	if (num_segs == 0)
 		return ring;
...
(The diff for one file is collapsed in the original view and not shown here.)
@@ -514,6 +514,7 @@ struct xhci_slot_ctx {
 /* bits 8:26 reserved */
 /* Slot state */
 #define SLOT_STATE	(0x1f << 27)
+#define GET_SLOT_STATE(p)	(((p) & (0x1f << 27)) >> 27)
 
 /**
...
@@ -765,6 +766,11 @@ struct xhci_event_cmd {
 #define TRB_TO_SLOT_ID(p)	(((p) & (0xff<<24)) >> 24)
 #define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)
 
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
+#define TRB_TO_EP_INDEX(p)	((((p) & (0x1f << 16)) >> 16) - 1)
+#define EP_ID_FOR_TRB(p)	((((p) + 1) & 0x1f) << 16)
+
 /* Port Status Change Event TRB fields */
 /* Port ID - bits 31:24 */
 #define GET_PORT_ID(p)		(((p) & (0xff << 24)) >> 24)
@@ -893,12 +899,6 @@ union xhci_trb {
 #define TRB_MAX_BUFF_SHIFT	16
 #define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
 
-struct xhci_td {
-	struct list_head	td_list;
-	struct urb		*urb;
-	union xhci_trb		*last_trb;
-};
-
 struct xhci_segment {
 	union xhci_trb		*trbs;
 	/* private to HCD */
@@ -906,6 +906,15 @@ struct xhci_segment {
 	dma_addr_t		dma;
 } __attribute__ ((packed));
 
+struct xhci_td {
+	struct list_head	td_list;
+	struct list_head	cancelled_td_list;
+	struct urb		*urb;
+	struct xhci_segment	*start_seg;
+	union xhci_trb		*first_trb;
+	union xhci_trb		*last_trb;
+};
+
 struct xhci_ring {
 	struct xhci_segment	*first_seg;
 	union xhci_trb		*enqueue;
@@ -915,6 +924,14 @@ struct xhci_ring {
 	struct xhci_segment	*deq_seg;
 	unsigned int		deq_updates;
 	struct list_head	td_list;
+	/* ----  Related to URB cancellation ---- */
+	struct list_head	cancelled_td_list;
+	unsigned int		cancels_pending;
+	unsigned int		state;
+#define SET_DEQ_PENDING		(1 << 0)
+	/* The TRB that was last reported in a stopped endpoint ring */
+	union xhci_trb		*stopped_trb;
+	struct xhci_td		*stopped_td;
 	/*
 	 * Write the cycle state into the TRB cycle field to give ownership of
 	 * the TRB to the host controller (if we are the producer), or to check
@@ -1119,6 +1136,8 @@ void handle_event(struct xhci_hcd *xhci);
 void set_hc_event_deq(struct xhci_hcd *xhci);
 int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
 int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
+int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index);
 int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
 int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
 int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
...
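
The Stop Endpoint TRB macros added above convert between the driver's
zero-based ep_index and xHCI's endpoint ID (the Device Context Index, DCI).
Per the xHCI addressing convention, a non-control endpoint's DCI is
(endpoint number * 2) + direction, with direction 1 for IN, and
ep_index = DCI - 1.  A small standalone check of the round trip (ordinary
userspace C, not driver code):

#include <stdio.h>

/* Macros copied from the hunk above */
#define TRB_TO_EP_INDEX(p)	((((p) & (0x1f << 16)) >> 16) - 1)
#define EP_ID_FOR_TRB(p)	((((p) + 1) & 0x1f) << 16)

int main(void)
{
	/* Bulk IN endpoint 0x81: DCI = 1 * 2 + 1 = 3, so ep_index = 2 */
	unsigned int ep_index = 2;
	unsigned int field = EP_ID_FOR_TRB(ep_index);

	printf("TRB field bits: 0x%08x\n", field);		/* 0x00030000 */
	printf("ep_index back:  %u\n", TRB_TO_EP_INDEX(field));	/* 2 */
	return 0;
}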