Commit 63a0d9ab authored by Sarah Sharp, committed by Greg Kroah-Hartman

USB: xhci: Endpoint representation refactoring.

The xhci_ring structure contained information that is really related to an
endpoint, not a ring.  This will cause problems later when endpoint
streams are supported and there are multiple rings per endpoint.

Move the endpoint state and cancellation information into a new virtual
endpoint structure, xhci_virt_ep.  The list of TRBs to be cancelled should
be per endpoint, not per ring, for easy access.  There can be only one TRB
that the endpoint stopped on after a stop endpoint command (even with
streams enabled); move the stopped TRB information into the new virtual
endpoint structure.  Also move the 31 endpoint rings and temporary ring
storage from the virtual device structure (xhci_virt_device) into the
virtual endpoint structure (xhci_virt_ep).
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 9e221be8
...@@ -351,13 +351,14 @@ void xhci_event_ring_work(unsigned long arg) ...@@ -351,13 +351,14 @@ void xhci_event_ring_work(unsigned long arg)
xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
xhci_dbg_cmd_ptrs(xhci); xhci_dbg_cmd_ptrs(xhci);
for (i = 0; i < MAX_HC_SLOTS; ++i) { for (i = 0; i < MAX_HC_SLOTS; ++i) {
if (xhci->devs[i]) { if (!xhci->devs[i])
for (j = 0; j < 31; ++j) { continue;
if (xhci->devs[i]->ep_rings[j]) { for (j = 0; j < 31; ++j) {
xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg); if (!ring)
} continue;
} xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
xhci_debug_segment(xhci, ring->deq_seg);
} }
} }
...@@ -778,6 +779,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) ...@@ -778,6 +779,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct xhci_td *td; struct xhci_td *td;
unsigned int ep_index; unsigned int ep_index;
struct xhci_ring *ep_ring; struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
xhci = hcd_to_xhci(hcd); xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags); spin_lock_irqsave(&xhci->lock, flags);
...@@ -790,17 +792,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) ...@@ -790,17 +792,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_dbg(xhci, "Event ring:\n"); xhci_dbg(xhci, "Event ring:\n");
xhci_debug_ring(xhci, xhci->event_ring); xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc); ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
ep_ring = ep->ring;
xhci_dbg(xhci, "Endpoint ring:\n"); xhci_dbg(xhci, "Endpoint ring:\n");
xhci_debug_ring(xhci, ep_ring); xhci_debug_ring(xhci, ep_ring);
td = (struct xhci_td *) urb->hcpriv; td = (struct xhci_td *) urb->hcpriv;
ep_ring->cancels_pending++; ep->cancels_pending++;
list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list); list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
/* Queue a stop endpoint command, but only if this is /* Queue a stop endpoint command, but only if this is
* the first cancellation to be handled. * the first cancellation to be handled.
*/ */
if (ep_ring->cancels_pending == 1) { if (ep->cancels_pending == 1) {
xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
xhci_ring_cmd_db(xhci); xhci_ring_cmd_db(xhci);
} }
...@@ -1206,10 +1209,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ...@@ -1206,10 +1209,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_zero_in_ctx(xhci, virt_dev); xhci_zero_in_ctx(xhci, virt_dev);
/* Free any old rings */ /* Free any old rings */
for (i = 1; i < 31; ++i) { for (i = 1; i < 31; ++i) {
if (virt_dev->new_ep_rings[i]) { if (virt_dev->eps[i].new_ring) {
xhci_ring_free(xhci, virt_dev->ep_rings[i]); xhci_ring_free(xhci, virt_dev->eps[i].ring);
virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i]; virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->new_ep_rings[i] = NULL; virt_dev->eps[i].new_ring = NULL;
} }
} }
...@@ -1236,9 +1239,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ...@@ -1236,9 +1239,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev = xhci->devs[udev->slot_id]; virt_dev = xhci->devs[udev->slot_id];
/* Free any rings allocated for added endpoints */ /* Free any rings allocated for added endpoints */
for (i = 0; i < 31; ++i) { for (i = 0; i < 31; ++i) {
if (virt_dev->new_ep_rings[i]) { if (virt_dev->eps[i].new_ring) {
xhci_ring_free(xhci, virt_dev->new_ep_rings[i]); xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
virt_dev->new_ep_rings[i] = NULL; virt_dev->eps[i].new_ring = NULL;
} }
} }
xhci_zero_in_ctx(xhci, virt_dev); xhci_zero_in_ctx(xhci, virt_dev);
...@@ -1281,17 +1284,18 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, ...@@ -1281,17 +1284,18 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
} }
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct usb_device *udev, struct usb_device *udev, unsigned int ep_index)
unsigned int ep_index, struct xhci_ring *ep_ring)
{ {
struct xhci_dequeue_state deq_state; struct xhci_dequeue_state deq_state;
struct xhci_virt_ep *ep;
xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
ep = &xhci->devs[udev->slot_id]->eps[ep_index];
/* We need to move the HW's dequeue pointer past this TD, /* We need to move the HW's dequeue pointer past this TD,
* or it will attempt to resend it on the next doorbell ring. * or it will attempt to resend it on the next doorbell ring.
*/ */
xhci_find_new_dequeue_state(xhci, udev->slot_id, xhci_find_new_dequeue_state(xhci, udev->slot_id,
ep_index, ep_ring->stopped_td, ep_index, ep->stopped_td,
&deq_state); &deq_state);
/* HW with the reset endpoint quirk will use the saved dequeue state to /* HW with the reset endpoint quirk will use the saved dequeue state to
...@@ -1299,8 +1303,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, ...@@ -1299,8 +1303,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
*/ */
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
xhci_dbg(xhci, "Queueing new dequeue state\n"); xhci_dbg(xhci, "Queueing new dequeue state\n");
xhci_queue_new_dequeue_state(xhci, ep_ring, xhci_queue_new_dequeue_state(xhci, udev->slot_id,
udev->slot_id,
ep_index, &deq_state); ep_index, &deq_state);
} else { } else {
/* Better hope no one uses the input context between now and the /* Better hope no one uses the input context between now and the
...@@ -1327,7 +1330,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, ...@@ -1327,7 +1330,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
unsigned int ep_index; unsigned int ep_index;
unsigned long flags; unsigned long flags;
int ret; int ret;
struct xhci_ring *ep_ring; struct xhci_virt_ep *virt_ep;
xhci = hcd_to_xhci(hcd); xhci = hcd_to_xhci(hcd);
udev = (struct usb_device *) ep->hcpriv; udev = (struct usb_device *) ep->hcpriv;
...@@ -1337,8 +1340,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, ...@@ -1337,8 +1340,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
if (!ep->hcpriv) if (!ep->hcpriv)
return; return;
ep_index = xhci_get_endpoint_index(&ep->desc); ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index]; virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
if (!ep_ring->stopped_td) { if (!virt_ep->stopped_td) {
xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
ep->desc.bEndpointAddress); ep->desc.bEndpointAddress);
return; return;
...@@ -1357,8 +1360,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, ...@@ -1357,8 +1360,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
* command. Better hope that last command worked! * command. Better hope that last command worked!
*/ */
if (!ret) { if (!ret) {
xhci_cleanup_stalled_ring(xhci, udev, ep_index, ep_ring); xhci_cleanup_stalled_ring(xhci, udev, ep_index);
kfree(ep_ring->stopped_td); kfree(virt_ep->stopped_td);
xhci_ring_cmd_db(xhci); xhci_ring_cmd_db(xhci);
} }
spin_unlock_irqrestore(&xhci->lock, flags); spin_unlock_irqrestore(&xhci->lock, flags);
......
...@@ -144,7 +144,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, ...@@ -144,7 +144,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
return 0; return 0;
INIT_LIST_HEAD(&ring->td_list); INIT_LIST_HEAD(&ring->td_list);
INIT_LIST_HEAD(&ring->cancelled_td_list);
if (num_segs == 0) if (num_segs == 0)
return ring; return ring;
...@@ -265,8 +264,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) ...@@ -265,8 +264,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
return; return;
for (i = 0; i < 31; ++i) for (i = 0; i < 31; ++i)
if (dev->ep_rings[i]) if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->ep_rings[i]); xhci_ring_free(xhci, dev->eps[i].ring);
if (dev->in_ctx) if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx); xhci_free_container_ctx(xhci, dev->in_ctx);
...@@ -281,6 +280,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, ...@@ -281,6 +280,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags) struct usb_device *udev, gfp_t flags)
{ {
struct xhci_virt_device *dev; struct xhci_virt_device *dev;
int i;
/* Slot ID 0 is reserved */ /* Slot ID 0 is reserved */
if (slot_id == 0 || xhci->devs[slot_id]) { if (slot_id == 0 || xhci->devs[slot_id]) {
...@@ -309,9 +309,13 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, ...@@ -309,9 +309,13 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->in_ctx->dma); (unsigned long long)dev->in_ctx->dma);
/* Initialize the cancellation list for each endpoint */
for (i = 0; i < 31; i++)
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
/* Allocate endpoint 0 ring */ /* Allocate endpoint 0 ring */
dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
if (!dev->ep_rings[0]) if (!dev->eps[0].ring)
goto fail; goto fail;
init_completion(&dev->cmd_completion); init_completion(&dev->cmd_completion);
...@@ -428,8 +432,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ...@@ -428,8 +432,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
ep0_ctx->ep_info2 |= ERROR_COUNT(3); ep0_ctx->ep_info2 |= ERROR_COUNT(3);
ep0_ctx->deq = ep0_ctx->deq =
dev->ep_rings[0]->first_seg->dma; dev->eps[0].ring->first_seg->dma;
ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */ /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
...@@ -539,10 +543,11 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, ...@@ -539,10 +543,11 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
/* Set up the endpoint ring */ /* Set up the endpoint ring */
virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); virt_dev->eps[ep_index].new_ring =
if (!virt_dev->new_ep_rings[ep_index]) xhci_ring_alloc(xhci, 1, true, mem_flags);
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM; return -ENOMEM;
ep_ring = virt_dev->new_ep_rings[ep_index]; ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
......
This diff is collapsed.
...@@ -625,6 +625,23 @@ struct xhci_input_control_ctx { ...@@ -625,6 +625,23 @@ struct xhci_input_control_ctx {
/* add context bitmasks */ /* add context bitmasks */
#define ADD_EP(x) (0x1 << x) #define ADD_EP(x) (0x1 << x)
/* Per-endpoint state for a virtual device.  Split out of xhci_ring so that
 * endpoint-level bookkeeping (cancellation, stopped-TRB tracking) stays
 * per-endpoint when multiple rings per endpoint (streams) are added later.
 */
struct xhci_virt_ep {
/* Ring currently used for transfers on this endpoint */
struct xhci_ring *ring;
/* Temporary storage in case the configure endpoint command fails and we
 * have to restore the device state to the previous state
 */
struct xhci_ring *new_ring;
/* Endpoint state flags; holds SET_DEQ_PENDING / EP_HALTED bits */
unsigned int ep_state;
#define SET_DEQ_PENDING (1 << 0)
#define EP_HALTED (1 << 1)
/* ---- Related to URB cancellation ---- */
/* TDs queued for cancellation by xhci_urb_dequeue(); list heads are
 * initialized in xhci_alloc_virt_device().
 */
struct list_head cancelled_td_list;
/* Count of pending cancellations; a stop endpoint command is queued only
 * when this increments from 0 to 1 (see xhci_urb_dequeue()).
 */
unsigned int cancels_pending;
/* The TRB that was last reported in a stopped endpoint ring */
union xhci_trb *stopped_trb;
/* TD the endpoint stopped on; consumed (and kfree'd) by
 * xhci_endpoint_reset() after the stalled ring is cleaned up.
 */
struct xhci_td *stopped_td;
};
struct xhci_virt_device { struct xhci_virt_device {
/* /*
* Commands to the hardware are passed an "input context" that * Commands to the hardware are passed an "input context" that
...@@ -637,13 +654,7 @@ struct xhci_virt_device { ...@@ -637,13 +654,7 @@ struct xhci_virt_device {
struct xhci_container_ctx *out_ctx; struct xhci_container_ctx *out_ctx;
/* Used for addressing devices and configuration changes */ /* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx; struct xhci_container_ctx *in_ctx;
struct xhci_virt_ep eps[31];
/* FIXME when stream support is added */
struct xhci_ring *ep_rings[31];
/* Temporary storage in case the configure endpoint command fails and we
* have to restore the device state to the previous state
*/
struct xhci_ring *new_ep_rings[31];
struct completion cmd_completion; struct completion cmd_completion;
/* Status of the last command issued for this device */ /* Status of the last command issued for this device */
u32 cmd_status; u32 cmd_status;
...@@ -945,15 +956,6 @@ struct xhci_ring { ...@@ -945,15 +956,6 @@ struct xhci_ring {
struct xhci_segment *deq_seg; struct xhci_segment *deq_seg;
unsigned int deq_updates; unsigned int deq_updates;
struct list_head td_list; struct list_head td_list;
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
unsigned int cancels_pending;
unsigned int state;
#define SET_DEQ_PENDING (1 << 0)
#define EP_HALTED (1 << 1)
/* The TRB that was last reported in a stopped endpoint ring */
union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
/* /*
* Write the cycle state into the TRB cycle field to give ownership of * Write the cycle state into the TRB cycle field to give ownership of
* the TRB to the host controller (if we are the producer), or to check * the TRB to the host controller (if we are the producer), or to check
...@@ -1236,11 +1238,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, ...@@ -1236,11 +1238,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index, unsigned int slot_id, unsigned int ep_index,
struct xhci_td *cur_td, struct xhci_dequeue_state *state); struct xhci_td *cur_td, struct xhci_dequeue_state *state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring, unsigned int slot_id, unsigned int slot_id, unsigned int ep_index,
unsigned int ep_index, struct xhci_dequeue_state *deq_state); struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct usb_device *udev, struct usb_device *udev, unsigned int ep_index);
unsigned int ep_index, struct xhci_ring *ep_ring);
void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index, unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state); struct xhci_dequeue_state *deq_state);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment