Commit d70f4231 authored by Mathias Nyman, committed by Greg Kroah-Hartman

xhci: adjust parameters passed to cleanup_halted_endpoint()

Instead of passing the slot id and endpoint index to
cleanup_halted_endpoint(), pass the endpoint structure pointer,
as it is already known to the caller.

This avoids digging out the endpoint structure again from the
slot id and endpoint index, and avoids passing those values
along the call chain for that purpose only.

Add a slot_id field to the virt_dev structure so that the slot id
can easily be found from a virt_dev, or from its child, the
virt_ep endpoint structure.
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20210129130044.206855-4-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d4dff804
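
Before the diff itself, here is a minimal stand-alone C sketch of the interface change described above: instead of threading slot_id and ep_index through the call chain, the caller passes the endpoint object, and the callee recovers the slot id through the new back-reference on the parent device. The names demo_virt_dev, demo_virt_ep, cleanup_old_style and cleanup_new_style are simplified stand-ins for illustration only, not the actual xhci types or functions.

#include <stdio.h>

/* Simplified stand-ins for xhci_virt_device / xhci_virt_ep (illustration only). */
struct demo_virt_dev {
	int slot_id;                    /* mirrors the new virt_dev->slot_id field */
};

struct demo_virt_ep {
	struct demo_virt_dev *vdev;     /* back-pointer to the owning device */
	unsigned int ep_index;
};

/* Old shape: the caller has to dig out and pass slot id and ep index itself. */
static void cleanup_old_style(int slot_id, unsigned int ep_index)
{
	printf("old: slot %d, ep %u\n", slot_id, ep_index);
}

/* New shape: the endpoint pointer is enough; the slot id comes from the back-pointer. */
static void cleanup_new_style(struct demo_virt_ep *ep)
{
	int slot_id = ep->vdev->slot_id;

	printf("new: slot %d, ep %u\n", slot_id, ep->ep_index);
}

int main(void)
{
	struct demo_virt_dev dev = { .slot_id = 3 };
	struct demo_virt_ep ep = { .vdev = &dev, .ep_index = 1 };

	cleanup_old_style(dev.slot_id, ep.ep_index);	/* values threaded through the call chain */
	cleanup_new_style(&ep);				/* one pointer carries everything */
	return 0;
}
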
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -994,6 +994,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev)
 		return 0;
 
+	dev->slot_id = slot_id;
+
 	/* Allocate the (output) device context that will be used in the HC. */
 	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 	if (!dev->out_ctx)
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1850,13 +1850,12 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
 }
 
 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
-		unsigned int slot_id, unsigned int ep_index,
-		unsigned int stream_id, struct xhci_td *td,
+		struct xhci_virt_ep *ep, unsigned int stream_id,
+		struct xhci_td *td,
 		enum xhci_ep_reset_type reset_type)
 {
-	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	struct xhci_command *command;
+	unsigned int slot_id = ep->vdev->slot_id;
 
 	/*
 	 * Avoid resetting endpoint if link is inactive. Can cause host hang.
 	 * Device will be reset soon to recover the link so don't do anything
@@ -1870,11 +1869,11 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 	ep->ep_state |= EP_HALTED;
 
-	xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
+	xhci_queue_reset_ep(xhci, command, slot_id, ep->ep_index, reset_type);
 
 	if (reset_type == EP_HARD_RESET) {
 		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
-		xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id,
+		xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id,
 					  td);
 	}
 	xhci_ring_cmd_db(xhci);
@@ -1972,10 +1971,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 {
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_ring *ep_ring;
-	unsigned int slot_id;
 	u32 trb_comp_code;
 
-	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
@@ -2004,8 +2001,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		 */
 		if ((ep->ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
 			xhci_clear_hub_tt_buffer(xhci, td, ep);
-		xhci_cleanup_halted_endpoint(xhci, slot_id, ep->ep_index,
-					ep_ring->stream_id, td, EP_HARD_RESET);
+		xhci_cleanup_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+					     EP_HARD_RESET);
 	} else {
 		/* Update ring dequeue pointer */
 		while (ep_ring->dequeue != td->last_trb)
@@ -2248,9 +2245,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	struct xhci_ring *ep_ring;
 	u32 trb_comp_code;
 	u32 remaining, requested, ep_trb_len;
-	unsigned int slot_id;
 
-	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
@@ -2289,8 +2284,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
 			break;
 		*status = 0;
-		xhci_cleanup_halted_endpoint(xhci, slot_id, ep->ep_index,
-					ep_ring->stream_id, td, EP_SOFT_RESET);
+		xhci_cleanup_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+					     EP_SOFT_RESET);
 		return 0;
 	default:
 		/* do nothing */
@@ -2366,8 +2361,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	case COMP_USB_TRANSACTION_ERROR:
 	case COMP_INVALID_STREAM_TYPE_ERROR:
 	case COMP_INVALID_STREAM_ID_ERROR:
-		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
-					     NULL, EP_SOFT_RESET);
+		xhci_cleanup_halted_endpoint(xhci, ep, 0, NULL,
+					     EP_SOFT_RESET);
 		goto cleanup;
 	case COMP_RING_UNDERRUN:
 	case COMP_RING_OVERRUN:
@@ -2551,8 +2546,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (trb_comp_code == COMP_STALL_ERROR ||
 			xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
 							  trb_comp_code)) {
-			xhci_cleanup_halted_endpoint(xhci, slot_id,
-						     ep_index,
+			xhci_cleanup_halted_endpoint(xhci, ep,
 						     ep_ring->stream_id,
 						     NULL,
 						     EP_HARD_RESET);
@@ -2646,8 +2640,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (trb_comp_code == COMP_STALL_ERROR ||
 			xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
 							  trb_comp_code))
-			xhci_cleanup_halted_endpoint(xhci, slot_id,
-						     ep_index,
+			xhci_cleanup_halted_endpoint(xhci, ep,
 						     ep_ring->stream_id,
 						     td, EP_HARD_RESET);
 		goto cleanup;
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -997,6 +997,7 @@ struct xhci_interval_bw_table {
 
 struct xhci_virt_device {
+	int				slot_id;
 	struct usb_device		*udev;
 	/*
 	 * Commands to the hardware are passed an "input context" that