Commit 9affb1d9 authored by Mathias Nyman, committed by Greg Kroah-Hartman

xhci: add helper to stop endpoint and wait for completion

Expose xhci_stop_endpoint_sync(), a synchronous variant of
xhci_queue_stop_endpoint().  This is useful for client drivers that are
using secondary interrupters and need to stop/clean up the current
session.  The stop endpoint command handler will also take care of
cleaning up the ring.

Modifications to repurpose the new API into existing stop endpoint
sequences were implemented by Wesley Cheng.
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Co-developed-by: Wesley Cheng <quic_wcheng@quicinc.com>
Signed-off-by: Wesley Cheng <quic_wcheng@quicinc.com>
Link: https://lore.kernel.org/r/20240217001017.29969-11-quic_wcheng@quicinc.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent edc47759
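
For context, here is how a client driver using a secondary interrupter might call the new helper when tearing down a session. This is an illustrative sketch only, not part of the commit: my_offload_teardown() is a hypothetical function, and how the driver obtained its xhci_hcd and xhci_virt_ep pointers is assumed; only xhci_stop_endpoint_sync() itself comes from this patch.

#include "xhci.h"	/* struct xhci_hcd, struct xhci_virt_ep, xhci_stop_endpoint_sync() */

/* Hypothetical caller, for illustration only; not part of this commit. */
static int my_offload_teardown(struct xhci_hcd *xhci, struct xhci_virt_ep *ep)
{
	int ret;

	/*
	 * Must be called from sleepable context: the helper queues the stop
	 * endpoint command, rings the doorbell, then blocks for up to three
	 * seconds on the command completion.  The command handler also cleans
	 * up the endpoint ring, so no separate ring cleanup is needed here.
	 */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0 /* no suspend */, GFP_KERNEL);
	if (ret < 0)
		xhci_warn(xhci, "stop endpoint failed: %d\n", ret);

	return ret;
}

Note that the helper takes and releases xhci->lock internally and then sleeps, so callers must not hold the lock; the xhci_endpoint_reset() hunks below are reworked to drop the lock before the call for exactly this reason.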
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2796,6 +2796,48 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
 	return -ENOMEM;
 }
 
+/*
+ * Synchronous XHCI stop endpoint helper.  Issues the stop endpoint command and
+ * waits for the command completion before returning.
+ */
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend,
+			    gfp_t gfp_flags)
+{
+	struct xhci_command *command;
+	unsigned long flags;
+	int ret;
+
+	command = xhci_alloc_command(xhci, true, gfp_flags);
+	if (!command)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
+				       ep->ep_index, suspend);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto out;
+	}
+
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	ret = wait_for_completion_timeout(command->completion, msecs_to_jiffies(3000));
+	if (!ret)
+		xhci_warn(xhci, "%s: Unable to stop endpoint.\n",
+				__func__);
+
+	if (command->status == COMP_COMMAND_ABORTED ||
+	    command->status == COMP_COMMAND_RING_STOPPED) {
+		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+		ret = -ETIME;
+	}
+out:
+	xhci_free_command(xhci, command);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync);
 
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
@@ -3119,7 +3161,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	struct xhci_virt_device *vdev;
 	struct xhci_virt_ep *ep;
 	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_command *stop_cmd, *cfg_cmd;
+	struct xhci_command *cfg_cmd;
 	unsigned int ep_index;
 	unsigned long flags;
 	u32 ep_flag;
@@ -3177,10 +3219,6 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
 		return;
 
-	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
-	if (!stop_cmd)
-		return;
-
 	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
 	if (!cfg_cmd)
 		goto cleanup;
@@ -3203,23 +3241,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 		goto cleanup;
 	}
 
-	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
-				       ep_index, 0);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	err = xhci_stop_endpoint_sync(xhci, ep, 0, GFP_NOWAIT);
 	if (err < 0) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		xhci_free_command(xhci, cfg_cmd);
 		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
 				__func__, err);
 		goto cleanup;
 	}
 
-	xhci_ring_cmd_db(xhci);
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
-	wait_for_completion(stop_cmd->completion);
-
 	spin_lock_irqsave(&xhci->lock, flags);
 
 	/* config ep command clears toggle if add and drop ep flags are set */
 	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
 	if (!ctrl_ctx) {
@@ -3251,7 +3282,6 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 
 	xhci_free_command(xhci, cfg_cmd);
 cleanup:
-	xhci_free_command(xhci, stop_cmd);
 	spin_lock_irqsave(&xhci->lock, flags);
 	if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
 		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1920,6 +1920,8 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
 void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
 unsigned int count_trbs(u64 addr, u64 len);
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+			    int suspend, gfp_t gfp_flags);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,