Commit 2a8f82c4 authored by Sarah Sharp, committed by Greg Kroah-Hartman

USB: xhci: Notify the xHC when a device is reset.

When a USB device is reset, the xHCI hardware must be notified so that it can
update its internal device state and disable all endpoints except control
endpoint 0.
Issue a Reset Device command after a USB device is successfully reset.
Wait on the command to finish, and then cache or free the disabled
endpoint rings.
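
In outline, the sequence added in xhci_reset_device() below looks like this
(condensed; locking and error handling omitted):

	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	xhci_queue_reset_device(xhci, slot_id);
	xhci_ring_cmd_db(xhci);
	wait_for_completion_interruptible_timeout(reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	/* on success, free or cache the rings for endpoints 1-30 */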

There are four different USB device states that the xHCI hardware tracks:
 - disabled/enabled - device connection has just been detected,
 - default - the device has been reset and has an address of 0,
 - addressed - the device has a non-zero address but no configuration has
   been set,
 - configured - a set configuration succeeded.
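
The slot context stores this state as a small integer; the values below mirror
the decoding done by the xhci_get_slot_state() helper added in this patch.  The
enum and its names are only illustrative and are not part of the patch:

	enum {
		SLOT_STATE_DISABLED_ENABLED = 0,	/* illustrative names only */
		SLOT_STATE_DEFAULT          = 1,
		SLOT_STATE_ADDRESSED        = 2,
		SLOT_STATE_CONFIGURED       = 3,
	};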

The USB core may issue a port reset when a device is in any state, but the
Reset Device command will fail for a 0.96 xHC if the device is not in the
addressed or configured state.  Don't consider this failure as an error,
but don't free any endpoint rings if this command fails.
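
Condensed from the completion handling in xhci_reset_device() below, the
intended behavior is roughly:

	switch (reset_device_cmd->status) {
	case COMP_EBADSLT:	/* 0.95 xHC: slot ID was not enabled */
	case COMP_CTX_STATE:	/* 0.96 xHC: not addressed/configured */
		ret = 0;	/* not an error; keep the endpoint rings */
		goto command_cleanup;
	case COMP_SUCCESS:
		break;		/* free or cache the disabled rings */
	}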

A storage driver may request that the USB device be reset during error
handling, so use GFP_NOIO instead of GFP_KERNEL while allocating memory
for the Reset Device command.
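
In the patch this is the allocation shown below; GFP_NOIO keeps the allocator
from starting new I/O during reclaim, which could deadlock while the storage
error handler is waiting on this very device:

	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd)
		return -ENOMEM;
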
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 6219c047
@@ -406,6 +406,25 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
	}
}

inline char *xhci_get_slot_state(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);

	switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
	case 0:
		return "enabled/disabled";
	case 1:
		return "default";
	case 2:
		return "addressed";
	case 3:
		return "configured";
	default:
		return "reserved";
	}
}

void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
	/* Fields are 32 bits wide, DMA addresses are in bytes */
...
@@ -1442,6 +1442,131 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
 */
int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
				__func__, slot_id);
		return -EINVAL;
	}

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].ring)
			continue;
		xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		last_freed_endpoint = i;
	}
	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
...
@@ -953,6 +953,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
				xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
@@ -2189,6 +2200,14 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
			false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
...
@@ -1210,6 +1210,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
inline char *xhci_get_slot_state(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1298,6 +1300,7 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index);
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_td *cur_td, struct xhci_dequeue_state *state);
...