Commit f4c8f03c authored by Lu Baolu, committed by Greg Kroah-Hartman

usb: xhci: clean up error_bitmask usage

In xhci_handle_event(), when errors are detected, the driver always sets
a bit in error_bitmask (a member of the xhci private driver data).
That means users have to retrieve and decode the value of error_bitmask
from the xhci private driver data if they want to know whether those
errors ever happened in xhci_handle_event(). Otherwise, those errors are
just silently ignored.

This patch cleans this up by replacing the setting of error_bitmask
with calls to the kernel print functions, so that users can easily check
and report any errors that happen in xhci_handle_event().
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2a72126d
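To illustrate the change, the before/after pattern is excerpted below from the first hunk of the diff that follows (the NEC_GET_FW check in xhci_handle_cmd_nec_get_fw()); the other call sites are converted the same way, except that xhci_handle_event() additionally returns -ENOMEM when the event ring has not been allocated.

Before, the error is only latched into the driver-private bitmask:

	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci->error_bitmask |= 1 << 6;
		return;
	}

After, the error is reported immediately through the standard xhci print helper:

	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}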
...@@ -1180,7 +1180,7 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, ...@@ -1180,7 +1180,7 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
struct xhci_event_cmd *event) struct xhci_event_cmd *event)
{ {
if (!(xhci->quirks & XHCI_NEC_HOST)) { if (!(xhci->quirks & XHCI_NEC_HOST)) {
xhci->error_bitmask |= 1 << 6; xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
return; return;
} }
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
...@@ -1322,14 +1322,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1322,14 +1322,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_trb = xhci->cmd_ring->dequeue; cmd_trb = xhci->cmd_ring->dequeue;
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
cmd_trb); cmd_trb);
/* Is the command ring deq ptr out of sync with the deq seg ptr? */ /*
if (cmd_dequeue_dma == 0) { * Check whether the completion event is for our internal kept
xhci->error_bitmask |= 1 << 4; * command.
return; */
} if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
/* Does the DMA address match our internal dequeue pointer address? */ xhci_warn(xhci,
if (cmd_dma != (u64) cmd_dequeue_dma) { "ERROR mismatched command completion event\n");
xhci->error_bitmask |= 1 << 5;
return; return;
} }
...@@ -1415,7 +1414,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1415,7 +1414,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
break; break;
default: default:
/* Skip over unknown commands on the event ring */ /* Skip over unknown commands on the event ring */
xhci->error_bitmask |= 1 << 6; xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
break; break;
} }
...@@ -1516,10 +1515,10 @@ static void handle_port_status(struct xhci_hcd *xhci, ...@@ -1516,10 +1515,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
bool bogus_port_status = false; bool bogus_port_status = false;
/* Port status change events always have a successful completion code */ /* Port status change events always have a successful completion code */
if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) { if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); xhci_warn(xhci,
xhci->error_bitmask |= 1 << 8; "WARN: xHC returned failed port status event\n");
}
port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
...@@ -2522,18 +2521,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci) ...@@ -2522,18 +2521,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int update_ptrs = 1; int update_ptrs = 1;
int ret; int ret;
/* Event ring hasn't been allocated yet. */
if (!xhci->event_ring || !xhci->event_ring->dequeue) { if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci->error_bitmask |= 1 << 1; xhci_err(xhci, "ERROR event ring not ready\n");
return 0; return -ENOMEM;
} }
event = xhci->event_ring->dequeue; event = xhci->event_ring->dequeue;
/* Does the HC or OS own the TRB? */ /* Does the HC or OS own the TRB? */
if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
xhci->event_ring->cycle_state) { xhci->event_ring->cycle_state)
xhci->error_bitmask |= 1 << 2;
return 0; return 0;
}
/* /*
* Barrier between reading the TRB_CYCLE (valid) flag above and any * Barrier between reading the TRB_CYCLE (valid) flag above and any
...@@ -2541,7 +2539,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci) ...@@ -2541,7 +2539,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
*/ */
rmb(); rmb();
/* FIXME: Handle more event types. */ /* FIXME: Handle more event types. */
switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_COMPLETION): case TRB_TYPE(TRB_COMPLETION):
handle_cmd_completion(xhci, &event->event_cmd); handle_cmd_completion(xhci, &event->event_cmd);
break; break;
...@@ -2551,9 +2549,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci) ...@@ -2551,9 +2549,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
break; break;
case TRB_TYPE(TRB_TRANSFER): case TRB_TYPE(TRB_TRANSFER):
ret = handle_tx_event(xhci, &event->trans_event); ret = handle_tx_event(xhci, &event->trans_event);
if (ret < 0) if (ret >= 0)
xhci->error_bitmask |= 1 << 9;
else
update_ptrs = 0; update_ptrs = 0;
break; break;
case TRB_TYPE(TRB_DEV_NOTE): case TRB_TYPE(TRB_DEV_NOTE):
...@@ -2564,7 +2560,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci) ...@@ -2564,7 +2560,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
TRB_TYPE(48)) TRB_TYPE(48))
handle_vendor_event(xhci, event); handle_vendor_event(xhci, event);
else else
xhci->error_bitmask |= 1 << 3; xhci_warn(xhci, "ERROR unknown event type %d\n",
TRB_FIELD_TO_TYPE(
le32_to_cpu(event->event_cmd.flags)));
} }
/* Any of the above functions may drop and re-acquire the lock, so check /* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive. * to make sure a watchdog timer didn't mark the host as non-responsive.
......
...@@ -1618,8 +1618,6 @@ struct xhci_hcd { ...@@ -1618,8 +1618,6 @@ struct xhci_hcd {
#define XHCI_STATE_DYING (1 << 0) #define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1) #define XHCI_STATE_HALTED (1 << 1)
#define XHCI_STATE_REMOVING (1 << 2) #define XHCI_STATE_REMOVING (1 << 2)
/* Statistics */
int error_bitmask;
unsigned int quirks; unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0) #define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1) #define XHCI_RESET_EP_QUIRK (1 << 1)
......