Commit fc42fcf3 authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] USB: ehci-hcd updates

This should apply to 2.5.59 too.  It seems to get rid of some pesky
hangs, on at least some hardware, but I won't have time to test it
on either VIA version ... maybe someone else will make the time?  :)

      New QH state prevents a re-activation race
	- nobody can un-halt a qh before its cleanup is done
	- resubmit-from-completion had this race (some usbtest cases)
	  as could some normal submit paths on busy endpoints (storage)
	- faster controllers would trip on this more consistently (sketch below)

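A minimal standalone sketch of that guard, using trimmed stand-in types (the
real driver uses struct ehci_qh and the QH_STATE_* values added by this
patch): qh_completions() parks the qh in a COMPLETING state for the whole
scan, so submit/resubmit paths can check the state instead of racing to
clear the token's HALT bit.

        enum qh_state { QH_IDLE, QH_LINKED, QH_COMPLETING };

        struct fake_qh {                   /* stand-in for struct ehci_qh */
                enum qh_state   state;
                unsigned int    hw_token;  /* bit 6 is the HALT status bit */
        };

        /* submit paths: only restart (un-halt) a queue that nobody is
         * in the middle of completing */
        static int can_unhalt(const struct fake_qh *qh)
        {
                return qh->state != QH_COMPLETING;
        }

        static void complete_queue(struct fake_qh *qh)
        {
                enum qh_state prev = qh->state;

                qh->state = QH_COMPLETING;  /* freeze token.HALT for the scan */
                /* ... walk the qtd list, retire finished qtds ... */
                qh->state = prev;           /* caller relinks or unlinks */
        }
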
      Queues of qtds
	- work harder to avoid ever modifying any qh in software
	- short reads block queue advance much less often
	- be more cautious with large (>~19KB) unaligned buffers (sketch below)

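The buffer-handling change is easiest to see in qtd_fill() (diff below): if
a qtd cannot map the whole remaining buffer, its byte count is now trimmed
to a multiple of maxpacket, so a short packet can only land on the
transfer's last qtd. A standalone paraphrase, with hypothetical names:

        #include <stddef.h>

        /* 'count' = bytes this qtd could map, 'len' = what the caller wanted */
        static size_t trim_to_maxpacket(size_t count, size_t len, size_t maxpacket)
        {
                if (count != len && maxpacket)      /* more qtds will follow ... */
                        count -= count % maxpacket; /* ... end on a packet boundary */
                return count;
        }
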
      Unlinking urbs
	- if qtd unlinked is at queue head, use its latest status
	  (main effect is reporting bytes from partial transfers)
	- another new qh state:  defer qh unlink if IAA is busy
	  (eliminates a busy-wait loop in a rare scenario; sketch below)

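Roughly, the deferred unlink works like the sketch below (stand-in types;
the driver chains struct ehci_qh through its new ->reclaim pointer): when an
IAA cycle is already in flight, the dequeue path marks the qh UNLINK_WAIT
and appends it to the reclaim chain, and end_unlink_async() starts the next
unlink once the doorbell fires.

        struct fake_qh {                      /* stand-in for struct ehci_qh */
                struct fake_qh  *reclaim;     /* next qh waiting for its unlink */
                int             state;
        };

        #define QH_STATE_UNLINK_WAIT 4        /* linked, and on the reclaim queue */

        /* dequeue path, IAA busy: queue behind the qh being reclaimed now */
        static void defer_unlink(struct fake_qh *reclaim_head, struct fake_qh *qh)
        {
                struct fake_qh *last = reclaim_head;

                while (last->reclaim)
                        last = last->reclaim;
                qh->state = QH_STATE_UNLINK_WAIT;
                last->reclaim = qh;
        }
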
      Enable features to improve bus utilization
	- PCI MWI ... can produce better write throughput; and by
	  using the right cacheline size, sometimes read throughput too
	- USB NAK throttle ... sometimes reduces PCI access rates (sketch below)

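Concretely (and loosely): ehci_start() now calls pci_set_mwi() on the
controller's pci_dev, and EHCI_TUNE_RL_HS goes from 0 to 4, which lands in
the NAK count reload field (bits 31:28 of the qh's hw_info1) for high-speed
control and bulk endpoints only; interrupt endpoints keep RL=0 (see EHCI
4.9). A compressed sketch of the packing:

        #define EHCI_TUNE_RL_HS 4     /* nak throttle; see EHCI spec 4.9 */

        /* RL occupies bits 31:28 of the qh endpoint-characteristics word */
        static unsigned int info1_with_rl(unsigned int info1, int is_interrupt)
        {
                if (!is_interrupt)
                        info1 |= (unsigned int)EHCI_TUNE_RL_HS << 28;
                return info1;
        }
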
      Other
	- async dump shows more funky qh+qtd states, and NAK count
	  (marker legend below)
	- cope with some of the sprintf weirdness
	- periodic dump is usually smaller (so is that schedule)
	- minor cleanups
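
For reading those dumps, a legend compressed from the token_mark() and
qh_lines() code in this patch (per-qh and per-qtd markers):

        /*  '*'  active (or this qtd is the qh's current one)
         *  '-'  halted
         *  '+'  the qh's hw_qtd_next points at this qtd
         *  '/'  a short read would advance through hw_alt_next
         *  '#'  hw_alt_next targets the async head: queue advance is blocked
         *  '.'  alt_next has its T bit set, so hw_qtd_next gets used instead
         *  ' '  not an IN transfer, or zero length: nothing special
         */
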
parent 9b224d53
@@ -277,7 +277,26 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { }
default: tmp = '?'; break; \
}; tmp; })
static void qh_lines (struct ehci_qh *qh, char **nextp, unsigned *sizep)
static inline char token_mark (u32 token)
{
token = le32_to_cpu (token);
if (token & QTD_STS_ACTIVE)
return '*';
if (token & QTD_STS_HALT)
return '-';
if (QTD_PID (token) != 1 /* not IN: OUT or SETUP */
|| QTD_LENGTH (token) == 0)
return ' ';
/* tries to advance through hw_alt_next */
return '/';
}
static void qh_lines (
struct ehci_hcd *ehci,
struct ehci_qh *qh,
char **nextp,
unsigned *sizep
)
{
u32 scratch;
u32 hw_curr;
@@ -286,26 +305,49 @@ static void qh_lines (struct ehci_qh *qh, char **nextp, unsigned *sizep)
unsigned temp;
unsigned size = *sizep;
char *next = *nextp;
char mark;
mark = token_mark (qh->hw_token);
if (mark == '/') { /* qh_alt_next controls qh advance? */
if ((qh->hw_alt_next & QTD_MASK) == ehci->async->hw_alt_next)
mark = '#'; /* blocked */
else if (qh->hw_alt_next & cpu_to_le32 (0x01))
mark = '.'; /* use hw_qtd_next */
/* else alt_next points to some other qtd */
}
scratch = cpu_to_le32p (&qh->hw_info1);
hw_curr = cpu_to_le32p (&qh->hw_current);
hw_curr = (mark == '*') ? cpu_to_le32p (&qh->hw_current) : 0;
temp = snprintf (next, size,
"qh/%p dev%d %cs ep%d %08x %08x (%08x %08x)",
"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f,
scratch, cpu_to_le32p (&qh->hw_info2),
hw_curr, cpu_to_le32p (&qh->hw_token));
cpu_to_le32p (&qh->hw_token), mark,
(cpu_to_le32 (0x8000000) & qh->hw_token)
? "data0" : "data1",
(cpu_to_le32p (&qh->hw_alt_next) >> 1) & 0x0f);
size -= temp;
next += temp;
/* hc may be modifying the list as we read it ... */
list_for_each (entry, &qh->qtd_list) {
td = list_entry (entry, struct ehci_qtd, qtd_list);
scratch = cpu_to_le32p (&td->hw_token);
mark = ' ';
if (hw_curr == td->qtd_dma)
mark = '*';
else if (qh->hw_qtd_next == td->qtd_dma)
mark = '+';
else if (QTD_LENGTH (scratch)) {
if (td->hw_alt_next == ehci->async->hw_alt_next)
mark = '#';
else if (td->hw_alt_next != EHCI_LIST_END)
mark = '/';
}
temp = snprintf (next, size,
"\n\t%std/%p %s len=%d %08x urb %p",
(hw_curr == td->qtd_dma) ? "*" : "",
td, ({ char *tmp;
"\n\t%p%c%s len=%d %08x urb %p",
td, mark, ({ char *tmp;
switch ((scratch>>8)&0x03) {
case 0: tmp = "out"; break;
case 1: tmp = "in"; break;
@@ -315,13 +357,27 @@ static void qh_lines (struct ehci_qh *qh, char **nextp, unsigned *sizep)
(scratch >> 16) & 0x7fff,
scratch,
td->urb);
if (temp < 0)
temp = 0;
else if (size < temp)
temp = size;
size -= temp;
next += temp;
if (temp == size)
goto done;
}
temp = snprintf (next, size, "\n");
*sizep = size - temp;
*nextp = next + temp;
if (temp < 0)
temp = 0;
else if (size < temp)
temp = size;
size -= temp;
next += temp;
done:
*sizep = size;
*nextp = next;
}
static ssize_t
@@ -344,14 +400,15 @@ show_async (struct device *dev, char *buf)
* one QH per line, and TDs we know about
*/
spin_lock_irqsave (&ehci->lock, flags);
for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh)
qh_lines (qh, &next, &size);
if (ehci->reclaim) {
for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
qh_lines (ehci, qh, &next, &size);
if (ehci->reclaim && size > 0) {
temp = snprintf (next, size, "\nreclaim =\n");
size -= temp;
next += temp;
qh_lines (ehci->reclaim, &next, &size);
for (qh = ehci->reclaim; size > 0 && qh; qh = qh->reclaim)
qh_lines (ehci, qh, &next, &size);
}
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -421,7 +478,7 @@ show_periodic (struct device *dev, char *buf)
scratch & 0x007f,
(scratch >> 8) & 0x000f,
p.qh->usecs, p.qh->c_usecs,
scratch >> 16);
0x7ff & (scratch >> 16));
/* FIXME TD info too */
@@ -490,7 +547,8 @@ show_registers (struct device *dev, char *buf)
/* Capability Registers */
i = readw (&ehci->caps->hci_version);
temp = snprintf (next, size, "EHCI %x.%02x, hcd state %d\n",
temp = snprintf (next, size,
"EHCI %x.%02x, hcd state %d (version " DRIVER_VERSION ")\n",
i >> 8, i & 0x0ff, ehci->hcd.state);
size -= temp;
next += temp;
......
@@ -94,7 +94,7 @@
* 2001-June Works with usb-storage and NEC EHCI on 2.4
*/
#define DRIVER_VERSION "2002-Nov-29"
#define DRIVER_VERSION "2003-Jan-22"
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
@@ -110,10 +110,11 @@ static const char hcd_name [] = "ehci-hcd";
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 0 /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
#define EHCI_WATCHDOG_JIFFIES (HZ/100) /* arbitrary; ~10 msec */
#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
@@ -416,13 +417,26 @@ static int ehci_start (struct usb_hcd *hcd)
ehci_info (ehci, "enabled 64bit PCI DMA\n");
}
/* help hc dma work well with cachelines */
pci_set_mwi (ehci->hcd.pdev);
/* clear interrupt enables, set irq latency */
temp = readl (&ehci->regs->command) & 0xff;
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp |= 1 << (16 + log2_irq_thresh);
// if hc can park (ehci >= 0.96), default is 3 packets per async QH
// keeping default periodic framelist size
if (HCC_PGM_FRAMELISTLEN (hcc_params)) {
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
switch (EHCI_TUNE_FLS) {
case 0: ehci->periodic_size = 1024; break;
case 1: ehci->periodic_size = 512; break;
case 2: ehci->periodic_size = 256; break;
default: BUG ();
}
}
temp &= ~(CMD_IAAD | CMD_ASE | CMD_PSE),
// Philips, Intel, and maybe others need CMD_RUN before the
// root hub will detect new devices (why?); NEC doesn't
@@ -759,7 +773,6 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct ehci_qh *qh;
unsigned long flags;
int maybe_irq = 1;
spin_lock_irqsave (&ehci->lock, flags);
switch (usb_pipetype (urb->pipe)) {
@@ -769,23 +782,23 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
while (qh->qh_state == QH_STATE_LINKED
/* if we need to use IAA and it's busy, defer */
if (qh->qh_state == QH_STATE_LINKED
&& ehci->reclaim
&& HCD_IS_RUNNING (ehci->hcd.state)
) {
spin_unlock_irqrestore (&ehci->lock, flags);
struct ehci_qh *last;
if (maybe_irq) {
if (in_interrupt ())
return -EAGAIN;
maybe_irq = 0;
}
/* let pending unlinks complete, so this can start */
wait_ms (1);
for (last = ehci->reclaim;
last->reclaim;
last = last->reclaim)
continue;
qh->qh_state = QH_STATE_UNLINK_WAIT;
last->reclaim = qh;
spin_lock_irqsave (&ehci->lock, flags);
}
if (!HCD_IS_RUNNING (ehci->hcd.state) && ehci->reclaim)
/* bypass IAA if the hc can't care */
} else if (!HCD_IS_RUNNING (ehci->hcd.state) && ehci->reclaim)
end_unlink_async (ehci, NULL);
/* something else might have unlinked the qh by now */
......
@@ -75,8 +75,6 @@ static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags)
qtd = pci_pool_alloc (ehci->qtd_pool, flags, &dma);
if (qtd != 0) {
ehci_qtd_init (qtd, dma);
if (ehci->async)
qtd->hw_alt_next = ehci->async->hw_alt_next;
}
return qtd;
}
......
@@ -43,7 +43,8 @@
/* fill a qtd, returning how much of the buffer we were able to queue up */
static int
qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
int token, int maxpacket)
{
int i, count;
u64 addr = buf;
@@ -69,6 +70,10 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
else
count = len;
}
/* short packets may only terminate transfers */
if (count != len)
count -= (count % maxpacket);
}
qtd->hw_token = cpu_to_le32 ((count << 16) | token);
qtd->length = count;
@@ -85,7 +90,7 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
qh->hw_current = 0;
qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
qh->hw_alt_next = ehci->async->hw_alt_next;
qh->hw_alt_next = EHCI_LIST_END;
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
@@ -96,7 +101,7 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
static inline void qtd_copy_status (
static void qtd_copy_status (
struct ehci_hcd *ehci,
struct urb *urb,
size_t length,
@@ -224,12 +229,24 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs)
{
struct ehci_qtd *last = 0, *end = qh->dummy;
struct list_head *entry, *tmp;
int stopped = 0;
int stopped;
unsigned count = 0;
int do_status = 0;
u8 state;
if (unlikely (list_empty (&qh->qtd_list)))
return count;
/* completions (or tasks on other cpus) must never clobber HALT
* till we've gone through and cleaned everything up, even when
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
@@ -261,7 +278,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs)
rmb ();
token = le32_to_cpu (qtd->hw_token);
stopped = stopped
|| (qh->qh_state == QH_STATE_IDLE)
|| (HALT_BIT & qh->hw_token) != 0
|| (ehci->hcd.state == USB_STATE_HALT);
@@ -271,36 +287,53 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs)
/* magic dummy for short reads; won't advance */
if (IS_SHORT_READ (token)
&& !(token & QTD_STS_HALT)
&& ehci->async->hw_alt_next
== qh->hw_alt_next)
&& (qh->hw_alt_next & QTD_MASK)
== ehci->async->hw_alt_next) {
stopped = 1;
goto halt;
}
/* stop scanning when we reach qtds the hc is using */
} else if (likely (!stopped)) {
last = 0;
break;
} else {
/* ignore active qtds unless some previous qtd
/* ignore active urbs unless some previous qtd
* for the urb faulted (including short read) or
* its urb was canceled. we may patch qh or qtds.
*/
if ((token & QTD_STS_ACTIVE)
&& urb->status == -EINPROGRESS) {
last = 0;
if (likely (urb->status == -EINPROGRESS))
continue;
/* issue status after short control reads */
if (unlikely (do_status != 0)
&& QTD_PID (token) == 0 /* OUT */) {
do_status = 0;
continue;
}
/* token in overlay may be most current */
if (state == QH_STATE_IDLE
&& cpu_to_le32 (qtd->qtd_dma)
== qh->hw_current)
token = le32_to_cpu (qh->hw_token);
/* force halt for unlinked or blocked qh, so we'll
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
if ((HALT_BIT & qh->hw_token) == 0) {
halt:
qh->hw_token |= HALT_BIT;
wmb ();
stopped = 1;
}
}
/* remove it from the queue */
spin_lock (&urb->lock);
qtd_copy_status (ehci, urb, qtd->length, token);
do_status = (urb->status == -EREMOTEIO)
&& usb_pipecontrol (urb->pipe);
spin_unlock (&urb->lock);
if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
@@ -319,6 +352,9 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs)
ehci_qtd_free (ehci, last);
}
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
/* update qh after fault cleanup */
if (unlikely ((HALT_BIT & qh->hw_token) != 0)) {
qh_update (ehci, qh,
@@ -367,7 +403,7 @@ qh_urb_transaction (
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
int len, maxpacket;
int is_input, status_patch = 0;
int is_input;
u32 token;
/*
@@ -388,7 +424,7 @@ qh_urb_transaction (
if (usb_pipecontrol (urb->pipe)) {
/* SETUP pid */
qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8));
token | (2 /* "setup" */ << 8), 8);
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
@@ -399,10 +435,6 @@ qh_urb_transaction (
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
if (len > 0 && is_input
&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
status_patch = 1;
}
/*
@@ -413,6 +445,7 @@ qh_urb_transaction (
else
buf = 0;
// FIXME this 'buf' check break some zlps...
if (!buf || is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
@@ -427,9 +460,11 @@ qh_urb_transaction (
for (;;) {
int this_qtd_len;
this_qtd_len = qtd_fill (qtd, buf, len, token);
this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket);
len -= this_qtd_len;
buf += this_qtd_len;
if (is_input)
qtd->hw_alt_next = ehci->async->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
@@ -447,6 +482,13 @@ qh_urb_transaction (
list_add_tail (&qtd->qtd_list, head);
}
/* unless the bulk/interrupt caller wants a chance to clean
* up after short reads, hc should advance qh past this urb
*/
if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol (urb->pipe)))
qtd->hw_alt_next = EHCI_LIST_END;
/*
* control requests may need a terminating data "status" ack;
* bulk ones may need a terminating short packet (zero length).
@@ -473,23 +515,10 @@ qh_urb_transaction (
list_add_tail (&qtd->qtd_list, head);
/* never any data in such packets */
qtd_fill (qtd, 0, 0, token);
qtd_fill (qtd, 0, 0, token, 0);
}
}
/* if we're permitting a short control read, we want the hardware to
* just continue after short data and send the status ack. it can do
* that on the last data packet (typically the only one). for other
* packets, software fixup is needed (in qh_completions).
*/
if (status_patch) {
struct ehci_qtd *prev;
prev = list_entry (qtd->qtd_list.prev,
struct ehci_qtd, qtd_list);
prev->hw_alt_next = QTD_NEXT (qtd->qtd_dma);
}
/* by default, enable interrupt on urb completion */
if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);
@@ -611,7 +640,8 @@ qh_make (
case USB_SPEED_FULL:
/* EPS 0 means "full" */
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
info1 |= (1 << 27); /* for TT */
info1 |= 1 << 14; /* toggle from qtd */
@@ -628,12 +658,13 @@ qh_make (
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
info1 |= (EHCI_TUNE_RL_HS << 28);
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
@@ -769,8 +800,7 @@ static struct ehci_qh *qh_append_tds (
&& !usb_pipecontrol (urb->pipe)) {
/* "never happens": drivers do stall cleanup right */
if (qh->qh_state != QH_STATE_IDLE
&& (cpu_to_le32 (QTD_STS_HALT)
& qh->hw_token) == 0)
&& qh->qh_state != QH_STATE_COMPLETING)
ehci_warn (ehci, "clear toggle dev%d "
"ep%d%s: not idle\n",
usb_pipedevice (urb->pipe),
@@ -809,7 +839,6 @@ static struct ehci_qh *qh_append_tds (
__list_splice (qtd_list, qh->qtd_list.prev);
ehci_qtd_init (qtd, qtd->qtd_dma);
qtd->hw_alt_next = ehci->async->hw_alt_next;
qh->dummy = qtd;
/* hc must see the new dummy at list end */
@@ -877,9 +906,12 @@ submit_async (
/* the async qh for the qtds being reclaimed are now unlinked from the HC */
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs)
{
struct ehci_qh *qh = ehci->reclaim;
struct ehci_qh *next;
del_timer (&ehci->watchdog);
@@ -890,6 +922,10 @@ static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs)
ehci->reclaim = 0;
ehci->reclaim_ready = 0;
/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
next = qh->reclaim;
qh->reclaim = 0;
qh_completions (ehci, qh, regs);
if (!list_empty (&qh->qtd_list)
@@ -909,6 +945,9 @@ static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs)
jiffies + EHCI_ASYNC_JIFFIES);
}
}
if (next)
start_unlink_async (ehci, next);
}
/* makes sure the async qh will become idle */
@@ -921,7 +960,8 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
#ifdef DEBUG
if (ehci->reclaim
|| qh->qh_state != QH_STATE_LINKED
|| (qh->qh_state != QH_STATE_LINKED
&& qh->qh_state != QH_STATE_UNLINK_WAIT)
#ifdef CONFIG_SMP
// this macro lies except on SMP compiles
|| !spin_is_locked (&ehci->lock)
@@ -953,6 +993,9 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
wmb ();
if (unlikely (ehci->hcd.state == USB_STATE_HALT)) {
/* if (unlikely (qh->reclaim != 0))
* this will recurse, probably not much
*/
end_unlink_async (ehci, NULL);
return;
}
......
@@ -236,12 +236,12 @@ struct ehci_qtd {
/* the rest is HCD-private */
dma_addr_t qtd_dma; /* qtd address */
struct list_head qtd_list; /* sw qtd list */
/* dma same in urb's qtds, except 1st control qtd (setup buffer) */
struct urb *urb; /* qtd's urb */
size_t length; /* length of buffer */
} __attribute__ ((aligned (32)));
#define QTD_MASK cpu_to_le32 (~0x1f) /* mask NakCnt+T in qh->hw_alt_next */
/*-------------------------------------------------------------------------*/
/* type tag from {qh,itd,sitd,fstn}->hw_next */
@@ -305,6 +305,7 @@ struct ehci_qh {
union ehci_shadow qh_next; /* ptr to qh; or periodic */
struct list_head qtd_list; /* sw qtd list */
struct ehci_qtd *dummy;
struct ehci_qh *reclaim; /* next to reclaim */
atomic_t refcount;
unsigned stamp;
@@ -313,6 +314,8 @@ struct ehci_qh {
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
#define QH_STATE_IDLE 3 /* HC doesn't see this */
#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
/* periodic schedule info */
u8 usecs; /* intr bandwidth */
......