Commit 9a53bdfb authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] ohci-hcd, fix urb unlink races

This patch:

- Fixes a longstanding urb unlink race by switching to a single queue
  for EDs being unlinked.  The previous two-queue scheme was sensitive to
  IRQ latencies:  one extra millisecond would make it use the wrong queue.
  The updated scheme should handle latencies of up to 32K milliseconds,
  roughly 32 seconds (Cthulhu forfend :), and slightly shrinks object code
  size.  A standalone sketch of the wrap-aware frame check follows the
  commit metadata below.

- Related (mostly) cleanup:  some functions and one ED field are renamed,
  the ED layout is a smidgen more compact (even though it holds more data),
  the driver version string no longer reflects CVS, and a debug message
  only comes out in verbose mode.
parent 1fe62d53
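The crux of the single-queue scheme is the wrap-aware comparison against the HC's 16-bit frame counter (the tick_before() macro added in the diff below).  The following standalone program is an editor's sketch, not part of the patch: the macro is copied from the diff, while the typedefs, the sample ed_tick value, and the main() harness are invented purely for illustration.

/* Editor's illustration only -- not part of the patch. */
#include <stdio.h>

typedef unsigned short	u16;
typedef signed short	s16;

/* copied from the patch; everything else in this sketch is made up */
#define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)

int main (void)
{
	/* pretend an ED was queued for unlink just before frame_no wraps:
	 * ed->tick = frame_no + 1, roughly the first frame the HC ignores it
	 */
	u16	ed_tick = 0xfff0 + 1;
	u16	now;

	/* frame_no is 16 bits wide and advances once per 1 msec frame, so it
	 * wraps about every 65 seconds.  The signed subtraction keeps the
	 * "is the HC done with this ED yet?" test valid across the wrap,
	 * for up to 32767 frames (~32 seconds) of interrupt latency.
	 */
	for (now = 0xfff0; now != 0x0010; now++)
		printf ("frame 0x%04x: %s\n", (unsigned) now,
			tick_before (now, ed_tick)
				? "HC may still cache the ED -- skip it"
				: "safe to finish the unlink");
	return 0;
}

In finish_unlinks() this is exactly the "if (tick_before (tick, ed->tick)) continue;" test: an ED stays on ed_rm_list until its recorded tick has been reached, however late the SF interrupt fires.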
@@ -12,6 +12,9 @@
  *
  * History:
  *
+ * 2002/06/01 remember frame when HC won't see EDs any more; use that info
+ *	to fix urb unlink races caused by interrupt latency assumptions;
+ *	minor ED field and function naming updates
  * 2002/01/18 package as a patch for 2.5.3; this should match the
  *	2.4.17 kernel modulo some bugs being fixed.
  *
@@ -106,7 +109,7 @@
  * - lots more testing!!
  */
-#define DRIVER_VERSION "$Revision: 1.9 $"
+#define DRIVER_VERSION "2002-Jun-01"
 #define DRIVER_AUTHOR "Roman Weissgaerber <weissg@vienna.at>, David Brownell"
 #define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
@@ -287,7 +290,7 @@ static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
 		}
 		urb_priv->state = URB_DEL;
-		ed_unlink (urb->dev, urb_priv->ed);
+		start_urb_unlink (ohci, urb_priv->ed);
 		spin_unlock_irqrestore (&ohci->lock, flags);
 	} else {
 		/*
@@ -508,16 +511,15 @@ static void ohci_irq (struct usb_hcd *hcd)
 	/* could track INTR_SO to reduce available PCI/... bandwidth */
 
-	// FIXME:  this assumes SOF (1/ms) interrupts don't get lost...
-	if (ints & OHCI_INTR_SF) {
-		unsigned int frame = le16_to_cpu (ohci->hcca->frame_no) & 1;
+	/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
+	 * when there's still unlinking to be done (next frame).
+	 */
+	spin_lock (&ohci->lock);
+	if (ohci->ed_rm_list)
+		finish_unlinks (ohci, le16_to_cpu (ohci->hcca->frame_no));
+	if ((ints & OHCI_INTR_SF) != 0 && !ohci->ed_rm_list)
 		writel (OHCI_INTR_SF, &regs->intrdisable);
-		if (ohci->ed_rm_list [!frame] != NULL) {
-			dl_del_list (ohci, !frame);
-		}
-		if (ohci->ed_rm_list [frame] != NULL)
-			writel (OHCI_INTR_SF, &regs->intrenable);
-	}
+	spin_unlock (&ohci->lock);
 
 	writel (ints, &regs->intrstatus);
 	writel (OHCI_INTR_MIE, &regs->intrenable);
@@ -719,8 +721,7 @@ static int hc_restart (struct ohci_hcd *ohci)
 	for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;
 
 	/* no EDs to remove */
-	ohci->ed_rm_list [0] = NULL;
-	ohci->ed_rm_list [1] = NULL;
+	ohci->ed_rm_list = NULL;
 
 	/* empty control and bulk lists */
 	ohci->ed_isotail = NULL;
@@ -802,7 +803,7 @@ static int ohci_resume (struct usb_hcd *hcd)
 	ohci->disabled = 0;
 	ohci->sleeping = 0;
 	ohci->hc_control = OHCI_CONTROL_INIT | OHCI_USB_OPER;
-	if (!ohci->ed_rm_list [0] && !ohci->ed_rm_list [1]) {
+	if (!ohci->ed_rm_list) {
 		if (ohci->ed_controltail)
 			ohci->hc_control |= OHCI_CTRL_CLE;
 		if (ohci->ed_bulktail)
......
@@ -208,8 +208,7 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
 		}
 		ed->ed_prev = ohci->ed_controltail;
 		if (!ohci->ed_controltail
-				&& !ohci->ed_rm_list [0]
-				&& !ohci->ed_rm_list [1]
+				&& !ohci->ed_rm_list
 				&& !ohci->sleeping
 				) {
 			ohci->hc_control |= OHCI_CTRL_CLE;
@@ -227,8 +226,7 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
 		}
 		ed->ed_prev = ohci->ed_bulktail;
 		if (!ohci->ed_bulktail
-				&& !ohci->ed_rm_list [0]
-				&& !ohci->ed_rm_list [1]
+				&& !ohci->ed_rm_list
 				&& !ohci->sleeping
 				) {
 			ohci->hc_control |= OHCI_CTRL_BLE;
@@ -240,16 +238,16 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
 	case PIPE_INTERRUPT:
 		load = ed->int_load;
 		interval = ep_2_n_interval (ed->int_period);
-		ed->int_interval = interval;
+		ed->interval = interval;
 		int_branch = ep_int_balance (ohci, interval, load);
 		ed->int_branch = int_branch;
 
 		for (i = 0; i < ep_rev (6, interval); i += inter) {
 			inter = 1;
 			for (ed_p = & (ohci->hcca->int_table [ep_rev (5, i) + int_branch]);
-				(*ed_p != 0) && ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->int_interval >= interval);
+				(*ed_p != 0) && ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->interval >= interval);
 				ed_p = & ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->hwNextED))
-					inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->int_interval);
+					inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->interval);
 			ed->hwNextED = *ed_p;
 			*ed_p = cpu_to_le32 (ed->dma);
 		}
@@ -260,7 +258,7 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
 	case PIPE_ISOCHRONOUS:
 		ed->hwNextED = 0;
-		ed->int_interval = 1;
+		ed->interval = 1;
 		if (ohci->ed_isotail != NULL) {
 			ohci->ed_isotail->hwNextED = cpu_to_le32 (ed->dma);
 			ed->ed_prev = ohci->ed_isotail;
@@ -270,7 +268,7 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
 			for (ed_p = & (ohci->hcca->int_table [ep_rev (5, i)]);
 				*ed_p != 0;
 				ed_p = & ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->hwNextED))
-					inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->int_interval);
+					inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->interval);
 			*ed_p = cpu_to_le32 (ed->dma);
 		}
 		ed->ed_prev = NULL;
@@ -311,7 +309,7 @@ static void periodic_unlink (
  * the link from the ed still points to another operational ed or 0
  * so the HC can eventually finish the processing of the unlinked ed
  */
-static int ep_unlink (struct ohci_hcd *ohci, struct ed *ed)
+static int start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
 {
 	int	i;
@@ -357,8 +355,8 @@ static int ep_unlink (struct ohci_hcd *ohci, struct ed *ed)
 		break;
 
 	case PIPE_INTERRUPT:
-		periodic_unlink (ohci, ed, ed->int_branch, ed->int_interval);
-		for (i = ed->int_branch; i < NUM_INTS; i += ed->int_interval)
+		periodic_unlink (ohci, ed, ed->int_branch, ed->interval);
+		for (i = ed->int_branch; i < NUM_INTS; i += ed->interval)
 			ohci->ohci_int_load [i] -= ed->int_load;
 #ifdef OHCI_VERBOSE_DEBUG
 		ohci_dump_periodic (ohci, "UNLINK_INT");
@@ -384,6 +382,10 @@ static int ep_unlink (struct ohci_hcd *ohci, struct ed *ed)
 	/* FIXME ED's "unlink" state is indeterminate;
 	 * the HC might still be caching it (till SOF).
+	 * - use ed_rm_list and finish_unlinks(), adding some state that
+	 *   prevents clobbering hw linkage before the appropriate SOF
+	 * - a speedup:  when only one urb is queued on the ed, save 1msec
+	 *   by making start_urb_unlink() use this routine to deschedule.
 	 */
 	ed->state = ED_UNLINK;
 	return 0;
@@ -478,11 +480,8 @@ static struct ed *ep_add_ed (
  * put the ep on the rm_list and stop the bulk or ctrl list
  * real work is done at the next start frame (SF) hardware interrupt
  */
-static void ed_unlink (struct usb_device *usb_dev, struct ed *ed)
+static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
 {
-	unsigned int	frame;
-	struct ohci_hcd	*ohci = hcd_to_ohci (usb_dev->bus->hcpriv);
-
 	/* already pending? */
 	if (ed->state & ED_URB_DEL)
 		return;
@@ -503,9 +502,15 @@ static void ed_unlink (struct usb_device *usb_dev, struct ed *ed)
 		break;
 	}
 
-	frame = le16_to_cpu (ohci->hcca->frame_no) & 0x1;
-	ed->ed_rm_list = ohci->ed_rm_list [frame];
-	ohci->ed_rm_list [frame] = ed;
+	/* SF interrupt might get delayed; record the frame counter value that
+	 * indicates when the HC isn't looking at it, so concurrent unlinks
+	 * behave.  frame_no wraps every 2^16 msec, and changes right before
+	 * SF is triggered.
+	 */
+	ed->tick = le16_to_cpu (ohci->hcca->frame_no) + 1;
+	ed->ed_rm_list = ohci->ed_rm_list;
+	ohci->ed_rm_list = ed;
 
 	/* enable SOF interrupt */
 	if (!ohci->sleeping) {
@@ -816,10 +821,12 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
 		if (td_list->ed->hwHeadP & ED_H) {
 			if (urb_priv && ((td_list->index + 1)
 					< urb_priv->length)) {
+#ifdef OHCI_VERBOSE_DEBUG
 				dbg ("urb %p TD %d of %d, patch ED",
 					td_list->urb,
 					1 + td_list->index,
 					urb_priv->length);
+#endif
 				td_list->ed->hwHeadP =
 					(urb_priv->td [urb_priv->length - 1]->hwNextTD
 					& __constant_cpu_to_le32 (TD_MASK))
@@ -841,27 +848,37 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
 
 /*-------------------------------------------------------------------------*/
 
-/* there are some pending requests to unlink
- * - some URBs/TDs if urb_priv->state == URB_DEL
- */
-static void dl_del_list (struct ohci_hcd *ohci, unsigned int frame)
+/* wrap-aware logic stolen from <linux/jiffies.h> */
+#define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)
+
+/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
+static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
 {
-	unsigned long	flags;
-	struct ed	*ed;
-	__u32		edINFO;
-	__u32		tdINFO;
-	struct td	*td = NULL, *td_next = NULL,
-			*tdHeadP = NULL, *tdTailP;
-	__u32		*td_p;
+	struct ed	*ed, **last;
 	int		ctrl = 0, bulk = 0;
 
-	spin_lock_irqsave (&ohci->lock, flags);
-
-	for (ed = ohci->ed_rm_list [frame]; ed != NULL; ed = ed->ed_rm_list) {
+	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
+		struct td	*td, *td_next, *tdHeadP, *tdTailP;
+		u32		*td_p;
+		int		unlinked;
+
+		/* only take off EDs that the HC isn't using, accounting for
+		 * frame counter wraps.  completion callbacks might prepend
+		 * EDs to the list, they'll be checked next irq.
+		 */
+		if (tick_before (tick, ed->tick)) {
+			last = &ed->ed_rm_list;
+			continue;
+		}
+		*last = ed->ed_rm_list;
+		ed->ed_rm_list = 0;
+		unlinked = 0;
+
+		/* unlink urbs from first one requested to queue end;
+		 * leave earlier urbs alone
+		 */
 		tdTailP = dma_to_td (ohci, le32_to_cpup (&ed->hwTailP));
 		tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
-		edINFO = le32_to_cpup (&ed->hwINFO);
 		td_p = &ed->hwHeadP;
 
 		for (td = tdHeadP; td != tdTailP; td = td_next) {
@@ -870,8 +887,11 @@ static void dl_del_list (struct ohci_hcd *ohci, unsigned int frame)
 			td_next = dma_to_td (ohci,
 				le32_to_cpup (&td->hwNextTD));
-			if ((urb_priv->state == URB_DEL)) {
-				tdINFO = le32_to_cpup (&td->hwINFO);
+			if (unlinked || (urb_priv->state == URB_DEL)) {
+				u32	tdINFO = le32_to_cpup (&td->hwINFO);
+
+				unlinked = 1;
+
 				/* HC may have partly processed this TD */
 				if (TD_CC_GET (tdINFO) < 0xE)
 					td_done (urb, td);
@@ -880,22 +900,32 @@ static void dl_del_list (struct ohci_hcd *ohci, unsigned int frame)
 				/* URB is done; clean up */
 				if (++ (urb_priv->td_cnt) == urb_priv->length) {
-					spin_unlock_irqrestore (&ohci->lock,
-						flags);
+					if (urb->status == -EINPROGRESS)
+						urb->status = -ECONNRESET;
+					spin_unlock (&ohci->lock);
 					finish_urb (ohci, urb);
-					spin_lock_irqsave (&ohci->lock, flags);
+					spin_lock (&ohci->lock);
 				}
 			} else {
 				td_p = &td->hwNextTD;
 			}
 		}
 
+		/* FIXME actually want four cases here:
+		 * (a) finishing URB unlink
+		 *     [a1] no URBs queued, so start ED unlink
+		 *     [a2] some (earlier) URBs still linked, re-enable
+		 * (b) finishing ED unlink
+		 *     [b1] no URBs queued, ED is truly idle now
+		 *     [b2] URBs now queued, link ED back into schedule
+		 * right now we only have (a)
+		 */
 		ed->state &= ~ED_URB_DEL;
 		tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
 
 		if (tdHeadP == tdTailP) {
 			if (ed->state == ED_OPER)
-				ep_unlink (ohci, ed);
+				start_ed_unlink (ohci, ed);
 			td_free (ohci, tdTailP);
 			ed->hwINFO = ED_SKIP;
 			ed->state = ED_NEW;
@@ -918,7 +948,7 @@ static void dl_del_list (struct ohci_hcd *ohci, unsigned int frame)
 			writel (0, &ohci->regs->ed_controlcurrent);
 		if (bulk)	/* reset bulk list */
 			writel (0, &ohci->regs->ed_bulkcurrent);
-		if (!ohci->ed_rm_list [!frame]) {
+		if (!ohci->ed_rm_list) {
 			if (ohci->ed_controltail)
 				ohci->hc_control |= OHCI_CTRL_CLE;
 			if (ohci->ed_bulktail)
@@ -926,9 +956,6 @@ static void dl_del_list (struct ohci_hcd *ohci, unsigned int frame)
 			writel (ohci->hc_control, &ohci->regs->control);
 		}
 	}
-
-	ohci->ed_rm_list [frame] = NULL;
-	spin_unlock_irqrestore (&ohci->lock, flags);
 }
@@ -939,7 +966,7 @@ static void dl_del_list (struct ohci_hcd *ohci, unsigned int frame)
  * Process normal completions (error or success) and clean the schedules.
  *
  * This is the main path for handing urbs back to drivers.  The only other
- * path is dl_del_list(), which unlinks URBs by scanning EDs, instead of
+ * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
  * scanning the (re-reversed) donelist as this does.
  */
 static void dl_done_list (struct ohci_hcd *ohci, struct td *td)
@@ -960,7 +987,7 @@ static void dl_done_list (struct ohci_hcd *ohci, struct td *td)
 		/* If all this urb's TDs are done, call complete().
 		 * Interrupt transfers are the only special case:
 		 * they're reissued, until "deleted" by usb_unlink_urb
-		 * (real work done in a SOF intr, by dl_del_list).
+		 * (real work done in a SOF intr, by finish_unlinks).
 		 */
 		if (urb_priv->td_cnt == urb_priv->length) {
 			int	resubmit;
@@ -980,7 +1007,7 @@ static void dl_done_list (struct ohci_hcd *ohci, struct td *td)
 		if ((ed->hwHeadP & __constant_cpu_to_le32 (TD_MASK))
					== ed->hwTailP
				&& (ed->state == ED_OPER))
-			ep_unlink (ohci, ed);
+			start_ed_unlink (ohci, ed);
 		td = td_next;
 	}
 	spin_unlock_irqrestore (&ohci->lock, flags);
......
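As a companion to the new finish_unlinks() above, here is an editor's sketch (again not from the patch) of just its list-handling skeleton: a pointer-to-pointer walk over the single ed_rm_list that splices out EDs whose tick has passed and leaves the rest in place.  The trimmed-down struct ed, the scan_rm_list() helper, and the main() harness are invented for illustration; the real function also unlinks the TDs/URBs of each ED it takes off the list.

/* Editor's illustration only -- not part of the patch. */
#include <stdio.h>
#include <stddef.h>

typedef unsigned short	u16;
typedef signed short	s16;

#define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)

/* reduced to the two fields the walk needs; the real struct ed has more */
struct ed {
	struct ed	*ed_rm_list;
	u16		tick;
};

/* skeleton of the finish_unlinks() traversal, minus the TD/URB cleanup */
static void scan_rm_list (struct ed **rm_list, u16 tick)
{
	struct ed	*ed, **last;

	for (last = rm_list, ed = *last; ed != NULL; ed = *last) {
		if (tick_before (tick, ed->tick)) {
			/* HC may still cache this ED; revisit next SF irq */
			last = &ed->ed_rm_list;
			continue;
		}
		/* splice it out; the driver then finishes the unlink */
		*last = ed->ed_rm_list;
		ed->ed_rm_list = NULL;
	}
}

int main (void)
{
	struct ed	a = { .ed_rm_list = NULL, .tick = 5 };
	struct ed	b = { .ed_rm_list = &a,   .tick = 9 };
	struct ed	*rm_list = &b;			/* list: b -> a */

	scan_rm_list (&rm_list, 6);	/* frame 6: a is ready, b is not */
	printf ("head after scan: %s\n",
		rm_list == &b ? "b (still pending)" : "unexpected");
	printf ("a removed: %s\n", b.ed_rm_list == NULL ? "yes" : "no");
	return 0;
}

Because start_urb_unlink() always prepends to the head of ed_rm_list and removal only rewrites *last in place, EDs that completion callbacks add while the real walk runs (with the lock briefly dropped) are simply picked up at the next SF interrupt rather than lost, which is what lets the patch drop the old per-frame pair of queues.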
@@ -27,22 +27,29 @@ struct ed {
 	__u32		hwNextED;	/* next ED in list */
 
 	/* rest are purely for the driver's use */
-	struct ed	*ed_prev;
-	__u8		int_period;
-	__u8		int_branch;
-	__u8		int_load;
-	__u8		int_interval;
-	__u8		state;		// ED_{NEW,UNLINK,OPER}
+	dma_addr_t	dma;		/* addr of ED */
+	struct ed	*ed_prev;	/* for non-interrupt EDs */
+
+	u8		type;		/* PIPE_{BULK,...} */
+	u8		interval;	/* interrupt, isochronous */
+	union {
+		struct intr_info {		/* interrupt */
+			u8	int_period;
+			u8	int_branch;
+			u8	int_load;
+		};
+		u16		last_iso;	/* isochronous */
+	};
+
+	u8		state;		/* ED_{NEW,UNLINK,OPER} */
 #define ED_NEW 		0x00		/* unused, no dummy td */
 #define ED_UNLINK 	0x01		/* dummy td, maybe linked to hc */
 #define ED_OPER		0x02		/* dummy td, _is_ linked to hc */
 #define ED_URB_DEL	0x08		/* for unlinking; masked in */
-	__u8		type;
-	__u16		last_iso;
+
+	/* HC may see EDs on rm_list until next frame (frame_no == tick) */
+	u16		tick;
 	struct ed	*ed_rm_list;
-	dma_addr_t	dma;		/* addr of ED */
 
 } __attribute__ ((aligned(16)));
 
 #define ED_MASK	((u32)~0x0f)	/* strip hw status in low addr bits */
@@ -335,7 +342,7 @@ struct ohci_hcd {
 	struct ohci_hcca	*hcca;
 	dma_addr_t		hcca_dma;
 
-	struct ed		*ed_rm_list [2];	/* to be removed */
+	struct ed		*ed_rm_list;		/* to be removed */
 
 	struct ed		*ed_bulktail;		/* last in bulk list */
 	struct ed		*ed_controltail;	/* last in ctrl list */
......