Commit 4ff8e934 authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] ohci unlink cleanups

Attached is a patch that cleans up a few more issues in the OHCI unlink
code.

There may still be an ISO-IN data problem; I'll look at that separately,
since it seems unrelated to unlink issues.

- Simplify/correct ED lifecycle (sketched in code after this list)
	* UNLINK is now for real: descheduled and waiting for SOF
	* finish_unlinks() expects descheduled EDs (may reschedule)
	* only ed_deschedule() turns off hardware schedule processing
	* no more NEW state
	* no more ED_URB_DEL flag (it added extra states)
	* new IDLE state, "not scheduled" (replaces previous UNLINKing)
- Bugfixes
	* ed_get(), potential memleak is now gone
	* urb_enqueue(), won't submit to dead/sleeping hc
	* free_config(), rescans after SOF when needed
	* ed_schedule(), use wmb()
	* ed_schedule() and finish_unlinks(), more thorough about
	  restarting control or bulk processing
	* finish_unlinks(), more cautious about reentering
- General:
	* ed->ed_rm_list renamed ed_next; to be used more later
	* slightly shrink object code
	* rename some functions
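
As a reader aid, here is a minimal sketch of the lifecycle this patch
converges on.  Illustrative only: ed_schedule(), start_urb_unlink(), and
finish_unlinks() are the patch's real routines; the two example_*
wrappers below are not in this patch.

	/* create --> IDLE --> OPER --> ... --> IDLE --> destroy
	 * usually:  OPER --> UNLINK --> (IDLE | OPER) --> ...
	 */
	static void example_submit (struct ohci_hcd *ohci, struct ed *ed)
	{
		if (ed->state == ED_IDLE)	/* first URB queued */
			ed_schedule (ohci, ed);	/* IDLE --> OPER */
	}

	static void example_unlink (struct ohci_hcd *ohci, struct ed *ed)
	{
		if (ed->state == ED_OPER)	/* OPER --> UNLINK */
			start_urb_unlink (ohci, ed);
		/* finish_unlinks() later moves UNLINK --> IDLE, or back
		 * to OPER if more work was queued meanwhile.
		 */
	}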

This leaves one notable issue in the unlink paths:  the driver never waits
for SOF after descheduling (empty) EDs.  That's racy in most cases, though
there are a few light-traffic cases where it's correct (in part because
the ED is empty).  Easy to fix once the rest of this is known to behave.
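
For flavor, a sketch of what that wait could look like, reusing the
deferred-unlink pattern this patch already applies to non-empty EDs.
tick_before() here is the standard wrap-safe 16-bit compare, and
defer_empty_unlink() is hypothetical, not code from this patch; field
and register names follow the driver.

	static inline int tick_before (u16 tick1, u16 tick2)
	{
		/* wrap-safe:  true if tick1 precedes tick2 mod 2^16 */
		return ((s16) (tick1 - tick2)) < 0;
	}

	static void defer_empty_unlink (struct ohci_hcd *ohci, struct ed *ed)
	{
		ed->state = ED_UNLINK;

		/* HC may cache this ED until the next start-of-frame */
		ed->tick = le16_to_cpu (ohci->hcca->frame_no) + 1;
		ed->ed_next = ohci->ed_rm_list;
		ohci->ed_rm_list = ed;

		/* ask for an SOF interrupt; finish_unlinks() runs from it
		 * and frees (or reschedules) the ED once tick_before()
		 * says the HC can no longer see it.
		 */
		writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
		writel (OHCI_INTR_SF, &ohci->regs->intrenable);
	}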
parent 48a7ed7b
@@ -10,8 +10,15 @@
  * [ (C) Copyright 1999 Gregory P. Smith]
  *
  *
+ * OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller
+ * interfaces (though some non-x86 Intel chips use it).  It supports
+ * smarter hardware than UHCI.  A download link for the spec is available
+ * through the http://www.usb.org website.
+ *
  * History:
  *
+ * 2002/07/19 fixes to management of ED and schedule state.
+ * 2002/06/09 SA-1111 support (Christopher Hoover)
  * 2002/06/01 remember frame when HC won't see EDs any more; use that info
  *	to fix urb unlink races caused by interrupt latency assumptions;
  *	minor ED field and function naming updates
@@ -95,12 +102,12 @@
 /*
  * TO DO:
  *
- *	- "disabled" should be the hcd state
+ *	- "disabled" and "sleeping" should be in hcd->state
  *	- bandwidth alloc to generic code
  *	- lots more testing!!
  */
 
-#define DRIVER_VERSION "2002-Jun-15"
+#define DRIVER_VERSION "2002-Jul-19"
 #define DRIVER_AUTHOR "Roman Weissgaerber <weissg@vienna.at>, David Brownell"
 #define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
@@ -140,6 +147,7 @@ static int ohci_urb_enqueue (
 	int		i, size = 0;
 	unsigned long	flags;
 	int		bustime = 0;
+	int		retval = 0;
 
 #ifdef OHCI_VERBOSE_DEBUG
 	urb_print (urb, "SUB", usb_pipein (pipe));
@@ -191,19 +199,25 @@
 		return -ENOMEM;
 	memset (urb_priv, 0, sizeof (urb_priv_t) + size * sizeof (struct td *));
 
+	spin_lock_irqsave (&ohci->lock, flags);
+
+	/* don't submit to a dead HC */
+	if (ohci->disabled || ohci->sleeping) {
+		retval = -ENODEV;
+		goto fail;
+	}
+
 	/* fill the private part of the URB */
 	urb_priv->length = size;
 	urb_priv->ed = ed;
 
 	/* allocate the TDs (updating hash chains) */
-	spin_lock_irqsave (&ohci->lock, flags);
 	for (i = 0; i < size; i++) {
 		urb_priv->td [i] = td_alloc (ohci, SLAB_ATOMIC);
 		if (!urb_priv->td [i]) {
 			urb_priv->length = i;
-			urb_free_priv (ohci, urb_priv);
-			spin_unlock_irqrestore (&ohci->lock, flags);
-			return -ENOMEM;
+			retval = -ENOMEM;
+			goto fail;
 		}
 	}
@@ -217,11 +231,11 @@
 	switch (usb_pipetype (pipe)) {
 	case PIPE_ISOCHRONOUS:
 		if (urb->transfer_flags & USB_ISO_ASAP) {
-			urb->start_frame = ( (ed->state == ED_OPER)
+			urb->start_frame = ((ed->state != ED_IDLE)
 				? (ed->intriso.last_iso + 1)
 				: (le16_to_cpu (ohci->hcca->frame_no)
 					+ 10)) & 0xffff;
 		}
 		/* FALLTHROUGH */
 	case PIPE_INTERRUPT:
 		if (urb->bandwidth == 0) {
@@ -238,18 +252,20 @@
 	urb->hcpriv = urb_priv;
 
-	/* link the ed into a chain if is not already */
-	if (ed->state != ED_OPER)
-		ep_link (ohci, ed);
+	/* schedule the ed if needed */
+	if (ed->state == ED_IDLE)
+		ed_schedule (ohci, ed);
 
 	/* fill the TDs and link them to the ed; and
 	 * enable that part of the schedule, if needed
 	 */
 	td_submit_urb (urb);
 
+fail:
+	if (retval)
+		urb_free_priv (ohci, urb_priv);
 	spin_unlock_irqrestore (&ohci->lock, flags);
-
-	return 0;
+	return retval;
 }
 
 /*
@@ -270,19 +286,17 @@ static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
 	if (!ohci->disabled) {
 		urb_priv_t *urb_priv;
 
-		/* flag the urb's data for deletion in some upcoming
-		 * SF interrupt's delete list processing
+		/* Unless an IRQ completed the unlink while it was being
+		 * handed to us, flag it for unlink and giveback, and force
+		 * some upcoming INTR_SF to call finish_unlinks()
 		 */
 		spin_lock_irqsave (&ohci->lock, flags);
 		urb_priv = urb->hcpriv;
-		if (!urb_priv || (urb_priv->state == URB_DEL)) {
-			spin_unlock_irqrestore (&ohci->lock, flags);
-			return 0;
+		if (urb_priv) {
+			urb_priv->state = URB_DEL;
+			if (urb_priv->ed->state == ED_OPER)
+				start_urb_unlink (ohci, urb_priv->ed);
 		}
-		urb_priv->state = URB_DEL;
-		start_urb_unlink (ohci, urb_priv->ed);
 		spin_unlock_irqrestore (&ohci->lock, flags);
 	} else {
 		/*
@@ -290,12 +304,16 @@ static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
 		 * any more ... just clean up every urb's memory.
 		 */
 		finish_urb (ohci, urb);
 	}
 	return 0;
 }
 
 /*-------------------------------------------------------------------------*/
 
+/* frees config/altsetting state for endpoints,
+ * including ED memory, dummy TD, and bulk/intr data toggle
+ */
 static void
 ohci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
 {
@@ -303,7 +321,11 @@ ohci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
 	struct hcd_dev	*dev = (struct hcd_dev *) udev->hcpriv;
 	int		i;
 	unsigned long	flags;
+#ifdef DEBUG
+	int		rescans = 0;
+#endif
 
+rescan:
 	/* free any eds, and dummy tds, still hanging around */
 	spin_lock_irqsave (&ohci->lock, flags);
 	for (i = 0; i < 32; i++) {
@@ -312,27 +334,47 @@ ohci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
 		if (!ed)
 			continue;
 
-		ed->state &= ~ED_URB_DEL;
-		if (ohci->disabled && ed->state == ED_OPER)
-			ed->state = ED_UNLINK;
+		if (ohci->disabled && ed->state != ED_IDLE)
+			ed->state = ED_IDLE;
 		switch (ed->state) {
-		case ED_NEW:
-			break;
-		case ED_UNLINK:
+		case ED_UNLINK:		/* wait a frame? */
+			goto do_rescan;
+		case ED_IDLE:		/* fully unlinked */
 			td_free (ohci, ed->dummy);
 			break;
-		case ED_OPER:
 		default:
+#ifdef DEBUG
 			err ("illegal ED %d state in free_config, %d",
 				i, ed->state);
-#ifdef DEBUG
-			BUG ();
 #endif
+			/* ED_OPER: some driver disconnect() is broken,
+			 * it didn't even start its unlinks much less wait
+			 * for their completions.
+			 * OTHERWISE: hcd bug, ed is garbage
+			 */
+			BUG ();
 		}
 		ed_free (ohci, ed);
 	}
 	spin_unlock_irqrestore (&ohci->lock, flags);
+	return;
+
+do_rescan:
+#ifdef DEBUG
+	/* a driver->disconnect() returned before its unlinks completed? */
+	if (in_interrupt ()) {
+		dbg ("WARNING: spin in interrupt; driver->disconnect() bug");
+		dbg ("dev usb-%s-%s ep 0x%x",
+			ohci->hcd.self.bus_name, udev->devpath, i);
+	}
+	BUG_ON (!(readl (&ohci->regs->intrenable) & OHCI_INTR_SF));
+	BUG_ON (rescans >= 2);	/* HWBUG */
+	rescans++;
+#endif
+	spin_unlock_irqrestore (&ohci->lock, flags);
+	wait_ms (1);
+	goto rescan;
 }
 
 static int ohci_get_frame (struct usb_hcd *hcd)
...
@@ -170,50 +170,50 @@ static int ep_rev (int num_bits, int word)
 /* link an ed into one of the HC chains */
 
-static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
+static void ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 {
 	int	int_branch, i;
 	int	inter, interval, load;
 	__u32	*ed_p;
-	volatile struct ed *ed = edi;
 
 	ed->state = ED_OPER;
+	ed->hwNextED = 0;
+	wmb ();
+
+	/* we care about rm_list when setting CLE/BLE in case the HC was at
+	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
+	 * yet.  finish_unlinks() restarts as needed, some upcoming INTR_SF.
+	 */
 	switch (ed->type) {
 	case PIPE_CONTROL:
-		ed->hwNextED = 0;
 		if (ohci->ed_controltail == NULL) {
 			writel (ed->dma, &ohci->regs->ed_controlhead);
 		} else {
 			ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma);
 		}
 		ed->ed_prev = ohci->ed_controltail;
-		if (!ohci->ed_controltail
-				&& !ohci->ed_rm_list
-				&& !ohci->sleeping
-				) {
+		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
 			ohci->hc_control |= OHCI_CTRL_CLE;
-			writel (0, &ohci->regs->ed_controlcurrent);
 			writel (ohci->hc_control, &ohci->regs->control);
 		}
-		ohci->ed_controltail = edi;
+		ohci->ed_controltail = ed;
 		break;
 
 	case PIPE_BULK:
-		ed->hwNextED = 0;
 		if (ohci->ed_bulktail == NULL) {
 			writel (ed->dma, &ohci->regs->ed_bulkhead);
 		} else {
 			ohci->ed_bulktail->hwNextED = cpu_to_le32 (ed->dma);
 		}
 		ed->ed_prev = ohci->ed_bulktail;
-		if (!ohci->ed_bulktail
-				&& !ohci->ed_rm_list
-				&& !ohci->sleeping
-				) {
+		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
 			ohci->hc_control |= OHCI_CTRL_BLE;
-			writel (0, &ohci->regs->ed_bulkcurrent);
 			writel (ohci->hc_control, &ohci->regs->control);
 		}
-		ohci->ed_bulktail = edi;
+		ohci->ed_bulktail = ed;
 		break;
 
 	case PIPE_INTERRUPT:
@@ -231,17 +231,16 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
 			ed->hwNextED = *ed_p;
 			*ed_p = cpu_to_le32 (ed->dma);
 		}
+		wmb ();
 #ifdef OHCI_VERBOSE_DEBUG
 		ohci_dump_periodic (ohci, "LINK_INT");
 #endif
 		break;
 
 	case PIPE_ISOCHRONOUS:
-		ed->hwNextED = 0;
-		ed->interval = 1;
+		ed->ed_prev = ohci->ed_isotail;
 		if (ohci->ed_isotail != NULL) {
 			ohci->ed_isotail->hwNextED = cpu_to_le32 (ed->dma);
-			ed->ed_prev = ohci->ed_isotail;
 		} else {
 			for ( i = 0; i < NUM_INTS; i += inter) {
 				inter = 1;
@@ -251,15 +250,18 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
 					inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->interval);
 				*ed_p = cpu_to_le32 (ed->dma);
 			}
-			ed->ed_prev = NULL;
 		}
-		ohci->ed_isotail = edi;
+		wmb ();
+		ohci->ed_isotail = ed;
 #ifdef OHCI_VERBOSE_DEBUG
 		ohci_dump_periodic (ohci, "LINK_ISO");
 #endif
 		break;
 	}
-	return 0;
+
+	/* the HC may not see the schedule updates yet, but if it does
+	 * then they'll be properly ordered.
+	 */
 }
 
 /*-------------------------------------------------------------------------*/
@@ -288,9 +290,8 @@ static void periodic_unlink (
  * just the link to the ed is unlinked.
  * the link from the ed still points to another operational ed or 0
  * so the HC can eventually finish the processing of the unlinked ed
- * caller guarantees the ED has no active TDs.
  */
-static int start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
+static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
 {
 	int	i;
 
@@ -361,15 +362,14 @@ static int start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
 		break;
 	}
 
-	/* FIXME ED's "unlink" state is indeterminate;
-	 * the HC might still be caching it (till SOF).
-	 *  - use ed_rm_list and finish_unlinks(), adding some state that
-	 *    prevents clobbering hw linkage before the appropriate SOF
-	 *  - a speedup:  when only one urb is queued on the ed, save 1msec
-	 *    by making start_urb_unlink() use this routine to deschedule.
+	/* FIXME Except for a couple of exceptionally clean unlink cases
+	 * (like unlinking the only c/b ED, with no TDs) HCs may still be
+	 * caching this (till SOF).
+	 *
+	 * To avoid racing with the hardware, this needs to use ED_UNLINK
+	 * and delay til next INTR_SF.  Merge with start_urb_unlink().
 	 */
-	ed->state = ED_UNLINK;
-	return 0;
+	ed->state = ED_IDLE;
 }
@@ -403,35 +403,27 @@ static struct ed *ed_get (
 	spin_lock_irqsave (&ohci->lock, flags);
 
 	if (!(ed = dev->ep [ep])) {
+		struct td	*td;
+
 		ed = ed_alloc (ohci, SLAB_ATOMIC);
 		if (!ed) {
 			/* out of memory */
 			goto done;
 		}
 		dev->ep [ep] = ed;
-	}
-
-	if (ed->state & ED_URB_DEL) {
-		/* pending unlink request */
-		ed = 0;
-		goto done;
-	}
-
-	if (ed->state == ED_NEW) {
-		struct td	*td;
 
-		ed->hwINFO = ED_SKIP;
 		/* dummy td; end of td list for ed */
 		td = td_alloc (ohci, SLAB_ATOMIC);
 		if (!td) {
 			/* out of memory */
+			ed_free (ohci, ed);
 			ed = 0;
 			goto done;
 		}
 		ed->dummy = td;
 		ed->hwTailP = cpu_to_le32 (td->td_dma);
 		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
-		ed->state = ED_UNLINK;
+		ed->state = ED_IDLE;
 		ed->type = type;
 	}
@@ -439,7 +431,7 @@ static struct ed *ed_get (
 	 * state/mode info.  Currently the upper layers don't support such
 	 * guarantees; we're lucky changing config/altsetting is rare.
 	 */
-	if (ed->state == ED_UNLINK) {
+	if (ed->state == ED_IDLE) {
 		u32	info;
 
 		info = usb_pipedevice (pipe);
@@ -494,30 +486,13 @@
 /*-------------------------------------------------------------------------*/
 
 /* request unlinking of an endpoint from an operational HC.
- * put the ep on the rm_list and stop the bulk or ctrl list
+ * put the ep on the rm_list
  * real work is done at the next start frame (SF) hardware interrupt
  */
 static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
 {
-	/* already pending? */
-	if (ed->state & ED_URB_DEL)
-		return;
-	ed->state |= ED_URB_DEL;
-
-	ed->hwINFO |= ED_SKIP;
-
-	switch (ed->type) {
-	case PIPE_CONTROL:		/* stop control list */
-		ohci->hc_control &= ~OHCI_CTRL_CLE;
-		writel (ohci->hc_control,
-			&ohci->regs->control);
-		break;
-	case PIPE_BULK:			/* stop bulk list */
-		ohci->hc_control &= ~OHCI_CTRL_BLE;
-		writel (ohci->hc_control,
-			&ohci->regs->control);
-		break;
-	}
+	ed_deschedule (ohci, ed);
+	ed->state = ED_UNLINK;
 
 	/* SF interrupt might get delayed; record the frame counter value that
 	 * indicates when the HC isn't looking at it, so concurrent unlinks
@@ -526,7 +501,7 @@ static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
 	 */
 	ed->tick = le16_to_cpu (ohci->hcca->frame_no) + 1;
 
-	ed->ed_rm_list = ohci->ed_rm_list;
+	ed->ed_next = ohci->ed_rm_list;
 	ohci->ed_rm_list = ed;
 
 	/* enable SOF interrupt */
@@ -744,13 +719,15 @@ static void td_done (struct urb *urb, struct td *td)
 	u32	tdINFO = le32_to_cpup (&td->hwINFO);
 	int	cc = 0;
 
 	/* ISO ... drivers see per-TD length/status */
 	if (tdINFO & TD_ISO) {
 		u16	tdPSW = le16_to_cpu (td->hwPSW [0]);
 		int	dlen = 0;
 
 		cc = (tdPSW >> 12) & 0xF;
+		if (cc >= 0x0E)			/* hc didn't touch? */
+			return;
+
 		if (usb_pipeout (urb->pipe))
 			dlen = urb->iso_frame_desc [td->index].length;
 		else
@@ -759,9 +736,11 @@ static void td_done (struct urb *urb, struct td *td)
 		urb->iso_frame_desc [td->index].actual_length = dlen;
 		urb->iso_frame_desc [td->index].status = cc_to_error [cc];
 
-		if (cc != 0)
+#ifdef VERBOSE_DEBUG
+		if (cc != TD_CC_NOERROR)
 			dbg ("  urb %p iso TD %p (%d) len %d CC %d",
 				urb, td, 1 + td->index, dlen, cc);
+#endif
 
 	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
 	 * except that "setup" bytes aren't counted and "short" transfers
@@ -783,7 +762,7 @@ static void td_done (struct urb *urb, struct td *td)
 		if (cc == TD_DATAUNDERRUN
 				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
 			cc = TD_CC_NOERROR;
-		if (cc != TD_CC_NOERROR) {
+		if (cc != TD_CC_NOERROR && cc < 0x0E) {
 			spin_lock (&urb->lock);
 			if (urb->status == -EINPROGRESS)
 				urb->status = cc_to_error [cc];
@@ -801,7 +780,7 @@ static void td_done (struct urb *urb, struct td *td)
 	}
 
 #ifdef VERBOSE_DEBUG
-	if (cc != 0)
+	if (cc != TD_CC_NOERROR && cc < 0x0E)
 		dbg ("  urb %p TD %p (%d) CC %d, len=%d/%d",
 			urb, td, 1 + td->index, cc,
 			urb->actual_length,
@@ -876,28 +855,39 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
 static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
 {
 	struct ed	*ed, **last;
-	int		ctrl = 0, bulk = 0;
 
+rescan_all:
 	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
 		struct td	*td, *td_next, *tdHeadP, *tdTailP;
 		u32		*td_p;
-		int		unlinked;
+		int		completed, modified;
 
 		/* only take off EDs that the HC isn't using, accounting for
-		 * frame counter wraps.  completion callbacks might prepend
-		 * EDs to the list, they'll be checked next irq.
+		 * frame counter wraps.
 		 */
-		if (tick_before (tick, ed->tick)) {
-			last = &ed->ed_rm_list;
+		if (tick_before (tick, ed->tick) && !ohci->disabled) {
+			last = &ed->ed_next;
 			continue;
 		}
 
-		*last = ed->ed_rm_list;
-		ed->ed_rm_list = 0;
-		unlinked = 0;
-
-		/* unlink urbs from first one requested to queue end;
-		 * leave earlier urbs alone
+		/* reentrancy:  if we drop the schedule lock, someone might
+		 * have modified this list.  normally it's just prepending
+		 * entries (which we'd ignore), but paranoia won't hurt.
 		 */
+		*last = ed->ed_next;
+		ed->ed_next = 0;
+		modified = 0;
+
+		/* unlink urbs as requested, but rescan the list after
+		 * we call a completion since it might have unlinked
+		 * another (earlier) urb
+		 *
+		 * FIXME use td_list to scan, not ed hashtables.
+		 * completely abolish ed hashtables!
+		 */
+rescan_this:
+		completed = 0;
+
 		tdTailP = dma_to_td (ohci, le32_to_cpup (&ed->hwTailP));
 		tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
 		td_p = &ed->hwHeadP;
@@ -908,21 +898,18 @@ static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
 			td_next = dma_to_td (ohci,
 				le32_to_cpup (&td->hwNextTD));
 
-			if (unlinked || (urb_priv->state == URB_DEL)) {
-				u32 tdINFO = le32_to_cpup (&td->hwINFO);
-
-				unlinked = 1;
-
+			if (urb_priv->state == URB_DEL) {
 				/* HC may have partly processed this TD */
-				if (TD_CC_GET (tdINFO) < 0xE)
-					td_done (urb, td);
+				td_done (urb, td);
+				urb_priv->td_cnt++;
 				*td_p = td->hwNextTD | (*td_p
 					& __constant_cpu_to_le32 (0x3));
 
 				/* URB is done; clean up */
-				if (++ (urb_priv->td_cnt) == urb_priv->length) {
-					if (urb->status == -EINPROGRESS)
-						urb->status = -ECONNRESET;
+				if (urb_priv->td_cnt == urb_priv->length) {
+					modified = completed = 1;
 					spin_unlock (&ohci->lock);
 					finish_urb (ohci, urb);
 					spin_lock (&ohci->lock);
@@ -932,49 +919,52 @@ static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
 			}
 		}
 
-		/* FIXME actually want four cases here:
-		 * (a) finishing URB unlink
-		 *     [a1] no URBs queued, so start ED unlink
-		 *     [a2] some (earlier) URBs still linked, re-enable
-		 * (b) finishing ED unlink
-		 *     [b1] no URBs queued, ED is truly idle now
-		 *          ... we could set state ED_NEW and free dummy
-		 *     [b2] URBs now queued, link ED back into schedule
-		 * right now we only have (a)
-		 */
-		ed->state &= ~ED_URB_DEL;
-		tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
-		if (tdHeadP == tdTailP) {
-			if (ed->state == ED_OPER)
-				start_ed_unlink (ohci, ed);
-		} else
-			ed->hwINFO &= ~ED_SKIP;
+		/* ED's now officially unlinked, hc doesn't see */
+		ed->state = ED_IDLE;
+		ed->hwINFO &= ~ED_SKIP;
+		ed->hwHeadP &= ~cpu_to_le32 (ED_H);
+		ed->hwNextED = 0;
 
-		switch (ed->type) {
-		case PIPE_CONTROL:
-			ctrl = 1;
-			break;
-		case PIPE_BULK:
-			bulk = 1;
-			break;
+		/* but if there's work queued, reschedule */
+		tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
+		if (tdHeadP != tdTailP) {
+			if (completed)
+				goto rescan_this;
+			if (!ohci->disabled && !ohci->sleeping)
+				ed_schedule (ohci, ed);
 		}
+
+		if (modified)
+			goto rescan_all;
 	}
 
 	/* maybe reenable control and bulk lists */
-	if (!ohci->disabled) {
-		if (ctrl)	/* reset control list */
-			writel (0, &ohci->regs->ed_controlcurrent);
-		if (bulk)	/* reset bulk list */
-			writel (0, &ohci->regs->ed_bulkcurrent);
-		if (!ohci->ed_rm_list) {
-			if (ohci->ed_controltail)
-				ohci->hc_control |= OHCI_CTRL_CLE;
-			if (ohci->ed_bulktail)
-				ohci->hc_control |= OHCI_CTRL_BLE;
-			writel (ohci->hc_control, &ohci->regs->control);
-		}
-	}
+	if (!ohci->disabled && !ohci->ed_rm_list) {
+		u32	command = 0, control = 0;
+
+		if (ohci->ed_controltail) {
+			command |= OHCI_CLF;
+			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
+				control |= OHCI_CTRL_CLE;
+				writel (0, &ohci->regs->ed_controlcurrent);
+			}
+		}
+		if (ohci->ed_bulktail) {
+			command |= OHCI_BLF;
+			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
+				control |= OHCI_CTRL_BLE;
+				writel (0, &ohci->regs->ed_bulkcurrent);
+			}
+		}
+
+		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
+		if (control) {
+			ohci->hc_control |= control;
+			writel (ohci->hc_control, &ohci->regs->control);
+		}
+		if (command)
+			writel (command, &ohci->regs->cmdstatus);
+	}
 }
@@ -1026,7 +1016,7 @@ static void dl_done_list (struct ohci_hcd *ohci, struct td *td)
 			if ((ed->hwHeadP & __constant_cpu_to_le32 (TD_MASK))
 					== ed->hwTailP
 					&& (ed->state == ED_OPER))
-				ed_deschedule (ohci, ed);
+				ed_deschedule (ohci, ed);
 			td = td_next;
 		}
 		spin_unlock_irqrestore (&ohci->lock, flags);
...
@@ -31,15 +31,21 @@ struct ed {
 	/* rest are purely for the driver's use */
 	dma_addr_t	dma;		/* addr of ED */
+	struct td	*dummy;		/* next TD to activate */
+
+	/* host's view of schedule */
+	struct ed	*ed_next;	/* on schedule or rm_list */
 	struct ed	*ed_prev;	/* for non-interrupt EDs */
-	struct td	*dummy;
 	struct list_head td_list;	/* "shadow list" of our TDs */
 
-	u8		state;		/* ED_{NEW,UNLINK,OPER} */
-#define ED_NEW		0x00		/* unused, no dummy td */
-#define ED_UNLINK	0x01		/* dummy td, maybe linked to hc */
-#define ED_OPER		0x02		/* dummy td, _is_ linked to hc */
-#define ED_URB_DEL	0x08		/* for unlinking; masked in */
+	/* create --> IDLE --> OPER --> ... --> IDLE --> destroy
+	 * usually:  OPER --> UNLINK --> (IDLE | OPER) --> ...
+	 * some special cases :  OPER --> IDLE ...
+	 */
+	u8		state;		/* ED_{IDLE,UNLINK,OPER} */
+#define ED_IDLE		0x00		/* NOT linked to HC */
+#define ED_UNLINK	0x01		/* being unlinked from hc */
+#define ED_OPER		0x02		/* IS linked to hc */
 
 	u8		type;		/* PIPE_{BULK,...} */
 	u16		interval;	/* interrupt, isochronous */
@@ -53,7 +59,6 @@ struct ed {
 
 	/* HC may see EDs on rm_list until next frame (frame_no == tick) */
 	u16		tick;
-	struct ed	*ed_rm_list;
 } __attribute__ ((aligned(16)));
 
 #define ED_MASK	((u32)~0x0f)		/* strip hw status in low addr bits */
...