Commit e0901283 authored by David Brownell, committed by Greg Kroah-Hartman

USB

ehci-0306, iso, philips, speedups
  
      - adds preliminary highspeed ISO support
      - tweaks the driver to support the Philips EHCI
      - does less in the IRQ handler
      - avoids accessing one immutable PCI register
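
The last item is HCSPARAMS: it is a read-only capability register, so
the patch reads it once in ehci_start() and keeps a copy in
ehci->hcs_params.  A rough sketch of the pattern (the field and macro
names are the ones visible in the diff below):

	/* once, at ehci_start(): HCSPARAMS is read-only, cache it */
	ehci->hcs_params = readl (&ehci->caps->hcs_params);

	/* later, wherever the register used to be re-read over PCI */
	ports = HCS_N_PORTS (ehci->hcs_params);
	if (HCS_PPC (ehci->hcs_params)) {
		/* per-port power control is available */
	}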
  
The ISO support isn't fully cooked yet, but it should be
enough to start writing drivers; not that I know of any
highspeed ISO devices that are really available yet.
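
For reference, a minimal sketch of what the driver side of a
highspeed ISO submission could look like against this HCD.  The
iso_frame_desc fields are the ones the HCD consumes below (and
itd_schedule() treats urb->interval as microframes); the endpoint,
buffer, packet size, and completion callback are made-up examples,
and the call into usbcore to submit the urb is omitted:

	/* hypothetical setup for a highspeed ISO IN urb; "udev",
	 * "ep", "buf", and my_iso_complete are assumptions.
	 */
	unsigned i, packet_size = 192, packets = 8;

	urb->dev = udev;
	urb->pipe = usb_rcvisocpipe (udev, ep);
	urb->transfer_flags = USB_ISO_ASAP;	/* HCD picks start_frame */
	urb->interval = 8;		/* uframes; 8 == once per frame */
	urb->number_of_packets = packets;
	urb->transfer_buffer = buf;
	urb->transfer_buffer_length = packets * packet_size;
	for (i = 0; i < packets; i++) {
		/* one buffer, per-packet offsets; first offset must be 0 */
		urb->iso_frame_desc [i].offset = i * packet_size;
		urb->iso_frame_desc [i].length = packet_size;
	}
	urb->complete = my_iso_complete;
	urb->context = my_device_state;
	/* after completion, check urb->error_count and each
	 * iso_frame_desc [i].status / actual_length.
	 */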
  
As a functional milestone, this means Linux now
handles all kinds of highspeed device I/O.  (But it
doesn't yet handle split periodic transactions to
full or low speed devices through USB 2.0 hubs.)
    
Thanks to Rory Bolt for the non-ISO bits here!
parent ddbdbc8c
/*
* Copyright (c) 2000-2001 by David Brownell
* Copyright (c) 2000-2002 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -31,10 +31,6 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#ifndef CONFIG_USB_DEBUG
#define CONFIG_USB_DEBUG /* this is still experimental! */
#endif
#ifdef CONFIG_USB_DEBUG
#define DEBUG
#else
......@@ -73,19 +69,25 @@
* ...
*
* HISTORY:
*
* 2002-03-05 Initial high-speed ISO support; reduce ITD memory; shift
* more checking to generic hcd framework (db). Make it work with
* Philips EHCI; reduce PCI traffic; shorten IRQ path (Rory Bolt).
* 2002-01-14 Minor cleanup; version synch.
* 2002-01-08 Fix roothub handoff of FS/LS to companion controllers.
* 2002-01-04 Control/Bulk queuing behaves.
*
* 2001-12-12 Initial patch version for Linux 2.5.1 kernel.
* 2001-June Works with usb-storage and NEC EHCI on 2.4
*/
#define DRIVER_VERSION "$Revision: 0.26 $"
#define DRIVER_VERSION "$Revision: 0.27 $"
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
// #define EHCI_VERBOSE_DEBUG
// #define have_iso
// #define have_split_iso
#ifdef CONFIG_DEBUG_SLAB
# define EHCI_SLAB_FLAGS (SLAB_POISON)
......@@ -187,6 +189,9 @@ static int ehci_start (struct usb_hcd *hcd)
dbg_hcs_params (ehci, "ehci_start");
dbg_hcc_params (ehci, "ehci_start");
/* cache this readonly data; minimize PCI reads */
ehci->hcs_params = readl (&ehci->caps->hcs_params);
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
......@@ -204,7 +209,7 @@ static int ehci_start (struct usb_hcd *hcd)
ehci->async = 0;
ehci->reclaim = 0;
ehci->next_frame = -1;
ehci->next_uframe = -1;
/* controller state: unknown --> reset */
......@@ -310,7 +315,7 @@ static void ehci_stop (struct usb_hcd *hcd)
// root hub is shut down separately (first, when possible)
scan_async (ehci);
if (ehci->next_frame != -1)
if (ehci->next_uframe != -1)
scan_periodic (ehci);
ehci_mem_cleanup (ehci);
......@@ -332,14 +337,12 @@ static int ehci_get_frame (struct usb_hcd *hcd)
static int ehci_suspend (struct usb_hcd *hcd, u32 state)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 params;
int ports;
int i;
dbg ("%s: suspend to %d", hcd->bus_name, state);
params = readl (&ehci->caps->hcs_params);
ports = HCS_N_PORTS (params);
ports = HCS_N_PORTS (ehci->hcs_params);
// FIXME: This assumes what's probably a D3 level suspend...
......@@ -375,14 +378,12 @@ dbg ("%s: suspend port %d", hcd->bus_name, i);
static int ehci_resume (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 params;
int ports;
int i;
dbg ("%s: resume", hcd->bus_name);
params = readl (&ehci->caps->hcs_params);
ports = HCS_N_PORTS (params);
ports = HCS_N_PORTS (ehci->hcs_params);
// FIXME: if controller didn't retain state,
// return and let generic code clean it up
......@@ -426,7 +427,7 @@ static void ehci_tasklet (unsigned long param)
if (ehci->reclaim_ready)
end_unlink_async (ehci);
scan_async (ehci);
if (ehci->next_frame != -1)
if (ehci->next_uframe != -1)
scan_periodic (ehci);
// FIXME: when nothing is connected to the root hub,
......@@ -440,19 +441,19 @@ static void ehci_irq (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status = readl (&ehci->regs->status);
int bh = 0;
int bh;
/* clear (just) interrupts */
status &= INTR_MASK;
if (!status) /* irq sharing? */
return;
/* clear (just) interrupts */
writel (status, &ehci->regs->status);
readl (&ehci->regs->command); /* unblock posted write */
if (unlikely (hcd->state == USB_STATE_HALT)) /* irq sharing? */
return;
bh = 0;
#ifdef EHCI_VERBOSE_DEBUG
/* unrequested/ignored: Port Change Detect, Frame List Rollover */
if (status & INTR_MASK)
dbg_status (ehci, "irq", status);
#endif
......@@ -520,17 +521,15 @@ static int ehci_urb_enqueue (
return intr_submit (ehci, urb, &qtd_list, mem_flags);
case PIPE_ISOCHRONOUS:
#ifdef have_iso
if (urb->dev->speed == USB_SPEED_HIGH)
return itd_submit (ehci, urb);
return itd_submit (ehci, urb, mem_flags);
#ifdef have_split_iso
else
return sitd_submit (ehci, urb);
return sitd_submit (ehci, urb, mem_flags);
#else
// FIXME highspeed iso stuff is written but never run/tested.
// and the split iso support isn't even written yet.
dbg ("no iso support yet");
dbg ("no split iso support yet");
return -ENOSYS;
#endif /* have_iso */
#endif /* have_split_iso */
}
return 0;
......
/*
* Copyright (c) 2001 by David Brownell
* Copyright (c) 2001-2002 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -68,8 +68,7 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
/* init status to no-changes */
buf [0] = 0;
temp = readl (&ehci->caps->hcs_params);
ports = HCS_N_PORTS (temp);
ports = HCS_N_PORTS (ehci->hcs_params);
if (ports > 7) {
buf [1] = 0;
retval++;
......@@ -107,8 +106,7 @@ ehci_hub_descriptor (
struct ehci_hcd *ehci,
struct usb_hub_descriptor *desc
) {
u32 params = readl (&ehci->caps->hcs_params);
int ports = HCS_N_PORTS (params);
int ports = HCS_N_PORTS (ehci->hcs_params);
u16 temp;
desc->bDescriptorType = 0x29;
......@@ -124,10 +122,10 @@ ehci_hub_descriptor (
memset (&desc->bitmap [temp], 0xff, temp);
temp = 0x0008; /* per-port overcurrent reporting */
if (HCS_PPC (params)) /* per-port power control */
temp |= 0x0001;
if (HCS_INDICATOR (params)) /* per-port indicators (LEDs) */
temp |= 0x0080;
if (HCS_PPC (ehci->hcs_params))
temp |= 0x0001; /* per-port power control */
if (HCS_INDICATOR (ehci->hcs_params))
temp |= 0x0080; /* per-port indicators (LEDs) */
desc->wHubCharacteristics = cpu_to_le16 (temp);
}
......@@ -142,8 +140,7 @@ static int ehci_hub_control (
u16 wLength
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 params = readl (&ehci->caps->hcs_params);
int ports = HCS_N_PORTS (params);
int ports = HCS_N_PORTS (ehci->hcs_params);
u32 temp;
unsigned long flags;
int retval = 0;
......@@ -189,7 +186,7 @@ static int ehci_hub_control (
/* ? */
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC (params))
if (HCS_PPC (ehci->hcs_params))
writel (temp & ~PORT_POWER,
&ehci->regs->port_status [wIndex]);
break;
......@@ -300,7 +297,7 @@ static int ehci_hub_control (
&ehci->regs->port_status [wIndex]);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC (params))
if (HCS_PPC (ehci->hcs_params))
writel (temp | PORT_POWER,
&ehci->regs->port_status [wIndex]);
break;
......@@ -312,6 +309,13 @@ static int ehci_hub_control (
hcd->bus_name, wIndex + 1);
temp |= PORT_OWNER;
} else {
/* Philips 1562 wants CMD_RUN to reset */
if (!HCD_IS_RUNNING(ehci->hcd.state)) {
u32 cmd = readl (&ehci->regs->command);
cmd |= CMD_RUN;
writel (cmd, &ehci->regs->command);
ehci->hcd.state = USB_STATE_RUNNING;
}
vdbg ("%s port %d reset",
hcd->bus_name, wIndex + 1);
temp |= PORT_RESET;
......
/*
* Copyright (c) 2001 by David Brownell
* Copyright (c) 2001-2002 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -643,12 +643,19 @@ ehci_qh_make (
if (usb_pipecontrol (urb->pipe)) {
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (usb_pipebulk (urb->pipe)) {
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else
info1 |= usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe)) << 16;
} else {
u32 temp;
temp = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
info1 |= (temp & 0x3ff) << 16; /* maxpacket */
/* HS intr can be "high bandwidth" */
temp = 1 + ((temp >> 11) & 0x03);
info2 |= temp << 30; /* mult */
}
break;
default:
#ifdef DEBUG
......
/*
* Copyright (c) 2001 by David Brownell
* Copyright (c) 2001-2002 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -55,12 +55,12 @@ periodic_next_shadow (union ehci_shadow *periodic, int tag)
return &periodic->qh->qh_next;
case Q_TYPE_FSTN:
return &periodic->fstn->fstn_next;
#ifdef have_iso
case Q_TYPE_ITD:
return &periodic->itd->itd_next;
#ifdef have_split_iso
case Q_TYPE_SITD:
return &periodic->sitd->sitd_next;
#endif /* have_iso */
#endif /* have_split_iso */
}
dbg ("BAD shadow %p tag %d", periodic->ptr, tag);
// BUG ();
......@@ -109,9 +109,6 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
u32 *hw_p = &ehci->periodic [frame];
union ehci_shadow *q = &ehci->pshadow [frame];
unsigned usecs = 0;
#ifdef have_iso
u32 temp = 0;
#endif
while (q->ptr) {
switch (Q_NEXT_TYPE (*hw_p)) {
......@@ -130,15 +127,13 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
}
q = &q->fstn->fstn_next;
break;
#ifdef have_iso
case Q_TYPE_ITD:
temp = le32_to_cpu (q->itd->transaction [uframe]);
temp >>= 16;
temp &= 0x0fff;
if (temp)
usecs += HS_USECS_ISO (temp);
/* NOTE the "one uframe per itd" policy */
if (q->itd->hw_transaction [uframe] != 0)
usecs += q->itd->usecs;
q = &q->itd->itd_next;
break;
#ifdef have_split_iso
case Q_TYPE_SITD:
temp = q->sitd->hw_fullspeed_ep &
__constant_cpu_to_le32 (1 << 31);
......@@ -163,7 +158,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
}
q = &q->sitd->sitd_next;
break;
#endif /* have_iso */
#endif /* have_split_iso */
default:
BUG ();
}
......@@ -178,6 +173,45 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
/*-------------------------------------------------------------------------*/
static void enable_periodic (struct ehci_hcd *ehci)
{
u32 cmd;
/* did clearing PSE take effect yet?
* takes effect only at frame boundaries...
*/
while (readl (&ehci->regs->status) & STS_PSS)
udelay (20);
cmd = readl (&ehci->regs->command) | CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... PSS happens later */
ehci->hcd.state = USB_STATE_RUNNING;
/* make sure tasklet scans these */
ehci->next_uframe = readl (&ehci->regs->frame_index)
% (ehci->periodic_size << 3);
}
static void disable_periodic (struct ehci_hcd *ehci)
{
u32 cmd;
/* did setting PSE not take effect yet?
* takes effect only at frame boundaries...
*/
while (!(readl (&ehci->regs->status) & STS_PSS))
udelay (20);
cmd = readl (&ehci->regs->command) & ~CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... */
ehci->next_uframe = -1;
}
/*-------------------------------------------------------------------------*/
static void intr_deschedule (
struct ehci_hcd *ehci,
unsigned frame,
......@@ -199,21 +233,9 @@ static void intr_deschedule (
ehci->periodic_urbs--;
/* maybe turn off periodic schedule */
if (!ehci->periodic_urbs) {
u32 cmd = readl (&ehci->regs->command);
/* did setting PSE not take effect yet?
* takes effect only at frame boundaries...
*/
while (!(readl (&ehci->regs->status) & STS_PSS))
udelay (20);
cmd &= ~CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... */
ehci->next_frame = -1;
} else
if (!ehci->periodic_urbs)
disable_periodic (ehci);
else
vdbg ("periodic schedule still enabled");
spin_unlock_irqrestore (&ehci->lock, flags);
......@@ -242,7 +264,7 @@ static int intr_submit (
) {
unsigned epnum, period;
unsigned temp;
unsigned short mult, usecs;
unsigned short usecs;
unsigned long flags;
struct ehci_qh *qh;
struct hcd_dev *dev;
......@@ -255,12 +277,7 @@ static int intr_submit (
epnum |= 0x10;
} else
temp = urb->dev->epmaxpacketout [epnum];
mult = 1;
if (urb->dev->speed == USB_SPEED_HIGH) {
/* high speed "high bandwidth" is coded in ep maxpacket */
mult += (temp >> 11) & 0x03;
temp &= 0x03ff;
} else {
if (urb->dev->speed != USB_SPEED_HIGH) {
dbg ("no intr/tt scheduling yet");
status = -ENOSYS;
goto done;
......@@ -279,20 +296,11 @@ static int intr_submit (
usecs = HS_USECS (urb->transfer_buffer_length);
/*
* force a power-of-two (frames) sized polling interval
*
* NOTE: endpoint->bInterval for highspeed is measured in uframes,
* while for full/low speeds it's in frames. Here we "know" that
* urb->interval doesn't give access to high interrupt rates.
*/
period = ehci->periodic_size;
temp = period;
if (unlikely (urb->interval < 1))
urb->interval = 1;
while (temp > urb->interval)
temp >>= 1;
period = urb->interval = temp;
/* FIXME handle HS periods of less than 1 frame. */
if (urb->interval < 8)
period = 1;
else
period = urb->interval >> 8;
spin_lock_irqsave (&ehci->lock, flags);
......@@ -335,7 +343,6 @@ static int intr_submit (
unsigned frame = urb->interval;
qh->hw_next = EHCI_LIST_END;
qh->hw_info2 |= cpu_to_le32 (mult << 30);
qh->usecs = usecs;
urb->hcpriv = qh_put (qh);
......@@ -378,7 +385,7 @@ static int intr_submit (
/* stuff into the periodic schedule */
qh->qh_state = QH_STATE_LINKED;
vdbg ("qh %p usecs %d period %d starting frame %d.%d",
vdbg ("qh %p usecs %d period %d starting %d.%d",
qh, qh->usecs, period, frame, uframe);
do {
if (unlikely (ehci->pshadow [frame].ptr != 0)) {
......@@ -397,23 +404,8 @@ static int intr_submit (
usb_claim_bandwidth (urb->dev, urb, usecs, 0);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++) {
u32 cmd;
/* did clearing PSE take effect yet?
* takes effect only at frame boundaries...
*/
while (readl (&ehci->regs->status) & STS_PSS)
udelay (20);
cmd = readl (&ehci->regs->command) | CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... PSS happens later */
ehci->hcd.state = USB_STATE_RUNNING;
/* make sure tasklet scans these */
ehci->next_frame = ehci_get_frame (&ehci->hcd);
}
if (!ehci->periodic_urbs++)
enable_periodic (ehci);
break;
} while (frame);
......@@ -489,53 +481,56 @@ intr_complete (
/*-------------------------------------------------------------------------*/
#ifdef have_iso
static inline void itd_free (struct ehci_hcd *ehci, struct ehci_itd *itd)
static void
itd_free_list (struct ehci_hcd *ehci, struct urb *urb)
{
struct ehci_itd *first_itd = urb->hcpriv;
pci_unmap_single (ehci->hcd.pdev,
first_itd->buf_dma, urb->transfer_buffer_length,
usb_pipein (urb->pipe)
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
while (!list_empty (&first_itd->itd_list)) {
struct ehci_itd *itd;
itd = list_entry (
first_itd->itd_list.next,
struct ehci_itd, itd_list);
list_del (&itd->itd_list);
pci_pool_free (ehci->itd_pool, itd, itd->itd_dma);
}
pci_pool_free (ehci->itd_pool, first_itd, first_itd->itd_dma);
urb->hcpriv = 0;
}
/*
* Create itd and allocate into uframes within specified frame.
* Caller must update the resulting uframe links.
*/
static struct ehci_itd *
itd_make (
static int
itd_fill (
struct ehci_hcd *ehci,
struct ehci_itd *itd,
struct urb *urb,
unsigned index, // urb->iso_frame_desc [index]
unsigned frame, // scheduled start
dma_addr_t dma, // mapped transfer buffer
int mem_flags
dma_addr_t dma // mapped transfer buffer
) {
struct ehci_itd *itd;
u64 temp;
u32 buf1;
unsigned epnum, maxp, multi, usecs;
unsigned i, epnum, maxp, multi;
unsigned length;
unsigned i, bufnum;
/* allocate itd, start to fill it */
itd = pci_pool_alloc (ehci->itd_pool, mem_flags, &dma);
if (!itd)
return itd;
itd->hw_next = EHCI_LIST_END;
itd->urb = urb;
itd->index = index;
INIT_LIST_HEAD (&itd->itd_list);
itd->uframe = (frame * 8) % ehci->periodic_size;
/* tell itd about the buffer its transfers will consume */
/* tell itd about its transfer buffer, max 2 pages */
length = urb->iso_frame_desc [index].length;
dma += urb->iso_frame_desc [index].offset;
temp = dma & ~0x0fff;
for (i = 0; i < 7; i++) {
for (i = 0; i < 2; i++) {
itd->hw_bufp [i] = cpu_to_le32 ((u32) temp);
itd->hw_bufp_hi [i] = cpu_to_le32 ((u32)(temp >> 32));
temp += 0x0fff;
temp += 0x1000;
}
itd->buf_dma = dma;
/*
* this might be a "high bandwidth" highspeed endpoint,
......@@ -544,108 +539,311 @@ itd_make (
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe)) {
maxp = urb->dev->epmaxpacketin [epnum];
buf1 = (1 << 11) | maxp;
buf1 = (1 << 11);
} else {
maxp = urb->dev->epmaxpacketout [epnum];
buf1 = maxp;
buf1 = 0;
}
buf1 |= (maxp & 0x03ff);
multi = 1;
multi += (temp >> 11) & 0x03;
multi += (maxp >> 11) & 0x03;
maxp &= 0x03ff;
maxp *= multi;
/* transfer can't fit in any uframe? */
if (length < 0 || maxp < length) {
dbg ("BAD iso packet: %d bytes, max %d, urb %p [%d] (of %d)",
length, maxp, urb, index,
urb->iso_frame_desc [index].length);
return -ENOSPC;
}
itd->usecs = HS_USECS_ISO (length);
/* "plus" info in low order bits of buffer pointers */
itd->hw_bufp [0] |= cpu_to_le32 ((epnum << 8) | urb->dev->devnum);
itd->hw_bufp [1] |= cpu_to_le32 (buf1);
itd->hw_bufp [2] |= cpu_to_le32 (multi);
/* schedule as many uframes as needed */
maxp *= multi;
usecs = HS_USECS_ISO (maxp);
bufnum = 0;
for (i = 0; i < 8; i++) {
unsigned t, offset, scratch;
/* figure hw_transaction[] value (it's scheduled later) */
itd->transaction = EHCI_ISOC_ACTIVE;
itd->transaction |= dma & 0x0fff; /* offset; buffer=0 */
if ((index + 1) == urb->number_of_packets)
itd->transaction |= EHCI_ITD_IOC; /* end-of-urb irq */
itd->transaction |= length << 16;
cpu_to_le32s (&itd->transaction);
if (length <= 0) {
itd->hw_transaction [i] = 0;
continue;
}
return 0;
}
/* don't commit more than 80% periodic == 100 usec */
if ((periodic_usecs (ehci, itd->uframe, i) + usecs) > 100)
continue;
static int
itd_urb_transaction (
struct ehci_hcd *ehci,
struct urb *urb,
int mem_flags
) {
int frame_index;
struct ehci_itd *first_itd, *itd;
int status;
dma_addr_t buf_dma, itd_dma;
/* we'll use this uframe; figure hw_transaction */
t = EHCI_ISOC_ACTIVE;
t |= bufnum << 12; // which buffer?
offset = temp & 0x0fff; // offset therein
t |= offset;
if ((offset + maxp) >= 4096) // hc auto-wraps end-of-"page"
bufnum++;
if (length <= maxp) {
// interrupt only needed at end-of-urb
if ((index + 1) == urb->number_of_packets)
t |= EHCI_ITD_IOC;
scratch = length;
} else
scratch = maxp;
t |= scratch << 16;
t = cpu_to_le32 (t);
/* set up one dma mapping for this urb */
buf_dma = pci_map_single (ehci->hcd.pdev,
urb->transfer_buffer, urb->transfer_buffer_length,
usb_pipein (urb->pipe)
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
if (buf_dma == 0)
return -ENOMEM;
itd->hw_transaction [i] = itd->transaction [i] = t;
length -= scratch;
/* allocate/init ITDs */
for (frame_index = 0, first_itd = 0;
frame_index < urb->number_of_packets;
frame_index++) {
itd = pci_pool_alloc (ehci->itd_pool, mem_flags, &itd_dma);
if (!itd) {
status = -ENOMEM;
goto fail;
}
if (length > 0) {
dbg ("iso frame too big, urb %p [%d], %d extra (of %d)",
urb, index, length, urb->iso_frame_desc [index].length);
itd_free (ehci, itd);
itd = 0;
memset (itd, 0, sizeof *itd);
itd->itd_dma = itd_dma;
status = itd_fill (ehci, itd, urb, frame_index, buf_dma);
if (status != 0)
goto fail;
if (first_itd)
list_add_tail (&itd->itd_list,
&first_itd->itd_list);
else {
INIT_LIST_HEAD (&itd->itd_list);
urb->hcpriv = first_itd = itd;
}
return itd;
}
urb->error_count = 0;
return 0;
fail:
if (urb->hcpriv)
itd_free_list (ehci, urb);
return status;
}
/*-------------------------------------------------------------------------*/
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
u32 ptr;
ptr = cpu_to_le32 (itd->itd_dma); // type 0 == itd
if (ehci->pshadow [frame].ptr) {
if (!itd->itd_next.ptr) {
/* always prepend ITD/SITD ... only QH tree is order-sensitive */
itd->itd_next = ehci->pshadow [frame];
itd->hw_next = ehci->periodic [frame];
} else if (itd->itd_next.ptr != ehci->pshadow [frame].ptr) {
dbg ("frame %d itd link goof", frame);
BUG ();
ehci->pshadow [frame].itd = itd;
ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}
/*
* return zero on success, else -errno
* - start holds first uframe to start scheduling into
* - max is the first uframe it's NOT (!) OK to start scheduling into
* math to be done modulo "mod" (ehci->periodic_size << 3)
*/
static int get_iso_range (
struct ehci_hcd *ehci,
struct urb *urb,
unsigned *start,
unsigned *max,
unsigned mod
) {
struct list_head *lh;
struct hcd_dev *dev = urb->dev->hcpriv;
int last = -1;
unsigned now, span, end;
span = urb->interval * urb->number_of_packets;
/* first see if we know when the next transfer SHOULD happen */
list_for_each (lh, &dev->urb_list) {
struct urb *u;
struct ehci_itd *itd;
unsigned s;
u = list_entry (lh, struct urb, urb_list);
if (u == urb || u->pipe != urb->pipe)
continue;
if (u->interval != urb->interval) { /* must not change! */
dbg ("urb %p interval %d ... != %p interval %d",
u, u->interval, urb, urb->interval);
return -EINVAL;
}
/* URB for this endpoint... covers through when? */
itd = urb->hcpriv;
s = itd->uframe + u->interval * u->number_of_packets;
if (last < 0)
last = s;
else {
/*
* So far we can only queue two ISO URBs...
*
* FIXME do interval math, figure out whether
* this URB is "before" or not ... also, handle
* the case where the URB might have completed,
* but hasn't yet been processed.
*/
dbg ("NYET: queue >2 URBs per ISO endpoint");
return -EDOM;
}
}
ehci->pshadow [frame].itd = itd;
ehci->periodic [frame] = ptr;
/* calculate the legal range [start,max) */
now = readl (&ehci->regs->frame_index) + 1; /* next uframe */
if (!ehci->periodic_urbs)
now += 8; /* startup delay */
now %= mod;
end = now + mod;
if (last < 0) {
*start = now + ehci->i_thresh + /* paranoia */ 1;
*max = end - span;
if (*max < *start + 1)
*max = *start + 1;
} else {
*start = last % mod;
*max = (last + 1) % mod;
}
/* explicit start frame? */
if (!(urb->transfer_flags & USB_ISO_ASAP)) {
unsigned temp;
/* sanity check: must be in range */
urb->start_frame %= ehci->periodic_size;
temp = urb->start_frame << 3;
if (temp < *start)
temp += mod;
if (temp > *max)
return -EDOM;
/* use that explicit start frame */
*start = urb->start_frame << 3;
temp += 8;
if (temp < *max)
*max = temp;
}
// FIXME minimize wraparound to "now" ... insist max+span
// (and start+span) remains a few frames short of "end"
*max %= ehci->periodic_size;
if ((*start + span) < end)
return 0;
return -EFBIG;
}
static int
itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
{
unsigned start, max, i;
int status;
unsigned mod = ehci->periodic_size << 3;
for (i = 0; i < urb->number_of_packets; i++) {
urb->iso_frame_desc [i].status = -EINPROGRESS;
urb->iso_frame_desc [i].actual_length = 0;
}
if ((status = get_iso_range (ehci, urb, &start, &max, mod)) != 0)
return status;
do {
unsigned uframe;
unsigned usecs;
struct ehci_itd *itd;
/* check schedule: enough space? */
itd = urb->hcpriv;
uframe = start;
for (i = 0, uframe = start;
i < urb->number_of_packets;
i++, uframe += urb->interval) {
uframe %= mod;
/* can't commit more than 80% periodic == 100 usec */
if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
> (100 - itd->usecs)) {
itd = 0;
break;
}
itd = list_entry (itd->itd_list.next,
struct ehci_itd, itd_list);
}
if (!itd)
continue;
/* that's where we'll schedule this! */
itd = urb->hcpriv;
urb->start_frame = start >> 3;
vdbg ("ISO urb %p (%d packets period %d) starting %d.%d",
urb, urb->number_of_packets, urb->interval,
urb->start_frame, start & 0x7);
for (i = 0, uframe = start, usecs = 0;
i < urb->number_of_packets;
i++, uframe += urb->interval) {
uframe %= mod;
itd->uframe = uframe;
itd->hw_transaction [uframe & 0x07] = itd->transaction;
itd_link (ehci, (uframe >> 3) % ehci->periodic_size,
itd);
usecs += itd->usecs;
itd = list_entry (itd->itd_list.next,
struct ehci_itd, itd_list);
}
/* update bandwidth utilization records (for usbfs) */
/* FIXME usbcore expects per-frame average, which isn't
* the most accurate model... this provides the total claim,
* and expects the average to be computed only for display.
*/
usb_claim_bandwidth (urb->dev, urb, usecs, 1);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++)
enable_periodic (ehci);
return 0;
} while ((start = ++start % mod) != max);
/* no room in the schedule */
dbg ("urb %p, CAN'T SCHEDULE", urb);
return -ENOSPC;
}
/*-------------------------------------------------------------------------*/
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
static unsigned long
itd_complete (struct ehci_hcd *ehci, struct ehci_itd *itd, unsigned long flags)
{
itd_complete (
struct ehci_hcd *ehci,
struct ehci_itd *itd,
unsigned uframe,
unsigned long flags
) {
struct urb *urb = itd->urb;
/* if not unlinking: */
if (!(urb->transfer_flags & EHCI_STATE_UNLINK)
&& ehci->hcd.state != USB_STATE_HALT) {
int i;
struct usb_iso_packet_descriptor *desc;
struct ehci_itd *first_itd = urb->hcpriv;
u32 t;
/* update status for this frame's transfers */
/* update status for this uframe's transfers */
desc = &urb->iso_frame_desc [itd->index];
desc->status = 0;
desc->actual_length = 0;
for (i = 0; i < 8; i++) {
u32 t = itd->hw_transaction [i];
if (t & (ISO_ERRS | EHCI_ISOC_ACTIVE)) {
t = itd->hw_transaction [uframe];
itd->hw_transaction [uframe] = 0;
if (t & EHCI_ISOC_ACTIVE)
desc->status = -EXDEV;
else if (t & EHCI_ISOC_BUF_ERR)
else if (t & ISO_ERRS) {
urb->error_count++;
if (t & EHCI_ISOC_BUF_ERR)
desc->status = usb_pipein (urb->pipe)
? -ENOSR /* couldn't read */
: -ECOMM; /* couldn't write */
......@@ -653,173 +851,95 @@ itd_complete (struct ehci_hcd *ehci, struct ehci_itd *itd, unsigned long flags)
desc->status = -EOVERFLOW;
else /* (t & EHCI_ISOC_XACTERR) */
desc->status = -EPROTO;
break;
}
/* HC need not update length with this error */
if (!(t & EHCI_ISOC_BABBLE))
desc->actual_length += EHCI_ITD_LENGTH (t);
} else {
desc->status = 0;
desc->actual_length += EHCI_ITD_LENGTH (t);
}
vdbg ("itd %p urb %p packet %d/%d trans %x status %d len %d",
itd, urb, itd->index + 1, urb->number_of_packets,
t, desc->status, desc->actual_length);
/* handle completion now? */
if ((itd->index + 1) != urb->number_of_packets)
return flags;
i = usb_pipein (urb->pipe);
if (i)
pci_dma_sync_single (ehci->hcd.pdev,
first_itd->buf_dma,
urb->transfer_buffer_length,
PCI_DMA_FROMDEVICE);
/* call completion with no locks; it can unlink ... */
spin_unlock_irqrestore (&ehci->lock, flags);
urb->complete (urb);
spin_lock_irqsave (&ehci->lock, flags);
/* re-activate this URB? or unlink? */
if (!(urb->transfer_flags & EHCI_STATE_UNLINK)
&& ehci->hcd.state != USB_STATE_HALT) {
if (!i)
pci_dma_sync_single (ehci->hcd.pdev,
first_itd->buf_dma,
urb->transfer_buffer_length,
PCI_DMA_TODEVICE);
itd = urb->hcpriv;
do {
for (i = 0; i < 8; i++)
itd->hw_transaction [i]
= itd->transaction [i];
itd = list_entry (itd->itd_list.next,
struct ehci_itd, itd_list);
} while (itd != urb->hcpriv);
return flags;
}
/* unlink done only on the last itd */
} else if ((itd->index + 1) != urb->number_of_packets)
return flags;
/* we're unlinking ... */
/*
* For now, always give the urb back to the driver ... expect it
* to submit a new urb (or resubmit this), and to have another
* already queued when un-interrupted transfers are needed.
* No, that's not what OHCI or UHCI are now doing.
*
* FIXME Revisit the ISO URB model. It's cleaner not to have all
* the special case magic, but it'd be faster to reuse existing
* ITD/DMA setup and schedule state. Easy to dma_sync/complete(),
* then either reschedule or, if unlinking, free and giveback().
* But we can't overcommit like the full and low speed HCs do, and
* there's no clean way to report an error when rescheduling...
*
* NOTE that for now we don't accelerate ISO unlinks; they just
* happen according to the current schedule. Means a delay of
* up to about a second (max).
*/
itd_free_list (ehci, urb);
if (urb->status == -EINPROGRESS)
urb->status = 0;
/* decouple urb from the hcd */
spin_unlock_irqrestore (&ehci->lock, flags);
if (ehci->hcd.state == USB_STATE_HALT)
urb->status = -ESHUTDOWN;
itd = urb->hcpriv;
urb->hcpriv = 0;
ehci_urb_done (ehci, itd->buf_dma, urb);
usb_hcd_giveback_urb (&ehci->hcd, urb);
spin_lock_irqsave (&ehci->lock, flags);
/* take itds out of the hc's periodic schedule */
list_entry (itd->itd_list.prev, struct ehci_itd, itd_list)
->itd_list.next = 0;
do {
struct ehci_itd *next;
if (itd->itd_list.next)
next = list_entry (itd->itd_list.next,
struct ehci_itd, itd_list);
else
next = 0;
// FIXME: hc WILL (!) lap us here, if we get behind
// by 128 msec (or less, with smaller periodic_size).
// Reading/caching these itds will cause trouble...
/* defer stopping schedule; completion can submit */
ehci->periodic_urbs--;
if (!ehci->periodic_urbs)
disable_periodic (ehci);
periodic_unlink (ehci, itd->uframe, itd);
itd_free (ehci, itd);
itd = next;
} while (itd);
return flags;
}
/*-------------------------------------------------------------------------*/
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb)
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
struct ehci_itd *first_itd = 0, *itd;
unsigned frame_index;
dma_addr_t dma;
int status;
unsigned long flags;
dbg ("itd_submit");
dbg ("itd_submit urb %p", urb);
/* set up one dma mapping for this urb */
dma = pci_map_single (ehci->hcd.pdev,
urb->transfer_buffer, urb->transfer_buffer_length,
usb_pipein (urb->pipe)
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
if (dma == 0)
return -ENOMEM;
/* NOTE DMA mapping assumes this ... */
if (urb->iso_frame_desc [0].offset != 0)
return -EINVAL;
/*
* Schedule as needed. This is VERY optimistic about free
* bandwidth! But the API assumes drivers can pick frames
* intelligently (how?), so there's no other good option.
*
* FIXME this doesn't handle urb->next rings, or try to
* use the iso periodicity.
* NOTE doing this for now, anticipating periodic URB models
* get updated to be "explicit resubmit".
*/
if (urb->transfer_flags & USB_ISO_ASAP) {
urb->start_frame = ehci_get_frame (&ehci->hcd);
urb->start_frame++;
if (urb->next) {
dbg ("use explicit resubmit for ISO");
return -EINVAL;
}
urb->start_frame %= ehci->periodic_size;
/* create and populate itds (doing uframe scheduling) */
/* allocate ITDs w/o locking anything */
status = itd_urb_transaction (ehci, urb, mem_flags);
if (status < 0)
return status;
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
for (frame_index = 0;
frame_index < urb->number_of_packets;
frame_index++) {
itd = itd_make (ehci, urb, frame_index,
urb->start_frame + frame_index,
dma, SLAB_ATOMIC);
if (itd) {
if (first_itd)
list_add_tail (&itd->itd_list,
&first_itd->itd_list);
else
first_itd = itd;
} else {
status = itd_schedule (ehci, urb);
spin_unlock_irqrestore (&ehci->lock, flags);
if (first_itd) {
while (!list_empty (&first_itd->itd_list)) {
itd = list_entry (
first_itd->itd_list.next,
struct ehci_itd, itd_list);
list_del (&itd->itd_list);
itd_free (ehci, itd);
}
itd_free (ehci, first_itd);
}
pci_unmap_single (ehci->hcd.pdev,
dma, urb->transfer_buffer_length,
usb_pipein (urb->pipe)
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
return -ENOMEM;
}
}
/* stuff into the schedule */
itd = first_itd;
do {
unsigned i;
for (i = 0; i < 8; i++) {
if (!itd->hw_transaction [i])
continue;
itd_link (ehci, itd->uframe + i, itd);
}
itd = list_entry (itd->itd_list.next,
struct ehci_itd, itd_list);
} while (itd != first_itd);
urb->hcpriv = first_itd;
if (status < 0)
itd_free_list (ehci, urb);
spin_unlock_irqrestore (&ehci->lock, flags);
return 0;
return status;
}
#ifdef have_split_iso
/*-------------------------------------------------------------------------*/
/*
......@@ -827,7 +947,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb)
* the TTs in USB 2.0 hubs.
*/
static inline void
static void
sitd_free (struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
pci_pool_free (ehci->sitd_pool, sitd, sitd->sitd_dma);
......@@ -861,7 +981,7 @@ sitd_make (
}
static inline void
static void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
u32 ptr;
......@@ -894,12 +1014,11 @@ sitd_complete (
/*-------------------------------------------------------------------------*/
static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb)
static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
// struct ehci_sitd *first_sitd = 0;
unsigned frame_index;
dma_addr_t dma;
int mem_flags;
dbg ("NYI -- sitd_submit");
......@@ -908,8 +1027,6 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb)
// FIXME: setup one big dma mapping
dma = 0;
mem_flags = SLAB_ATOMIC;
for (frame_index = 0;
frame_index < urb->number_of_packets;
frame_index++) {
......@@ -941,53 +1058,62 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb)
return -ENOSYS;
}
#endif /* have_iso */
#endif /* have_split_iso */
/*-------------------------------------------------------------------------*/
static void scan_periodic (struct ehci_hcd *ehci)
{
unsigned frame;
unsigned clock;
unsigned frame, clock, now_uframe, mod;
unsigned long flags;
mod = ehci->periodic_size << 3;
spin_lock_irqsave (&ehci->lock, flags);
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
* It's safe to scan entries more than once, though.
* Don't scan ISO entries more than once, though.
*/
if (HCD_IS_RUNNING (ehci->hcd.state)) {
frame = ehci->next_frame;
clock = ehci_get_frame (&ehci->hcd);
frame = ehci->next_uframe >> 3;
if (HCD_IS_RUNNING (ehci->hcd.state))
now_uframe = readl (&ehci->regs->frame_index);
else
now_uframe = (frame << 3) - 1;
now_uframe %= mod;
clock = now_uframe >> 3;
/* when shutting down, scan everything for thoroughness */
} else {
frame = 0;
clock = ehci->periodic_size - 1;
}
for (;;) {
union ehci_shadow q;
u32 type;
union ehci_shadow q, *q_p;
u32 type, *hw_p;
unsigned uframes;
restart:
q.ptr = ehci->pshadow [frame].ptr;
type = Q_NEXT_TYPE (ehci->periodic [frame]);
/* scan schedule to _before_ current frame index */
if (frame == clock)
uframes = now_uframe & 0x07;
else
uframes = 8;
q_p = &ehci->pshadow [frame];
hw_p = &ehci->periodic [frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE (*hw_p);
/* scan each element in frame's queue for completions */
while (q.ptr != 0) {
int last;
unsigned uf;
union ehci_shadow temp;
switch (type) {
case Q_TYPE_QH:
last = (q.qh->hw_next == EHCI_LIST_END);
temp = q.qh->qh_next;
type = Q_NEXT_TYPE (q.qh->hw_next);
flags = intr_complete (ehci, frame,
qh_put (q.qh), flags);
type = Q_NEXT_TYPE (q.qh->hw_next);
temp = q.qh->qh_next;
qh_unput (ehci, q.qh);
q = temp;
break;
......@@ -1000,22 +1126,49 @@ static void scan_periodic (struct ehci_hcd *ehci)
dbg ("ignoring completions from FSTNs");
}
type = Q_NEXT_TYPE (q.fstn->hw_next);
temp = q.fstn->fstn_next;
q = q.fstn->fstn_next;
break;
#ifdef have_iso
case Q_TYPE_ITD:
last = (q.itd->hw_next == EHCI_LIST_END);
flags = itd_complete (ehci, q.itd, flags);
/* Unlink each (S)ITD we see, since the ISO
* URB model forces constant rescheduling.
* That complicates sharing uframes in ITDs,
* and means we need to skip uframes the HC
* hasn't yet processed.
*/
for (uf = 0; uf < uframes; uf++) {
if (q.itd->hw_transaction [uf] != 0) {
temp = q;
*q_p = q.itd->itd_next;
*hw_p = q.itd->hw_next;
type = Q_NEXT_TYPE (*hw_p);
/* might free q.itd ... */
flags = itd_complete (ehci,
temp.itd, uf, flags);
break;
}
}
/* we might skip this ITD's uframe ... */
if (uf == uframes) {
q_p = &q.itd->itd_next;
hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE (q.itd->hw_next);
q = q.itd->itd_next;
}
q = *q_p;
break;
#ifdef have_split_iso
case Q_TYPE_SITD:
last = (q.sitd->hw_next == EHCI_LIST_END);
flags = sitd_complete (ehci, q.sitd, flags);
type = Q_NEXT_TYPE (q.sitd->hw_next);
// FIXME unlink SITD after split completes
q = q.sitd->sitd_next;
break;
#endif /* have_iso */
#endif /* have_split_iso */
default:
dbg ("corrupt type %d frame %d shadow %p",
type, frame, q.ptr);
......@@ -1044,13 +1197,16 @@ static void scan_periodic (struct ehci_hcd *ehci)
if (!HCD_IS_RUNNING (ehci->hcd.state))
break;
ehci->next_frame = clock;
now = ehci_get_frame (&ehci->hcd);
if (clock == now)
ehci->next_uframe = now_uframe;
now = readl (&ehci->regs->frame_index) % mod;
if (now_uframe == now)
break;
clock = now;
} else if (++frame >= ehci->periodic_size)
frame = 0;
/* rescan the rest of this frame, then ... */
now_uframe = now;
clock = now_uframe >> 3;
} else
frame = (frame + 1) % ehci->periodic_size;
}
spin_unlock_irqrestore (&ehci->lock, flags);
}
}
/*
* Copyright (c) 2001 by David Brownell
* Copyright (c) 2001-2002 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -49,7 +49,7 @@ struct ehci_hcd { /* one per controller */
unsigned i_thresh; /* uframes HC might cache */
union ehci_shadow *pshadow; /* mirror hw periodic table */
int next_frame; /* scan periodic, start here */
int next_uframe; /* scan periodic, start here */
unsigned periodic_urbs; /* how many urbs scheduled? */
/* deferred work from IRQ, etc */
......@@ -62,6 +62,7 @@ struct ehci_hcd { /* one per controller */
struct usb_hcd hcd;
struct ehci_caps *caps;
struct ehci_regs *regs;
u32 hcs_params; /* cached register copy */
/* per-HC memory pools (could be per-PCI-bus, but ...) */
struct pci_pool *qh_pool; /* qh per active urb */
......@@ -324,13 +325,14 @@ struct ehci_itd {
union ehci_shadow itd_next; /* ptr to periodic q entry */
struct urb *urb;
unsigned index; /* in urb->iso_frame_desc */
struct list_head itd_list; /* list of urb frames' itds */
dma_addr_t buf_dma; /* frame's buffer address */
unsigned uframe; /* in periodic schedule */
u32 transaction [8]; /* copy of hw_transaction */
/* for now, only one hw_transaction per itd */
u32 transaction;
u16 index; /* in urb->iso_frame_desc */
u16 uframe; /* in periodic schedule */
u16 usecs;
} __attribute__ ((aligned (32)));
/*-------------------------------------------------------------------------*/
......