Commit 322bdbf7 authored by Linus Torvalds

Merge http://linuxusb.bkbits.net/linus-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents b2520184 7442ce49
......@@ -299,6 +299,8 @@ KAO -->
EHCI, OHCI, or UHCI.
</para>
!Edrivers/usb/core/hcd.c
!Edrivers/usb/core/hcd-pci.c
!Edrivers/usb/core/buffer.c
</sect1>
</chapter>
......
......@@ -2,10 +2,10 @@
# Makefile for USB Core files and filesystem
#
export-objs := usb.o hcd.o hcd-pci.o urb.o message.o file.o
export-objs := usb.o hcd.o hcd-pci.o urb.o message.o file.o buffer.o
usbcore-objs := usb.o usb-debug.o hub.o hcd.o urb.o message.o \
config.o file.o
config.o file.o buffer.o
ifeq ($(CONFIG_PCI),y)
usbcore-objs += hcd-pci.o
......
/*
* DMA memory management for framework level HCD code (hc_driver)
*
* This implementation plugs in through generic "usb_bus" level methods,
* and works with real PCI, or when "pci device == null" makes sense.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#ifdef CONFIG_USB_DEBUG
#define DEBUG
#else
#undef DEBUG
#endif
#include <linux/usb.h>
#include "hcd.h"
/*
* DMA-Consistent Buffers
*/
/* FIXME tune these based on pool statistics ... */
static const size_t pool_max [HCD_BUFFER_POOLS] = {
32,
128,
512,
PAGE_SIZE / 2
/* bigger --> allocate pages */
};
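/* For illustration (hypothetical request sizes): with the table above, a
 * 100-byte request comes from the 128-byte pool and a 300-byte request from
 * the 512-byte pool, while anything larger than PAGE_SIZE/2 falls through
 * to pci_alloc_consistent() in hcd_buffer_alloc() below.
 */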
/* SETUP primitives */
/**
* hcd_buffer_create - initialize buffer pools
* @hcd: the bus whose buffer pools are to be initialized
*
* Call this as part of initializing a host controller that uses the pci dma
* memory allocators. It initializes some pools of dma-consistent memory that
* will be shared by all drivers using that controller; it returns zero on
* success or a negative errno value on error.
*
* Call hcd_buffer_destroy() to clean up after using those pools.
*/
int hcd_buffer_create (struct usb_hcd *hcd)
{
char name [16];
int i, size;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
if (!(size = pool_max [i]))
continue;
snprintf (name, sizeof name, "buffer-%d", size);
hcd->pool [i] = pci_pool_create (name, hcd->pdev,
size, size, 0, SLAB_KERNEL);
if (!hcd->pool [i]) {
hcd_buffer_destroy (hcd);
return -ENOMEM;
}
}
return 0;
}
EXPORT_SYMBOL (hcd_buffer_create);
/**
* hcd_buffer_destroy - deallocate buffer pools
* @hcd: the bus whose buffer pools are to be destroyed
*
* This frees the buffer pools created by hcd_buffer_create().
*/
void hcd_buffer_destroy (struct usb_hcd *hcd)
{
int i;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
struct pci_pool *pool = hcd->pool [i];
if (pool) {
pci_pool_destroy (pool);
hcd->pool [i] = 0;
}
}
}
EXPORT_SYMBOL (hcd_buffer_destroy);
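/* A minimal sketch of the intended call pattern (my_hcd_probe() and
 * my_hcd_remove() are hypothetical; the hcd-pci.c hunk below does the same
 * thing in usb_hcd_pci_probe/remove): create the pools once per controller,
 * and destroy them on the error path and at removal.
 */
static int my_hcd_probe (struct usb_hcd *hcd)
{
	int retval;

	if ((retval = hcd_buffer_create (hcd)) != 0)
		return retval;
	/* ... register the bus, request the IRQ, start the controller ... */
	return 0;
}

static void my_hcd_remove (struct usb_hcd *hcd)
{
	/* ... stop the controller, free the IRQ ... */
	hcd_buffer_destroy (hcd);	/* skips pools that were never created */
}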
/* sometimes alloc/free could use kmalloc with SLAB_DMA, for
* better sharing and to leverage mm/slab.c intelligence.
*/
void *hcd_buffer_alloc (
struct usb_bus *bus,
size_t size,
int mem_flags,
dma_addr_t *dma
)
{
struct usb_hcd *hcd = bus->hcpriv;
int i;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
if (size <= pool_max [i])
return pci_pool_alloc (hcd->pool [i], mem_flags, dma);
}
return pci_alloc_consistent (hcd->pdev, size, dma);
}
void hcd_buffer_free (
struct usb_bus *bus,
size_t size,
void *addr,
dma_addr_t dma
)
{
struct usb_hcd *hcd = bus->hcpriv;
int i;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
if (size <= pool_max [i]) {
pci_pool_free (hcd->pool [i], addr, dma);
return;
}
}
pci_free_consistent (hcd->pdev, size, addr, dma);
}
/*
* DMA-Mappings for arbitrary memory buffers
*/
int hcd_buffer_map (
struct usb_bus *bus,
void *addr,
dma_addr_t *dma,
size_t size,
int direction
) {
struct usb_hcd *hcd = bus->hcpriv;
// FIXME pci_map_single() has no standard failure mode!
*dma = pci_map_single (hcd->pdev, addr, size,
(direction == USB_DIR_IN)
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
return 0;
}
void hcd_buffer_dmasync (
struct usb_bus *bus,
dma_addr_t dma,
size_t size,
int direction
) {
struct usb_hcd *hcd = bus->hcpriv;
pci_dma_sync_single (hcd->pdev, dma, size,
(direction == USB_DIR_IN)
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
}
void hcd_buffer_unmap (
struct usb_bus *bus,
dma_addr_t dma,
size_t size,
int direction
) {
struct usb_hcd *hcd = bus->hcpriv;
pci_unmap_single (hcd->pdev, dma, size,
(direction == USB_DIR_IN)
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
}
// FIXME DMA-Mappings for struct scatterlist
......@@ -130,10 +130,19 @@ int usb_hcd_pci_probe (struct pci_dev *dev, const struct pci_device_id *id)
return retval;
}
}
pci_set_drvdata(dev, hcd);
pci_set_drvdata (dev, hcd);
hcd->driver = driver;
hcd->description = driver->description;
hcd->pdev = dev;
hcd->self.bus_name = dev->slot_name;
hcd->product_desc = dev->name;
if ((retval = hcd_buffer_create (hcd)) != 0) {
clean_3:
driver->hcd_free (hcd);
goto clean_2;
}
info ("%s @ %s, %s", hcd->description, dev->slot_name, dev->name);
pci_read_config_byte (dev, PCI_LATENCY_TIMER, &latency);
......@@ -154,8 +163,7 @@ int usb_hcd_pci_probe (struct pci_dev *dev, const struct pci_device_id *id)
!= 0) {
err ("request interrupt %s failed", bufp);
retval = -EBUSY;
driver->hcd_free (hcd);
goto clean_2;
goto clean_3;
}
hcd->irq = dev->irq;
......@@ -168,8 +176,6 @@ int usb_hcd_pci_probe (struct pci_dev *dev, const struct pci_device_id *id)
usb_bus_init (&hcd->self);
hcd->self.op = &usb_hcd_operations;
hcd->self.hcpriv = (void *) hcd;
hcd->self.bus_name = dev->slot_name;
hcd->product_desc = dev->name;
INIT_LIST_HEAD (&hcd->dev_list);
......@@ -216,6 +222,7 @@ void usb_hcd_pci_remove (struct pci_dev *dev)
usb_disconnect (&hub);
hcd->driver->stop (hcd);
hcd_buffer_destroy (hcd);
hcd->state = USB_STATE_HALT;
free_irq (hcd->irq, hcd);
......
......@@ -454,7 +454,6 @@ static int rh_status_urb (struct usb_hcd *hcd, struct urb *urb)
/* rh_timer protected by hcd_data_lock */
if (timer_pending (&hcd->rh_timer)
|| urb->status != -EINPROGRESS
|| !HCD_IS_RUNNING (hcd->state)
|| urb->transfer_buffer_length < len) {
dbg ("not queuing status urb, stat %d", urb->status);
return -EINVAL;
......@@ -508,8 +507,12 @@ static void rh_report_status (unsigned long ptr)
BUG ();
}
spin_unlock_irqrestore (&hcd_data_lock, flags);
} else
} else {
spin_unlock_irqrestore (&urb->lock, flags);
spin_lock_irqsave (&hcd_data_lock, flags);
rh_status_urb (hcd, urb);
spin_unlock_irqrestore (&hcd_data_lock, flags);
}
} else {
/* this urb's been unlinked */
urb->hcpriv = 0;
......@@ -1245,6 +1248,11 @@ struct usb_operations usb_hcd_operations = {
.submit_urb = hcd_submit_urb,
.unlink_urb = hcd_unlink_urb,
.deallocate = hcd_free_dev,
.buffer_alloc = hcd_buffer_alloc,
.buffer_free = hcd_buffer_free,
.buffer_map = hcd_buffer_map,
.buffer_dmasync = hcd_buffer_dmasync,
.buffer_unmap = hcd_buffer_unmap,
};
EXPORT_SYMBOL (usb_hcd_operations);
......
......@@ -58,6 +58,9 @@ struct usb_hcd { /* usb_bus.hcpriv points to this */
atomic_t resume_count; /* multiple resumes issue */
#endif
#define HCD_BUFFER_POOLS 4
struct pci_pool *pool [HCD_BUFFER_POOLS];
int state;
# define __ACTIVE 0x01
# define __SLEEPY 0x02
......@@ -109,6 +112,25 @@ struct usb_operations {
int (*get_frame_number) (struct usb_device *usb_dev);
int (*submit_urb) (struct urb *urb, int mem_flags);
int (*unlink_urb) (struct urb *urb);
/* allocate dma-consistent buffer for URB_NO_DMA_MAP */
void *(*buffer_alloc)(struct usb_bus *bus, size_t size,
int mem_flags,
dma_addr_t *dma);
void (*buffer_free)(struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
int (*buffer_map) (struct usb_bus *bus,
void *addr, dma_addr_t *dma,
size_t size, int direction);
void (*buffer_dmasync) (struct usb_bus *bus,
dma_addr_t dma,
size_t size, int direction);
void (*buffer_unmap) (struct usb_bus *bus,
dma_addr_t dma,
size_t size, int direction);
// FIXME also: buffer_sg_map (), buffer_sg_unmap ()
};
/* each driver provides one of these, and hardware init support */
......@@ -181,6 +203,25 @@ extern int usb_hcd_pci_resume (struct pci_dev *dev);
#endif /* CONFIG_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
int hcd_buffer_create (struct usb_hcd *hcd);
void hcd_buffer_destroy (struct usb_hcd *hcd);
void *hcd_buffer_alloc (struct usb_bus *bus, size_t size,
int mem_flags, dma_addr_t *dma);
void hcd_buffer_free (struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
int hcd_buffer_map (struct usb_bus *bus,
void *addr, dma_addr_t *dma,
size_t size, int direction);
void hcd_buffer_dmasync (struct usb_bus *bus,
dma_addr_t dma,
size_t size, int direction);
void hcd_buffer_unmap (struct usb_bus *bus,
dma_addr_t dma,
size_t size, int direction);
/* generic bus glue, needed for host controllers that don't use PCI */
extern struct usb_operations usb_hcd_operations;
extern void usb_hcd_irq (int irq, void *__hcd, struct pt_regs *r);
......
......@@ -863,9 +863,11 @@ static ssize_t show_product (struct device *dev, char *buf, size_t count, loff_t
return 0;
udev = to_usb_device (dev);
len = usb_string(udev, udev->descriptor.iProduct, buf, PAGE_SIZE);
if (len < 0)
return 0;
buf[len] = '\n';
buf[len+1] = 0x00;
buf[len+1] = 0;
return len+1;
}
static DEVICE_ATTR(product,"product",S_IRUGO,show_product,NULL);
......@@ -881,9 +883,11 @@ show_manufacturer (struct device *dev, char *buf, size_t count, loff_t off)
return 0;
udev = to_usb_device (dev);
len = usb_string(udev, udev->descriptor.iManufacturer, buf, PAGE_SIZE);
if (len < 0)
return 0;
buf[len] = '\n';
buf[len+1] = 0x00;
buf[len+1] = 0;
return len+1;
}
static DEVICE_ATTR(manufacturer,"manufacturer",S_IRUGO,show_manufacturer,NULL);
......@@ -899,9 +903,11 @@ show_serial (struct device *dev, char *buf, size_t count, loff_t off)
return 0;
udev = to_usb_device (dev);
len = usb_string(udev, udev->descriptor.iSerialNumber, buf, PAGE_SIZE);
if (len < 0)
return 0;
buf[len] = '\n';
buf[len+1] = 0x00;
buf[len+1] = 0;
return len+1;
}
static DEVICE_ATTR(serial,"serial",S_IRUGO,show_serial,NULL);
......@@ -918,13 +924,13 @@ static void usb_find_drivers(struct usb_device *dev)
unsigned claimed = 0;
/* FIXME should get called for each new configuration not just the
* first one for a device. switching configs (or altesettings) should
* first one for a device. switching configs (or altsettings) should
* undo driverfs and HCD state for the previous interfaces.
*/
for (ifnum = 0; ifnum < dev->actconfig->bNumInterfaces; ifnum++) {
struct usb_interface *interface = &dev->actconfig->interface[ifnum];
struct usb_interface_descriptor *desc = interface->altsetting;
/* register this interface with driverfs */
interface->dev.parent = &dev->dev;
interface->dev.bus = &usb_bus_type;
......@@ -1455,6 +1461,152 @@ int usb_new_device(struct usb_device *dev)
}
/**
* usb_buffer_alloc - allocate dma-consistent buffer for URB_NO_DMA_MAP
* @dev: device the buffer will be used with
* @size: requested buffer size
* @mem_flags: affect whether allocation may block
* @dma: used to return DMA address of buffer
*
* Return value is either null (indicating no buffer could be allocated), or
* the cpu-space pointer to a buffer that may be used to perform DMA to the
* specified device. Such cpu-space buffers are returned along with the DMA
* address (through the pointer provided).
*
* These buffers are used with URB_NO_DMA_MAP set in urb->transfer_flags to
* avoid behaviors like using "DMA bounce buffers", or tying down I/O mapping
* hardware for long idle periods. The implementation varies between
* platforms, depending on details of how DMA will work to this device.
*
* When the buffer is no longer used, free it with usb_buffer_free().
*/
void *usb_buffer_alloc (
struct usb_device *dev,
size_t size,
int mem_flags,
dma_addr_t *dma
)
{
if (!dev || !dev->bus || !dev->bus->op || !dev->bus->op->buffer_alloc)
return 0;
return dev->bus->op->buffer_alloc (dev->bus, size, mem_flags, dma);
}
/**
* usb_buffer_free - free memory allocated with usb_buffer_alloc()
* @dev: device the buffer was used with
* @size: requested buffer size
* @addr: CPU address of buffer
* @dma: DMA address of buffer
*
* This reclaims an I/O buffer, letting it be reused. The memory must have
* been allocated using usb_buffer_alloc(), and the parameters must match
* those provided in that allocation request.
*/
void usb_buffer_free (
struct usb_device *dev,
size_t size,
void *addr,
dma_addr_t dma
)
{
if (!dev || !dev->bus || !dev->bus->op || !dev->bus->op->buffer_free)
return;
dev->bus->op->buffer_free (dev->bus, size, addr, dma);
}
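/* A hedged driver-side sketch (my_start_io() is hypothetical; "udev" and
 * "urb" are assumed to come from the driver, and error handling is trimmed):
 * allocate a dma-consistent buffer, point the urb at it, and set
 * URB_NO_DMA_MAP so the HCD uses transfer_dma instead of mapping the
 * buffer itself.
 */
static int my_start_io (struct usb_device *udev, struct urb *urb)
{
	dma_addr_t dma;
	void *buf;

	buf = usb_buffer_alloc (udev, 64, SLAB_KERNEL, &dma);
	if (!buf)
		return -ENOMEM;
	urb->transfer_buffer = buf;
	urb->transfer_dma = dma;
	urb->transfer_buffer_length = 64;
	urb->transfer_flags |= URB_NO_DMA_MAP;
	/* ... usb_submit_urb (urb, SLAB_KERNEL); and when done with the buffer:
	 * usb_buffer_free (udev, 64, buf, dma);
	 */
	return 0;
}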
/**
* usb_buffer_map - create DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer will be mapped
*
* Return value is either null (indicating no buffer could be mapped), or
* the parameter. URB_NO_DMA_MAP is added to urb->transfer_flags if the
* operation succeeds.
*
* This call would normally be used for an urb which is reused, perhaps
* as the target of a large periodic transfer, with usb_buffer_dmasync()
* calls to synchronize memory and dma state. It may not be used for
* control requests.
*
* Reverse the effect of this call with usb_buffer_unmap().
*/
struct urb *usb_buffer_map (struct urb *urb)
{
struct usb_bus *bus;
struct usb_operations *op;
if (!urb
|| usb_pipecontrol (urb->pipe)
|| !urb->dev
|| !(bus = urb->dev->bus)
|| !(op = bus->op)
|| !op->buffer_map)
return 0;
if (op->buffer_map (bus,
urb->transfer_buffer,
&urb->transfer_dma,
urb->transfer_buffer_length,
usb_pipein (urb->pipe)
? USB_DIR_IN
: USB_DIR_OUT))
return 0;
urb->transfer_flags |= URB_NO_DMA_MAP;
return urb;
}
/**
* usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
* @urb: urb whose transfer_buffer will be synchronized
*/
void usb_buffer_dmasync (struct urb *urb)
{
struct usb_bus *bus;
struct usb_operations *op;
if (!urb
|| !(urb->transfer_flags & URB_NO_DMA_MAP)
|| !urb->dev
|| !(bus = urb->dev->bus)
|| !(op = bus->op)
|| !op->buffer_dmasync)
return;
op->buffer_dmasync (bus,
urb->transfer_dma,
urb->transfer_buffer_length,
usb_pipein (urb->pipe)
? USB_DIR_IN
: USB_DIR_OUT);
}
/**
* usb_buffer_unmap - free DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer will be unmapped
*
* Reverses the effect of usb_buffer_map().
*/
void usb_buffer_unmap (struct urb *urb)
{
struct usb_bus *bus;
struct usb_operations *op;
if (!urb
|| !(urb->transfer_flags & URB_NO_DMA_MAP)
|| !urb->dev
|| !(bus = urb->dev->bus)
|| !(op = bus->op)
|| !op->buffer_unmap)
return;
op->buffer_unmap (bus,
urb->transfer_dma,
urb->transfer_buffer_length,
usb_pipein (urb->pipe)
? USB_DIR_IN
: USB_DIR_OUT);
}
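/* A hedged sketch of the mapping helpers on a reused, non-control urb that
 * is assumed to be fully filled in already (my_poll_loop() is hypothetical
 * and error handling is trimmed): map once, sync whenever the CPU needs a
 * coherent view of the data, unmap when the urb is retired.
 */
static int my_poll_loop (struct urb *urb)
{
	int i;

	if (!usb_buffer_map (urb))	/* adds URB_NO_DMA_MAP on success */
		return -ENOMEM;
	for (i = 0; i < 10; i++) {
		/* ... usb_submit_urb (urb, SLAB_KERNEL), wait for completion ... */
		usb_buffer_dmasync (urb);
		/* ... now it is safe to inspect urb->transfer_buffer ... */
	}
	usb_buffer_unmap (urb);
	return 0;
}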
#ifdef CONFIG_PROC_FS
struct list_head *usb_driver_get_list(void)
{
......@@ -1534,4 +1686,11 @@ EXPORT_SYMBOL(__usb_get_extra_descriptor);
EXPORT_SYMBOL(usb_get_current_frame_number);
EXPORT_SYMBOL (usb_buffer_alloc);
EXPORT_SYMBOL (usb_buffer_free);
EXPORT_SYMBOL (usb_buffer_map);
EXPORT_SYMBOL (usb_buffer_dmasync);
EXPORT_SYMBOL (usb_buffer_unmap);
MODULE_LICENSE("GPL");
/*
* Copyright (c) 2001 by David Brownell
* Copyright (c) 2001-2002 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -175,3 +175,215 @@ dbg_qh (char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) {}
(status & PORT_CONNECT) ? " CONNECT" : "" \
)
#ifdef DEBUG
#define speed_char(info1) ({ char tmp; \
switch (info1 & (3 << 12)) { \
case 0 << 12: tmp = 'f'; break; \
case 1 << 12: tmp = 'l'; break; \
case 2 << 12: tmp = 'h'; break; \
default: tmp = '?'; break; \
}; tmp; })
static ssize_t
show_async (struct device *dev, char *buf, size_t count, loff_t off)
{
struct pci_dev *pdev;
struct ehci_hcd *ehci;
unsigned long flags;
unsigned temp, size;
char *next;
struct ehci_qh *qh;
if (off != 0)
return 0;
pdev = container_of (dev, struct pci_dev, dev);
ehci = container_of (pci_get_drvdata (pdev), struct ehci_hcd, hcd);
next = buf;
size = count;
/* dumps a snapshot of the async schedule.
* usually empty except for long-term bulk reads, or head.
* one QH per line, and TDs we know about
*/
spin_lock_irqsave (&ehci->lock, flags);
if (ehci->async) {
qh = ehci->async;
do {
u32 scratch;
struct list_head *entry;
struct ehci_qtd *td;
scratch = cpu_to_le32p (&qh->hw_info1);
temp = snprintf (next, size, "qh %p dev%d %cs ep%d",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f);
size -= temp;
next += temp;
list_for_each (entry, &qh->qtd_list) {
td = list_entry (entry, struct ehci_qtd,
qtd_list);
scratch = cpu_to_le32p (&td->hw_token);
temp = snprintf (next, size,
", td %p len=%d %s",
td, scratch >> 16,
({ char *tmp;
switch ((scratch>>8)&0x03) {
case 0: tmp = "out"; break;
case 1: tmp = "in"; break;
case 2: tmp = "setup"; break;
default: tmp = "?"; break;
} tmp;})
);
size -= temp;
next += temp;
}
temp = snprintf (next, size, "\n");
size -= temp;
next += temp;
} while ((qh = qh->qh_next.qh) != ehci->async);
}
spin_unlock_irqrestore (&ehci->lock, flags);
return count - size;
}
static DEVICE_ATTR (async, "sched-async", S_IRUSR, show_async, NULL);
#define DBG_SCHED_LIMIT 64
static ssize_t
show_periodic (struct device *dev, char *buf, size_t count, loff_t off)
{
struct pci_dev *pdev;
struct ehci_hcd *ehci;
unsigned long flags;
union ehci_shadow p, *seen;
unsigned temp, size, seen_count;
char *next;
unsigned i, tag;
if (off != 0)
return 0;
if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
return 0;
seen_count = 0;
pdev = container_of (dev, struct pci_dev, dev);
ehci = container_of (pci_get_drvdata (pdev), struct ehci_hcd, hcd);
next = buf;
size = count;
temp = snprintf (next, size, "size = %d\n", ehci->periodic_size);
size -= temp;
next += temp;
/* dump a snapshot of the periodic schedule.
* iso changes, interrupt usually doesn't.
*/
spin_lock_irqsave (&ehci->lock, flags);
for (i = 0; i < ehci->periodic_size; i++) {
p = ehci->pshadow [i];
if (!p.ptr)
continue;
tag = Q_NEXT_TYPE (ehci->periodic [i]);
temp = snprintf (next, size, "%4d: ", i);
size -= temp;
next += temp;
do {
switch (tag) {
case Q_TYPE_QH:
temp = snprintf (next, size, " intr-%d %p",
p.qh->period, p.qh);
size -= temp;
next += temp;
for (temp = 0; temp < seen_count; temp++) {
if (seen [temp].ptr == p.ptr)
break;
}
/* show more info the first time around */
if (temp == seen_count) {
u32 scratch = cpu_to_le32p (
&p.qh->hw_info1);
temp = snprintf (next, size,
" (%cs dev%d ep%d)",
speed_char (scratch),
scratch & 0x007f,
(scratch >> 8) & 0x000f);
/* FIXME TDs too */
if (seen_count < DBG_SCHED_LIMIT)
seen [seen_count++].qh = p.qh;
} else
temp = 0;
tag = Q_NEXT_TYPE (p.qh->hw_next);
p = p.qh->qh_next;
break;
case Q_TYPE_FSTN:
temp = snprintf (next, size,
" fstn-%8x/%p", p.fstn->hw_prev,
p.fstn);
tag = Q_NEXT_TYPE (p.fstn->hw_next);
p = p.fstn->fstn_next;
break;
case Q_TYPE_ITD:
temp = snprintf (next, size,
" itd/%p", p.itd);
tag = Q_NEXT_TYPE (p.itd->hw_next);
p = p.itd->itd_next;
break;
case Q_TYPE_SITD:
temp = snprintf (next, size,
" sitd/%p", p.sitd);
tag = Q_NEXT_TYPE (p.sitd->hw_next);
p = p.sitd->sitd_next;
break;
}
size -= temp;
next += temp;
} while (p.ptr);
temp = snprintf (next, size, "\n");
size -= temp;
next += temp;
}
spin_unlock_irqrestore (&ehci->lock, flags);
kfree (seen);
return count - size;
}
static DEVICE_ATTR (periodic, "sched-periodic", S_IRUSR, show_periodic, NULL);
#undef DBG_SCHED_LIMIT
static inline void create_debug_files (struct ehci_hcd *bus)
{
device_create_file (&bus->hcd.pdev->dev, &dev_attr_async);
device_create_file (&bus->hcd.pdev->dev, &dev_attr_periodic);
}
static inline void remove_debug_files (struct ehci_hcd *bus)
{
device_remove_file (&bus->hcd.pdev->dev, &dev_attr_async);
device_remove_file (&bus->hcd.pdev->dev, &dev_attr_periodic);
}
#else /* DEBUG */
static inline void create_debug_files (struct ehci_hcd *bus)
{
}
static inline void remove_debug_files (struct ehci_hcd *bus)
{
}
#endif /* DEBUG */
......@@ -65,6 +65,8 @@
*
* HISTORY:
*
* 2002-08-06 Handling for bulk and interrupt transfers is mostly shared;
* only scheduling is different, no arbitrary limitations.
* 2002-07-25 Sanity check PCI reads, mostly for better cardbus support,
* clean up HC run state handshaking.
* 2002-05-24 Preliminary FS/LS interrupts, using scheduling shortcuts
......@@ -85,7 +87,7 @@
* 2001-June Works with usb-storage and NEC EHCI on 2.4
*/
#define DRIVER_VERSION "2002-Jul-25"
#define DRIVER_VERSION "2002-Aug-06"
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
......@@ -93,6 +95,8 @@
// #define EHCI_VERBOSE_DEBUG
// #define have_split_iso
#define INTR_AUTOMAGIC /* to be removed later in 2.5 */
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 0 /* nak throttle; see 4.9 */
......@@ -376,6 +380,8 @@ static int ehci_start (struct usb_hcd *hcd)
return -ENOMEM;
}
create_debug_files (ehci);
/*
* Start, enabling full USB 2.0 functionality ... usb 1.1 devices
* are explicitly handed to companion controller(s), so no TT is
......@@ -429,6 +435,8 @@ static void ehci_stop (struct usb_hcd *hcd)
ehci_ready (ehci);
ehci_reset (ehci);
remove_debug_files (ehci);
/* root hub is shut down separately (first, when possible) */
tasklet_disable (&ehci->tasklet);
ehci_tasklet ((unsigned long) ehci);
......@@ -614,7 +622,8 @@ static void ehci_irq (struct usb_hcd *hcd)
*
* hcd-specific init for hcpriv hasn't been done yet
*
* NOTE: EHCI queues control and bulk requests transparently, like OHCI.
* NOTE: control, bulk, and interrupt share the same code to append TDs
* to a (possibly active) QH, and the same QH scanning code.
*/
static int ehci_urb_enqueue (
struct usb_hcd *hcd,
......@@ -626,10 +635,11 @@ static int ehci_urb_enqueue (
urb->transfer_flags &= ~EHCI_STATE_UNLINK;
INIT_LIST_HEAD (&qtd_list);
switch (usb_pipetype (urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
switch (usb_pipetype (urb->pipe)) {
// case PIPE_CONTROL:
// case PIPE_BULK:
default:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async (ehci, urb, &qtd_list, mem_flags);
......@@ -649,9 +659,6 @@ static int ehci_urb_enqueue (
dbg ("no split iso support yet");
return -ENOSYS;
#endif /* have_split_iso */
default: /* can't happen */
return -ENOSYS;
}
}
......@@ -665,15 +672,16 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
unsigned long flags;
dbg ("%s urb_dequeue %p qh state %d",
hcd->self.bus_name, urb, qh->qh_state);
dbg ("%s urb_dequeue %p qh %p state %d",
hcd->self.bus_name, urb, qh, qh->qh_state);
switch (usb_pipetype (urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
// case PIPE_CONTROL:
// case PIPE_BULK:
default:
spin_lock_irqsave (&ehci->lock, flags);
if (ehci->reclaim) {
dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
if (in_interrupt ()) {
spin_unlock_irqrestore (&ehci->lock, flags);
return -EAGAIN;
......@@ -683,28 +691,43 @@ dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
&& ehci->hcd.state != USB_STATE_HALT
) {
spin_unlock_irqrestore (&ehci->lock, flags);
// yeech ... this could spin for up to two frames!
dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
qh->qh_state, ehci->reclaim, ehci->hcd.state
);
udelay (100);
/* let pending unlinks complete */
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
}
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async (ehci, qh);
spin_unlock_irqrestore (&ehci->lock, flags);
return 0;
break;
case PIPE_INTERRUPT:
intr_deschedule (ehci, urb->start_frame, qh,
(urb->dev->speed == USB_SPEED_HIGH)
? urb->interval
: (urb->interval << 3));
if (ehci->hcd.state == USB_STATE_HALT)
urb->status = -ESHUTDOWN;
qh_completions (ehci, qh, 1);
return 0;
if (qh->qh_state == QH_STATE_LINKED) {
/* messy, can spin or block a microframe ... */
intr_deschedule (ehci, qh, 1);
/* qh_state == IDLE */
}
qh_completions (ehci, qh);
/* reschedule QH iff another request is queued */
if (!list_empty (&qh->qtd_list)
&& HCD_IS_RUNNING (ehci->hcd.state)) {
int status;
spin_lock_irqsave (&ehci->lock, flags);
status = qh_schedule (ehci, qh);
spin_unlock_irqrestore (&ehci->lock, flags);
if (status != 0) {
// shouldn't happen often, but ...
// FIXME kill those tds' urbs
err ("can't reschedule qh %p, err %d",
qh, status);
}
return status;
}
break;
case PIPE_ISOCHRONOUS:
// itd or sitd ...
......@@ -712,9 +735,9 @@ dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
// wait till next completion, do it then.
// completion irqs can wait up to 1024 msec,
urb->transfer_flags |= EHCI_STATE_UNLINK;
return 0;
break;
}
return -EINVAL;
return 0;
}
/*-------------------------------------------------------------------------*/
......@@ -728,6 +751,7 @@ static void ehci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
int i;
unsigned long flags;
/* ASSERT: no requests/urbs are still linked (so no TDs) */
/* ASSERT: nobody can be submitting urbs for this any more */
dbg ("%s: free_config devnum %d", hcd->self.bus_name, udev->devnum);
......@@ -736,34 +760,57 @@ static void ehci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
for (i = 0; i < 32; i++) {
if (dev->ep [i]) {
struct ehci_qh *qh;
char *why;
/* dev->ep never has ITDs or SITDs */
qh = (struct ehci_qh *) dev->ep [i];
vdbg ("free_config, ep 0x%02x qh %p", i, qh);
if (!list_empty (&qh->qtd_list)) {
dbg ("ep 0x%02x qh %p not empty!", i, qh);
/* detect/report non-recoverable errors */
if (in_interrupt ())
why = "disconnect() didn't";
else if ((qh->hw_info2 & cpu_to_le32 (0xffff)) != 0
&& qh->qh_state != QH_STATE_IDLE)
why = "(active periodic)";
else
why = 0;
if (why) {
err ("dev %s-%s ep %d-%s error: %s",
hcd->self.bus_name, udev->devpath,
i & 0xf, (i & 0x10) ? "IN" : "OUT",
why);
BUG ();
}
dev->ep [i] = 0;
/* wait_ms() won't spin here -- we're a thread */
dev->ep [i] = 0;
if (qh->qh_state == QH_STATE_IDLE)
goto idle;
dbg ("free_config, async ep 0x%02x qh %p", i, qh);
/* scan_async() empties the ring as it does its work,
* using IAA, but doesn't (yet?) turn it off. if it
* doesn't empty this qh, likely it's the last entry.
*/
while (qh->qh_state == QH_STATE_LINKED
&& ehci->reclaim
&& ehci->hcd.state != USB_STATE_HALT
) {
spin_unlock_irqrestore (&ehci->lock, flags);
/* wait_ms() won't spin, we're a thread;
* and we know IRQ+tasklet can progress
*/
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
if (qh->qh_state == QH_STATE_LINKED) {
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async (ehci, qh);
while (qh->qh_state != QH_STATE_IDLE) {
spin_unlock_irqrestore (&ehci->lock,
flags);
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
while (qh->qh_state != QH_STATE_IDLE
&& ehci->hcd.state != USB_STATE_HALT) {
spin_unlock_irqrestore (&ehci->lock,
flags);
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
idle:
qh_put (ehci, qh);
}
}
......
......@@ -47,9 +47,11 @@ static int
qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf [0] = cpu_to_le32 (buf);
qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely (len < count)) /* ... iff needed */
count = len;
......@@ -59,7 +61,7 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
u64 addr = buf;
addr = buf;
qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
buf += 0x1000;
......@@ -157,30 +159,6 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
}
}
static void ehci_urb_complete (
struct ehci_hcd *ehci,
dma_addr_t addr,
struct urb *urb
) {
if (urb->transfer_buffer_length && usb_pipein (urb->pipe))
pci_dma_sync_single (ehci->hcd.pdev, addr,
urb->transfer_buffer_length,
PCI_DMA_FROMDEVICE);
/* cleanse status if we saw no error */
if (likely (urb->status == -EINPROGRESS)) {
if (urb->actual_length != urb->transfer_buffer_length
&& (urb->transfer_flags & URB_SHORT_NOT_OK))
urb->status = -EREMOTEIO;
else
urb->status = 0;
}
/* only report unlinks once */
if (likely (urb->status != -ENOENT && urb->status != -ENOTCONN))
urb->complete (urb);
}
/* urb->lock ignored from here on (hcd is done with urb) */
static void ehci_urb_done (
......@@ -188,6 +166,11 @@ static void ehci_urb_done (
dma_addr_t addr,
struct urb *urb
) {
#ifdef INTR_AUTOMAGIC
struct urb *resubmit = 0;
struct usb_device *dev = 0;
#endif
if (urb->transfer_buffer_length)
pci_unmap_single (ehci->hcd.pdev,
addr,
......@@ -196,7 +179,23 @@ static void ehci_urb_done (
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
if (likely (urb->hcpriv != 0)) {
qh_put (ehci, (struct ehci_qh *) urb->hcpriv);
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
if ((qh->hw_info2 & cpu_to_le32 (0x00ff)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
ehci->hcd.self.bandwidth_int_reqs--;
#ifdef INTR_AUTOMAGIC
if (!((urb->status == -ENOENT)
|| (urb->status == -ECONNRESET))) {
resubmit = usb_get_urb (urb);
dev = urb->dev;
}
#endif
}
qh_put (ehci, qh);
urb->hcpriv = 0;
}
......@@ -208,33 +207,46 @@ static void ehci_urb_done (
urb->status = 0;
}
/* hand off urb ownership */
usb_hcd_giveback_urb (&ehci->hcd, urb);
#ifdef INTR_AUTOMAGIC
if (resubmit && ((urb->status == -ENOENT)
|| (urb->status == -ECONNRESET))) {
usb_put_urb (resubmit);
resubmit = 0;
}
// device drivers will soon be doing something like this
if (resubmit) {
int status;
resubmit->dev = dev;
status = usb_submit_urb (resubmit, SLAB_KERNEL);
if (status != 0)
err ("can't resubmit interrupt urb %p: status %d",
resubmit, status);
usb_put_urb (resubmit);
}
#endif
}
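/* A hedged sketch of the driver-side resubmission hinted at above, for when
 * INTR_AUTOMAGIC goes away (my_intr_complete() is hypothetical; completion
 * handlers run in interrupt context, hence SLAB_ATOMIC).
 */
static void my_intr_complete (struct urb *urb)
{
	int status;

	/* unlinked urbs must not be resubmitted */
	if (urb->status == -ENOENT || urb->status == -ECONNRESET)
		return;
	/* ... consume urb->transfer_buffer ... */
	status = usb_submit_urb (urb, SLAB_ATOMIC);
	if (status)
		err ("can't resubmit, %d", status);
}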
/*
* Process completed qtds for a qh, issuing completions if needed.
* When freeing: frees qtds, unmaps buf, returns URB to driver.
* When not freeing (queued periodic qh): retain qtds, mapping, and urb.
* Frees qtds, unmaps buf, returns URB to driver.
* Races up to qh->hw_current; returns number of urb completions.
*/
static int
qh_completions (
struct ehci_hcd *ehci,
struct ehci_qh *qh,
int freeing
) {
static void
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *qtd, *last;
struct list_head *next, *qtd_list = &qh->qtd_list;
int unlink = 0, halted = 0;
unsigned long flags;
int retval = 0;
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely (list_empty (qtd_list))) {
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
return;
}
/* scan QTDs till end of list, or we reach an active one */
......@@ -251,14 +263,8 @@ qh_completions (
if (likely (last->urb != urb)) {
/* complete() can reenter this HCD */
spin_unlock_irqrestore (&ehci->lock, flags);
if (likely (freeing != 0))
ehci_urb_done (ehci, last->buf_dma,
last->urb);
else
ehci_urb_complete (ehci, last->buf_dma,
last->urb);
ehci_urb_done (ehci, last->buf_dma, last->urb);
spin_lock_irqsave (&ehci->lock, flags);
retval++;
}
/* qh overlays can have HC's old cached copies of
......@@ -270,8 +276,7 @@ qh_completions (
qh->hw_qtd_next = last->hw_next;
}
if (likely (freeing != 0))
ehci_qtd_free (ehci, last);
ehci_qtd_free (ehci, last);
last = 0;
}
next = qtd->qtd_list.next;
......@@ -288,7 +293,7 @@ qh_completions (
/* fault: unlink the rest, since this qtd saw an error? */
if (unlikely ((token & QTD_STS_HALT) != 0)) {
freeing = unlink = 1;
unlink = 1;
/* status copied below */
/* QH halts only because of fault (above) or unlink (here). */
......@@ -296,13 +301,14 @@ qh_completions (
/* unlinking everything because of HC shutdown? */
if (ehci->hcd.state == USB_STATE_HALT) {
freeing = unlink = 1;
unlink = 1;
/* explicit unlink, maybe starting here? */
} else if (qh->qh_state == QH_STATE_IDLE
&& (urb->status == -ECONNRESET
|| urb->status == -ESHUTDOWN
|| urb->status == -ENOENT)) {
freeing = unlink = 1;
unlink = 1;
/* QH halted to unlink urbs _after_ this? */
} else if (!unlink && (token & QTD_STS_ACTIVE) != 0) {
......@@ -312,7 +318,7 @@ qh_completions (
/* unlink the rest? once we start unlinking, after
* a fault or explicit unlink, we unlink all later
* urbs. usb spec requires that.
* urbs. usb spec requires that for faults...
*/
if (unlink && urb->status == -EINPROGRESS)
urb->status = -ECONNRESET;
......@@ -330,31 +336,7 @@ qh_completions (
qtd_copy_status (urb, qtd->length, token);
spin_unlock (&urb->lock);
/*
* NOTE: this won't work right with interrupt urbs that
* need multiple qtds ... only the first scan of qh->qtd_list
* starts at the right qtd, yet multiple scans could happen
* for transfers that are scheduled across multiple uframes.
* (Such schedules are not currently allowed!)
*/
if (likely (freeing != 0))
list_del (&qtd->qtd_list);
else {
/* restore everything the HC could change
* from an interrupt QTD
*/
qtd->hw_token = (qtd->hw_token
& __constant_cpu_to_le32 (0x8300))
| cpu_to_le32 (qtd->length << 16)
| __constant_cpu_to_le32 (QTD_STS_ACTIVE
| (EHCI_TUNE_CERR << 10));
qtd->hw_buf [0] &= ~__constant_cpu_to_le32 (0x0fff);
/* this offset, and the length above,
* are likely wrong on QTDs #2..N
*/
qtd->hw_buf [0] |= cpu_to_le32 (0x0fff & qtd->buf_dma);
}
list_del (&qtd->qtd_list);
#if 0
if (urb->status == -EINPROGRESS)
......@@ -382,14 +364,9 @@ qh_completions (
/* last urb's completion might still need calling */
if (likely (last != 0)) {
if (likely (freeing != 0)) {
ehci_urb_done (ehci, last->buf_dma, last->urb);
ehci_qtd_free (ehci, last);
} else
ehci_urb_complete (ehci, last->buf_dma, last->urb);
retval++;
ehci_urb_done (ehci, last->buf_dma, last->urb);
ehci_qtd_free (ehci, last);
}
return retval;
}
/*-------------------------------------------------------------------------*/
......@@ -450,6 +427,7 @@ qh_urb_transaction (
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf, map_buf;
int len, maxpacket;
int is_input;
u32 token;
/*
......@@ -495,10 +473,11 @@ qh_urb_transaction (
* data transfer stage: buffer setup
*/
len = urb->transfer_buffer_length;
is_input = usb_pipein (urb->pipe);
if (likely (len > 0)) {
buf = map_buf = pci_map_single (ehci->hcd.pdev,
urb->transfer_buffer, len,
usb_pipein (urb->pipe)
is_input
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
if (unlikely (!buf))
......@@ -506,12 +485,11 @@ qh_urb_transaction (
} else
buf = map_buf = 0;
if (!buf || usb_pipein (urb->pipe))
if (!buf || is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
maxpacket = usb_maxpacket (urb->dev, urb->pipe, !is_input) & 0x03ff;
/*
* buffer gets wrapped in one or more qtds;
......@@ -607,6 +585,11 @@ clear_toggle (struct usb_device *udev, int ep, int is_out, struct ehci_qh *qh)
// That'd mean updating how usbcore talks to HCDs. (2.5?)
// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x03ff)
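// e.g. (hypothetical values): wMaxPacketSize 0x0200 decodes as max_packet 512
// with hb_mult 1; 0x0940 decodes as max_packet 320 with hb_mult 2, i.e. two
// 320-byte transactions per microframe.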
/*
* Each QH holds a qtd list; a QH is used for everything except iso.
*
......@@ -624,6 +607,8 @@ ehci_qh_make (
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
if (!qh)
return qh;
......@@ -634,6 +619,53 @@ ehci_qh_make (
info1 |= usb_pipeendpoint (urb->pipe) << 8;
info1 |= usb_pipedevice (urb->pipe) << 0;
is_input = usb_pipein (urb->pipe);
type = usb_pipetype (urb->pipe);
maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* - splits also need a schedule gap (for full/low speed I/O)
* - qh has a polling interval
*
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
qh->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
hb_mult (maxp) * max_packet (maxp));
qh->start = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh->gap_uf = 0;
/* FIXME handle HS periods of less than 1 frame. */
qh->period = urb->interval >> 3;
if (qh->period < 1) {
dbg ("intr period %d uframes, NYET!",
urb->interval);
qh = 0;
goto done;
}
} else {
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
is_input, 0, maxp) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
qh->c_usecs = qh->usecs + HS_USECS (0);
qh->usecs = HS_USECS (1);
} else { // SPLIT+DATA, gap, CSPLIT
qh->usecs += HS_USECS (1);
qh->c_usecs = HS_USECS (0);
}
qh->period = urb->interval;
}
}
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
......@@ -643,67 +675,63 @@ ehci_qh_make (
case USB_SPEED_FULL:
/* EPS 0 means "full" */
info1 |= (EHCI_TUNE_RL_TT << 28);
if (usb_pipecontrol (urb->pipe)) {
if (type == PIPE_CONTROL) {
info1 |= (1 << 27); /* for TT */
info1 |= 1 << 14; /* toggle from qtd */
}
info1 |= usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe)) << 16;
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_TT << 30);
info2 |= urb->dev->ttport << 23;
info2 |= urb->dev->tt->hub->devnum << 16;
/* NOTE: if (usb_pipeint (urb->pipe)) { scheduler sets c-mask }
* ... and a 0.96 scheduler might use FSTN nodes too
*/
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
info1 |= (EHCI_TUNE_RL_HS << 28);
if (usb_pipecontrol (urb->pipe)) {
if (type == PIPE_CONTROL) {
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (usb_pipebulk (urb->pipe)) {
} else if (type == PIPE_BULK) {
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else {
u32 temp;
temp = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
info1 |= (temp & 0x3ff) << 16; /* maxpacket */
/* HS intr can be "high bandwidth" */
temp = 1 + ((temp >> 11) & 0x03);
info2 |= temp << 30; /* mult */
} else { /* PIPE_INTERRUPT */
info1 |= max_packet (maxp) << 16;
info2 |= hb_mult (maxp) << 30;
}
break;
default:
#ifdef DEBUG
default:
BUG ();
#endif
}
/* NOTE: if (usb_pipeint (urb->pipe)) { scheduler sets s-mask } */
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_le32 (info1);
qh->hw_info2 = cpu_to_le32 (info2);
/* initialize sw and hw queues with these qtds */
list_splice (qtd_list, &qh->qtd_list);
qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
if (!list_empty (qtd_list)) {
list_splice (qtd_list, &qh->qtd_list);
qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
} else {
qh->hw_qtd_next = qh->hw_alt_next = EHCI_LIST_END;
}
/* initialize data toggle state */
if (!usb_pipecontrol (urb->pipe))
clear_toggle (urb->dev,
usb_pipeendpoint (urb->pipe),
usb_pipeout (urb->pipe),
qh);
clear_toggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, qh);
done:
return qh;
}
#undef hb_mult
#undef max_packet
/*-------------------------------------------------------------------------*/
......@@ -745,50 +773,48 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
/*-------------------------------------------------------------------------*/
static int
submit_async (
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
struct ehci_qtd *qtd;
struct hcd_dev *dev;
int epnum;
unsigned long flags;
int epnum,
void **ptr
)
{
struct ehci_qh *qh = 0;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
dev = (struct hcd_dev *)urb->dev->hcpriv;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe))
epnum |= 0x10;
vdbg ("%s: submit_async urb %p len %d ep %d-%s qtd %p [qh %p]",
ehci->hcd.self.bus_name, urb, urb->transfer_buffer_length,
epnum & 0x0f, (epnum & 0x10) ? "in" : "out",
qtd, dev ? dev->ep [epnum] : (void *)~0);
spin_lock_irqsave (&ehci->lock, flags);
qh = (struct ehci_qh *) dev->ep [epnum];
qh = (struct ehci_qh *) *ptr;
if (likely (qh != 0)) {
u32 hw_next = QTD_NEXT (qtd->qtd_dma);
struct ehci_qtd *qtd;
if (unlikely (list_empty (qtd_list)))
qtd = 0;
else
qtd = list_entry (qtd_list->next, struct ehci_qtd,
qtd_list);
/* maybe patch the qh used for set_address */
if (unlikely (epnum == 0
&& le32_to_cpu (qh->hw_info1 & 0x7f) == 0))
qh->hw_info1 |= cpu_to_le32 (usb_pipedevice(urb->pipe));
/* is an URB is queued to this qh already? */
if (unlikely (!list_empty (&qh->qtd_list))) {
/* append to tds already queued to this qh? */
if (unlikely (!list_empty (&qh->qtd_list) && qtd)) {
struct ehci_qtd *last_qtd;
int short_rx = 0;
u32 hw_next;
/* update the last qtd's "next" pointer */
// dbg_qh ("non-empty qh", ehci, qh);
last_qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
hw_next = QTD_NEXT (qtd->qtd_dma);
last_qtd->hw_next = hw_next;
/* previous urb allows short rx? maybe optimize. */
......@@ -803,6 +829,7 @@ submit_async (
* Interrupt code must cope with case of HC having it
* cached, and clobbering these updates.
* ... complicates getting rid of extra interrupts!
* (Or: use dummy td, so cache always stays valid.)
*/
if (qh->hw_current == cpu_to_le32 (last_qtd->qtd_dma)) {
wmb ();
......@@ -822,31 +849,61 @@ submit_async (
*/
/* usb_clear_halt() means qh data toggle gets reset */
if (usb_pipebulk (urb->pipe)
&& unlikely (!usb_gettoggle (urb->dev,
if (unlikely (!usb_gettoggle (urb->dev,
(epnum & 0x0f),
!(epnum & 0x10)))) {
clear_toggle (urb->dev,
epnum & 0x0f, !(epnum & 0x10), qh);
}
qh_update (qh, qtd);
if (qtd)
qh_update (qh, qtd);
}
list_splice (qtd_list, qh->qtd_list.prev);
} else {
/* can't sleep here, we have ehci->lock... */
qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
if (likely (qh != 0)) {
// dbg_qh ("new qh", ehci, qh);
dev->ep [epnum] = qh;
}
// if (qh) dbg_qh ("new qh", ehci, qh);
*ptr = qh;
}
if (qh)
urb->hcpriv = qh_get (qh);
return qh;
}
/*-------------------------------------------------------------------------*/
static int
submit_async (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
struct ehci_qtd *qtd;
struct hcd_dev *dev;
int epnum;
unsigned long flags;
struct ehci_qh *qh = 0;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
dev = (struct hcd_dev *)urb->dev->hcpriv;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe))
epnum |= 0x10;
vdbg ("%s: submit_async urb %p len %d ep %d-%s qtd %p [qh %p]",
ehci->hcd.self.bus_name, urb, urb->transfer_buffer_length,
epnum & 0x0f, (epnum & 0x10) ? "in" : "out",
qtd, dev ? dev->ep [epnum] : (void *)~0);
spin_lock_irqsave (&ehci->lock, flags);
qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely (qh != 0)) {
urb->hcpriv = qh_get (qh);
if (likely (qh->qh_state == QH_STATE_IDLE))
qh_link_async (ehci, qh_get (qh));
}
......@@ -873,7 +930,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
ehci->reclaim = 0;
ehci->reclaim_ready = 0;
qh_completions (ehci, qh, 1);
qh_completions (ehci, qh);
// unlink any urb should now unlink all following urbs, so that
// relinking only happens for urbs before the unlinked ones.
......@@ -973,13 +1030,15 @@ static void scan_async (struct ehci_hcd *ehci)
spin_unlock_irqrestore (&ehci->lock, flags);
/* concurrent unlink could happen here */
qh_completions (ehci, qh, 1);
qh_completions (ehci, qh);
spin_lock_irqsave (&ehci->lock, flags);
qh_put (ehci, qh);
}
/* unlink idle entries (reduces PCI usage) */
/* unlink idle entries, reducing HC PCI usage as
* well as HCD schedule-scanning costs
*/
if (list_empty (&qh->qtd_list) && !ehci->reclaim) {
if (qh->qh_next.qh != qh) {
// dbg ("irq/empty");
......@@ -987,6 +1046,7 @@ static void scan_async (struct ehci_hcd *ehci)
} else {
// FIXME: arrange to stop
// after it's been idle a while.
// stop/restart isn't free...
}
}
qh = qh->qh_next.qh;
......
......@@ -220,31 +220,31 @@ static int disable_periodic (struct ehci_hcd *ehci)
/*-------------------------------------------------------------------------*/
// FIXME microframe periods not yet handled
static void intr_deschedule (
struct ehci_hcd *ehci,
unsigned frame,
struct ehci_qh *qh,
unsigned period
int wait
) {
unsigned long flags;
int status;
period >>= 3; // FIXME microframe periods not handled yet
unsigned frame = qh->start;
spin_lock_irqsave (&ehci->lock, flags);
do {
periodic_unlink (ehci, frame, qh);
qh_put (ehci, qh);
frame += period;
frame += qh->period;
} while (frame < ehci->periodic_size);
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = 0;
ehci->periodic_urbs--;
ehci->periodic_sched--;
/* maybe turn off periodic schedule */
if (!ehci->periodic_urbs)
if (!ehci->periodic_sched)
status = disable_periodic (ehci);
else {
status = 0;
......@@ -258,21 +258,35 @@ static void intr_deschedule (
* (yeech!) to be sure it's done.
* No other threads may be mucking with this qh.
*/
if (!status && ((ehci_get_frame (&ehci->hcd) - frame) % period) == 0)
udelay (125);
if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
if (wait) {
udelay (125);
qh->hw_next = EHCI_LIST_END;
} else {
/* we may not be IDLE yet, but if the qh is empty
* the race is very short. then if qh also isn't
* rescheduled soon, it won't matter. otherwise...
*/
vdbg ("intr_deschedule...");
}
} else
qh->hw_next = EHCI_LIST_END;
qh->qh_state = QH_STATE_IDLE;
qh->hw_next = EHCI_LIST_END;
/* update per-qh bandwidth utilization (for usbfs) */
ehci->hcd.self.bandwidth_allocated -=
(qh->usecs + qh->c_usecs) / qh->period;
vdbg ("descheduled qh %p, per = %d frame = %d count = %d, urbs = %d",
qh, period, frame,
atomic_read (&qh->refcount), ehci->periodic_urbs);
qh, qh->period, frame,
atomic_read (&qh->refcount), ehci->periodic_sched);
}
static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
int uframe,
unsigned uframe,
unsigned period,
unsigned usecs
) {
......@@ -309,19 +323,142 @@ static int check_period (
return 1;
}
static int check_intr_schedule (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
const struct ehci_qh *qh,
u32 *c_maskp
)
{
int retval = -ENOSPC;
if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
goto done;
if (!qh->c_usecs) {
retval = 0;
*c_maskp = cpu_to_le32 (0);
goto done;
}
/* This is a split transaction; check the bandwidth available for
* the completion too. Check both worst and best case gaps: worst
* case is SPLIT near uframe end, and CSPLIT near start ... best is
* vice versa. Difference can be almost two uframe times, but we
* reserve unnecessary bandwidth (waste it) this way. (Actually
* even better cases exist, like immediate device NAK.)
*
* FIXME don't even bother unless we know this TT is idle in that
* range of uframes ... for now, check_period() allows only one
* interrupt transfer per frame, so needn't check "TT busy" status
* when scheduling a split (QH, SITD, or FSTN).
*
* FIXME ehci 0.96 and above can use FSTNs
*/
if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
qh->period, qh->c_usecs))
goto done;
if (!check_period (ehci, frame, uframe + qh->gap_uf,
qh->period, qh->c_usecs))
goto done;
*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
retval = 0;
done:
return retval;
}
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
int status;
unsigned uframe;
u32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
qh->hw_next = EHCI_LIST_END;
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
status = check_intr_schedule (ehci, frame, --uframe,
qh, &c_mask);
} else {
uframe = 0;
c_mask = 0;
status = -ENOSPC;
}
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
if (status) {
frame = qh->period - 1;
do {
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule (ehci,
frame, uframe, qh,
&c_mask);
if (status == 0)
break;
}
} while (status && --frame);
if (status)
goto done;
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= ~0xffff;
qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
} else
dbg ("reused previous qh %p schedule", qh);
/* stuff into the periodic schedule */
qh->qh_state = QH_STATE_LINKED;
dbg ("qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
qh, qh->usecs, qh->c_usecs,
qh->period, frame, uframe, qh->gap_uf);
do {
if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accommodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic
BUG ();
} else {
ehci->pshadow [frame].qh = qh_get (qh);
ehci->periodic [frame] =
QH_NEXT (qh->qh_dma);
}
wmb ();
frame += qh->period;
} while (frame < ehci->periodic_size);
/* update per-qh bandwidth for usbfs */
ehci->hcd.self.bandwidth_allocated +=
(qh->usecs + qh->c_usecs) / qh->period;
/* maybe enable periodic schedule processing */
if (!ehci->periodic_sched++)
status = enable_periodic (ehci);
done:
return status;
}
static int intr_submit (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
unsigned epnum, period;
unsigned short usecs, c_usecs, gap_uf;
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
struct hcd_dev *dev;
int is_input;
int status = 0;
struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = usb_pipeendpoint (urb->pipe);
......@@ -329,198 +466,30 @@ static int intr_submit (
if (is_input)
epnum |= 0x10;
/*
* HS interrupt transfers are simple -- only one microframe. FS/LS
* interrupt transfers involve a SPLIT in one microframe and CSPLIT
* sometime later. We need to know how much time each will be
* needed in each microframe and, for FS/LS, how many microframes
* separate the two in the best case.
*/
usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
urb->transfer_buffer_length);
if (urb->dev->speed == USB_SPEED_HIGH) {
gap_uf = 0;
c_usecs = 0;
/* FIXME handle HS periods of less than 1 frame. */
period = urb->interval >> 3;
if (period < 1) {
dbg ("intr period %d uframes, NYET!", urb->interval);
status = -EINVAL;
goto done;
}
} else {
/* gap is a function of full/low speed transfer times */
gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, is_input, 0,
urb->transfer_buffer_length) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
c_usecs = usecs + HS_USECS (0);
usecs = HS_USECS (1);
} else { // SPLIT+DATA, gap, CSPLIT
usecs = usecs + HS_USECS (1);
c_usecs = HS_USECS (0);
}
period = urb->interval;
}
spin_lock_irqsave (&ehci->lock, flags);
dev = (struct hcd_dev *)urb->dev->hcpriv;
/*
* NOTE: current completion/restart logic doesn't handle more than
* one qtd in a periodic qh ... 16-20 KB/urb is pretty big for this.
* such big requests need many periods to transfer.
*
* FIXME want to change hcd core submit model to expect queuing
* for all transfer types ... not just ISO and (with flag) BULK.
* that means: getting rid of this check; handling the "interrupt
* urb already queued" case below like bulk queuing is handled (no
* errors possible!); and completly getting rid of that annoying
* qh restart logic. simpler/smaller overall, and more flexible.
*/
if (unlikely (qtd_list->next != qtd_list->prev)) {
dbg ("only one intr qtd per urb allowed");
status = -EINVAL;
/* get qh and force any scheduling errors */
INIT_LIST_HEAD (&empty);
qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
if (qh == 0) {
status = -ENOMEM;
goto done;
}
spin_lock_irqsave (&ehci->lock, flags);
/* get the qh (must be empty and idle) */
dev = (struct hcd_dev *)urb->dev->hcpriv;
qh = (struct ehci_qh *) dev->ep [epnum];
if (qh) {
/* only allow one queued interrupt urb per EP */
if (unlikely (qh->qh_state != QH_STATE_IDLE
|| !list_empty (&qh->qtd_list))) {
dbg ("interrupt urb already queued");
status = -EBUSY;
} else {
/* maybe reset hardware's data toggle in the qh */
if (unlikely (!usb_gettoggle (urb->dev, epnum & 0x0f,
!(epnum & 0x10)))) {
qh->hw_token |=
__constant_cpu_to_le32 (QTD_TOGGLE);
usb_settoggle (urb->dev, epnum & 0x0f,
!(epnum & 0x10), 1);
}
/* trust the QH was set up as interrupt ... */
list_splice (qtd_list, &qh->qtd_list);
qh_update (qh, list_entry (qtd_list->next,
struct ehci_qtd, qtd_list));
qtd_list = &qh->qtd_list;
}
} else {
/* can't sleep here, we have ehci->lock... */
qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
if (likely (qh != 0)) {
// dbg ("new INTR qh %p", qh);
dev->ep [epnum] = qh;
qtd_list = &qh->qtd_list;
} else
status = -ENOMEM;
if (qh->qh_state == QH_STATE_IDLE) {
if ((status = qh_schedule (ehci, qh)) != 0)
goto done;
}
/* Schedule this periodic QH. */
if (likely (status == 0)) {
unsigned frame = period;
qh->hw_next = EHCI_LIST_END;
qh->usecs = usecs;
qh->c_usecs = c_usecs;
urb->hcpriv = qh_get (qh);
status = -ENOSPC;
/* pick a set of schedule slots, link the QH into them */
do {
int uframe;
u32 c_mask = 0;
/* pick a set of slots such that all uframes have
* enough periodic bandwidth available.
*/
frame--;
for (uframe = 0; uframe < 8; uframe++) {
if (check_period (ehci, frame, uframe,
period, usecs) == 0)
continue;
/* If this is a split transaction, check the
* bandwidth available for the completion
* too. check both best and worst case gaps:
* worst case is SPLIT near uframe end, and
* CSPLIT near start ... best is vice versa.
* Difference can be almost two uframe times.
*
* FIXME don't even bother unless we know
* this TT is idle in that uframe ... right
* now we know only one interrupt transfer
* will be scheduled per frame, so we don't
* need to update/check TT state when we
* schedule a split (QH, SITD, or FSTN).
*
* FIXME ehci 0.96 and above can use FSTNs
*/
if (!c_usecs)
break;
if (check_period (ehci, frame,
uframe + gap_uf,
period, c_usecs) == 0)
continue;
if (check_period (ehci, frame,
uframe + gap_uf + 1,
period, c_usecs) == 0)
continue;
c_mask = 0x03 << (8 + uframe + gap_uf);
c_mask = cpu_to_le32 (c_mask);
break;
}
if (uframe == 8)
continue;
/* QH will run once each period, starting there */
urb->start_frame = frame;
status = 0;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= ~0xffff;
qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
// dbg_qh ("Schedule INTR qh", ehci, qh);
/* stuff into the periodic schedule */
qh->qh_state = QH_STATE_LINKED;
vdbg ("qh %p usecs %d period %d.0 starting %d.%d",
qh, qh->usecs, period, frame, uframe);
do {
if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
// AND handle it already being (implicitly) linked into this frame
// AS WELL AS updating the check_period() logic
BUG ();
} else {
ehci->pshadow [frame].qh = qh_get (qh);
ehci->periodic [frame] =
QH_NEXT (qh->qh_dma);
}
wmb ();
frame += period;
} while (frame < ehci->periodic_size);
/* update bandwidth utilization records (for usbfs) */
usb_claim_bandwidth (urb->dev, urb,
(usecs + c_usecs) / period, 0);
/* then queue the urb's tds to the qh */
qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
BUG_ON (qh == 0);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++)
status = enable_periodic (ehci);
break;
/* ... update usbfs periodic stats */
ehci->hcd.self.bandwidth_int_reqs++;
} while (frame);
}
spin_unlock_irqrestore (&ehci->lock, flags);
done:
spin_unlock_irqrestore (&ehci->lock, flags);
if (status)
qtd_list_free (ehci, urb, qtd_list);
......@@ -534,10 +503,6 @@ intr_complete (
struct ehci_qh *qh,
unsigned long flags /* caller owns ehci->lock ... */
) {
struct ehci_qtd *qtd;
struct urb *urb;
int unlinking;
/* nothing to report? */
if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
!= 0))
......@@ -547,43 +512,14 @@ intr_complete (
return flags;
}
qtd = list_entry (qh->qtd_list.next, struct ehci_qtd, qtd_list);
urb = qtd->urb;
unlinking = (urb->status == -ENOENT) || (urb->status == -ECONNRESET);
/* call any completions, after patching for reactivation */
/* handle any completions */
spin_unlock_irqrestore (&ehci->lock, flags);
/* NOTE: currently restricted to one qtd per qh! */
if (qh_completions (ehci, qh, 0) == 0)
urb = 0;
qh_completions (ehci, qh);
spin_lock_irqsave (&ehci->lock, flags);
/* never reactivate requests that were unlinked ... */
if (likely (urb != 0)) {
if (unlinking
|| urb->status == -ECONNRESET
|| urb->status == -ENOENT
// || (urb->dev == null)
|| ehci->hcd.state == USB_STATE_HALT)
urb = 0;
// FIXME look at all those unlink cases ... we always
// need exactly one completion that reports unlink.
// the one above might not have been it!
}
if (unlikely (list_empty (&qh->qtd_list)))
intr_deschedule (ehci, qh, 0);
/* normally reactivate */
if (likely (urb != 0)) {
if (usb_pipeout (urb->pipe))
pci_dma_sync_single (ehci->hcd.pdev,
qtd->buf_dma,
urb->transfer_buffer_length,
PCI_DMA_TODEVICE);
urb->status = -EINPROGRESS;
urb->actual_length = 0;
/* patch qh and restart */
qh_update (qh, qtd);
}
return flags;
}
......@@ -806,7 +742,7 @@ static int get_iso_range (
/* calculate the legal range [start,max) */
now = readl (&ehci->regs->frame_index) + 1; /* next uframe */
if (!ehci->periodic_urbs)
if (!ehci->periodic_sched)
now += 8; /* startup delay */
now %= mod;
end = now + mod;
......@@ -926,7 +862,7 @@ itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
usb_claim_bandwidth (urb->dev, urb, usecs, 1);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++) {
if (!ehci->periodic_sched++) {
if ((status = enable_periodic (ehci)) != 0) {
// FIXME deschedule right away
err ("itd_schedule, enable = %d", status);
......@@ -1009,8 +945,8 @@ itd_complete (
spin_lock_irqsave (&ehci->lock, flags);
/* defer stopping schedule; completion can submit */
ehci->periodic_urbs--;
if (!ehci->periodic_urbs)
ehci->periodic_sched--;
if (!ehci->periodic_sched)
(void) disable_periodic (ehci);
return flags;
......
......@@ -50,7 +50,7 @@ struct ehci_hcd { /* one per controller */
union ehci_shadow *pshadow; /* mirror hw periodic table */
int next_uframe; /* scan periodic, start here */
unsigned periodic_urbs; /* how many urbs scheduled? */
unsigned periodic_sched; /* periodic activity count */
/* deferred work from IRQ, etc */
struct tasklet_struct tasklet;
......@@ -72,7 +72,7 @@ struct ehci_hcd { /* one per controller */
};
/* unwrap an HCD pointer to get an EHCI_HCD pointer */
#define hcd_to_ehci(hcd_ptr) list_entry(hcd_ptr, struct ehci_hcd, hcd)
#define hcd_to_ehci(hcd_ptr) container_of(hcd_ptr, struct ehci_hcd, hcd)
/* NOTE: urb->transfer_flags expected to not use this bit !!! */
#define EHCI_STATE_UNLINK 0x8000 /* urb being unlinked */
......@@ -287,12 +287,20 @@ struct ehci_qh {
struct list_head qtd_list; /* sw qtd list */
atomic_t refcount;
unsigned short usecs; /* intr bandwidth */
unsigned short c_usecs; /* ... split completion bw */
short qh_state;
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
#define QH_STATE_IDLE 3 /* HC doesn't see this */
/* periodic schedule info */
u8 usecs; /* intr bandwidth */
u8 gap_uf; /* uframes split/csplit gap */
u8 c_usecs; /* ... split completion bw */
unsigned short period; /* polling interval */
unsigned short start; /* where polling starts */
#define NO_FRAME ((unsigned short)~0) /* pick new start */
} __attribute__ ((aligned (32)));
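/* A hedged sketch (not in this patch) of how the new period/start fields and
 * NO_FRAME could drive frame selection; check_period() is assumed to behave
 * as in the scheduling code above (nonzero means enough bandwidth remains).
 */
static int intr_pick_start (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned frame;

	if (qh->start != NO_FRAME)		/* keep the previously chosen slot */
		return qh->start;
	for (frame = 0; frame < qh->period; frame++) {
		if (check_period (ehci, frame, 0, qh->period, qh->usecs)) {
			qh->start = frame;
			return frame;
		}
	}
	return -ENOSPC;				/* no frame has room for qh->usecs */
}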
/*-------------------------------------------------------------------------*/
......
......@@ -938,7 +938,7 @@ static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
/* ED's now officially unlinked, hc doesn't see */
ed->state = ED_IDLE;
ed->hwINFO &= ~ED_SKIP;
ed->hwHeadP &= ~cpu_to_le32 (ED_H);
ed->hwHeadP &= ~ED_H;
ed->hwNextED = 0;
/* but if there's work queued, reschedule */
......
/*
* $Id$
*
* konicawc.c - konica webcam driver
*
* Author: Simon Evans <spse@secret.org.uk>
......@@ -8,7 +6,7 @@
* Copyright (C) 2002 Simon Evans
*
* Licence: GPL
*
*
* Driver for USB webcams based on the Konica chipset. This
* chipset is used in the Intel YC76 camera.
*
......@@ -18,6 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include "usbvideo.h"
#define MAX_BRIGHTNESS 108
......@@ -26,9 +25,11 @@
#define MAX_SHARPNESS 108
#define MAX_WHITEBAL 372
#define MAX_SPEED 6
#define MAX_CAMERAS 1
#define DRIVER_VERSION "v1.1"
#define DRIVER_VERSION "v1.3"
#define DRIVER_DESC "Konica Webcam driver"
enum ctrl_req {
......@@ -41,18 +42,32 @@ enum ctrl_req {
enum frame_sizes {
SIZE_160X136 = 0,
SIZE_176X144 = 1,
SIZE_320X240 = 2,
SIZE_160X120 = 0,
SIZE_160X136 = 1,
SIZE_176X144 = 2,
SIZE_320X240 = 3,
};
#define MAX_FRAME_SIZE SIZE_320X240
static usbvideo_t *cams;
#ifdef CONFIG_USB_DEBUG
static int debug;
#define DEBUG(n, format, arg...) \
if (n <= debug) { \
printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __FUNCTION__ , ## arg); \
}
#else
#define DEBUG(n, arg...)
static const int debug = 0;
#endif
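/* A minimal usage sketch (not in the driver): DEBUG() is gated on the
 * "debug" module parameter, e.g. "modprobe konicawc debug=2".  With
 * CONFIG_USB_DEBUG disabled, debug is a const 0 and every call expands
 * to nothing.
 */
static inline void konicawc_debug_example(int speed)
{
	DEBUG(1, "requested speed %d", speed);	/* printed when debug >= 1 */
	DEBUG(9, "very verbose path");		/* silent unless debug >= 9 */
}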
/* Some default values for initial camera settings,
can be set by modprobe */
static int debug;
static enum frame_sizes size;
static int speed = 6; /* Speed (fps) 0 (slowest) to 6 (fastest) */
static int brightness = MAX_BRIGHTNESS/2;
......@@ -61,31 +76,36 @@ static int saturation = MAX_SATURATION/2;
static int sharpness = MAX_SHARPNESS/2;
static int whitebal = 3*(MAX_WHITEBAL/4);
static int speed_to_interface[] = { 1, 0, 3, 2, 4, 5, 6 };
static int spd_to_iface[] = { 1, 0, 3, 2, 4, 5, 6 };
/* These FPS speeds are from the windows config box. They are
* indexed on size (0-3) and speed (0-6). Divide by 3 to get the
* real fps.
*/
static int speed_to_fps[3][7] = { { 24, 40, 48, 60, 72, 80, 100 },
{ 18, 30, 36, 45, 54, 60, 75 },
{ 6, 10, 12, 15, 18, 20, 25 } };
static int spd_to_fps[][7] = { { 24, 40, 48, 60, 72, 80, 100 },
{ 24, 40, 48, 60, 72, 80, 100 },
{ 18, 30, 36, 45, 54, 60, 75 },
{ 6, 10, 12, 15, 18, 21, 25 } };
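/* A minimal sketch of how the table above is consumed (illustration only,
 * not part of the driver): the frame rate is the table entry divided by 3,
 * as konicawc_calculate_fps() does further down.  E.g. spd_to_fps[3][6] = 25,
 * so 320x240 at speed 6 runs at 25 / 3 = 8 frames per second.
 */
static inline int konicawc_fps_for(int size, int speed)
{
	return spd_to_fps[size][speed] / 3;
}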
static int camera_sizes[][2] = { { 160, 136 },
{ 176, 144 },
{ 320, 240 },
{ } /* List terminator */
struct cam_size {
u16 width;
u16 height;
u8 cmd;
};
static struct cam_size camera_sizes[] = { { 160, 120, 0x7 },
{ 160, 136, 0xa },
{ 176, 144, 0x4 },
{ 320, 240, 0x5 } };
struct konicawc {
u8 brightness; /* camera uses 0 - 9, x11 for real value */
u8 contrast; /* as above */
u8 saturation; /* as above */
u8 sharpness; /* as above */
u8 white_bal; /* 0 - 33, x11 for real value */
u8 speed; /* Stored as 0 - 6, used as index in speed_to_* (above) */
u8 speed; /* Stored as 0 - 6, used as index in spd_to_* (above) */
u8 size; /* Frame Size */
int height;
int width;
......@@ -93,6 +113,10 @@ struct konicawc {
u8 sts_buf[USBVIDEO_NUMSBUF][FRAMES_PER_DESC];
struct urb *last_data_urb;
int lastframe;
int cur_frame_size; /* number of bytes in current frame size */
int maxline; /* number of lines per frame */
int yplanesz; /* Number of bytes in the Y plane */
unsigned int buttonsts:1;
};
......@@ -110,42 +134,56 @@ static int konicawc_ctrl_msg(uvd_t *uvd, u8 dir, u8 request, u16 value, u16 inde
}
static inline void konicawc_camera_on(uvd_t *uvd)
{
DEBUG(0, "camera on");
konicawc_set_misc(uvd, 0x2, 1, 0x0b);
}
static inline void konicawc_camera_off(uvd_t *uvd)
{
DEBUG(0, "camera off");
konicawc_set_misc(uvd, 0x2, 0, 0x0b);
}
static void konicawc_set_camera_size(uvd_t *uvd)
{
struct konicawc *cam = (struct konicawc *)uvd->user_data;
konicawc_set_misc(uvd, 0x2, camera_sizes[cam->size].cmd, 0x08);
cam->width = camera_sizes[cam->size].width;
cam->height = camera_sizes[cam->size].height;
cam->yplanesz = cam->height * cam->width;
cam->cur_frame_size = (cam->yplanesz * 3) / 2;
cam->maxline = cam->yplanesz / 256;
uvd->videosize = VIDEOSIZE(cam->width, cam->height);
}
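/* Worked example for the 320x240 setting (illustration, not driver code):
 * yplanesz = 320 * 240 = 76800 bytes of Y, cur_frame_size = 76800 * 3 / 2 =
 * 115200 bytes (YUV 4:2:0, so U and V take 19200 bytes each), and maxline =
 * 76800 / 256 = 300 -- one line per 384-byte block of 256 Y + 64 U + 64 V
 * unpacked by konicawc_process_isoc() below.
 */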
static int konicawc_setup_on_open(uvd_t *uvd)
{
struct konicawc *cam = (struct konicawc *)uvd->user_data;
konicawc_set_misc(uvd, 0x2, 0, 0x0b);
dbg("setting brightness to %d (%d)", cam->brightness,
DEBUG(1, "setting brightness to %d (%d)", cam->brightness,
cam->brightness * 11);
konicawc_set_value(uvd, cam->brightness, SetBrightness);
dbg("setting white balance to %d (%d)", cam->white_bal,
DEBUG(1, "setting white balance to %d (%d)", cam->white_bal,
cam->white_bal * 11);
konicawc_set_value(uvd, cam->white_bal, SetWhitebal);
dbg("setting contrast to %d (%d)", cam->contrast,
DEBUG(1, "setting contrast to %d (%d)", cam->contrast,
cam->contrast * 11);
konicawc_set_value(uvd, cam->contrast, SetContrast);
dbg("setting saturation to %d (%d)", cam->saturation,
DEBUG(1, "setting saturation to %d (%d)", cam->saturation,
cam->saturation * 11);
konicawc_set_value(uvd, cam->saturation, SetSaturation);
dbg("setting sharpness to %d (%d)", cam->sharpness,
DEBUG(1, "setting sharpness to %d (%d)", cam->sharpness,
cam->sharpness * 11);
konicawc_set_value(uvd, cam->sharpness, SetSharpness);
dbg("setting size %d", cam->size);
switch(cam->size) {
case 0:
konicawc_set_misc(uvd, 0x2, 0xa, 0x08);
break;
case 1:
konicawc_set_misc(uvd, 0x2, 4, 0x08);
break;
case 2:
konicawc_set_misc(uvd, 0x2, 5, 0x08);
break;
}
konicawc_set_misc(uvd, 0x2, 1, 0x0b);
cam->lastframe = -1;
konicawc_set_camera_size(uvd);
cam->lastframe = -2;
cam->buttonsts = 0;
return 0;
}
......@@ -154,23 +192,25 @@ static void konicawc_adjust_picture(uvd_t *uvd)
{
struct konicawc *cam = (struct konicawc *)uvd->user_data;
dbg("new brightness: %d", uvd->vpic.brightness);
konicawc_camera_off(uvd);
DEBUG(1, "new brightness: %d", uvd->vpic.brightness);
uvd->vpic.brightness = (uvd->vpic.brightness > MAX_BRIGHTNESS) ? MAX_BRIGHTNESS : uvd->vpic.brightness;
if(cam->brightness != uvd->vpic.brightness / 11) {
cam->brightness = uvd->vpic.brightness / 11;
dbg("setting brightness to %d (%d)", cam->brightness,
DEBUG(1, "setting brightness to %d (%d)", cam->brightness,
cam->brightness * 11);
konicawc_set_value(uvd, cam->brightness, SetBrightness);
}
dbg("new contrast: %d", uvd->vpic.contrast);
DEBUG(1, "new contrast: %d", uvd->vpic.contrast);
uvd->vpic.contrast = (uvd->vpic.contrast > MAX_CONTRAST) ? MAX_CONTRAST : uvd->vpic.contrast;
if(cam->contrast != uvd->vpic.contrast / 11) {
cam->contrast = uvd->vpic.contrast / 11;
dbg("setting contrast to %d (%d)", cam->contrast,
DEBUG(1, "setting contrast to %d (%d)", cam->contrast,
cam->contrast * 11);
konicawc_set_value(uvd, cam->contrast, SetContrast);
}
konicawc_camera_on(uvd);
}
......@@ -180,21 +220,20 @@ static int konicawc_compress_iso(uvd_t *uvd, struct urb *dataurb, struct urb *st
int i, totlen = 0;
unsigned char *status = stsurb->transfer_buffer;
int keep = 0, discard = 0, bad = 0;
static int buttonsts = 0;
struct konicawc *cam = (struct konicawc *)uvd->user_data;
for (i = 0; i < dataurb->number_of_packets; i++) {
int button = buttonsts;
int button = cam->buttonsts;
unsigned char sts;
int n = dataurb->iso_frame_desc[i].actual_length;
int st = dataurb->iso_frame_desc[i].status;
cdata = dataurb->transfer_buffer +
cdata = dataurb->transfer_buffer +
dataurb->iso_frame_desc[i].offset;
/* Detect and ignore errored packets */
if (st < 0) {
if (debug >= 1)
err("Data error: packet=%d. len=%d. status=%d.",
i, n, st);
DEBUG(1, "Data error: packet=%d. len=%d. status=%d.",
i, n, st);
uvd->stats.iso_err_count++;
continue;
}
......@@ -210,8 +249,8 @@ static int konicawc_compress_iso(uvd_t *uvd, struct urb *dataurb, struct urb *st
/* sts: 0x80-0xff: frame start with frame number (ie 0-7f)
* otherwise:
* bit 0 0:drop packet (padding data)
* 1 keep packet
* bit 0 0: keep packet
* 1: drop packet (padding data)
*
* bit 4 0 button not clicked
* 1 button clicked
......@@ -225,10 +264,10 @@ static int konicawc_compress_iso(uvd_t *uvd, struct urb *dataurb, struct urb *st
/* work out the button status, but don't do
anything with it for now */
if(button != buttonsts) {
dbg("button: %sclicked", button ? "" : "un");
buttonsts = button;
if(button != cam->buttonsts) {
DEBUG(2, "button: %sclicked", button ? "" : "un");
cam->buttonsts = button;
}
if(sts == 0x01) { /* drop frame */
......@@ -241,34 +280,52 @@ static int konicawc_compress_iso(uvd_t *uvd, struct urb *dataurb, struct urb *st
bad++;
continue;
}
if(!sts && cam->lastframe == -2) {
DEBUG(2, "dropping frame looking for image start");
continue;
}
keep++;
if(*(status+i) & 0x80) { /* frame start */
if(sts & 0x80) { /* frame start */
unsigned char marker[] = { 0, 0xff, 0, 0x00 };
if(debug > 1)
dbg("Adding Marker packet = %d, frame = %2.2x",
i, *(status+i));
marker[3] = *(status+i) - 0x80;
RingQueue_Enqueue(&uvd->dp, marker, 4);
if(cam->lastframe == -2) {
DEBUG(2, "found initial image");
cam->lastframe = -1;
}
marker[3] = sts & 0x7F;
RingQueue_Enqueue(&uvd->dp, marker, 4);
totlen += 4;
}
totlen += n; /* Little local accounting */
if(debug > 5)
dbg("Adding packet %d, bytes = %d", i, n);
RingQueue_Enqueue(&uvd->dp, cdata, n);
}
if(debug > 8) {
dbg("finished: keep = %d discard = %d bad = %d added %d bytes",
DEBUG(8, "finished: keep = %d discard = %d bad = %d added %d bytes",
keep, discard, bad, totlen);
}
return totlen;
}
static void resubmit_urb(uvd_t *uvd, struct urb *urb)
{
int i, ret;
for (i = 0; i < FRAMES_PER_DESC; i++) {
urb->iso_frame_desc[i].status = 0;
}
urb->dev = uvd->dev;
urb->status = 0;
ret = usb_submit_urb(urb, GFP_KERNEL);
DEBUG(3, "submitting urb of length %d", urb->transfer_buffer_length);
if(ret)
err("usb_submit_urb error (%d)", ret);
}
static void konicawc_isoc_irq(struct urb *urb)
{
int i, ret, len = 0;
uvd_t *uvd = urb->context;
struct konicawc *cam = (struct konicawc *)uvd->user_data;
......@@ -277,42 +334,35 @@ static void konicawc_isoc_irq(struct urb *urb)
return;
if (!uvd->streaming) {
if (debug >= 1)
info("Not streaming, but interrupt!");
DEBUG(1, "Not streaming, but interrupt!");
return;
}
if (urb->actual_length > 32) {
cam->last_data_urb = urb;
goto urb_done_with;
}
DEBUG(3, "got frame %d len = %d buflen =%d", urb->start_frame, urb->actual_length, urb->transfer_buffer_length);
uvd->stats.urb_count++;
if (urb->actual_length <= 0)
goto urb_done_with;
if (urb->transfer_buffer_length > 32) {
cam->last_data_urb = urb;
return;
}
/* Copy the data received into ring queue */
if(cam->last_data_urb) {
len = konicawc_compress_iso(uvd, cam->last_data_urb, urb);
for (i = 0; i < FRAMES_PER_DESC; i++) {
cam->last_data_urb->iso_frame_desc[i].status = 0;
}
int len = 0;
if(urb->start_frame != cam->last_data_urb->start_frame)
err("Lost sync on frames");
else if (!urb->status && !cam->last_data_urb->status)
len = konicawc_compress_iso(uvd, cam->last_data_urb, urb);
resubmit_urb(uvd, urb);
resubmit_urb(uvd, cam->last_data_urb);
cam->last_data_urb = NULL;
uvd->stats.urb_length = len;
uvd->stats.data_count += len;
if(len)
RingQueue_WakeUpInterruptible(&uvd->dp);
return;
}
uvd->stats.urb_length = len;
uvd->stats.data_count += len;
if(len)
RingQueue_WakeUpInterruptible(&uvd->dp);
urb_done_with:
for (i = 0; i < FRAMES_PER_DESC; i++) {
urb->iso_frame_desc[i].status = 0;
}
urb->dev = uvd->dev;
urb->status = 0;
ret = usb_submit_urb(urb, GFP_KERNEL);
if(ret)
err("usb_submit_urb error (%d)", ret);
return;
}
......@@ -322,13 +372,18 @@ static int konicawc_start_data(uvd_t *uvd)
struct usb_device *dev = uvd->dev;
int i, errFlag;
struct konicawc *cam = (struct konicawc *)uvd->user_data;
int pktsz;
struct usb_interface_descriptor *interface;
interface = &dev->actconfig->interface[uvd->iface].altsetting[spd_to_iface[cam->speed]];
pktsz = interface->endpoint[1].wMaxPacketSize;
DEBUG(1, "pktsz = %d", pktsz);
if (!CAMERA_IS_OPERATIONAL(uvd)) {
err("Camera is not operational");
return -EFAULT;
}
uvd->curframe = -1;
konicawc_camera_on(uvd);
/* Alternate interface 1 is the biggest frame size */
i = usb_set_interface(dev, uvd->iface, uvd->ifaceAltActive);
if (i < 0) {
......@@ -349,10 +404,10 @@ static int konicawc_start_data(uvd_t *uvd)
urb->transfer_buffer = uvd->sbuf[i].data;
urb->complete = konicawc_isoc_irq;
urb->number_of_packets = FRAMES_PER_DESC;
urb->transfer_buffer_length = uvd->iso_packet_len * FRAMES_PER_DESC;
for (j=k=0; j < FRAMES_PER_DESC; j++, k += uvd->iso_packet_len) {
urb->transfer_buffer_length = pktsz * FRAMES_PER_DESC;
for (j=k=0; j < FRAMES_PER_DESC; j++, k += pktsz) {
urb->iso_frame_desc[j].offset = k;
urb->iso_frame_desc[j].length = uvd->iso_packet_len;
urb->iso_frame_desc[j].length = pktsz;
}
urb = cam->sts_urb[i];
......@@ -375,18 +430,17 @@ static int konicawc_start_data(uvd_t *uvd)
/* Submit all URBs */
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
errFlag = usb_submit_urb(uvd->sbuf[i].urb, GFP_KERNEL);
if (errFlag)
err ("usb_submit_isoc(%d) ret %d", i, errFlag);
errFlag = usb_submit_urb(cam->sts_urb[i], GFP_KERNEL);
if (errFlag)
err("usb_submit_isoc(%d) ret %d", i, errFlag);
errFlag = usb_submit_urb(uvd->sbuf[i].urb, GFP_KERNEL);
if (errFlag)
err ("usb_submit_isoc(%d) ret %d", i, errFlag);
}
uvd->streaming = 1;
if (debug > 1)
dbg("streaming=1 video_endp=$%02x", uvd->video_endp);
DEBUG(1, "streaming=1 video_endp=$%02x", uvd->video_endp);
return 0;
}
......@@ -399,6 +453,8 @@ static void konicawc_stop_data(uvd_t *uvd)
if ((uvd == NULL) || (!uvd->streaming) || (uvd->dev == NULL))
return;
konicawc_camera_off(uvd);
uvd->streaming = 0;
cam = (struct konicawc *)uvd->user_data;
cam->last_data_urb = NULL;
......@@ -413,8 +469,6 @@ static void konicawc_stop_data(uvd_t *uvd)
err("usb_unlink_urb() error %d.", j);
}
uvd->streaming = 0;
if (!uvd->remove_pending) {
/* Set packet size to 0 */
j = usb_set_interface(uvd->dev, uvd->iface, uvd->ifaceAltInactive);
......@@ -428,42 +482,33 @@ static void konicawc_stop_data(uvd_t *uvd)
static void konicawc_process_isoc(uvd_t *uvd, usbvideo_frame_t *frame)
{
int n;
int maxline, yplanesz;
struct konicawc *cam = (struct konicawc *)uvd->user_data;
assert(uvd != NULL);
int maxline = cam->maxline;
int yplanesz = cam->yplanesz;
assert(frame != NULL);
maxline = (cam->height * cam->width * 3) / (2 * 384);
yplanesz = cam->height * cam->width;
if(debug > 5)
dbg("maxline = %d yplanesz = %d", maxline, yplanesz);
if(debug > 3)
dbg("Frame state = %d", frame->scanstate);
DEBUG(5, "maxline = %d yplanesz = %d", maxline, yplanesz);
DEBUG(3, "Frame state = %d", frame->scanstate);
if(frame->scanstate == ScanState_Scanning) {
int drop = 0;
int curframe;
int fdrops = 0;
if(debug > 3)
dbg("Searching for marker, queue len = %d", RingQueue_GetLength(&uvd->dp));
DEBUG(3, "Searching for marker, queue len = %d", RingQueue_GetLength(&uvd->dp));
while(RingQueue_GetLength(&uvd->dp) >= 4) {
if ((RING_QUEUE_PEEK(&uvd->dp, 0) == 0x00) &&
(RING_QUEUE_PEEK(&uvd->dp, 1) == 0xff) &&
(RING_QUEUE_PEEK(&uvd->dp, 2) == 0x00) &&
(RING_QUEUE_PEEK(&uvd->dp, 3) < 0x80)) {
curframe = RING_QUEUE_PEEK(&uvd->dp, 3);
if(cam->lastframe != -1) {
if(curframe < cam->lastframe) {
fdrops = (curframe + 0x80) - cam->lastframe;
} else {
fdrops = curframe - cam->lastframe;
}
if(cam->lastframe >= 0) {
fdrops = (0x80 + curframe - cam->lastframe) & 0x7F;
fdrops--;
if(fdrops)
if(fdrops) {
info("Dropped %d frames (%d -> %d)", fdrops,
cam->lastframe, curframe);
}
}
cam->lastframe = curframe;
frame->curline = 0;
......@@ -474,18 +519,20 @@ static void konicawc_process_isoc(uvd_t *uvd, usbvideo_frame_t *frame)
RING_QUEUE_DEQUEUE_BYTES(&uvd->dp, 1);
drop++;
}
if(drop)
DEBUG(2, "dropped %d bytes looking for new frame", drop);
}
if(frame->scanstate == ScanState_Scanning)
return;
/* Try to move data from queue into frame buffer
/* Try to move data from queue into frame buffer
* We get data in blocks of 384 bytes made up of:
* 256 Y, 64 U, 64 V.
* This needs to be written out as a Y plane, a U plane and a V plane.
*/
while ( frame->curline < maxline && (n = RingQueue_GetLength(&uvd->dp)) >= 384) {
while ( frame->curline < maxline && (RingQueue_GetLength(&uvd->dp) >= 384)) {
/* Y */
RingQueue_Dequeue(&uvd->dp, frame->data + (frame->curline * 256), 256);
/* U */
......@@ -497,8 +544,7 @@ static void konicawc_process_isoc(uvd_t *uvd, usbvideo_frame_t *frame)
}
/* See if we filled the frame */
if (frame->curline == maxline) {
if(debug > 5)
dbg("got whole frame");
DEBUG(5, "got whole frame");
frame->frameState = FrameState_Done_Hold;
frame->curline = 0;
......@@ -510,10 +556,8 @@ static void konicawc_process_isoc(uvd_t *uvd, usbvideo_frame_t *frame)
static int konicawc_calculate_fps(uvd_t *uvd)
{
struct konicawc *t = uvd->user_data;
dbg("fps = %d", speed_to_fps[t->size][t->speed]/3);
return speed_to_fps[t->size][t->speed]/3;
struct konicawc *cam = uvd->user_data;
return spd_to_fps[cam->size][cam->speed]/3;
}
......@@ -550,10 +594,10 @@ static void konicawc_configure_video(uvd_t *uvd)
uvd->vcap.type = VID_TYPE_CAPTURE;
uvd->vcap.channels = 1;
uvd->vcap.audios = 0;
uvd->vcap.minwidth = camera_sizes[cam->size][0];
uvd->vcap.minheight = camera_sizes[cam->size][1];
uvd->vcap.maxwidth = camera_sizes[cam->size][0];
uvd->vcap.maxheight = camera_sizes[cam->size][1];
uvd->vcap.minwidth = camera_sizes[cam->size].width;
uvd->vcap.minheight = camera_sizes[cam->size].height;
uvd->vcap.maxwidth = camera_sizes[cam->size].width;
uvd->vcap.maxheight = camera_sizes[cam->size].height;
memset(&uvd->vchan, 0, sizeof(uvd->vchan));
uvd->vchan.flags = 0 ;
......@@ -563,15 +607,14 @@ static void konicawc_configure_video(uvd_t *uvd)
strcpy(uvd->vchan.name, "Camera");
/* Talk to device */
dbg("device init");
DEBUG(1, "device init");
if(!konicawc_get_misc(uvd, 0x3, 0, 0x10, buf, 2))
dbg("3,10 -> %2.2x %2.2x", buf[0], buf[1]);
DEBUG(2, "3,10 -> %2.2x %2.2x", buf[0], buf[1]);
if(!konicawc_get_misc(uvd, 0x3, 0, 0x10, buf, 2))
dbg("3,10 -> %2.2x %2.2x", buf[0], buf[1]);
DEBUG(2, "3,10 -> %2.2x %2.2x", buf[0], buf[1]);
if(konicawc_set_misc(uvd, 0x2, 0, 0xd))
dbg("2,0,d failed");
dbg("setting initial values");
DEBUG(2, "2,0,d failed");
DEBUG(1, "setting initial values");
}
......@@ -582,8 +625,7 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
int actInterface=-1, inactInterface=-1, maxPS=0;
unsigned char video_ep = 0;
if (debug >= 1)
dbg("konicawc_probe(%p,%u.)", dev, ifnum);
DEBUG(1, "konicawc_probe(%p,%u.)", dev, ifnum);
/* We don't handle multi-config cameras */
if (dev->descriptor.bNumConfigurations != 1)
......@@ -594,10 +636,8 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
/* Validate found interface: must have one ISO endpoint */
nas = dev->actconfig->interface[ifnum].num_altsetting;
if (debug > 0)
info("Number of alternate settings=%d.", nas);
if (nas < 8) {
err("Too few alternate settings for this camera!");
if (nas != 8) {
err("Incorrect number of alternate settings (%d) for this camera!", nas);
return NULL;
}
/* Validate all alternate settings */
......@@ -612,7 +652,7 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
return NULL;
}
endpoint = &interface->endpoint[1];
dbg("found endpoint: addr: 0x%2.2x maxps = 0x%4.4x",
DEBUG(1, "found endpoint: addr: 0x%2.2x maxps = 0x%4.4x",
endpoint->bEndpointAddress, endpoint->wMaxPacketSize);
if (video_ep == 0)
video_ep = endpoint->bEndpointAddress;
......@@ -636,22 +676,20 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
return NULL;
}
} else {
if (i == speed_to_interface[speed]) {
if (i == spd_to_iface[speed]) {
/* This one is the requested one */
actInterface = i;
maxPS = endpoint->wMaxPacketSize;
if (debug > 0) {
info("Selecting requested active setting=%d. maxPS=%d.",
i, maxPS);
}
}
}
if(endpoint->wMaxPacketSize > maxPS)
maxPS = endpoint->wMaxPacketSize;
}
if(actInterface == -1) {
err("Cant find required endpoint");
return NULL;
}
DEBUG(1, "Selecting requested active setting=%d. maxPS=%d.", actInterface, maxPS);
/* Code below may sleep, need to lock module while we are here */
MOD_INC_USE_COUNT;
......@@ -670,26 +708,10 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
}
}
cam->speed = speed;
switch(size) {
case SIZE_160X136:
default:
cam->height = 136;
cam->width = 160;
cam->size = SIZE_160X136;
break;
case SIZE_176X144:
cam->height = 144;
cam->width = 176;
cam->size = SIZE_176X144;
break;
case SIZE_320X240:
cam->height = 240;
cam->width = 320;
cam->size = SIZE_320X240;
break;
}
RESTRICT_TO_RANGE(size, SIZE_160X120, SIZE_320X240);
cam->width = camera_sizes[size].width;
cam->height = camera_sizes[size].height;
cam->size = size;
uvd->flags = 0;
uvd->debug = debug;
......@@ -773,9 +795,9 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_PARM(speed, "i");
MODULE_PARM_DESC(speed, "FPS speed: 0 (slowest) - 6 (fastest)");
MODULE_PARM_DESC(speed, "Initial speed: 0 (slowest) - 6 (fastest)");
MODULE_PARM(size, "i");
MODULE_PARM_DESC(size, "Frame Size 0: 160x136 1: 176x144 2: 320x240");
MODULE_PARM_DESC(size, "Initial Size 0: 160x120 1: 160x136 2: 176x144 3: 320x240");
MODULE_PARM(brightness, "i");
MODULE_PARM_DESC(brightness, "Initial brightness 0 - 108");
MODULE_PARM(contrast, "i");
......@@ -786,7 +808,11 @@ MODULE_PARM(sharpness, "i");
MODULE_PARM_DESC(sharpness, "Initial sharpness 0 - 108");
MODULE_PARM(whitebal, "i");
MODULE_PARM_DESC(whitebal, "Initial white balance 0 - 363");
#ifdef CONFIG_USB_DEBUG
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "Debug level: 0-9 (default=0)");
#endif
module_init(konicawc_init);
module_exit(konicawc_cleanup);
......@@ -41,10 +41,6 @@
#include <linux/ticable.h>
#include "tiglusb.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
# define minor(x) MINOR(x)
#endif
/*
* Version Information
*/
......
// Portions of this file taken from
// Portions of this file taken from
// Petko Manolov - Petkan (petkan@dce.bg)
// from his driver pegasus.c
......@@ -1170,23 +1170,20 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
if (rc) {
// Nope we couldn't find one we liked.
// This device was not meant for us to control.
kfree( ether_dev );
return NULL;
goto error_all;
}
// Now that we FOUND a configuration. let's try to make the
// Now that we FOUND a configuration. let's try to make the
// device go into it.
if ( usb_set_configuration( usb, ether_dev->bConfigurationValue ) ) {
err("usb_set_configuration() failed");
kfree( ether_dev );
return NULL;
goto error_all;
}
// Now set the communication interface up as required.
if (usb_set_interface(usb, ether_dev->comm_bInterfaceNumber, ether_dev->comm_bAlternateSetting)) {
err("usb_set_interface() failed");
kfree( ether_dev );
return NULL;
goto error_all;
}
// Only turn traffic on right now if we must...
......@@ -1194,23 +1191,21 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
// We found an alternate setting for the data
// interface that allows us to turn off traffic.
// We should use it.
if (usb_set_interface( usb,
ether_dev->data_bInterfaceNumber,
if (usb_set_interface( usb,
ether_dev->data_bInterfaceNumber,
ether_dev->data_bAlternateSetting_without_traffic)) {
err("usb_set_interface() failed");
kfree( ether_dev );
return NULL;
goto error_all;
}
} else {
// We didn't find an alternate setting for the data
// interface that would let us turn off traffic.
// Oh well, let's go ahead and do what we must...
if (usb_set_interface( usb,
ether_dev->data_bInterfaceNumber,
if (usb_set_interface( usb,
ether_dev->data_bInterfaceNumber,
ether_dev->data_bAlternateSetting_with_traffic)) {
err("usb_set_interface() failed");
kfree( ether_dev );
return NULL;
goto error_all;
}
}
......@@ -1220,8 +1215,7 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
// Hmm... The kernel is not sharing today...
// Fine, we didn't want it anyway...
err( "Unable to initialize ethernet device" );
kfree( ether_dev );
return NULL;
goto error_all;
}
// Now that we have an ethernet device, let's set it up
......@@ -1241,7 +1235,7 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
// We'll keep track of this information for later...
ether_dev->usb = usb;
ether_dev->net = net;
// and don't forget the MAC address.
set_ethernet_addr( ether_dev );
......@@ -1249,12 +1243,12 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
log_device_info( ether_dev );
// I claim this interface to be a CDC Ethernet Networking device
usb_driver_claim_interface( &CDCEther_driver,
&(usb->config[ether_dev->configuration_num].interface[ether_dev->comm_interface]),
usb_driver_claim_interface( &CDCEther_driver,
&(usb->config[ether_dev->configuration_num].interface[ether_dev->comm_interface]),
ether_dev );
// I claim this interface to be a CDC Ethernet Networking device
usb_driver_claim_interface( &CDCEther_driver,
&(usb->config[ether_dev->configuration_num].interface[ether_dev->data_interface]),
usb_driver_claim_interface( &CDCEther_driver,
&(usb->config[ether_dev->configuration_num].interface[ether_dev->data_interface]),
ether_dev );
// Does this REALLY do anything???
......@@ -1265,6 +1259,14 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
// Okay, we are finally done...
return NULL;
// bailing out with our tail between our knees
error_all:
usb_free_urb(ether_dev->tx_urb);
usb_free_urb(ether_dev->rx_urb);
usb_free_urb(ether_dev->intr_urb);
kfree( ether_dev );
return NULL;
}
......
......@@ -147,7 +147,8 @@ static int queuecommand( Scsi_Cmnd *srb , void (*done)(Scsi_Cmnd *))
srb->host_scribble = (unsigned char *)us;
/* enqueue the command */
BUG_ON(atomic_read(&us->sm_state) != US_STATE_IDLE || us->srb != NULL);
BUG_ON(atomic_read(&us->sm_state) != US_STATE_IDLE);
BUG_ON(us->srb != NULL);
srb->scsi_done = done;
us->srb = srb;
......
......@@ -203,16 +203,9 @@ extern void fill_inquiry_response(struct us_data *us,
/* The scsi_lock() and scsi_unlock() macros protect the sm_state and the
* single queue element srb for write access */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,3)
#define scsi_unlock(host) spin_unlock_irq(host->host_lock)
#define scsi_lock(host) spin_lock_irq(host->host_lock)
#define sg_address(psg) (page_address((psg)->page) + (psg)->offset)
#else
#define scsi_unlock(host) spin_unlock_irq(&io_request_lock)
#define scsi_lock(host) spin_lock_irq(&io_request_lock)
#define sg_address(psg) ((psg)->address)
#endif
#endif
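/* A hedged sketch (not part of this patch) of how the sg_address() helper
 * above gets used: copying data into a scatter-gather request.  Field names
 * (use_sg, request_buffer, length) follow the 2.5 SCSI layer.
 */
static void copy_to_sg (Scsi_Cmnd *srb, const unsigned char *src, unsigned int len)
{
	struct scatterlist *sg = (struct scatterlist *) srb->request_buffer;
	unsigned int i;

	for (i = 0; i < srb->use_sg && len > 0; i++) {
		unsigned int n = sg[i].length;

		if (n > len)
			n = len;
		memcpy (sg_address (&sg[i]), src, n);
		src += n;
		len -= n;
	}
}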
......@@ -734,6 +734,7 @@ extern void usb_deregister_dev(int num_minors, int start_minor);
*/
#define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */
#define USB_ISO_ASAP 0x0002 /* iso-only, urb->start_frame ignored */
#define URB_NO_DMA_MAP 0x0004 /* urb->*_dma are valid on submit */
#define USB_ASYNC_UNLINK 0x0008 /* usb_unlink_urb() returns asap */
#define USB_NO_FSBR 0x0020 /* UHCI-specific */
#define USB_ZERO_PACKET 0x0040 /* Finish bulk OUTs with short packet */
......@@ -771,11 +772,15 @@ typedef void (*usb_complete_t)(struct urb *);
* @transfer_flags: A variety of flags may be used to affect how URB
* submission, unlinking, or operation are handled. Different
* kinds of URB can use different flags.
* @transfer_buffer: For non-iso transfers, this identifies the buffer
* to (or from) which the I/O request will be performed. This
* buffer must be suitable for DMA; allocate it with kmalloc()
* @transfer_buffer: This identifies the buffer to (or from) which
* the I/O request will be performed (unless URB_NO_DMA_MAP is set).
* This buffer must be suitable for DMA; allocate it with kmalloc()
* or equivalent. For transfers to "in" endpoints, contents of
* this buffer will be modified.
* this buffer will be modified. This buffer is used for data
* phases of control transfers.
* @transfer_dma: When transfer_flags includes URB_NO_DMA_MAP, the device
* driver is saying that it provided this DMA address, which the host
* controller driver should use instead of the transfer_buffer.
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
* be broken up into chunks according to the current maximum packet
* size for the endpoint, which is a function of the configuration
......@@ -789,6 +794,11 @@ typedef void (*usb_complete_t)(struct urb *);
* @setup_packet: Only used for control transfers, this points to eight bytes
* of setup data. Control transfers always start by sending this data
* to the device. Then transfer_buffer is read or written, if needed.
* (Not used when URB_NO_DMA_MAP is set.)
* @setup_dma: For control transfers with URB_NO_DMA_MAP set, the device
* driver has provided this DMA address for the setup packet. The
* host controller driver should use it instead of setup_packet.
* If there is a data phase, its buffer is identified by transfer_dma.
* @start_frame: Returns the initial frame for interrupt or isochronous
* transfers.
* @number_of_packets: Lists the number of ISO transfer buffers.
......@@ -811,6 +821,23 @@ typedef void (*usb_complete_t)(struct urb *);
* are submitted using usb_submit_urb(), and pending requests may be canceled
* using usb_unlink_urb().
*
* Data Transfer Buffers:
*
* Normally drivers provide I/O buffers allocated with kmalloc() or otherwise
* taken from the general page pool. That is provided by transfer_buffer
* (control requests also use setup_packet), and host controller drivers
* perform a dma mapping (and unmapping) for each buffer transferred. Those
* mapping operations can be expensive on some platforms (such as when using a dma
* bounce buffer), although they're cheap on commodity x86 and ppc hardware.
*
* Alternatively, drivers may pass the URB_NO_DMA_MAP transfer flag, which
* tells the host controller driver that no such mapping is needed since
* the device driver is DMA-aware. For example, they might allocate a DMA
* buffer with usb_buffer_alloc(), or call usb_buffer_map().
* When this transfer flag is provided, host controller drivers will use the
* dma addresses found in the transfer_dma and/or setup_dma fields rather than
* determing a dma address themselves.
*
* Initialization:
*
* All URBs submitted must initialize dev, pipe,
......@@ -818,10 +845,10 @@ typedef void (*usb_complete_t)(struct urb *);
* The USB_ASYNC_UNLINK transfer flag affects later invocations of
* the usb_unlink_urb() routine.
*
* All non-isochronous URBs must also initialize
* All URBs must also initialize
* transfer_buffer and transfer_buffer_length. They may provide the
* URB_SHORT_NOT_OK transfer flag, indicating that short reads are
* to be treated as errors.
* to be treated as errors; that flag is invalid for write requests.
*
* Bulk URBs may
* use the USB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers
......@@ -896,10 +923,12 @@ struct urb
int status; /* (return) non-ISO status */
unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/
void *transfer_buffer; /* (in) associated data buffer */
dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */
int transfer_buffer_length; /* (in) data buffer length */
int actual_length; /* (return) actual transfer length */
int bandwidth; /* bandwidth for INT/ISO request */
unsigned char *setup_packet; /* (in) setup packet (control only) */
dma_addr_t setup_dma; /* (in) dma addr for setup_packet */
int start_frame; /* (modify) start frame (INT/ISO) */
int number_of_packets; /* (in) number of ISO packets */
int interval; /* (in) transfer interval (INT/ISO) */
......@@ -910,6 +939,8 @@ struct urb
struct usb_iso_packet_descriptor iso_frame_desc[0]; /* (in) ISO ONLY */
};
/* -------------------------------------------------------------------------- */
/**
* usb_fill_control_urb - initializes a control urb
* @urb: pointer to the urb to initialize.
......@@ -1032,6 +1063,16 @@ extern struct urb *usb_get_urb(struct urb *urb);
extern int usb_submit_urb(struct urb *urb, int mem_flags);
extern int usb_unlink_urb(struct urb *urb);
#define HAVE_USB_BUFFERS
void *usb_buffer_alloc (struct usb_device *dev, size_t size,
int mem_flags, dma_addr_t *dma);
void usb_buffer_free (struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
struct urb *usb_buffer_map (struct urb *urb);
void usb_buffer_dmasync (struct urb *urb);
void usb_buffer_unmap (struct urb *urb);
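/* A hedged example (not from this patch) of the URB_NO_DMA_MAP path described
 * above: the driver allocates a DMA-consistent buffer with usb_buffer_alloc(),
 * hands the dma handle to the HCD via urb->transfer_dma, and later releases it
 * with usb_buffer_free().  Pipe, length, and completion values are placeholders.
 */
static int example_submit_premapped (struct usb_device *dev, struct urb *urb,
		unsigned int pipe, size_t len, usb_complete_t complete, void *context)
{
	dma_addr_t dma;
	void *buf;

	buf = usb_buffer_alloc (dev, len, GFP_KERNEL, &dma);
	if (!buf)
		return -ENOMEM;

	urb->dev = dev;
	urb->pipe = pipe;
	urb->transfer_buffer = buf;
	urb->transfer_buffer_length = len;
	urb->transfer_dma = dma;		/* used instead of mapping buf */
	urb->transfer_flags |= URB_NO_DMA_MAP;	/* tell the HCD not to map */
	urb->complete = complete;
	urb->context = context;

	/* the completion handler would eventually call
	 * usb_buffer_free (dev, len, buf, dma) to release the buffer
	 */
	return usb_submit_urb (urb, GFP_KERNEL);
}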
/*-------------------------------------------------------------------*
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
......