Commit e9633ab9 authored Jul 06, 2002 by Greg Kroah-Hartman
USB: removed the usb-uhci-hcd.o driver
parent 873077ea
Showing 7 changed files with 0 additions and 3205 deletions
drivers/usb/host/Makefile          +0  -1
drivers/usb/host/usb-uhci-dbg.c    +0  -151
drivers/usb/host/usb-uhci-hcd.c    +0  -649
drivers/usb/host/usb-uhci-hcd.h    +0  -247
drivers/usb/host/usb-uhci-hub.c    +0  -213
drivers/usb/host/usb-uhci-mem.c    +0  -717
drivers/usb/host/usb-uhci-q.c      +0  -1227
drivers/usb/host/Makefile
@@ -7,7 +7,6 @@ export-objs := usb-ohci.o
  obj-$(CONFIG_USB_EHCI_HCD)      += ehci-hcd.o
  obj-$(CONFIG_USB_OHCI_HCD)      += ohci-hcd.o
- obj-$(CONFIG_USB_UHCI_HCD)      += usb-uhci-hcd.o
  obj-$(CONFIG_USB_UHCI_HCD_ALT)  += uhci-hcd.o
  obj-$(CONFIG_USB_OHCI)          += usb-ohci.o usb-ohci-pci.o
drivers/usb/host/usb-uhci-dbg.c  deleted 100644 → 0
/*
UHCI HCD (Host Controller Driver) for USB, debugging calls
(c) 1999-2002
Georg Acher + Deti Fliegl + Thomas Sailer
georg@acher.org deti@fliegl.de sailer@ife.ee.ethz.ch
$Id: usb-uhci-dbg.c,v 1.2 2002/05/21 21:40:16 acher Exp $
*/
#ifdef DEBUG
static void __attribute__ ((__unused__)) uhci_show_qh (puhci_desc_t qh)
{
    if (qh->type != QH_TYPE) {
        dbg("qh has not QH_TYPE");
        return;
    }
    dbg("QH @ %p/%08llX:", qh, (unsigned long long)qh->dma_addr);

    if (qh->hw.qh.head & UHCI_PTR_TERM)
        dbg(" Head Terminate");
    else
        dbg(" Head: %s @ %08X",
            (qh->hw.qh.head & UHCI_PTR_QH ? "QH" : "TD"),
            qh->hw.qh.head & ~UHCI_PTR_BITS);

    if (qh->hw.qh.element & UHCI_PTR_TERM)
        dbg(" Element Terminate");
    else
        dbg(" Element: %s @ %08X",
            (qh->hw.qh.element & UHCI_PTR_QH ? "QH" : "TD"),
            qh->hw.qh.element & ~UHCI_PTR_BITS);
}
#endif
#if 0
static void uhci_show_td (puhci_desc_t td)
{
char *spid;
switch (td->hw.td.info & 0xff) {
case USB_PID_SETUP:
spid = "SETUP";
break;
case USB_PID_OUT:
spid = " OUT ";
break;
case USB_PID_IN:
spid = " IN ";
break;
default:
spid = " ? ";
break;
}
warn(" TD @ %p/%08X, MaxLen=%02x DT%d EP=%x Dev=%x PID=(%s) buf=%08x",
td, td->dma_addr,
td->hw.td.info >> 21,
((td->hw.td.info >> 19) & 1),
(td->hw.td.info >> 15) & 15,
(td->hw.td.info >> 8) & 127,
spid,
td->hw.td.buffer);
warn(" Len=%02x e%d %s%s%s%s%s%s%s%s%s%s",
td->hw.td.status & 0x7ff,
((td->hw.td.status >> 27) & 3),
(td->hw.td.status & TD_CTRL_SPD) ? "SPD " : "",
(td->hw.td.status & TD_CTRL_LS) ? "LS " : "",
(td->hw.td.status & TD_CTRL_IOC) ? "IOC " : "",
(td->hw.td.status & TD_CTRL_ACTIVE) ? "Active " : "",
(td->hw.td.status & TD_CTRL_STALLED) ? "Stalled " : "",
(td->hw.td.status & TD_CTRL_DBUFERR) ? "DataBufErr " : "",
(td->hw.td.status & TD_CTRL_BABBLE) ? "Babble " : "",
(td->hw.td.status & TD_CTRL_NAK) ? "NAK " : "",
(td->hw.td.status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
(td->hw.td.status & TD_CTRL_BITSTUFF) ? "BitStuff " : ""
);
if (td->hw.td.link & UHCI_PTR_TERM)
warn(" TD Link Terminate");
else
warn(" Link points to %s @ %08x, %s",
(td->hw.td.link & UHCI_PTR_QH?"QH":"TD"),
td->hw.td.link & ~UHCI_PTR_BITS,
(td->hw.td.link & UHCI_PTR_DEPTH ? "Depth first" : "Breadth first"));
}
#endif
#ifdef DEBUG
static void __attribute__ ((__unused__)) uhci_show_sc (int port, unsigned short status)
{
    dbg(" stat%d = %04x %s%s%s%s%s%s%s%s",
        port, status,
        (status & USBPORTSC_SUSP) ? "PortSuspend " : "",
        (status & USBPORTSC_PR) ? "PortReset " : "",
        (status & USBPORTSC_LSDA) ? "LowSpeed " : "",
        (status & USBPORTSC_RD) ? "ResumeDetect " : "",
        (status & USBPORTSC_PEC) ? "EnableChange " : "",
        (status & USBPORTSC_PE) ? "PortEnabled " : "",
        (status & USBPORTSC_CSC) ? "ConnectChange " : "",
        (status & USBPORTSC_CCS) ? "PortConnected " : "");
}

void uhci_show_status (struct uhci_hcd *uhci)
{
    unsigned long io_addr = (unsigned long) uhci->hcd.regs;
    unsigned short usbcmd, usbstat, usbint, usbfrnum;
    unsigned int flbaseadd;
    unsigned char sof;
    unsigned short portsc1, portsc2;

    usbcmd    = inw (io_addr + 0);
    usbstat   = inw (io_addr + 2);
    usbint    = inw (io_addr + 4);
    usbfrnum  = inw (io_addr + 6);
    flbaseadd = inl (io_addr + 8);
    sof       = inb (io_addr + 12);
    portsc1   = inw (io_addr + 16);
    portsc2   = inw (io_addr + 18);

    dbg(" usbcmd = %04x %s%s%s%s%s%s%s%s",
        usbcmd,
        (usbcmd & USBCMD_MAXP) ? "Maxp64 " : "Maxp32 ",
        (usbcmd & USBCMD_CF) ? "CF " : "",
        (usbcmd & USBCMD_SWDBG) ? "SWDBG " : "",
        (usbcmd & USBCMD_FGR) ? "FGR " : "",
        (usbcmd & USBCMD_EGSM) ? "EGSM " : "",
        (usbcmd & USBCMD_GRESET) ? "GRESET " : "",
        (usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
        (usbcmd & USBCMD_RS) ? "RS " : "");

    dbg(" usbstat = %04x %s%s%s%s%s%s",
        usbstat,
        (usbstat & USBSTS_HCH) ? "HCHalted " : "",
        (usbstat & USBSTS_HCPE) ? "HostControllerProcessError " : "",
        (usbstat & USBSTS_HSE) ? "HostSystemError " : "",
        (usbstat & USBSTS_RD) ? "ResumeDetect " : "",
        (usbstat & USBSTS_ERROR) ? "USBError " : "",
        (usbstat & USBSTS_USBINT) ? "USBINT " : "");

    dbg(" usbint = %04x", usbint);
    dbg(" usbfrnum = (%d)%03x", (usbfrnum >> 10) & 1, 0xfff & (4 * (unsigned int)usbfrnum));
    dbg(" flbaseadd = %08x", flbaseadd);
    dbg(" sof = %02x", sof);
    uhci_show_sc (1, portsc1);
    uhci_show_sc (2, portsc2);
}
#endif
drivers/usb/host/usb-uhci-hcd.c  deleted 100644 → 0
/*
UHCI HCD (Host Controller Driver) for USB, main part for HCD frame
(c) 1999-2002
Georg Acher + Deti Fliegl + Thomas Sailer
georg@acher.org deti@fliegl.de sailer@ife.ee.ethz.ch
with the help of
David Brownell, david-b@pacbell.net
Adam Richter, adam@yggdrasil.com
Roman Weissgaerber, weissg@vienna.at
HW-initalization based on material of
Randy Dunlap + Johannes Erdfelt + Gregory P. Smith + Linus Torvalds
$Id: usb-uhci-hcd.c,v 1.3 2002/05/25 16:42:41 acher Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
/* for in_interrupt () */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <linux/usb.h>
#ifdef CONFIG_USB_DEBUG
#define DEBUG
#else
#undef DEBUG
#endif
#include "../core/hcd.h"
#include "usb-uhci-hcd.h"
#define DRIVER_VERSION "$Revision: 1.3 $"
#define DRIVER_AUTHOR "Georg Acher, Deti Fliegl, Thomas Sailer"
#define DRIVER_DESC "USB 1.1 Universal Host Controller Interface driver (HCD)"
/*--------------------------------------------------------------------------*/
/* Values you may tweak with module parameters
*
* high_bw: 1=on (default), 0=off
* Turns on Full Speed Bandwidth Reclamation:
* Feature that puts a loop on the descriptor chain when
* there's some transfer going on. With FSBR, USB performance
* is optimal, but PCI can be slowed down up-to 5 times, slowing down
* system performance (eg. framebuffer devices).
*
* bulk_depth/ctrl_depth: 0=off (default), 1:on
* Puts descriptors for bulk/control transfers in depth-first mode.
* This has somehow similar effect to FSBR (higher speed), but does not
* slow PCI down. OTOH USB performace is slightly slower than
* in FSBR case and single device could hog whole USB, starving
* other devices. Some devices (e.g. STV680-based cameras) NEED this depth
* first search to work properly.
*
* Turning off both high_bw and bulk_depth/ctrl_depth
* will lead to <64KB/sec performance over USB for bulk transfers targeting
* one device's endpoint. You probably do not want to do that.
*/
// Other constants, there's usually no need to change them.
// stop bandwidth reclamation after (roughly) 50ms
#define IDLE_TIMEOUT (HZ/20)
// Suppress HC interrupt error messages for 5s
#define ERROR_SUPPRESSION_TIME (HZ*5)
// HC watchdog
#define WATCHDOG_TIMEOUT (4*HZ)
#define MAX_REANIMATIONS 5
#define DEBUG_SYMBOLS
#ifdef DEBUG_SYMBOLS
#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif
#endif
#define queue_dbg dbg
#define async_dbg dbg
#define init_dbg dbg
/*--------------------------------------------------------------------------*/
// NO serviceable parts below!
/*--------------------------------------------------------------------------*/
/* Can be set by module parameters */
static int high_bw = 1;
static int ctrl_depth = 0;   /* 0: Breadth first, 1: Depth first */
static int bulk_depth = 0;   /* 0: Breadth first, 1: Depth first */

// How much URBs with ->next are walked
#define MAX_NEXT_COUNT 2048

static struct uhci *devs = NULL;

/* used by userspace UHCI data structure dumper */
struct uhci **uhci_devices = &devs;

/* A few prototypes */
static int uhci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb);
static int hc_reset (struct uhci_hcd *uhci);
static void uhci_stop (struct usb_hcd *hcd);
static int process_transfer (struct uhci_hcd *uhci, struct urb *urb, int mode);
static int process_iso (struct uhci_hcd *uhci, struct urb *urb, int mode);
static int process_interrupt (struct uhci_hcd *uhci, struct urb *urb, int mode);
static int process_urb (struct uhci_hcd *uhci, struct list_head *p);
static int uhci_urb_enqueue (struct usb_hcd *hcd, struct urb *urb, int mem_flags);
static int hc_defibrillate (struct uhci_hcd *uhci);
static int hc_irq_run (struct uhci_hcd *uhci);
#include "usb-uhci-dbg.c"
#include "usb-uhci-mem.c"
#include "usb-uhci-hub.c"
#include "usb-uhci-q.c"
#define PIPESTRING(x) (x==PIPE_BULK?"Bulk":(x==PIPE_INTERRUPT?"Interrupt":(x==PIPE_CONTROL?"Control":"Iso")))
/*--------------------------------------------------------------------------*/
static int uhci_urb_enqueue (struct usb_hcd *hcd, struct urb *urb, int mem_flags)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    urb_priv_t *urb_priv;
    int ret = 0, type;
    unsigned long flags;
    struct urb *queued_urb = NULL;
    int bustime;

    type = usb_pipetype (urb->pipe);

    // err("submit_urb: scheduling %p (%s), tb %p, len %i", urb,
    // PIPESTRING(type),urb->transfer_buffer,urb->transfer_buffer_length);

    if (uhci->need_init) {
        if (in_interrupt())
            return -ESHUTDOWN;
        spin_lock_irqsave (&uhci->urb_list_lock, flags);
        ret = hc_defibrillate (uhci);
        spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
        if (ret)
            return ret;
    }

    if (!uhci->running)
        return -ESHUTDOWN;

    spin_lock_irqsave (&uhci->urb_list_lock, flags);

    queued_urb = search_dev_ep (uhci, urb);   // returns already queued urb for that pipe

    if (queued_urb) {
        queue_dbg("found bulk urb %p\n", queued_urb);

        if ((type != PIPE_BULK) ||
            ((type == PIPE_BULK) &&
             (!(urb->transfer_flags & USB_QUEUE_BULK) ||
              !(queued_urb->transfer_flags & USB_QUEUE_BULK)))) {
            spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
            err("ENXIO (%s) %08x, flags %x, urb %p, burb %p, probably device driver bug...",
                PIPESTRING(type),
                urb->pipe, urb->transfer_flags, urb, queued_urb);
            return -ENXIO;   // urb already queued
        }
    }

    urb_priv = uhci_alloc_priv (mem_flags);

    if (!urb_priv) {
        spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
        return -ENOMEM;
    }

    urb->hcpriv = urb_priv;
    urb_priv->urb = urb;
    INIT_LIST_HEAD (&urb_priv->desc_list);

    if (type == PIPE_CONTROL)
        urb_priv->setup_packet_dma = pci_map_single (uhci->uhci_pci, urb->setup_packet,
                                                     sizeof (struct usb_ctrlrequest),
                                                     PCI_DMA_TODEVICE);

    if (urb->transfer_buffer_length)
        urb_priv->transfer_buffer_dma = pci_map_single (uhci->uhci_pci,
                                                        urb->transfer_buffer,
                                                        urb->transfer_buffer_length,
                                                        usb_pipein (urb->pipe)
                                                        ? PCI_DMA_FROMDEVICE
                                                        : PCI_DMA_TODEVICE);

    // for bulk queuing it is essential that interrupts are disabled until submission
    // all other types enable interrupts again
    switch (type) {
    case PIPE_BULK:
        if (queued_urb) {
            while (((urb_priv_t *)queued_urb->hcpriv)->next_queued_urb)   // find last queued bulk
                queued_urb = ((urb_priv_t *)queued_urb->hcpriv)->next_queued_urb;

            ((urb_priv_t *)queued_urb->hcpriv)->next_queued_urb = urb;
        }
        atomic_inc (&uhci->avoid_bulk);
        ret = uhci_submit_bulk_urb (uhci, urb, queued_urb);
        atomic_dec (&uhci->avoid_bulk);
        spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
        break;

    case PIPE_ISOCHRONOUS:
        spin_unlock_irqrestore (&uhci->urb_list_lock, flags);

        if (urb->bandwidth == 0) {   /* not yet checked/allocated */
            bustime = usb_check_bandwidth (urb->dev, urb);
            if (bustime < 0)
                ret = bustime;
            else {
                ret = uhci_submit_iso_urb (uhci, urb, mem_flags);
                if (ret == 0)
                    usb_claim_bandwidth (urb->dev, urb, bustime, 1);
            }
        } else {   /* bandwidth is already set */
            ret = uhci_submit_iso_urb (uhci, urb, mem_flags);
        }
        break;

    case PIPE_INTERRUPT:
        spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
        if (urb->bandwidth == 0) {   /* not yet checked/allocated */
            bustime = usb_check_bandwidth (urb->dev, urb);
            if (bustime < 0)
                ret = bustime;
            else {
                ret = uhci_submit_int_urb (uhci, urb);
                if (ret == 0)
                    usb_claim_bandwidth (urb->dev, urb, bustime, 0);
            }
        } else {   /* bandwidth is already set */
            ret = uhci_submit_int_urb (uhci, urb);
        }
        break;

    case PIPE_CONTROL:
        spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
        ret = uhci_submit_control_urb (uhci, urb);
        break;

    default:
        spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
        ret = -EINVAL;
    }

    // err("submit_urb: scheduled with ret: %d", ret);

    if (ret != 0)
        uhci_free_priv (uhci, urb, urb_priv);

    return ret;
}
/*--------------------------------------------------------------------------*/
static int uhci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
    unsigned long flags = 0;
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    int ret;

    dbg("uhci_urb_dequeue called for %p", urb);

    spin_lock_irqsave (&uhci->urb_list_lock, flags);
    ret = uhci_unlink_urb_async (uhci, urb, UNLINK_ASYNC_STORE_URB);
    spin_unlock_irqrestore (&uhci->urb_list_lock, flags);

    return ret;
}
/*--------------------------------------------------------------------------*/
static int uhci_get_frame (struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    return inw ((int)uhci->hcd.regs + USBFRNUM);
}
/*--------------------------------------------------------------------------*/
// Init and shutdown functions for HW
/*--------------------------------------------------------------------------*/
static int hc_reset (struct uhci_hcd *uhci)
{
    unsigned long io_addr = (unsigned long) uhci->hcd.regs;

    uhci->apm_state = 0;
    uhci->running = 0;
    outw (USBCMD_GRESET, io_addr + USBCMD);
    uhci_wait_ms (50);   /* Global reset for 50ms */
    outw (0, io_addr + USBCMD);
    uhci_wait_ms (10);
    return 0;
}
/*--------------------------------------------------------------------------*/
static int hc_irq_run (struct uhci_hcd *uhci)
{
    unsigned long io_addr = (unsigned long) uhci->hcd.regs;

    /* Turn on all interrupts */
    outw (USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP, io_addr + USBINTR);

    /* Start at frame 0 */
    outw (0, io_addr + USBFRNUM);
    outl (uhci->framelist_dma, io_addr + USBFLBASEADD);

    /* Run and mark it configured with a 64-byte max packet */
    outw (USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
    uhci->apm_state = 1;
    uhci->running = 1;
    uhci->last_hcd_irq = jiffies + 4 * HZ;
    return 0;
}
/*--------------------------------------------------------------------------*/
static int hc_start (struct uhci_hcd *uhci)
{
    unsigned long io_addr = (unsigned long) uhci->hcd.regs;
    int timeout = 10;
    struct usb_device *udev;

    init_dbg("hc_start uhci %p", uhci);
    /*
     * Reset the HC - this will force us to get a
     * new notification of any already connected
     * ports due to the virtual disconnect that it
     * implies.
     */
    outw (USBCMD_HCRESET, io_addr + USBCMD);

    while (inw (io_addr + USBCMD) & USBCMD_HCRESET) {
        if (!--timeout) {
            err("USBCMD_HCRESET timed out!");
            break;
        }
        udelay (1);
    }

    hc_irq_run (uhci);

    /* connect the virtual root hub */
    uhci->hcd.self.root_hub = udev = usb_alloc_dev (NULL, &uhci->hcd.self);
    uhci->hcd.state = USB_STATE_READY;
    if (!udev) {
        uhci->running = 0;
        return -ENOMEM;
    }

    usb_connect (udev);
    udev->speed = USB_SPEED_FULL;

    if (usb_register_root_hub (udev, &uhci->hcd.pdev->dev) != 0) {
        usb_free_dev (udev);
        uhci->running = 0;
        return -ENODEV;
    }

    return 0;
}
/*--------------------------------------------------------------------------*/
// Start up UHCI, find ports, init DMA lists
static int __devinit uhci_start (struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    int ret;
    unsigned long io_addr = (unsigned long) hcd->regs, io_size = 0x20;

    init_dbg("uhci_start hcd %p uhci %p, pdev %p", hcd, uhci, hcd->pdev);

    /* disable legacy emulation, Linux takes over... */
    pci_write_config_word (hcd->pdev, USBLEGSUP, 0);

    /* UHCI specs says devices must have 2 ports, but goes on to say */
    /* they may have more but give no way to determine how many they */
    /* have, so default to 2 */
    /* According to the UHCI spec, Bit 7 is always set to 1. So we try */
    /* to use this to our advantage */

    for (uhci->maxports = 0; uhci->maxports < (io_size - 0x10) / 2; uhci->maxports++) {
        unsigned int portstatus;

        portstatus = inw (io_addr + 0x10 + (uhci->maxports * 2));
        dbg("port %i, adr %x status %x", uhci->maxports,
            io_addr + 0x10 + (uhci->maxports * 2), portstatus);
        if (!(portstatus & 0x0080))
            break;
    }
    warn("Detected %d ports", uhci->maxports);

    if (uhci->maxports < 2 || uhci->maxports > 8) {
        dbg("Port count misdetected, forcing to 2 ports");
        uhci->maxports = 2;
    }

    ret = init_skel (uhci);
    if (ret)
        return ret;

    hc_reset (uhci);

    if (hc_start (uhci) < 0) {
        err ("can't start %s", uhci->hcd.self.bus_name);
        uhci_stop (hcd);
        return -EBUSY;
    }

    // Enable PIRQ
    pci_write_config_word (hcd->pdev, USBLEGSUP, USBLEGSUP_DEFAULT);

    set_td_ioc (uhci->td128ms);   // start watchdog interrupt
    uhci->last_hcd_irq = jiffies + 5 * HZ;

    return 0;
}
/*--------------------------------------------------------------------------*/
static void uhci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
{
    dbg("uhci_free_config for dev %p", udev);
    uhci_unlink_urbs (hcd_to_uhci (hcd), udev, 0);   // Forced unlink of remaining URBs
}
/*--------------------------------------------------------------------------*/
static void uhci_stop (struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);

    init_dbg("%s: stop controller", hcd->bus_name);

    uhci->running = 0;
    hc_reset (uhci);
    wait_ms (1);
    uhci_unlink_urbs (uhci, 0, CLEAN_FORCED);    // Forced unlink of remaining URBs
    uhci_cleanup_unlink (uhci, CLEAN_FORCED);    // force cleanup of async killed URBs
    cleanup_skel (uhci);
}
/*--------------------------------------------------------------------------*/
// UHCI INTERRUPT PROCESSING
/*--------------------------------------------------------------------------*/
static void uhci_irq (struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    unsigned long io_addr = (unsigned long) hcd->regs;
    unsigned short status;
    struct list_head *p, *p2;
    int restarts, work_done;

    /*
     * Read the interrupt status, and write it back to clear the
     * interrupt cause
     */
    status = inw (io_addr + USBSTS);

    if (!status)   /* shared interrupt, not mine */
        return;

    dbg("interrupt");

    uhci->last_hcd_irq = jiffies;   // for watchdog

    if (status != 1) {
        // Avoid too much error messages at a time
        if (time_after (jiffies, uhci->last_error_time + ERROR_SUPPRESSION_TIME)) {
            warn("interrupt, status %x, frame# %i", status,
                 UHCI_GET_CURRENT_FRAME (uhci));
            uhci->last_error_time = jiffies;
        }

        // remove host controller halted state
        if ((status & 0x20) && (uhci->running)) {
            err("Host controller halted, waiting for timeout.");
            // outw (USBCMD_RS | inw(io_addr + USBCMD), io_addr + USBCMD);
        }
        //uhci_show_status (s);
    }
    /*
     * traverse the list in *reverse* direction, because new entries
     * may be added at the end.
     * also, because process_urb may unlink the current urb,
     * we need to advance the list before
     * New: check for max. workload and restart count
     */
    spin_lock (&uhci->urb_list_lock);

    restarts = 0;
    work_done = 0;

restart:
    uhci->unlink_urb_done = 0;
    p = uhci->urb_list.prev;

    while (p != &uhci->urb_list && (work_done < 1024)) {
        p2 = p;
        p = p->prev;

        process_urb (uhci, p2);

        work_done++;

        if (uhci->unlink_urb_done) {
            uhci->unlink_urb_done = 0;
            restarts++;

            if (restarts < 16)   // avoid endless restarts
                goto restart;
            else
                break;
        }
    }

    if (time_after (jiffies, uhci->timeout_check + (HZ / 30)))
        uhci_check_timeouts (uhci);

    clean_descs (uhci, CLEAN_NOT_FORCED);
    uhci_cleanup_unlink (uhci, CLEAN_NOT_FORCED);
    uhci_switch_timer_int (uhci);

    spin_unlock (&uhci->urb_list_lock);

    outw (status, io_addr + USBSTS);

    //dbg("uhci_interrupt: done");
}
/*--------------------------------------------------------------------------*/
// POWER MANAGEMENT
#ifdef CONFIG_PM
static int uhci_suspend (struct usb_hcd *hcd, u32 state)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    hc_reset (uhci);
    return 0;
}
/*--------------------------------------------------------------------------*/
static int uhci_resume (struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    hc_start (uhci);
    return 0;
}
#endif
/*--------------------------------------------------------------------------*/
static const char hcd_name[] = "usb-uhci-hcd";

static const struct hc_driver uhci_driver = {
    description:        hcd_name,

    // generic hardware linkage
    irq:                uhci_irq,
    flags:              HCD_USB11,

    // basic lifecycle operations
    start:              uhci_start,
#ifdef CONFIG_PM
    suspend:            uhci_suspend,
    resume:             uhci_resume,
#endif
    stop:               uhci_stop,

    // memory lifecycle (except per-request)
    hcd_alloc:          uhci_hcd_alloc,
    hcd_free:           uhci_hcd_free,

    // managing i/o requests and associated device resources
    urb_enqueue:        uhci_urb_enqueue,
    urb_dequeue:        uhci_urb_dequeue,
    free_config:        uhci_free_config,

    // scheduling support
    get_frame_number:   uhci_get_frame,

    // root hub support
    hub_status_data:    uhci_hub_status_data,
    hub_control:        uhci_hub_control,
};

#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC

MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_DESCRIPTION (DRIVER_INFO);
MODULE_LICENSE ("GPL");

MODULE_PARM (high_bw, "i");
MODULE_PARM_DESC (high_bw, "high_hw: Enable high bandwidth mode, 1=on (default), 0=off");
MODULE_PARM (bulk_depth, "i");
MODULE_PARM_DESC (bulk_depth, "bulk_depth: Depth first processing for bulk transfers, 0=off (default), 1=on");
MODULE_PARM (ctrl_depth, "i");
MODULE_PARM_DESC (ctrl_depth, "ctrl_depth: Depth first processing for control transfers, 0=off (default), 1=on");

static const struct pci_device_id __devinitdata pci_ids [] = { {
    /* handle any USB UHCI controller */
    class:        (PCI_CLASS_SERIAL_USB << 8) | 0x00,
    class_mask:   ~0,
    driver_data:  (unsigned long) &uhci_driver,

    /* no matter who makes it */
    vendor:       PCI_ANY_ID,
    device:       PCI_ANY_ID,
    subvendor:    PCI_ANY_ID,
    subdevice:    PCI_ANY_ID,
}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE (pci, pci_ids);

/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver uhci_pci_driver = {
    name:      (char *) hcd_name,
    id_table:  pci_ids,

    probe:     usb_hcd_pci_probe,
    remove:    usb_hcd_pci_remove,

#ifdef CONFIG_PM
    suspend:   usb_hcd_pci_suspend,
    resume:    usb_hcd_pci_resume,
#endif
};
/*-------------------------------------------------------------------------*/
static int __init uhci_hcd_init (void)
{
    init_dbg (DRIVER_INFO);
    init_dbg ("block sizes: hq %d td %d", sizeof (struct qh), sizeof (struct td));
    info ("High bandwidth mode %s.%s%s",
          high_bw ? "enabled" : "disabled",
          ctrl_depth ? "CTRL depth first enabled" : "",
          bulk_depth ? "BULK depth first enabled" : "");
    return pci_module_init (&uhci_pci_driver);
}

static void __exit uhci_hcd_cleanup (void)
{
    pci_unregister_driver (&uhci_pci_driver);
}

module_init (uhci_hcd_init);
module_exit (uhci_hcd_cleanup);
drivers/usb/host/usb-uhci-hcd.h  deleted 100644 → 0
#ifndef __LINUX_USB_UHCI_H
#define __LINUX_USB_UHCI_H
/* $Id: usb-uhci-hcd.h,v 1.1 2002/05/14 20:36:57 acher Exp $ */
#ifndef CONFIG_PCI
#error "UHCI needs the CONFIG_PCI option!"
#endif
#define MODNAME "usb-uhci-hcd"
#define UHCI_LATENCY_TIMER 0
static __inline__ void uhci_wait_ms (unsigned int ms)
{
    if (!in_interrupt ()) {
        current->state = TASK_UNINTERRUPTIBLE;
        schedule_timeout (1 + ms * HZ / 1000);
    }
    else
        mdelay (ms);
}
/* Command register */
#define USBCMD 0
#define USBCMD_RS 0x0001
/* Run/Stop */
#define USBCMD_HCRESET 0x0002
/* Host reset */
#define USBCMD_GRESET 0x0004
/* Global reset */
#define USBCMD_EGSM 0x0008
/* Global Suspend Mode */
#define USBCMD_FGR 0x0010
/* Force Global Resume */
#define USBCMD_SWDBG 0x0020
/* SW Debug mode */
#define USBCMD_CF 0x0040
/* Config Flag (sw only) */
#define USBCMD_MAXP 0x0080
/* Max Packet (0 = 32, 1 = 64) */
/* Status register */
#define USBSTS 2
#define USBSTS_USBINT 0x0001
/* Interrupt due to IOC */
#define USBSTS_ERROR 0x0002
/* Interrupt due to error */
#define USBSTS_RD 0x0004
/* Resume Detect */
#define USBSTS_HSE 0x0008
/* Host System Error - basically PCI problems */
#define USBSTS_HCPE 0x0010
/* Host Controller Process Error - the scripts were buggy */
#define USBSTS_HCH 0x0020
/* HC Halted */
/* Interrupt enable register */
#define USBINTR 4
#define USBINTR_TIMEOUT 0x0001
/* Timeout/CRC error enable */
#define USBINTR_RESUME 0x0002
/* Resume interrupt enable */
#define USBINTR_IOC 0x0004
/* Interrupt On Complete enable */
#define USBINTR_SP 0x0008
/* Short packet interrupt enable */
#define USBFRNUM 6
#define USBFLBASEADD 8
#define USBSOF 12
/* USB port status and control registers */
#define USBPORTSC1 16
#define USBPORTSC2 18
#define USBPORTSC_CCS 0x0001
/* Current Connect Status ("device present") */
#define USBPORTSC_CSC 0x0002
/* Connect Status Change */
#define USBPORTSC_PE 0x0004
/* Port Enable */
#define USBPORTSC_PEC 0x0008
/* Port Enable Change */
#define USBPORTSC_LS 0x0030
/* Line Status */
#define USBPORTSC_RD 0x0040
/* Resume Detect */
#define USBPORTSC_LSDA 0x0100
/* Low Speed Device Attached */
#define USBPORTSC_PR 0x0200
/* Port Reset */
#define USBPORTSC_SUSP 0x1000
/* Suspend */
/* Legacy support register */
#define USBLEGSUP 0xc0
#define USBLEGSUP_DEFAULT 0x2000
/* only PIRQ enable set */
#define UHCI_NULL_DATA_SIZE 0x7ff
/* for UHCI controller TD */
#define UHCI_PID 0xff
/* PID MASK */
#define UHCI_PTR_BITS 0x000F
#define UHCI_PTR_TERM 0x0001
#define UHCI_PTR_QH 0x0002
#define UHCI_PTR_DEPTH 0x0004
#define UHCI_NUMFRAMES 1024
/* in the frame list [array] */
#define UHCI_MAX_SOF_NUMBER 2047
/* in an SOF packet */
#define CAN_SCHEDULE_FRAMES 1000
/* how far future frames can be scheduled */
/* for TD <status> */
#define TD_CTRL_SPD (1 << 29)
/* Short Packet Detect */
#define TD_CTRL_C_ERR_MASK (3 << 27)
/* Error Counter bits */
#define TD_CTRL_LS (1 << 26)
/* Low Speed Device */
#define TD_CTRL_IOS (1 << 25)
/* Isochronous Select */
#define TD_CTRL_IOC (1 << 24)
/* Interrupt on Complete */
#define TD_CTRL_ACTIVE (1 << 23)
/* TD Active */
#define TD_CTRL_STALLED (1 << 22)
/* TD Stalled */
#define TD_CTRL_DBUFERR (1 << 21)
/* Data Buffer Error */
#define TD_CTRL_BABBLE (1 << 20)
/* Babble Detected */
#define TD_CTRL_NAK (1 << 19)
/* NAK Received */
#define TD_CTRL_CRCTIMEO (1 << 18)
/* CRC/Time Out Error */
#define TD_CTRL_BITSTUFF (1 << 17)
/* Bit Stuff Error */
#define TD_CTRL_ACTLEN_MASK 0x7ff
/* actual length, encoded as n - 1 */
#define TD_CTRL_ANY_ERROR (TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
TD_CTRL_BABBLE | TD_CTRL_CRCTIME | TD_CTRL_BITSTUFF)
#define uhci_status_bits(ctrl_sts) (ctrl_sts & 0xFE0000)
#define uhci_actual_length(desc) ((le32_to_cpu(desc->hw.td.status) + 1) & TD_CTRL_ACTLEN_MASK)
/* 1-based */
/* for TD <flags>: */
#define UHCI_TD_REMOVE 0x0001
/* Remove when done */
/* for TD <info>: (a.k.a. Token) */
#define TD_TOKEN_TOGGLE 19
#define uhci_maxlen(token) ((token) >> 21)
#define uhci_toggle(token) (((token) >> TD_TOKEN_TOGGLE) & 1)
#define uhci_endpoint(token) (((token) >> 15) & 0xf)
#define uhci_devaddr(token) (((token) >> 8) & 0x7f)
#define uhci_devep(token) (((token) >> 8) & 0x7ff)
#define uhci_packetid(token) ((token) & 0xff)
#define uhci_packetout(token) (uhci_packetid(token) != USB_PID_IN)
#define uhci_packetin(token) (uhci_packetid(token) == USB_PID_IN)
#define uhci_do_toggle(urb) usb_dotoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe))
#define uhci_get_toggle(urb) usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe))
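Editorial aside (not part of the deleted header): the accessors above unpack the 32-bit TD token word, which carries the maximum length (encoded as n-1), data toggle, endpoint, device address and packet ID. A minimal user-space sketch, with the relevant macros copied from this header and a hypothetical token value, shows the decoding:

#include <stdio.h>

/* copied from usb-uhci-hcd.h above */
#define TD_TOKEN_TOGGLE        19
#define uhci_maxlen(token)     ((token) >> 21)
#define uhci_toggle(token)     (((token) >> TD_TOKEN_TOGGLE) & 1)
#define uhci_endpoint(token)   (((token) >> 15) & 0xf)
#define uhci_devaddr(token)    (((token) >> 8) & 0x7f)
#define uhci_packetid(token)   ((token) & 0xff)

int main(void)
{
    /* hypothetical token: 64-byte IN packet (PID 0x69) to device 3, endpoint 1, toggle 1 */
    unsigned int token = (63u << 21) | (1u << TD_TOKEN_TOGGLE) | (1u << 15) | (3u << 8) | 0x69;

    printf("maxlen=%u (encoded as n-1)\n", uhci_maxlen(token));
    printf("toggle=%u endpoint=%u devaddr=%u pid=0x%02x\n",
           uhci_toggle(token), uhci_endpoint(token),
           uhci_devaddr(token), uhci_packetid(token));
    return 0;
}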
/* ------------------------------------------------------------------------------------
TD/QH-structures
------------------------------------------------------------------------------------ */
typedef enum {
    TD_TYPE,
    QH_TYPE
} uhci_desc_type_t;

typedef struct {
    __u32 link;
    __u32 status;
    __u32 info;
    __u32 buffer;
} uhci_td_t, *puhci_td_t;

typedef struct {
    __u32 head;
    __u32 element;   /* Queue element pointer */
} uhci_qh_t, *puhci_qh_t;

typedef struct {
    union {
        uhci_td_t td;
        uhci_qh_t qh;
    } hw;
    uhci_desc_type_t type;
    dma_addr_t dma_addr;
    struct list_head horizontal;
    struct list_head vertical;
    struct list_head desc_list;
    int last_used;
} uhci_desc_t, *puhci_desc_t;

typedef struct {
    struct list_head desc_list;     // list pointer to all corresponding TDs/QHs associated with this request
    struct list_head urb_list;
    struct urb *urb;                // urb to which this data belongs
    dma_addr_t setup_packet_dma;
    dma_addr_t transfer_buffer_dma;
    unsigned long started;
    struct urb *next_queued_urb;    // next queued urb for this EP
    struct urb *prev_queued_urb;
    uhci_desc_t *bottom_qh;
    uhci_desc_t *next_qh;           // next helper QH
    char use_loop;
    char flags;
} urb_priv_t, *purb_priv_t;

struct uhci_hcd {
    unsigned int maxports;
    int running;
    int apm_state;

    struct uhci_hcd *next;          // chain of uhci device contexts

    spinlock_t urb_list_lock;       // lock to keep consistency
    spinlock_t qh_lock;
    spinlock_t td_lock;
    atomic_t avoid_bulk;

    __u32 *framelist;
    dma_addr_t framelist_dma;
    uhci_desc_t **iso_td;
    uhci_desc_t *int_chain[8];
    uhci_desc_t *ls_control_chain;
    uhci_desc_t *control_chain;
    uhci_desc_t *bulk_chain;
    uhci_desc_t *chain_end;
    uhci_desc_t *td1ms;
    uhci_desc_t *td32ms;
    uhci_desc_t *td128ms;

    struct list_head urb_list;      // list of all pending urbs

    struct list_head free_desc_qh;  // Cool down pool for QH
    struct list_head free_desc_td;  // Cool down pool for ISO/INT-TDs

    struct list_head urb_unlinked;  // list of all unlinked urbs

    struct pci_dev *uhci_pci;
    struct pci_pool *desc_pool;

    int unlink_urb_done;
    int loop_usage;                 // URBs using bandwidth reclamation

    long timeout_check;
    int timeout_urbs;

    long last_error_time;           // last error output in uhci_interrupt()
    long last_hcd_irq;
    int reanimations;
    int need_init;

    // Framework state
    struct usb_hcd hcd;
};
#define hcd_to_uhci(hcd_ptr) list_entry(hcd_ptr, struct uhci_hcd, hcd)
#define MAKE_TD_ADDR(a) ((a)->dma_addr&~UHCI_PTR_QH)
#define MAKE_QH_ADDR(a) ((a)->dma_addr|UHCI_PTR_QH)
#define UHCI_GET_CURRENT_FRAME(uhci) (inw ((int)(uhci->hcd.regs) + USBFRNUM))
#define CLEAN_TRANSFER_NO_DELETION 0
#define CLEAN_TRANSFER_REGULAR 1
#define CLEAN_TRANSFER_DELETION_MARK 2
#define CLEAN_NOT_FORCED 0
#define CLEAN_FORCED 1
#define PROCESS_ISO_REGULAR 0
#define PROCESS_ISO_FORCE 1
#define PROCESS_INT_REGULAR 0
#define PROCESS_INT_REMOVE 1
#define UNLINK_ASYNC_STORE_URB 0
#define UNLINK_ASYNC_DONT_STORE 1
#define is_td_active(desc) (desc->hw.td.status & cpu_to_le32(TD_CTRL_ACTIVE))
#define set_qh_head(desc,val) (desc)->hw.qh.head=cpu_to_le32(val)
#define set_qh_element(desc,val) (desc)->hw.qh.element=cpu_to_le32(val)
#define set_td_link(desc,val) (desc)->hw.td.link=cpu_to_le32(val)
#define set_td_ioc(desc) (desc)->hw.td.status |= cpu_to_le32(TD_CTRL_IOC)
#define clr_td_ioc(desc) (desc)->hw.td.status &= cpu_to_le32(~TD_CTRL_IOC)
#endif
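Editorial aside on the hcd_to_uhci() macro above (not part of the deleted header): struct uhci_hcd embeds the generic struct usb_hcd as its last member, and the macro uses list_entry() (the kernel's container_of idiom) to step back from a pointer to the embedded member to the containing structure. A minimal user-space sketch of the same pointer arithmetic, with simplified stand-in types:

#include <stdio.h>
#include <stddef.h>

/* stand-ins for the kernel types; only the embedding relationship matters */
struct usb_hcd  { int state; };
struct uhci_hcd { int maxports; struct usb_hcd hcd; };

/* same idea as the list_entry()-based hcd_to_uhci() macro */
#define hcd_to_uhci(ptr) \
    ((struct uhci_hcd *)((char *)(ptr) - offsetof(struct uhci_hcd, hcd)))

int main(void)
{
    struct uhci_hcd uhci = { .maxports = 2 };
    struct usb_hcd *hcd = &uhci.hcd;   /* what the HCD framework passes around */

    /* recover the driver-private container from the framework pointer */
    printf("maxports = %d\n", hcd_to_uhci(hcd)->maxports);
    return 0;
}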
drivers/usb/host/usb-uhci-hub.c  deleted 100644 → 0
/*
UHCI HCD (Host Controller Driver) for USB, UHCI Root Hub
(c) 1999-2002
Georg Acher + Deti Fliegl + Thomas Sailer
georg@acher.org deti@fliegl.de sailer@ife.ee.ethz.ch
with the help of
David Brownell, david-b@pacbell.net
Adam Richter, adam@yggdrasil.com
Roman Weissgaerber, weissg@vienna.at
HW-initalization based on material of
Randy Dunlap + Johannes Erdfelt + Gregory P. Smith + Linus Torvalds
$Id: usb-uhci-hub.c,v 1.2 2002/05/21 21:40:16 acher Exp $
*/
#define CLR_RH_PORTSTAT(x) \
status = inw(io_addr+USBPORTSC1+2*(wIndex-1)); \
status = (status & 0xfff5) & ~(x); \
outw(status, io_addr+USBPORTSC1+2*(wIndex-1))
#define SET_RH_PORTSTAT(x) \
status = inw(io_addr+USBPORTSC1+2*(wIndex-1)); \
status = (status & 0xfff5) | (x); \
outw(status, io_addr+USBPORTSC1+2*(wIndex-1))
static int oldval = -1;
/* build "status change" packet (one or two bytes) from HC registers
Since uhci_hub_status_data is called by a SW timer, it is also used
for monitoring HC health */
static int uhci_hub_status_data (struct usb_hcd *hcd, char *buf)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    unsigned long io_addr = (unsigned long) uhci->hcd.regs;
    int i, len = 0, data = 0, portstate;
    int changed = 0;

    for (i = 0; i < uhci->maxports; i++) {
        portstate = inw (io_addr + USBPORTSC1 + i * 2);
#if 0
        if (i==0 && (portstate&0xf) != (oldval&0xf))
            err("Port %i: %x", i+1, portstate);
#endif
        if (i == 0)
            oldval = portstate;
        if ((portstate & 0xa) > 0) {
            changed = 1;
        }
        data |= ((portstate & 0xa) > 0 ? (1 << (i + 1)) : 0);
        len = (i + 1) / 8 + 1;
    }

    *(__u16 *)buf = cpu_to_le16 (data);

    // Watchdog
    if (uhci->running && time_after (jiffies, uhci->last_hcd_irq + WATCHDOG_TIMEOUT)) {
        if (uhci->reanimations > MAX_REANIMATIONS) {
            err("He's dead, Jim. Giving up reanimating the UHCI host controller.\n"
                "Maybe a real module reload helps...");
            uhci->running = 0;
        }
        else {
            uhci->running = 0;
            uhci->need_init = 1;   // init done in the next submit_urb
        }
    }
    return changed ? len : 0;
}
/*-------------------------------------------------------------------------*/
static void uhci_hub_descriptor (struct uhci_hcd *uhci, struct usb_hub_descriptor *desc)
{
    int ports = uhci->maxports;
    u16 temp;

    desc->bDescriptorType = 0x29;
    desc->bPwrOn2PwrGood = 1;
    desc->bHubContrCurrent = 0;
    desc->bNbrPorts = ports;
    temp = 1 + (ports / 8);
    desc->bDescLength = 7 + 2 * temp;
    desc->wHubCharacteristics = 0;
    desc->bitmap[0] = 0;
    desc->bitmap[1] = 0xff;
}
/*-------------------------------------------------------------------------*/
static int uhci_hub_control (struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
                             char *buf, u16 wLength)
{
    struct uhci_hcd *uhci = hcd_to_uhci (hcd);
    int status = 0;
    int stat = 0;
    int cstatus;
    unsigned long io_addr = (unsigned long) uhci->hcd.regs;
    int ports = uhci->maxports;

    switch (typeReq) {

    case ClearHubFeature:
        break;

    case ClearPortFeature:
        if (!wIndex || wIndex > ports)
            goto error;

        switch (wValue) {
        case USB_PORT_FEAT_ENABLE:
            CLR_RH_PORTSTAT (USBPORTSC_PE);
            break;
        case USB_PORT_FEAT_C_ENABLE:
            SET_RH_PORTSTAT (USBPORTSC_PEC);
            break;
        case USB_PORT_FEAT_SUSPEND:
            CLR_RH_PORTSTAT (USBPORTSC_SUSP);
            break;
        case USB_PORT_FEAT_C_SUSPEND:
            /*** WR_RH_PORTSTAT(RH_PS_PSSC); */
            break;
        case USB_PORT_FEAT_POWER:
            break;   /* port power ** */
        case USB_PORT_FEAT_C_CONNECTION:
            SET_RH_PORTSTAT (USBPORTSC_CSC);
            break;
        case USB_PORT_FEAT_C_OVER_CURRENT:
            break;   /* port power over current ** */
        case USB_PORT_FEAT_C_RESET:
            break;
        default:
            goto error;
        }
        break;

    case GetHubDescriptor:
        uhci_hub_descriptor (uhci, (struct usb_hub_descriptor *) buf);
        break;

    case GetHubStatus:
        *(u32 *)buf = cpu_to_le32 (0);
        break;

    case GetPortStatus:
        if (!wIndex || wIndex > ports)
            goto error;

        status = inw (io_addr + USBPORTSC1 + 2 * (wIndex - 1));

        cstatus = ((status & USBPORTSC_CSC) >> (1 - 0)) |
                  ((status & USBPORTSC_PEC) >> (3 - 1));

        status = (status & USBPORTSC_CCS) |
                 ((status & USBPORTSC_PE) >> (2 - 1)) |
                 ((status & USBPORTSC_SUSP) >> (12 - 2)) |
                 ((status & USBPORTSC_PR) >> (9 - 4)) |
                 (1 << 8) |   /* power on ** */
                 ((status & USBPORTSC_LSDA) << (-8 + 9));

        *(__u16 *)buf = cpu_to_le16 (status);
        *(__u16 *)(buf + 2) = cpu_to_le16 (cstatus);
        break;

    case SetHubFeature:
        // FIXME
        break;

    case SetPortFeature:
        if (!wIndex || wIndex > ports)
            goto error;

        switch (wValue) {
        case USB_PORT_FEAT_SUSPEND:
            SET_RH_PORTSTAT (USBPORTSC_SUSP);
            break;
        case USB_PORT_FEAT_RESET:
            SET_RH_PORTSTAT (USBPORTSC_PR);
            uhci_wait_ms (10);
            CLR_RH_PORTSTAT (USBPORTSC_PR);
            udelay (10);
            SET_RH_PORTSTAT (USBPORTSC_PE);
            uhci_wait_ms (10);
            SET_RH_PORTSTAT (0xa);
            break;
        case USB_PORT_FEAT_POWER:
            break;   /* port power ** */
        case USB_PORT_FEAT_ENABLE:
            SET_RH_PORTSTAT (USBPORTSC_PE);
            break;
        default:
            goto error;
        }
        break;

    default:
error:
        stat = -EPIPE;
    }

    dbg("Root-Hub stat port1: %x port2: %x",
        inw (io_addr + USBPORTSC1), inw (io_addr + USBPORTSC2));
    return stat;
}
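Editorial aside: the GetPortStatus branch above translates the raw UHCI PORTSC register layout into the bit positions the USB hub class expects in wPortStatus/wPortChange. A small user-space sketch of the same shifts (constants copied from usb-uhci-hcd.h, the sample register value is made up):

#include <stdio.h>

/* copied from usb-uhci-hcd.h */
#define USBPORTSC_CCS   0x0001   /* Current Connect Status */
#define USBPORTSC_CSC   0x0002   /* Connect Status Change */
#define USBPORTSC_PE    0x0004   /* Port Enable */
#define USBPORTSC_PEC   0x0008   /* Port Enable Change */
#define USBPORTSC_LSDA  0x0100   /* Low Speed Device Attached */
#define USBPORTSC_PR    0x0200   /* Port Reset */
#define USBPORTSC_SUSP  0x1000   /* Suspend */

int main(void)
{
    /* hypothetical PORTSC: device connected, port enabled, connect-change pending */
    unsigned int portsc = USBPORTSC_CCS | USBPORTSC_PE | USBPORTSC_CSC;

    /* same remapping as the GetPortStatus case in uhci_hub_control() */
    unsigned int wPortChange =
        ((portsc & USBPORTSC_CSC) >> (1 - 0)) |
        ((portsc & USBPORTSC_PEC) >> (3 - 1));
    unsigned int wPortStatus =
        (portsc & USBPORTSC_CCS) |
        ((portsc & USBPORTSC_PE) >> (2 - 1)) |
        ((portsc & USBPORTSC_SUSP) >> (12 - 2)) |
        ((portsc & USBPORTSC_PR) >> (9 - 4)) |
        (1 << 8) |                            /* port power always reported on */
        ((portsc & USBPORTSC_LSDA) << (-8 + 9));

    printf("wPortStatus=0x%04x wPortChange=0x%04x\n", wPortStatus, wPortChange);
    return 0;
}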
drivers/usb/host/usb-uhci-mem.c  deleted 100644 → 0
/*
UHCI HCD (Host Controller Driver) for USB
UHCI memory allocation and basic descriptor handling
(c) 1999-2002
Georg Acher + Deti Fliegl + Thomas Sailer
georg@acher.org deti@fliegl.de sailer@ife.ee.ethz.ch
with the help of
David Brownell, david-b@pacbell.net
Adam Richter, adam@yggdrasil.com
Roman Weissgaerber, weissg@vienna.at
HW-initalization based on material of
Randy Dunlap + Johannes Erdfelt + Gregory P. Smith + Linus Torvalds
$Id: usb-uhci-mem.c,v 1.3 2002/05/25 16:42:41 acher Exp $
*/
/*###########################################################################*/
// UHCI STRUCTURE
/*###########################################################################*/
static struct usb_hcd *uhci_hcd_alloc (void)
{
    struct uhci_hcd *uhci;
    int len;

    len = sizeof (struct uhci_hcd);
    uhci = (struct uhci_hcd *) kmalloc (len, GFP_KERNEL);
    if (uhci == 0)
        return NULL;

    memset (uhci, 0, len);
    init_dbg("uhci @ %p, hcd @ %p", uhci, &(uhci->hcd));

    INIT_LIST_HEAD (&uhci->free_desc_qh);
    INIT_LIST_HEAD (&uhci->free_desc_td);
    INIT_LIST_HEAD (&uhci->urb_list);
    INIT_LIST_HEAD (&uhci->urb_unlinked);
    spin_lock_init (&uhci->urb_list_lock);
    spin_lock_init (&uhci->qh_lock);
    spin_lock_init (&uhci->td_lock);
    atomic_set (&uhci->avoid_bulk, 0);

    return &(uhci->hcd);
}
/*-------------------------------------------------------------------*/
static void uhci_hcd_free (struct usb_hcd *hcd)
{
    kfree (hcd_to_uhci (hcd));
}
/*###########################################################################*/
// DMA/PCI CONSISTENCY
/*###########################################################################*/
static void uhci_urb_dma_sync (struct uhci_hcd *uhci, struct urb *urb, urb_priv_t *urb_priv)
{
    if (urb_priv->setup_packet_dma)
        pci_dma_sync_single (uhci->uhci_pci, urb_priv->setup_packet_dma,
                             sizeof (struct usb_ctrlrequest), PCI_DMA_TODEVICE);

    if (urb_priv->transfer_buffer_dma)
        pci_dma_sync_single (uhci->uhci_pci, urb_priv->transfer_buffer_dma,
                             urb->transfer_buffer_length,
                             usb_pipein (urb->pipe) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
}
/*-------------------------------------------------------------------*/
static void uhci_urb_dma_unmap (struct uhci_hcd *uhci, struct urb *urb, urb_priv_t *urb_priv)
{
    if (urb_priv->setup_packet_dma) {
        pci_unmap_single (uhci->uhci_pci, urb_priv->setup_packet_dma,
                          sizeof (struct usb_ctrlrequest), PCI_DMA_TODEVICE);
        urb_priv->setup_packet_dma = 0;
    }

    if (urb_priv->transfer_buffer_dma) {
        pci_unmap_single (uhci->uhci_pci, urb_priv->transfer_buffer_dma,
                          urb->transfer_buffer_length,
                          usb_pipein (urb->pipe) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
        urb_priv->transfer_buffer_dma = 0;
    }
}
/*###########################################################################*/
// TRANSFER DESCRIPTORS (TD)
/*###########################################################################*/
static void fill_td (uhci_desc_t *td, int status, int info, __u32 buffer)
{
    td->hw.td.status = cpu_to_le32 (status);
    td->hw.td.info = cpu_to_le32 (info);
    td->hw.td.buffer = cpu_to_le32 (buffer);
}
/*-------------------------------------------------------------------*/
static int alloc_td (struct uhci_hcd *uhci, uhci_desc_t **new, int flags)
{
    dma_addr_t dma_handle;

    *new = pci_pool_alloc (uhci->desc_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
    if (!*new)
        return -ENOMEM;
    memset (*new, 0, sizeof (uhci_desc_t));
    (*new)->dma_addr = dma_handle;
    set_td_link ((*new), UHCI_PTR_TERM | (flags & UHCI_PTR_BITS));   // last by default
    (*new)->type = TD_TYPE;
    mb ();
    INIT_LIST_HEAD (&(*new)->vertical);
    INIT_LIST_HEAD (&(*new)->horizontal);

    return 0;
}
/*-------------------------------------------------------------------*/
/* insert td at last position in td-list of qh (vertical) */
static int insert_td (struct uhci_hcd *uhci, uhci_desc_t *qh, uhci_desc_t *new, int flags)
{
    uhci_desc_t *prev;
    unsigned long cpuflags;

    spin_lock_irqsave (&uhci->td_lock, cpuflags);

    list_add_tail (&new->vertical, &qh->vertical);

    prev = list_entry (new->vertical.prev, uhci_desc_t, vertical);

    if (qh == prev) {
        // virgin qh without any tds
        set_qh_element (qh, new->dma_addr | UHCI_PTR_TERM);
    }
    else {
        // already tds inserted, implicitely remove TERM bit of prev
        set_td_link (prev, new->dma_addr | (flags & UHCI_PTR_DEPTH));
    }
    mb ();
    spin_unlock_irqrestore (&uhci->td_lock, cpuflags);

    return 0;
}
/*-------------------------------------------------------------------*/
/* insert new_td after td (horizontal) */
static int insert_td_horizontal (struct uhci_hcd *uhci, uhci_desc_t *td, uhci_desc_t *new)
{
    uhci_desc_t *next;
    unsigned long flags;

    spin_lock_irqsave (&uhci->td_lock, flags);

    next = list_entry (td->horizontal.next, uhci_desc_t, horizontal);
    list_add (&new->horizontal, &td->horizontal);
    new->hw.td.link = td->hw.td.link;
    mb ();
    set_td_link (td, new->dma_addr);
    mb ();
    spin_unlock_irqrestore (&uhci->td_lock, flags);

    return 0;
}
/*-------------------------------------------------------------------*/
static int unlink_td (struct uhci_hcd *uhci, uhci_desc_t *element, int phys_unlink)
{
    uhci_desc_t *next, *prev;
    int dir = 0;
    unsigned long flags;

    spin_lock_irqsave (&uhci->td_lock, flags);

    next = list_entry (element->vertical.next, uhci_desc_t, vertical);

    if (next == element) {
        dir = 1;
        prev = list_entry (element->horizontal.prev, uhci_desc_t, horizontal);
    }
    else
        prev = list_entry (element->vertical.prev, uhci_desc_t, vertical);

    if (phys_unlink) {
        // really remove HW linking
        if (prev->type == TD_TYPE) {
            prev->hw.td.link = element->hw.td.link;
        }
        else
            prev->hw.qh.element = element->hw.td.link;
    }

    mb ();

    if (dir == 0)
        list_del (&element->vertical);
    else
        list_del (&element->horizontal);

    spin_unlock_irqrestore (&uhci->td_lock, flags);

    return 0;
}
/*###########################################################################*/
// QUEUE HEADS (QH)
/*###########################################################################*/
// Allocates qh element
static int alloc_qh (struct uhci_hcd *uhci, uhci_desc_t **new)
{
    dma_addr_t dma_handle;

    *new = pci_pool_alloc (uhci->desc_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
    if (!*new)
        return -ENOMEM;
    memset (*new, 0, sizeof (uhci_desc_t));
    (*new)->dma_addr = dma_handle;
    set_qh_head (*new, UHCI_PTR_TERM);
    set_qh_element (*new, UHCI_PTR_TERM);
    (*new)->type = QH_TYPE;

    mb ();
    INIT_LIST_HEAD (&(*new)->horizontal);
    INIT_LIST_HEAD (&(*new)->vertical);

    dbg("Allocated qh @ %p", *new);

    return 0;
}
/*-------------------------------------------------------------------*/
// inserts new qh before/after the qh at pos
// flags: 0: insert before pos, 1: insert after pos (for low speed transfers)
static int insert_qh (struct uhci_hcd *uhci, uhci_desc_t *pos, uhci_desc_t *new, int order)
{
    uhci_desc_t *old;
    unsigned long flags;

    spin_lock_irqsave (&uhci->qh_lock, flags);

    if (!order) {
        // (OLD) (POS) -> (OLD) (NEW) (POS)
        old = list_entry (pos->horizontal.prev, uhci_desc_t, horizontal);
        list_add_tail (&new->horizontal, &pos->horizontal);
        set_qh_head (new, MAKE_QH_ADDR (pos));
        mb ();
        if (!(old->hw.qh.head & cpu_to_le32 (UHCI_PTR_TERM)))
            set_qh_head (old, MAKE_QH_ADDR (new));
    }
    else {
        // (POS) (OLD) -> (POS) (NEW) (OLD)
        old = list_entry (pos->horizontal.next, uhci_desc_t, horizontal);
        list_add (&new->horizontal, &pos->horizontal);
        set_qh_head (new, MAKE_QH_ADDR (old));
        mb ();
        set_qh_head (pos, MAKE_QH_ADDR (new));
    }
    mb ();
    spin_unlock_irqrestore (&uhci->qh_lock, flags);

    return 0;
}
/*-------------------------------------------------------------------*/
// append a qh to td.link physically, the SW linkage is not affected
static void append_qh (struct uhci_hcd *uhci, uhci_desc_t *td, uhci_desc_t *qh, int flags)
{
    unsigned long cpuflags;

    spin_lock_irqsave (&uhci->td_lock, cpuflags);

    set_td_link (td, qh->dma_addr | (flags & UHCI_PTR_DEPTH) | UHCI_PTR_QH);
    mb ();
    spin_unlock_irqrestore (&uhci->td_lock, cpuflags);
}
/*-------------------------------------------------------------------*/
static int unlink_qh (struct uhci_hcd *uhci, uhci_desc_t *element)
{
    uhci_desc_t *prev;
    unsigned long flags;
    __u32 old_head;

    spin_lock_irqsave (&uhci->qh_lock, flags);

    prev = list_entry (element->horizontal.prev, uhci_desc_t, horizontal);
    old_head = element->hw.qh.head;
    element->hw.qh.head = UHCI_PTR_TERM;
    mb ();
    prev->hw.qh.head = old_head;

    dbg("unlink qh %p, pqh %p, nxqh %p, to %08x", element, prev,
        list_entry (element->horizontal.next, uhci_desc_t, horizontal),
        le32_to_cpu (element->hw.qh.head) & ~15);

    list_del (&element->horizontal);

    mb ();
    spin_unlock_irqrestore (&uhci->qh_lock, flags);

    return 0;
}
/*-------------------------------------------------------------------*/
static int delete_desc (struct uhci_hcd *uhci, uhci_desc_t *element)
{
    pci_pool_free (uhci->desc_pool, element, element->dma_addr);
    return 0;
}
/*-------------------------------------------------------------------*/
static int delete_qh (struct uhci_hcd *uhci, uhci_desc_t *qh)
{
    uhci_desc_t *td;
    struct list_head *p;
    int n = 0;

    list_del (&qh->horizontal);

    while ((p = qh->vertical.next) != &qh->vertical && n < 10000) {
        td = list_entry (p, uhci_desc_t, vertical);
        dbg("unlink td @ %p", td);
        unlink_td (uhci, td, 0);   // no physical unlink
        delete_desc (uhci, td);
        n++;
    }

    // never trust any software, not even your own...
    if (n >= 10000)
        err("delete_qh: Garbage in QH list, giving up");

    delete_desc (uhci, qh);

    return 0;
}
/*###########################################################################*/
// DESCRIPTOR CHAINING HELPERS
/*###########################################################################*/
static void clean_td_chain (struct uhci_hcd *uhci, uhci_desc_t *td)
{
    struct list_head *p;
    uhci_desc_t *td1;

    if (!td)
        return;

    while ((p = td->horizontal.next) != &td->horizontal) {
        td1 = list_entry (p, uhci_desc_t, horizontal);
        delete_desc (uhci, td1);
    }

    delete_desc (uhci, td);
}
/*-------------------------------------------------------------------*/
// Cleans up collected QHs/TDs, but not more than 100 in one go
void clean_descs (struct uhci_hcd *uhci, int force)
{
    struct list_head *q;
    uhci_desc_t *qh, *td;
    int now = UHCI_GET_CURRENT_FRAME (uhci), n = 0;

    q = uhci->free_desc_qh.prev;

    while (q != &uhci->free_desc_qh && (force || n < 100)) {
        qh = list_entry (q, uhci_desc_t, horizontal);
        q = qh->horizontal.prev;

        if ((qh->last_used != now) || force) {
            delete_qh (uhci, qh);
        }
        n++;
    }

    q = uhci->free_desc_td.prev;
    n = 0;

    while (q != &uhci->free_desc_td && (force || n < 100)) {
        td = list_entry (q, uhci_desc_t, horizontal);
        q = td->horizontal.prev;

        if (((td->last_used != now) && (td->last_used + 1 != now)) || force) {
            list_del (&td->horizontal);
            delete_desc (uhci, td);
        }
        n++;
    }
}
/*-------------------------------------------------------------------*/
static void uhci_switch_timer_int (struct uhci_hcd *uhci)
{
    if (!list_empty (&uhci->urb_unlinked))
        set_td_ioc (uhci->td1ms);
    else
        clr_td_ioc (uhci->td1ms);

    if (uhci->timeout_urbs)
        set_td_ioc (uhci->td32ms);
    else
        clr_td_ioc (uhci->td32ms);

    wmb ();
}
/*-------------------------------------------------------------------*/
static void enable_desc_loop (struct uhci_hcd *uhci, struct urb *urb)
{
    unsigned long flags;

    if (urb->transfer_flags & USB_NO_FSBR)
        return;

    spin_lock_irqsave (&uhci->qh_lock, flags);
    uhci->chain_end->hw.qh.head &= cpu_to_le32 (~UHCI_PTR_TERM);
    mb ();
    uhci->loop_usage++;
    ((urb_priv_t *)urb->hcpriv)->use_loop = 1;
    spin_unlock_irqrestore (&uhci->qh_lock, flags);
}
/*-------------------------------------------------------------------*/
static void disable_desc_loop (struct uhci_hcd *uhci, struct urb *urb)
{
    unsigned long flags;

    if (urb->transfer_flags & USB_NO_FSBR)
        return;

    spin_lock_irqsave (&uhci->qh_lock, flags);
    if (((urb_priv_t *)urb->hcpriv)->use_loop) {
        uhci->loop_usage--;

        if (!uhci->loop_usage) {
            uhci->chain_end->hw.qh.head |= cpu_to_le32 (UHCI_PTR_TERM);
            mb ();
        }
        ((urb_priv_t *)urb->hcpriv)->use_loop = 0;
    }
    spin_unlock_irqrestore (&uhci->qh_lock, flags);
}
/*-------------------------------------------------------------------*/
static void queue_urb_unlocked (struct uhci_hcd *uhci, struct urb *urb)
{
    urb_priv_t *priv = (urb_priv_t *)urb->hcpriv;
    int type;

    type = usb_pipetype (urb->pipe);

    if (high_bw && ((type == PIPE_BULK) || (type == PIPE_CONTROL)))
        enable_desc_loop (uhci, urb);

    urb->status = -EINPROGRESS;
    priv->started = jiffies;

    list_add (&priv->urb_list, &uhci->urb_list);
    if (urb->timeout)
        uhci->timeout_urbs++;
    uhci_switch_timer_int (uhci);
}
/*-------------------------------------------------------------------*/
static void queue_urb (struct uhci_hcd *uhci, struct urb *urb)
{
    unsigned long flags = 0;

    spin_lock_irqsave (&uhci->urb_list_lock, flags);
    queue_urb_unlocked (uhci, urb);
    spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
}
/*-------------------------------------------------------------------*/
static void dequeue_urb (struct uhci_hcd *uhci, struct urb *urb)
{
    urb_priv_t *priv = (urb_priv_t *)urb->hcpriv;
    int type;

    dbg("dequeue URB %p", urb);

    type = usb_pipetype (urb->pipe);

    if (high_bw && ((type == PIPE_BULK) || (type == PIPE_CONTROL)))
        disable_desc_loop (uhci, urb);

    list_del (&priv->urb_list);

    if (urb->timeout && uhci->timeout_urbs)
        uhci->timeout_urbs--;
}
/*###########################################################################*/
// INIT/FREE FRAME LAYOUT IN MEMORY
/*###########################################################################*/
// Removes ALL qhs in chain (paranoia!)
static void cleanup_skel (struct uhci_hcd *uhci)
{
    unsigned int n;
    uhci_desc_t *td;

    dbg("cleanup_skel");

    clean_descs (uhci, 1);

    dbg("clean_descs done");

    if (uhci->td32ms) {
        unlink_td (uhci, uhci->td32ms, 1);
        delete_desc (uhci, uhci->td32ms);
    }

    if (uhci->td128ms) {
        unlink_td (uhci, uhci->td128ms, 1);
        delete_desc (uhci, uhci->td128ms);
    }

    for (n = 0; n < 8; n++) {
        td = uhci->int_chain[n];
        clean_td_chain (uhci, td);
    }

    if (uhci->iso_td) {
        for (n = 0; n < 1024; n++) {
            td = uhci->iso_td[n];
            clean_td_chain (uhci, td);
        }
        kfree (uhci->iso_td);
    }

    if (uhci->framelist)
        pci_free_consistent (uhci->uhci_pci, PAGE_SIZE,
                             uhci->framelist, uhci->framelist_dma);

    if (uhci->control_chain) {
        // completed init_skel?
        struct list_head *p;
        uhci_desc_t *qh, *qh1;

        qh = uhci->control_chain;
        while ((p = qh->horizontal.next) != &qh->horizontal) {
            qh1 = list_entry (p, uhci_desc_t, horizontal);
            delete_qh (uhci, qh1);
        }

        delete_qh (uhci, qh);
    }
    else {
        if (uhci->ls_control_chain)
            delete_desc (uhci, uhci->ls_control_chain);
        if (uhci->control_chain)
            delete_desc (uhci, uhci->control_chain);
        if (uhci->bulk_chain)
            delete_desc (uhci, uhci->bulk_chain);
        if (uhci->chain_end)
            delete_desc (uhci, uhci->chain_end);
    }

    if (uhci->desc_pool) {
        pci_pool_destroy (uhci->desc_pool);
        uhci->desc_pool = NULL;
    }

    uhci->ls_control_chain = NULL;
    uhci->control_chain = NULL;
    uhci->bulk_chain = NULL;
    uhci->chain_end = NULL;

    for (n = 0; n < 8; n++)
        uhci->int_chain[n] = NULL;

    dbg("cleanup_skel finished");
}
/*-------------------------------------------------------------------*/
// allocates framelist and qh-skeletons
// only HW-links provide continous linking, SW-links stay in their domain (ISO/INT)
static int init_skel (struct uhci_hcd *uhci)
{
    int n, ret;
    uhci_desc_t *qh, *td;

    init_dbg("init_skel");

    uhci->framelist = pci_alloc_consistent(uhci->uhci_pci, PAGE_SIZE,
                                           &uhci->framelist_dma);
    if (!uhci->framelist)
        return -ENOMEM;

    memset(uhci->framelist, 0, 4096);

    init_dbg("creating descriptor pci_pool");

    uhci->desc_pool = pci_pool_create("uhci_desc", uhci->uhci_pci,
                                      sizeof(uhci_desc_t), 16, 0,
                                      GFP_DMA | GFP_ATOMIC);
    if (!uhci->desc_pool)
        goto init_skel_cleanup;

    init_dbg("allocating iso desc pointer list");
    uhci->iso_td = (uhci_desc_t **) kmalloc(1024 * sizeof(uhci_desc_t *), GFP_KERNEL);
    if (!uhci->iso_td)
        goto init_skel_cleanup;

    uhci->ls_control_chain = NULL;
    uhci->control_chain = NULL;
    uhci->bulk_chain = NULL;
    uhci->chain_end = NULL;

    for (n = 0; n < 8; n++)
        uhci->int_chain[n] = NULL;

    init_dbg("allocating iso descs");
    for (n = 0; n < 1024; n++) {
        // allocate skeleton iso/irq-tds
        if (alloc_td(uhci, &td, 0))
            goto init_skel_cleanup;

        uhci->iso_td[n] = td;
        uhci->framelist[n] = cpu_to_le32((__u32) td->dma_addr);
    }

    init_dbg("allocating qh: chain_end");
    if (alloc_qh(uhci, &qh))
        goto init_skel_cleanup;

    uhci->chain_end = qh;

    if (alloc_td(uhci, &td, 0))
        goto init_skel_cleanup;

    fill_td(td, 0 * TD_CTRL_IOC, 0, 0);    // generate 1ms interrupt (enabled on demand)
    insert_td(uhci, qh, td, 0);
    qh->hw.qh.element &= cpu_to_le32(~UHCI_PTR_TERM);    // remove TERM bit
    uhci->td1ms = td;

    dbg("allocating qh: bulk_chain");
    if (alloc_qh(uhci, &qh))
        goto init_skel_cleanup;

    insert_qh(uhci, uhci->chain_end, qh, 0);
    uhci->bulk_chain = qh;

    dbg("allocating qh: control_chain");
    if ((ret = alloc_qh(uhci, &qh)))
        goto init_skel_cleanup;

    insert_qh(uhci, uhci->bulk_chain, qh, 0);
    uhci->control_chain = qh;

    // disabled reclamation loop
    if (high_bw)
        set_qh_head(uhci->chain_end,
                    uhci->control_chain->dma_addr | UHCI_PTR_QH | UHCI_PTR_TERM);

    init_dbg("allocating qh: ls_control_chain");
    if (alloc_qh(uhci, &qh))
        goto init_skel_cleanup;

    insert_qh(uhci, uhci->control_chain, qh, 0);
    uhci->ls_control_chain = qh;

    init_dbg("allocating skeleton INT-TDs");

    for (n = 0; n < 8; n++) {
        uhci_desc_t *td;

        if (alloc_td(uhci, &td, 0))
            goto init_skel_cleanup;

        uhci->int_chain[n] = td;
        if (n == 0) {
            set_td_link(uhci->int_chain[0],
                        uhci->ls_control_chain->dma_addr | UHCI_PTR_QH);
        }
        else {
            set_td_link(uhci->int_chain[n], uhci->int_chain[0]->dma_addr);
        }
    }

    init_dbg("Linking skeleton INT-TDs");

    for (n = 0; n < 1024; n++) {
        // link all iso-tds to the interrupt chains
        int m, o;
        dbg("framelist[%i]=%x", n, le32_to_cpu(uhci->framelist[n]));
        if ((n & 127) == 127)
            ((uhci_desc_t *) uhci->iso_td[n])->hw.td.link =
                cpu_to_le32(uhci->int_chain[0]->dma_addr);
        else
            for (o = 1, m = 2; m <= 128; o++, m += m)
                if ((n & (m - 1)) == ((m - 1) / 2))
                    set_td_link(((uhci_desc_t *) uhci->iso_td[n]),
                                uhci->int_chain[o]->dma_addr);
    }

    if (alloc_td(uhci, &td, 0))
        goto init_skel_cleanup;

    fill_td(td, 0 * TD_CTRL_IOC, 0, 0);    // generate 32ms interrupt (activated later)
    uhci->td32ms = td;
    insert_td_horizontal(uhci, uhci->int_chain[5], td);

    if (alloc_td(uhci, &td, 0))
        goto init_skel_cleanup;

    fill_td(td, 0 * TD_CTRL_IOC, 0, 0);    // generate 128ms interrupt (activated later)
    uhci->td128ms = td;
    insert_td_horizontal(uhci, uhci->int_chain[7], td);

    mb();
    init_dbg("init_skel exit");
    return 0;

init_skel_cleanup:
    cleanup_skel(uhci);
    return -ENOMEM;
}
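The linking loop above spreads the 1024 frame-list entries over the eight skeleton interrupt chains, so that int_chain[o] is reached every 2^o frames (milliseconds). A minimal standalone sketch of that mapping, not part of the removed driver and with an illustrative helper name:

/* Illustrative helper, not from the original file: returns which skeleton
 * interrupt chain frame n is linked to by the loop in init_skel(). A frame
 * with exactly k trailing one-bits lands in chain k+1; frames whose low
 * seven bits are all ones point straight at int_chain[0] (the 1ms head). */
static int frame_to_int_chain(int n)
{
    int o, m;

    if ((n & 127) == 127)
        return 0;
    for (o = 1, m = 2; m <= 128; o++, m += m)
        if ((n & (m - 1)) == ((m - 1) / 2))
            return o;
    return 0;    /* not reached for 0 <= n < 1024 */
}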
/*###########################################################################*/
// UHCI PRIVATE DATA
/*###########################################################################*/
urb_priv_t *uhci_alloc_priv (int mem_flags)
{
    urb_priv_t *p;

#ifdef DEBUG_SLAB
    p = kmem_cache_alloc(urb_priv_kmem, SLAB_FLAG);
#else
    p = kmalloc(sizeof(urb_priv_t), mem_flags);
#endif
    if (p) {
        memset(p, 0, sizeof(urb_priv_t));
        INIT_LIST_HEAD(&p->urb_list);
    }
    return p;
}
/*-------------------------------------------------------------------*/
void uhci_free_priv (struct uhci_hcd *uhci, struct urb *urb, urb_priv_t *p)
{
    uhci_urb_dma_unmap(uhci, urb, p);

#ifdef DEBUG_SLAB
    err("free_priv %p", p);
    kmem_cache_free(urb_priv_kmem, p);
#else
    kfree(p);
#endif
    urb->hcpriv = NULL;
}
drivers/usb/host/usb-uhci-q.c
deleted
100644 → 0
View file @
873077ea
/*
UHCI HCD (Host Controller Driver) for USB, UHCI transfer processing
(c) 1999-2002
Georg Acher + Deti Fliegl + Thomas Sailer
georg@acher.org deti@fliegl.de sailer@ife.ee.ethz.ch
with the help of
David Brownell, david-b@pacbell.net
Adam Richter, adam@yggdrasil.com
Roman Weissgaerber, weissg@vienna.at
HW-initialization based on material of
Randy Dunlap + Johannes Erdfelt + Gregory P. Smith + Linus Torvalds
$Id: usb-uhci-q.c,v 1.3 2002/05/25 16:42:41 acher Exp $
*/
/*-------------------------------------------------------------------*/
static inline void finish_urb (struct uhci_hcd *uhci, struct urb *urb)
{
    if (urb->hcpriv)
        uhci_free_priv(uhci, urb, urb->hcpriv);

    usb_hcd_giveback_urb(&uhci->hcd, urb);
}
/*###########################################################################*/
// URB SUBMISSION STUFF
// assembles QHs and TDs for control, bulk, interrupt and isochronous
/*###########################################################################*/
// returns: 0 (no transfer queued), urb* (this urb already queued)
static struct urb *search_dev_ep (struct uhci_hcd *uhci, struct urb *urb)
{
    struct list_head *p;
    struct urb *tmp;
    urb_priv_t *priv;
    unsigned int mask = usb_pipecontrol(urb->pipe) ? (~USB_DIR_IN) : (~0);

    p = uhci->urb_list.next;

    for (; p != &uhci->urb_list; p = p->next) {
        priv = list_entry(p, urb_priv_t, urb_list);
        tmp = priv->urb;
        dbg("search_dev_ep urb: %p", tmp);
        // we can accept this urb if it is not queued at this time
        // or if non-iso transfer requests should be scheduled for the same device and pipe
        if ((!usb_pipeisoc(urb->pipe) && (tmp->dev == urb->dev) &&
             !((tmp->pipe ^ urb->pipe) & mask)) || (urb == tmp))
            return tmp;    // found another urb already queued for processing
    }

    return 0;
}
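The mask in search_dev_ep() deserves a note: for control endpoints the direction bit is ignored when comparing pipe handles, since SETUP, IN and OUT packets of one control transfer all travel through the same queue. A hedged restatement of that comparison on its own (the helper name is made up, only usb_pipecontrol() and USB_DIR_IN are real):

/* Not in the original source; only restates the pipe comparison used above.
 * Two pipe handles address the same queue if they differ at most in the
 * direction bit, and only so for control endpoints. */
static int pipes_share_queue(unsigned int a, unsigned int b)
{
    unsigned int mask = usb_pipecontrol(a) ? (unsigned int) ~USB_DIR_IN : ~0U;

    return !((a ^ b) & mask);
}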
/*-------------------------------------------------------------------*/
static int uhci_submit_control_urb (struct uhci_hcd *uhci, struct urb *urb)
{
    uhci_desc_t *qh, *td;
    urb_priv_t *urb_priv = urb->hcpriv;
    unsigned long destination, status;
    int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
    int depth_first = ctrl_depth;    // UHCI descriptor chasing method
    unsigned long len;
    char *data;

    // err("uhci_submit_control start, buf %p", urb->transfer_buffer);
    if (alloc_qh(uhci, &qh))    // alloc qh for this request
        return -ENOMEM;

    if (alloc_td(uhci, &td, UHCI_PTR_DEPTH * depth_first))    // get td for setup stage
        goto fail_unmap_enomem;

    /* The "pipe" thing contains the destination in bits 8--18 */
    destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

    status = TD_CTRL_ACTIVE | (urb->transfer_flags & USB_DISABLE_SPD ? 0 : TD_CTRL_SPD) | (3 << 27);    /* 3 errors */

    if (urb->dev->speed == USB_SPEED_LOW)
        status |= TD_CTRL_LS;

    /* Build the TD for the control request, try forever, 8 bytes of data */
    fill_td(td, status, destination | (7 << 21), urb_priv->setup_packet_dma);

    insert_td(uhci, qh, td, 0);    // queue 'setup stage'-td in qh
#if 0
    {
        char *sp=urb->setup_packet;
        dbg("SETUP to pipe %x: %x %x %x %x %x %x %x %x", urb->pipe,
            sp[0],sp[1],sp[2],sp[3],sp[4],sp[5],sp[6],sp[7]);
    }
    //uhci_show_td(td);
#endif

    len = urb->transfer_buffer_length;
    data = urb->transfer_buffer;

    /* If direction is "send", change the frame from SETUP (0x2D)
       to OUT (0xE1). Else change it from SETUP to IN (0x69). */
    destination = (urb->pipe & PIPE_DEVEP_MASK) | (usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN);

    while (len > 0) {
        int pktsze = len;

        if (alloc_td(uhci, &td, UHCI_PTR_DEPTH * depth_first))
            goto fail_unmap_enomem;

        if (pktsze > maxsze)
            pktsze = maxsze;

        destination ^= 1 << TD_TOKEN_TOGGLE;    // toggle DATA0/1

        // Status, pktsze bytes of data
        fill_td(td, status, destination | ((pktsze - 1) << 21),
                urb_priv->transfer_buffer_dma + (data - (char *) urb->transfer_buffer));

        insert_td(uhci, qh, td, UHCI_PTR_DEPTH * depth_first);    // queue 'data stage'-td in qh

        data += pktsze;
        len -= pktsze;
    }

    /* Build the final TD for control status
       It's only IN if the pipe is out AND we aren't expecting data */
    destination &= ~UHCI_PID;

    if (usb_pipeout(urb->pipe) || (urb->transfer_buffer_length == 0))
        destination |= USB_PID_IN;
    else
        destination |= USB_PID_OUT;

    destination |= 1 << TD_TOKEN_TOGGLE;    /* End in Data1 */

    if (alloc_td(uhci, &td, UHCI_PTR_DEPTH))
        goto fail_unmap_enomem;

    status &= ~TD_CTRL_SPD;

    /* no limit on errors on final packet, 0 bytes of data */
    fill_td(td, status | TD_CTRL_IOC, destination | (UHCI_NULL_DATA_SIZE << 21), 0);

    insert_td(uhci, qh, td, UHCI_PTR_DEPTH * depth_first);    // queue status td

    list_add(&qh->desc_list, &urb_priv->desc_list);

    queue_urb(uhci, urb);    // queue _before_ inserting in desc chain

    qh->hw.qh.element &= cpu_to_le32(~UHCI_PTR_TERM);

    /* Start it up... put low speed first */
    if (urb->dev->speed == USB_SPEED_LOW)
        insert_qh(uhci, uhci->control_chain, qh, 0);
    else
        insert_qh(uhci, uhci->bulk_chain, qh, 0);

    return 0;

fail_unmap_enomem:
    delete_qh(uhci, qh);
    return -ENOMEM;
}
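Throughout these submit paths the UHCI token's MaxLen field encodes the packet length minus one in bits 21..31, with 0x7ff (UHCI_NULL_DATA_SIZE) standing for a zero-length packet; that is why the setup TD above carries (7 << 21) for its fixed 8-byte payload. A small hedged pair of helpers spelling this out, not part of the driver itself:

/* Illustrative only; the driver open-codes this in its fill_td() calls
 * and in process_transfer() below. */
static inline unsigned int uhci_maxlen_encode(unsigned int len)
{
    return ((len - 1) & 0x7ff) << 21;    /* 0 bytes -> 0x7ff, 8 bytes -> 7 */
}

static inline unsigned int uhci_maxlen_decode(unsigned int info)
{
    return (((info >> 21) & 0x7ff) + 1) & 0x7ff;    /* inverse; 0x7ff -> 0 */
}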
/*-------------------------------------------------------------------*/
// For queued bulk transfers, two additional QH helpers are allocated (nqh, bqh)
// Due to the linking with other bulk urbs, it has to be locked with urb_list_lock!
static int uhci_submit_bulk_urb (struct uhci_hcd *uhci, struct urb *urb, struct urb *bulk_urb)
{
    urb_priv_t *urb_priv = urb->hcpriv, *upriv, *bpriv = NULL;
    uhci_desc_t *qh, *td, *nqh = NULL, *bqh = NULL, *first_td = NULL;
    unsigned long destination, status;
    char *data;
    unsigned int pipe = urb->pipe;
    int maxsze = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
    int info, len, last;
    int depth_first = bulk_depth;    // UHCI descriptor chasing method

    if (usb_endpoint_halted(urb->dev, usb_pipeendpoint(pipe), usb_pipeout(pipe)))
        return -EPIPE;

    queue_dbg("uhci_submit_bulk_urb: urb %p, old %p, pipe %08x, len %i",
              urb, bulk_urb, urb->pipe, urb->transfer_buffer_length);

    upriv = (urb_priv_t *) urb->hcpriv;

    if (!bulk_urb) {
        if (alloc_qh(uhci, &qh))    // get qh for this request
            return -ENOMEM;

        if (urb->transfer_flags & USB_QUEUE_BULK) {
            if (alloc_qh(uhci, &nqh))    // placeholder for clean unlink
                goto fail_unmap_enomem;

            upriv->next_qh = nqh;
            queue_dbg("new next qh %p", nqh);
        }
    }
    else {
        bpriv = (urb_priv_t *) bulk_urb->hcpriv;
        qh = bpriv->bottom_qh;    // re-use bottom qh and next qh
        nqh = bpriv->next_qh;
        upriv->next_qh = nqh;
        upriv->prev_queued_urb = bulk_urb;
    }

    if (urb->transfer_flags & USB_QUEUE_BULK) {
        if (alloc_qh(uhci, &bqh))    // "bottom" QH
            goto fail_unmap_enomem;

        set_qh_element(bqh, UHCI_PTR_TERM);
        set_qh_head(bqh, nqh->dma_addr | UHCI_PTR_QH);    // element
        upriv->bottom_qh = bqh;
    }
    queue_dbg("uhci_submit_bulk: qh %p bqh %p nqh %p", qh, bqh, nqh);

    /* The "pipe" thing contains the destination in bits 8--18. */
    destination = (pipe & PIPE_DEVEP_MASK) | usb_packetid(pipe);

    status = TD_CTRL_ACTIVE | ((urb->transfer_flags & USB_DISABLE_SPD) ? 0 : TD_CTRL_SPD) | (3 << 27);    /* 3 errors */

    if (urb->dev->speed == USB_SPEED_LOW)
        status |= TD_CTRL_LS;

    /* Build the TDs for the bulk request */
    len = urb->transfer_buffer_length;
    data = urb->transfer_buffer;

    do {    // TBD: Really allow zero-length packets?
        int pktsze = len;

        if (alloc_td(uhci, &td, UHCI_PTR_DEPTH * depth_first))
            goto fail_unmap_enomem;

        if (pktsze > maxsze)
            pktsze = maxsze;

        // pktsze bytes of data
        info = destination | (((pktsze - 1) & UHCI_NULL_DATA_SIZE) << 21) |
            (uhci_get_toggle(urb) << TD_TOKEN_TOGGLE);

        fill_td(td, status, info,
                urb_priv->transfer_buffer_dma + (data - (char *) urb->transfer_buffer));

        data += pktsze;
        len -= pktsze;

        // Use USB_ZERO_PACKET to finish bulk OUTs always with a zero length packet
        last = (len == 0 && (usb_pipein(pipe) || pktsze < maxsze ||
                             !(urb->transfer_flags & USB_ZERO_PACKET)));

        if (last)
            set_td_ioc(td);    // last one generates INT

        insert_td(uhci, qh, td, UHCI_PTR_DEPTH * depth_first);
        if (!first_td)
            first_td = td;

        uhci_do_toggle(urb);

    } while (!last);

    if (bulk_urb && bpriv)    // everything went OK, link with old bulk URB
        bpriv->next_queued_urb = urb;

    list_add(&qh->desc_list, &urb_priv->desc_list);

    if (urb->transfer_flags & USB_QUEUE_BULK)
        append_qh(uhci, td, bqh, UHCI_PTR_DEPTH * depth_first);

    queue_urb_unlocked(uhci, urb);

    if (urb->transfer_flags & USB_QUEUE_BULK)
        set_qh_element(qh, first_td->dma_addr);
    else
        qh->hw.qh.element &= cpu_to_le32(~UHCI_PTR_TERM);    // arm QH

    if (!bulk_urb) {    // new bulk queue
        if (urb->transfer_flags & USB_QUEUE_BULK) {
            spin_lock(&uhci->td_lock);    // both QHs in one go
            insert_qh(uhci, uhci->chain_end, qh, 0);    // Main QH
            insert_qh(uhci, uhci->chain_end, nqh, 0);    // Helper QH
            spin_unlock(&uhci->td_lock);
        }
        else
            insert_qh(uhci, uhci->chain_end, qh, 0);
    }

    //dbg("uhci_submit_bulk_urb: exit\n");
    return 0;

fail_unmap_enomem:
    delete_qh(uhci, qh);
    if (bqh)
        delete_qh(uhci, bqh);
    if (!bulk_urb && nqh)
        delete_qh(uhci, nqh);
    return -ENOMEM;
}
/*---------------------------------------------------------------------------*/
// submits USB interrupt (ie. polling ;-)
// ASAP-flag set implicitly
// if period==0, the transfer is only done once
static int uhci_submit_int_urb (struct uhci_hcd *uhci, struct urb *urb)
{
    urb_priv_t *urb_priv = urb->hcpriv;
    int nint;
    uhci_desc_t *td;
    int status, destination;
    int info;
    unsigned int pipe = urb->pipe;

    if (urb->interval == 0)
        nint = 0;
    else {
        // log2-function (urb->interval already 2^n)
        nint = ffs(urb->interval);

        if (nint > 7)
            nint = 7;
    }

    dbg("INT-interval %i, chain %i", urb->interval, nint);

    // remember start frame, just in case...
    urb->start_frame = UHCI_GET_CURRENT_FRAME(uhci) & 1023;

    urb->number_of_packets = 1;    // INT allows only one packet

    if (alloc_td(uhci, &td, UHCI_PTR_DEPTH))
        return -ENOMEM;

    status = TD_CTRL_ACTIVE | TD_CTRL_IOC |
        (urb->transfer_flags & USB_DISABLE_SPD ? 0 : TD_CTRL_SPD) | (3 << 27);

    if (urb->dev->speed == USB_SPEED_LOW)
        status |= TD_CTRL_LS;

    destination = (pipe & PIPE_DEVEP_MASK) | usb_packetid(pipe) |
        (((urb->transfer_buffer_length - 1) & 0x7ff) << 21);

    info = destination | (uhci_get_toggle(urb) << TD_TOKEN_TOGGLE);

    fill_td(td, status, info, urb_priv->transfer_buffer_dma);
    list_add_tail(&td->desc_list, &urb_priv->desc_list);

    queue_urb(uhci, urb);

    insert_td_horizontal(uhci, uhci->int_chain[nint], td);    // store in INT-TDs

    uhci_do_toggle(urb);

    return 0;
}
/*###########################################################################*/
// ISOCHRONOUS TRANSFERS
/*###########################################################################*/
// In case of ASAP iso transfer, search the URB-list for already queued URBs
// for this EP and calculate the earliest start frame for the new
// URB (easy seamless URB continuation!)
static int find_iso_limits (struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
    struct urb *u, *last_urb = NULL;
    urb_priv_t *priv;
    struct list_head *p;
    int ret = -1;
    unsigned long flags;

    spin_lock_irqsave(&uhci->urb_list_lock, flags);
    p = uhci->urb_list.prev;

    for (; p != &uhci->urb_list; p = p->prev) {
        priv = list_entry(p, urb_priv_t, urb_list);
        u = priv->urb;
        // look for pending URBs with identical pipe handle
        // works only because iso doesn't toggle the data bit!
        if ((urb->pipe == u->pipe) && (urb->dev == u->dev) && (u->status == -EINPROGRESS)) {
            if (!last_urb)
                *start = u->start_frame;
            last_urb = u;
        }
    }

    if (last_urb) {
        *end = (last_urb->start_frame + last_urb->number_of_packets * last_urb->interval) & 1023;
        ret = 0;
    }

    spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
    return ret;
}
/*-------------------------------------------------------------------*/
// adjust start_frame according to scheduling constraints (ASAP etc)
static int iso_find_start (struct uhci_hcd *uhci, struct urb *urb)
{
    unsigned int now;
    unsigned int start_limit = 0, stop_limit = 0, queued_size, number_of_frames;
    int limits;

    now = UHCI_GET_CURRENT_FRAME(uhci) & 1023;

    number_of_frames = (unsigned) (urb->number_of_packets * urb->interval);

    if (number_of_frames > 900)
        return -EFBIG;

    limits = find_iso_limits(uhci, urb, &start_limit, &stop_limit);
    queued_size = (stop_limit - start_limit) & 1023;

    if (urb->transfer_flags & USB_ISO_ASAP) {
        // first iso
        if (limits) {
            // 10ms setup should be enough //FIXME!
            urb->start_frame = (now + 10) & 1023;
        }
        else {
            urb->start_frame = stop_limit;    // seamless linkage

            if (((now - urb->start_frame) & 1023) <= (unsigned) number_of_frames) {
                info("iso_find_start: gap in seamless isochronous scheduling");
                dbg("iso_find_start: now %u start_frame %u number_of_packets %u interval %u pipe 0x%08x",
                    now, urb->start_frame, urb->number_of_packets, urb->interval, urb->pipe);
                urb->start_frame = (now + 5) & 1023;    // 5ms setup should be enough
            }
        }
    }
    else {
        urb->start_frame &= 1023;

        if (((now - urb->start_frame) & 1023) < number_of_frames) {
            dbg("iso_find_start: now between start_frame and end");
            return -EAGAIN;
        }
    }

    /* check if either start_frame or start_frame+number_of_packets-1 lies between start_limit and stop_limit */
    if (limits)
        return 0;

    if (((urb->start_frame - start_limit) & 1023) < queued_size ||
        ((urb->start_frame + number_of_frames - 1 - start_limit) & 1023) < queued_size) {
        dbg("iso_find_start: start_frame %u number_of_packets %u start_limit %u stop_limit %u",
            urb->start_frame, urb->number_of_packets, start_limit, stop_limit);
        return -EAGAIN;
    }

    return 0;
}
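All of the frame arithmetic in iso_find_start() is modulo 1024, the size of the UHCI frame list, so "lies inside the queued window" is expressed as a wrapped difference. A tiny hedged illustration of that idiom (the helper is hypothetical, not from the file):

/* Not from the original file: true if frame f lies in the window of `size`
 * frames starting at `start`, with wrap-around at 1024 as used above. */
static inline int frame_in_window(unsigned int f, unsigned int start, unsigned int size)
{
    return ((f - start) & 1023) < size;
}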
/*-------------------------------------------------------------------*/
static int uhci_submit_iso_urb (struct uhci_hcd *uhci, struct urb *urb, int mem_flags)
{
    urb_priv_t *urb_priv = urb->hcpriv;
    int n = 0, i, ret, last = 0;
    uhci_desc_t *td, **tdm;
    int status, destination;
    unsigned long flags;

    tdm = (uhci_desc_t **) kmalloc(urb->number_of_packets * sizeof(uhci_desc_t *), mem_flags);

    if (!tdm)
        return -ENOMEM;

    memset(tdm, 0, urb->number_of_packets * sizeof(uhci_desc_t *));

    // First try to get all TDs. Cause: Removing already inserted TDs can only be done
    // racefree in three steps: unlink TDs, wait one frame, delete TDs.
    // So, this solution seems simpler...
    for (n = 0; n < urb->number_of_packets; n++) {
        dbg("n:%d urb->iso_frame_desc[n].length:%d", n, urb->iso_frame_desc[n].length);
        if (!urb->iso_frame_desc[n].length)
            continue;    // allows ISO striping by setting length to zero in iso_descriptor

        if (alloc_td(uhci, &td, UHCI_PTR_DEPTH)) {
            ret = -ENOMEM;
            goto fail_unmap_tds;
        }

        last = n;
        tdm[n] = td;
    }

    __save_flags(flags);
    __cli();    // Disable IRQs to schedule all ISO-TDs in time

    ret = iso_find_start(uhci, urb);    // adjusts urb->start_frame for later use
    if (ret) {
        __restore_flags(flags);
        n = urb->number_of_packets;
        goto fail_unmap_tds;
    }

    status = TD_CTRL_ACTIVE | TD_CTRL_IOS;

    destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

    // Queue all allocated TDs
    for (n = 0; n < urb->number_of_packets; n++) {
        td = tdm[n];
        if (!td)
            continue;

        if (n == last) {
            status |= TD_CTRL_IOC;
            queue_urb(uhci, urb);
        }

        fill_td(td, status, destination | (((urb->iso_frame_desc[n].length - 1) & 0x7ff) << 21),
                urb_priv->transfer_buffer_dma + urb->iso_frame_desc[n].offset);

        list_add_tail(&td->desc_list, &urb_priv->desc_list);

        insert_td_horizontal(uhci, uhci->iso_td[(urb->start_frame + n * urb->interval) & 1023], td);    // store in iso-tds
    }

    kfree(tdm);

    dbg("ISO-INT# %i, start %i, now %i", urb->number_of_packets, urb->start_frame,
        UHCI_GET_CURRENT_FRAME(uhci) & 1023);

    ret = 0;

    __restore_flags(flags);
    return ret;

    // Cleanup allocated TDs
fail_unmap_tds:
    dbg("ISO failed, free %i, ret %i", n, ret);
    for (i = 0; i < n; i++)
        if (tdm[i])
            delete_desc(uhci, tdm[i]);
    kfree(tdm);
    return ret;
}
/*###########################################################################*/
// URB UNLINK PROCESSING
/*###########################################################################*/
static void uhci_clean_iso_step1 (struct uhci_hcd *uhci, urb_priv_t *urb_priv)
{
    struct list_head *p;
    uhci_desc_t *td;

    dbg("uhci_clean_iso_step1");
    for (p = urb_priv->desc_list.next; p != &urb_priv->desc_list; p = p->next) {
        td = list_entry(p, uhci_desc_t, desc_list);
        unlink_td(uhci, td, 1);
    }
}
/*-------------------------------------------------------------------*/
/* mode: CLEAN_TRANSFER_NO_DELETION: unlink but no deletion mark (step 1 of async_unlink)
CLEAN_TRANSFER_REGULAR: regular (unlink/delete-mark)
CLEAN_TRANSFER_DELETION_MARK: deletion mark for QH (step 2 of async_unlink)
looks a bit complicated because of all the bulk queueing goodies
*/
static void uhci_clean_transfer (struct uhci_hcd *uhci, struct urb *urb, uhci_desc_t *qh, int mode)
{
    uhci_desc_t *bqh, *nqh, *prevqh, *prevtd;
    urb_priv_t *priv = (urb_priv_t *) urb->hcpriv;
    int now = UHCI_GET_CURRENT_FRAME(uhci);

    bqh = priv->bottom_qh;

    if (!priv->next_queued_urb) {
        // no more appended bulk queues
        queue_dbg("uhci_clean_transfer: No more bulks for urb %p, qh %p, bqh %p, nqh %p",
                  urb, qh, bqh, priv->next_qh);

        if (priv->prev_queued_urb && mode != CLEAN_TRANSFER_DELETION_MARK) {
            // qh not top of the queue
            unsigned long flags;
            urb_priv_t *ppriv = (urb_priv_t *) priv->prev_queued_urb->hcpriv;

            spin_lock_irqsave(&uhci->qh_lock, flags);
            prevqh = list_entry(ppriv->desc_list.next, uhci_desc_t, desc_list);
            prevtd = list_entry(prevqh->vertical.prev, uhci_desc_t, vertical);
            set_td_link(prevtd, priv->bottom_qh->dma_addr | UHCI_PTR_QH);    // skip current qh
            mb();
            queue_dbg("uhci_clean_transfer: relink pqh %p, ptd %p", prevqh, prevtd);
            spin_unlock_irqrestore(&uhci->qh_lock, flags);

            ppriv->bottom_qh = priv->bottom_qh;
            ppriv->next_queued_urb = NULL;
        }
        else {
            // queue is dead, qh is top of the queue
            if (mode != CLEAN_TRANSFER_DELETION_MARK)
                unlink_qh(uhci, qh);    // remove qh from horizontal chain

            if (bqh) {
                // remove remainings of bulk queue
                nqh = priv->next_qh;

                if (mode != CLEAN_TRANSFER_DELETION_MARK)
                    unlink_qh(uhci, nqh);    // remove nqh from horizontal chain

                if (mode != CLEAN_TRANSFER_NO_DELETION) {
                    // add helper QHs to free desc list
                    nqh->last_used = bqh->last_used = now;
                    list_add_tail(&nqh->horizontal, &uhci->free_desc_qh);
                    list_add_tail(&bqh->horizontal, &uhci->free_desc_qh);
                }
            }
        }
    }
    else {
        // there are queued urbs following
        queue_dbg("uhci_clean_transfer: urb %p, prevurb %p, nexturb %p, qh %p, bqh %p, nqh %p",
                  urb, priv->prev_queued_urb, priv->next_queued_urb, qh, bqh, priv->next_qh);

        if (mode != CLEAN_TRANSFER_DELETION_MARK) {
            // no work for cleanup at unlink-completion
            struct urb *nurb;
            unsigned long flags;

            nurb = priv->next_queued_urb;
            spin_lock_irqsave(&uhci->qh_lock, flags);

            if (!priv->prev_queued_urb) {
                // top QH
                prevqh = list_entry(qh->horizontal.prev, uhci_desc_t, horizontal);
                set_qh_head(prevqh, bqh->dma_addr | UHCI_PTR_QH);
                list_del(&qh->horizontal);    // remove this qh from horizontal chain
                list_add(&bqh->horizontal, &prevqh->horizontal);    // insert next bqh in horizontal chain
            }
            else {
                // intermediate QH
                urb_priv_t *ppriv = (urb_priv_t *) priv->prev_queued_urb->hcpriv;
                urb_priv_t *npriv = (urb_priv_t *) nurb->hcpriv;
                uhci_desc_t *bnqh;

                bnqh = list_entry(npriv->desc_list.next, uhci_desc_t, desc_list);
                ppriv->bottom_qh = bnqh;
                ppriv->next_queued_urb = nurb;
                prevqh = list_entry(ppriv->desc_list.next, uhci_desc_t, desc_list);
                set_qh_head(prevqh, bqh->dma_addr | UHCI_PTR_QH);
            }

            mb();
            ((urb_priv_t *) nurb->hcpriv)->prev_queued_urb = priv->prev_queued_urb;
            spin_unlock_irqrestore(&uhci->qh_lock, flags);
        }
    }

    if (mode != CLEAN_TRANSFER_NO_DELETION) {
        qh->last_used = now;
        list_add_tail(&qh->horizontal, &uhci->free_desc_qh);    // mark qh for later deletion/kfree
    }
}
/*-------------------------------------------------------------------*/
// async unlink_urb completion/cleanup work
// has to be protected by urb_list_lock!
// features: if set in transfer_flags, the resulting status of the killed
// transaction is not overwritten
static void uhci_cleanup_unlink (struct uhci_hcd *uhci, int force)
{
    struct list_head *q;
    struct urb *urb;
    urb_priv_t *urb_priv;
    int type, now = UHCI_GET_CURRENT_FRAME(uhci);

    q = uhci->urb_unlinked.next;

    while (q != &uhci->urb_unlinked) {
        urb_priv = list_entry(q, urb_priv_t, urb_list);
        urb = urb_priv->urb;
        q = urb_priv->urb_list.next;

        if (force || ((urb_priv->started != ~0) && (urb_priv->started != now))) {
            async_dbg("async cleanup %p", urb);
            type = usb_pipetype(urb->pipe);

            switch (type) {
            // process descriptors
            case PIPE_CONTROL:
                // usb_show_device(urb->dev);
                process_transfer(uhci, urb, CLEAN_TRANSFER_DELETION_MARK);    // don't unlink (already done)
                // usb_show_device(urb->dev);
                break;
            case PIPE_BULK:
                if (!uhci->avoid_bulk.counter)
                    process_transfer(uhci, urb, CLEAN_TRANSFER_DELETION_MARK);    // don't unlink (already done)
                else
                    continue;
                break;
            case PIPE_ISOCHRONOUS:
                process_iso(uhci, urb, PROCESS_ISO_FORCE);    // force, don't unlink
                break;
            case PIPE_INTERRUPT:
                process_interrupt(uhci, urb, PROCESS_INT_REMOVE);
                break;
            }

            list_del(&urb_priv->urb_list);

            uhci_urb_dma_sync(uhci, urb, urb_priv);

            // clean up descriptors for INT/ISO
            // if (type==PIPE_ISOCHRONOUS || type==PIPE_INTERRUPT)
            //     uhci_clean_iso_step2(uhci, urb_priv);

            uhci_free_priv(uhci, urb, urb_priv);

            if (!(urb->transfer_flags & USB_TIMEOUT_KILLED))
                urb->status = -ENOENT;    // now the urb is really dead

            spin_unlock(&uhci->urb_list_lock);
            usb_hcd_giveback_urb(&uhci->hcd, urb);
            spin_lock(&uhci->urb_list_lock);
        }
    }
}
/*-------------------------------------------------------------------*/
/* needs urb_list_lock!
mode: UNLINK_ASYNC_STORE_URB: unlink and move URB into unlinked list
UNLINK_ASYNC_DONT_STORE: unlink, don't move URB into unlinked list
*/
static int uhci_unlink_urb_async (struct uhci_hcd *uhci, struct urb *urb, int mode)
{
    uhci_desc_t *qh;
    urb_priv_t *urb_priv;

    async_dbg("unlink_urb_async called %p", urb);
    urb_priv = (urb_priv_t *) urb->hcpriv;

    if (urb_priv == 0) {
        err("hc_priv for URB %p is zero!", urb);
        return -EINVAL;
    }

    urb_priv->started = ~0;    // mark
    dequeue_urb(uhci, urb);

    if (mode == UNLINK_ASYNC_STORE_URB)
        list_add_tail(&urb_priv->urb_list, &uhci->urb_unlinked);    // store urb

    uhci_switch_timer_int(uhci);
    uhci->unlink_urb_done = 1;

    switch (usb_pipetype(urb->pipe)) {
    case PIPE_INTERRUPT:
        urb_priv->flags = 0;    // mark as deleted (if called from completion)
        uhci_do_toggle(urb);

    case PIPE_ISOCHRONOUS:
        uhci_clean_iso_step1(uhci, urb_priv);
        break;

    case PIPE_BULK:
    case PIPE_CONTROL:
        qh = list_entry(urb_priv->desc_list.next, uhci_desc_t, desc_list);
        uhci_clean_transfer(uhci, urb, qh, CLEAN_TRANSFER_NO_DELETION);
        break;
    }

    urb_priv->started = UHCI_GET_CURRENT_FRAME(uhci);
    return 0;    // completion will follow
}
/*-------------------------------------------------------------------*/
// unlink urbs for specific device or all devices
static void uhci_unlink_urbs (struct uhci_hcd *uhci, struct usb_device *usb_dev, int remove_all)
{
    struct list_head *p;
    struct list_head *p2;
    struct urb *urb;
    urb_priv_t *priv;
    unsigned long flags;

    spin_lock_irqsave(&uhci->urb_list_lock, flags);
    p = uhci->urb_list.prev;

    while (p != &uhci->urb_list) {
        p2 = p;
        p = p->prev;
        priv = list_entry(p2, urb_priv_t, urb_list);
        urb = priv->urb;
        // err("unlink urb: %p, dev %p, ud %p", urb, usb_dev, urb->dev);

        if (remove_all || (usb_dev == urb->dev)) {
            spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
            err("forced removing of queued URB %p due to disconnect", urb);
            uhci_urb_dequeue(&uhci->hcd, urb);
            urb->dev = NULL;    // avoid further processing of this URB
            spin_lock_irqsave(&uhci->urb_list_lock, flags);
            p = uhci->urb_list.prev;
        }
    }
    spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
}
/*-------------------------------------------------------------------*/
// Checks for URB timeout and removes bandwidth reclamation if URB idles too long
static void uhci_check_timeouts (struct uhci_hcd *uhci)
{
    struct list_head *p, *p2;
    struct urb *urb;
    int type;

    p = uhci->urb_list.prev;

    while (p != &uhci->urb_list) {
        urb_priv_t *hcpriv;

        p2 = p;
        p = p->prev;
        hcpriv = list_entry(p2, urb_priv_t, urb_list);
        urb = hcpriv->urb;
        type = usb_pipetype(urb->pipe);

        if (urb->timeout && time_after(jiffies, hcpriv->started + urb->timeout)) {
            urb->transfer_flags |= USB_TIMEOUT_KILLED;
            async_dbg("uhci_check_timeout: timeout for %p", urb);
            uhci_unlink_urb_async(uhci, urb, UNLINK_ASYNC_STORE_URB);
        }
        else if (high_bw && ((type == PIPE_BULK) || (type == PIPE_CONTROL)) &&
                 (hcpriv->use_loop) && time_after(jiffies, hcpriv->started + IDLE_TIMEOUT))
            disable_desc_loop(uhci, urb);
    }
    uhci->timeout_check = jiffies;
}
/*###########################################################################*/
// INTERRUPT PROCESSING ROUTINES
/*###########################################################################*/
/*
* Map status to standard result codes
*
* <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)]
* <dir_out> is True for output TDs and False for input TDs.
*/
static int uhci_map_status (int status, int dir_out)
{
    if (!status)
        return 0;
    if (status & TD_CTRL_BITSTUFF)    /* Bitstuff error */
        return -EPROTO;
    if (status & TD_CTRL_CRCTIMEO) {    /* CRC/Timeout */
        if (dir_out)
            return -ETIMEDOUT;
        else
            return -EILSEQ;
    }
    if (status & TD_CTRL_NAK)    /* NAK */
        return -ETIMEDOUT;
    if (status & TD_CTRL_BABBLE)    /* Babble */
        return -EOVERFLOW;
    if (status & TD_CTRL_DBUFERR)    /* Buffer error */
        return -ENOSR;
    if (status & TD_CTRL_STALLED)    /* Stalled */
        return -EPIPE;
    if (status & TD_CTRL_ACTIVE)    /* Active */
        return 0;

    return -EPROTO;
}
/*-------------------------------------------------------------------*/
static void correct_data_toggles (struct urb *urb)
{
    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe),
                  !uhci_get_toggle(urb));

    while (urb) {
        urb_priv_t *priv = urb->hcpriv;
        uhci_desc_t *qh = list_entry(priv->desc_list.next, uhci_desc_t, desc_list);
        struct list_head *p = qh->vertical.next;
        uhci_desc_t *td;
        dbg("URB to correct %p\n", urb);

        for (; p != &qh->vertical; p = p->next) {
            td = list_entry(p, uhci_desc_t, vertical);
            td->hw.td.info ^= cpu_to_le32(1 << TD_TOKEN_TOGGLE);
        }
        urb = priv->next_queued_urb;
    }
}
/*-------------------------------------------------------------------*/
/*
* For IN-control transfers, process_transfer gets a bit more complicated,
* since there are devices that return less data (eg. strings) than they
* have announced. This leads to a queue abort due to the short packet,
* and the status stage is not executed. If this happens, the status stage
* is manually re-executed.
* mode: PROCESS_TRANSFER_REGULAR: regular (unlink QH)
* PROCESS_TRANSFER_DONT_UNLINK: QHs already unlinked (for async unlink_urb)
*/
static int process_transfer (struct uhci_hcd *uhci, struct urb *urb, int mode)
{
    urb_priv_t *urb_priv = urb->hcpriv;
    struct list_head *qhl = urb_priv->desc_list.next;
    uhci_desc_t *qh = list_entry(qhl, uhci_desc_t, desc_list);
    struct list_head *p = qh->vertical.next;
    uhci_desc_t *desc = list_entry(urb_priv->desc_list.prev, uhci_desc_t, desc_list);
    uhci_desc_t *last_desc = list_entry(desc->vertical.prev, uhci_desc_t, vertical);
    int data_toggle = uhci_get_toggle(urb);    // save initial data_toggle
    int maxlength;    // extracted and remapped info from TD
    int actual_length;
    int status = 0, ret = 0;

    //dbg("process_transfer: urb %p, urb_priv %p, qh %p last_desc %p\n", urb, urb_priv, qh, last_desc);

    /* if the status phase has been retriggered and the
       queue is empty or the last status-TD is inactive, the retriggered
       status stage is completed
     */
    if (urb_priv->flags &&
        ((qh->hw.qh.element == cpu_to_le32(UHCI_PTR_TERM)) || !is_td_active(desc)))
        goto transfer_finished;

    urb->actual_length = 0;

    for (; p != &qh->vertical; p = p->next) {
        desc = list_entry(p, uhci_desc_t, vertical);

        if (is_td_active(desc)) {    // do not process active TDs
            if (mode == CLEAN_TRANSFER_DELETION_MARK)    // if called from async_unlink
                uhci_clean_transfer(uhci, urb, qh, CLEAN_TRANSFER_DELETION_MARK);
            return ret;
        }

        actual_length = uhci_actual_length(desc);    // extract transfer parameters from TD
        maxlength = (((le32_to_cpu(desc->hw.td.info) >> 21) & 0x7ff) + 1) & 0x7ff;
        status = uhci_map_status(uhci_status_bits(le32_to_cpu(desc->hw.td.status)),
                                 usb_pipeout(urb->pipe));

        if (status == -EPIPE) {    // see if EP is stalled
            // set up stalled condition
            usb_endpoint_halt(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
        }

        if (status && (status != -EPIPE) && (status != -EOVERFLOW)) {
            // if any error occurred stop processing of further TDs
            // only set ret if status returned an error
            ret = status;
            urb->error_count++;
            break;
        }
        else if ((le32_to_cpu(desc->hw.td.info) & 0xff) != USB_PID_SETUP)
            urb->actual_length += actual_length;

        // got less data than requested
        if ((actual_length < maxlength)) {
            if (urb->transfer_flags & USB_DISABLE_SPD) {
                status = -EREMOTEIO;    // treat as real error
                dbg("process_transfer: SPD!!");
                break;    // exit after this TD because SP was detected
            }

            // short read during control-IN: re-start status stage
            if ((usb_pipetype(urb->pipe) == PIPE_CONTROL)) {
                if (uhci_packetid(le32_to_cpu(last_desc->hw.td.info)) == USB_PID_OUT) {
                    set_qh_element(qh, last_desc->dma_addr);    // re-trigger status stage
                    dbg("short packet during control transfer, retrigger status stage @ %p", last_desc);
                    urb_priv->flags = 1;    // mark as short control packet
                    return 0;
                }
            }

            // all other cases: short read is OK
            data_toggle = uhci_toggle(le32_to_cpu(desc->hw.td.info));
            break;
        }
        else if (status) {
            ret = status;
            urb->error_count++;
            break;
        }

        data_toggle = uhci_toggle(le32_to_cpu(desc->hw.td.info));
        queue_dbg("process_transfer: len:%d status:%x mapped:%x toggle:%d",
                  actual_length, le32_to_cpu(desc->hw.td.status), status, data_toggle);
    }

    /* toggle correction for short bulk transfers (nonqueued/queued) */
    if (usb_pipetype(urb->pipe) == PIPE_BULK) {
        urb_priv_t *priv = (urb_priv_t *) urb->hcpriv;
        struct urb *next_queued_urb = priv->next_queued_urb;

        if (next_queued_urb) {
            urb_priv_t *next_priv = (urb_priv_t *) next_queued_urb->hcpriv;
            uhci_desc_t *qh = list_entry(next_priv->desc_list.next, uhci_desc_t, desc_list);
            uhci_desc_t *first_td = list_entry(qh->vertical.next, uhci_desc_t, vertical);

            if (data_toggle == uhci_toggle(le32_to_cpu(first_td->hw.td.info))) {
                err("process_transfer: fixed toggle");
                correct_data_toggles(next_queued_urb);
            }
        }
        else
            usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), !data_toggle);
    }

transfer_finished:

    uhci_clean_transfer(uhci, urb, qh, mode);

    urb->status = status;

    if (high_bw)
        disable_desc_loop(uhci, urb);

    dbg("process_transfer: (end) urb %p, wanted len %d, len %d status %x err %d",
        urb, urb->transfer_buffer_length, urb->actual_length, urb->status, urb->error_count);
    return ret;
}
/*-------------------------------------------------------------------*/
static int process_interrupt (struct uhci_hcd *uhci, struct urb *urb, int mode)
{
    urb_priv_t *urb_priv = urb->hcpriv;
    struct list_head *p = urb_priv->desc_list.next;
    uhci_desc_t *desc = list_entry(urb_priv->desc_list.prev, uhci_desc_t, desc_list);
    int actual_length, status = 0, i, ret = -EINPROGRESS;

    //dbg("urb contains interrupt request");

    for (i = 0; p != &urb_priv->desc_list; p = p->next, i++)    // Maybe we allow more than one TD later ;-)
    {
        desc = list_entry(p, uhci_desc_t, desc_list);

        if (is_td_active(desc) || !(desc->hw.td.status & cpu_to_le32(TD_CTRL_IOC))) {
            // do not process active TDs or one-shot TDs (->no recycling)
            //dbg("TD ACT Status @%p %08x", desc, le32_to_cpu(desc->hw.td.status));
            break;
        }

        // extract transfer parameters from TD
        actual_length = uhci_actual_length(desc);
        status = uhci_map_status(uhci_status_bits(le32_to_cpu(desc->hw.td.status)),
                                 usb_pipeout(urb->pipe));

        // see if EP is stalled
        if (status == -EPIPE) {
            // set up stalled condition
            usb_endpoint_halt(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
        }

        // if any error occurred: ignore this td, and continue
        if (status != 0) {
            //uhci_show_td (desc);
            urb->error_count++;
            goto recycle;
        }
        else
            urb->actual_length = actual_length;

    recycle:
        ((urb_priv_t *) urb->hcpriv)->flags = 1;    // set to detect unlink during completion

        uhci_urb_dma_sync(uhci, urb, urb->hcpriv);
        if (urb->complete) {
            //dbg("process_interrupt: calling completion, status %i", status);
            urb->status = status;
            spin_unlock(&uhci->urb_list_lock);
            urb->complete((struct urb *) urb);
            spin_lock(&uhci->urb_list_lock);
        }

        if ((urb->status != -ECONNABORTED) && (urb->status != ECONNRESET) &&
            (urb->status != -ENOENT) && ((urb_priv_t *) urb->hcpriv)->flags) {
            urb->status = -EINPROGRESS;

            // Recycle INT-TD if interval!=0, else mark TD as one-shot
            if (urb->interval) {
                desc->hw.td.info &= cpu_to_le32(~(1 << TD_TOKEN_TOGGLE));
                if (status == 0) {
                    ((urb_priv_t *) urb->hcpriv)->started = jiffies;
                    desc->hw.td.info |= cpu_to_le32((uhci_get_toggle(urb) << TD_TOKEN_TOGGLE));
                    uhci_do_toggle(urb);
                }
                else {
                    desc->hw.td.info |= cpu_to_le32((!uhci_get_toggle(urb) << TD_TOKEN_TOGGLE));
                }
                desc->hw.td.status = cpu_to_le32(TD_CTRL_ACTIVE | TD_CTRL_IOC |
                    (urb->transfer_flags & USB_DISABLE_SPD ? 0 : TD_CTRL_SPD) | (3 << 27));
                if (urb->dev->speed == USB_SPEED_LOW)
                    desc->hw.td.status |= __constant_cpu_to_le32(TD_CTRL_LS);
                mb();
            }
            else {
                uhci_unlink_urb_async(uhci, urb, UNLINK_ASYNC_STORE_URB);
                uhci_do_toggle(urb);    // correct toggle after unlink
                clr_td_ioc(desc);    // inactivate TD
            }
        }

        if (mode == PROCESS_INT_REMOVE) {
            INIT_LIST_HEAD(&desc->horizontal);
            list_add_tail(&desc->horizontal, &uhci->free_desc_td);
            desc->last_used = UHCI_GET_CURRENT_FRAME(uhci);
        }
    }

    return ret;
}
/*-------------------------------------------------------------------*/
// mode: PROCESS_ISO_REGULAR: processing only for done TDs, unlink TDs
// mode: PROCESS_ISO_FORCE: force processing, don't unlink TDs (already unlinked)
static int process_iso (struct uhci_hcd *uhci, struct urb *urb, int mode)
{
    urb_priv_t *urb_priv = urb->hcpriv;
    struct list_head *p = urb_priv->desc_list.next, *p_tmp;
    uhci_desc_t *desc = list_entry(urb_priv->desc_list.prev, uhci_desc_t, desc_list);
    int i, ret = 0;
    int now = UHCI_GET_CURRENT_FRAME(uhci);

    dbg("urb contains iso request");
    if (is_td_active(desc) && mode == PROCESS_ISO_REGULAR)
        return -EXDEV;    // last TD not finished

    urb->error_count = 0;
    urb->actual_length = 0;
    urb->status = 0;
    dbg("process iso urb %p, %li, %i, %i, %i %08x", urb, jiffies, UHCI_GET_CURRENT_FRAME(uhci),
        urb->number_of_packets, mode, le32_to_cpu(desc->hw.td.status));

    for (i = 0; p != &urb_priv->desc_list; i++) {
        desc = list_entry(p, uhci_desc_t, desc_list);

        //uhci_show_td(desc);
        if (is_td_active(desc)) {
            // means we have completed the last TD, but not the TDs before
            desc->hw.td.status &= cpu_to_le32(~TD_CTRL_ACTIVE);
            dbg("TD still active (%x)- grrr. paranoia!", le32_to_cpu(desc->hw.td.status));
            ret = -EXDEV;
            urb->iso_frame_desc[i].status = ret;
            unlink_td(uhci, desc, 1);
            goto err;
        }

        if (mode == PROCESS_ISO_REGULAR)
            unlink_td(uhci, desc, 1);

        if (urb->number_of_packets <= i) {
            dbg("urb->number_of_packets (%d)<=(%d)", urb->number_of_packets, i);
            ret = -EINVAL;
            goto err;
        }

        urb->iso_frame_desc[i].actual_length = uhci_actual_length(desc);
        urb->iso_frame_desc[i].status = uhci_map_status(uhci_status_bits(le32_to_cpu(desc->hw.td.status)),
                                                        usb_pipeout(urb->pipe));
        urb->actual_length += urb->iso_frame_desc[i].actual_length;

    err:
        if (urb->iso_frame_desc[i].status != 0) {
            urb->error_count++;
            urb->status = urb->iso_frame_desc[i].status;
        }
        dbg("process_iso: %i: len:%d %08x status:%x",
            i, urb->iso_frame_desc[i].actual_length,
            le32_to_cpu(desc->hw.td.status), urb->iso_frame_desc[i].status);

        p_tmp = p;
        p = p->next;
        list_del(p_tmp);

        // add to cool down pool
        INIT_LIST_HEAD(&desc->horizontal);
        list_add_tail(&desc->horizontal, &uhci->free_desc_td);
        desc->last_used = now;
    }

    dbg("process_iso: exit %i (%d), actual_len %i", i, ret, urb->actual_length);
    return ret;
}
/*-------------------------------------------------------------------*/
// called with urb_list_lock set
static int process_urb (struct uhci_hcd *uhci, struct list_head *p)
{
    struct urb *urb;
    urb_priv_t *priv;
    int type, ret = 0;

    priv = list_entry(p, urb_priv_t, urb_list);
    urb = priv->urb;

    // dbg("process_urb p %p, udev %p", urb, urb->dev);
    type = usb_pipetype(urb->pipe);

    switch (type) {
    case PIPE_CONTROL:
        ret = process_transfer(uhci, urb, CLEAN_TRANSFER_REGULAR);
        break;
    case PIPE_BULK:
        // if a submit is fiddling with bulk queues, ignore it for now
        if (!uhci->avoid_bulk.counter)
            ret = process_transfer(uhci, urb, CLEAN_TRANSFER_REGULAR);
        else
            return 0;
        break;
    case PIPE_ISOCHRONOUS:
        ret = process_iso(uhci, urb, PROCESS_ISO_REGULAR);
        break;
    case PIPE_INTERRUPT:
        ret = process_interrupt(uhci, urb, PROCESS_INT_REGULAR);
        break;
    }

    if (urb->status != -EINPROGRESS && type != PIPE_INTERRUPT) {
        dequeue_urb(uhci, urb);
        uhci_free_priv(uhci, urb, urb->hcpriv);

        spin_unlock(&uhci->urb_list_lock);
        dbg("giveback urb %p, status %i, length %i\n",
            urb, urb->status, urb->transfer_buffer_length);
        usb_hcd_giveback_urb(&uhci->hcd, urb);
        spin_lock(&uhci->urb_list_lock);
    }

    return ret;
}
/*###########################################################################*/
// EMERGENCY ROOM
/*###########################################################################*/
/* used to reanimate a halted hostcontroller which signals no interrupts anymore.
This is a shortcut for unloading and reloading the module, and should be only
used as the last resort, but some VIA chips need it.
*/
static int hc_defibrillate (struct uhci_hcd *uhci)
{
    int ret;

    err("Watchdog timeout, host controller obviously clinically dead, defibrillating...\n"
        "Expect disconnections for all devices on this controller!");

    uhci->running = 0;
    outw(USBCMD_HCRESET, (int) uhci->hcd.regs + USBCMD);

    uhci_stop(&uhci->hcd);

    ret = init_skel(uhci);
    if (ret)
        return -ENOMEM;

    set_td_ioc(uhci->td128ms);    // enable watchdog interrupt
    hc_irq_run(uhci);

    uhci->reanimations++;
    err("Host controller restart done...");
    return 0;
}