Commit 8e14bc0b authored by Ben Collins, committed by Jens Axboe

[PATCH] IEEE-1394 Updates

- Converts Video1394 to PCI dma.
- Converts ioctl's to standard interface.
- Various minor fixes
- Merges from 2.5.x tree
parent fe2701f1
......@@ -18,6 +18,8 @@ obj-$(CONFIG_IEEE1394_CMP) += cmp.o
clean-files := oui.c
ieee1394.o: $(ieee1394-objs)
$(LD) $(LDFLAGS) -r -o $@ $(ieee1394-objs)
ifeq ($(obj),)
obj = .
......
......@@ -598,6 +598,8 @@ static struct buffer *buffer_alloc(int size)
struct buffer *b;
b = kmalloc(sizeof *b + size, SLAB_KERNEL);
if (b == NULL)
return NULL;
b->head = 0;
b->tail = 0;
b->length = 0;
......
......@@ -3,8 +3,8 @@
#ifndef __AMDTP_H
#define __AMDTP_H
#include <asm/ioctl.h>
#include <asm/types.h>
#include "ieee1394-ioctl.h"
/* The userspace interface for the Audio & Music Data Transmission
* Protocol driver is really simple. First, open /dev/amdtp, use the
......@@ -57,13 +57,6 @@
*
*/
/* We use '#' for our ioctl magic number because it's cool. */
#define AMDTP_IOC_CHANNEL _IOW('#', 0, sizeof (struct amdtp_ioctl))
#define AMDTP_IOC_PLUG _IOW('#', 1, sizeof (struct amdtp_ioctl))
#define AMDTP_IOC_PING _IOW('#', 2, sizeof (struct amdtp_ioctl))
#define AMDTP_IOC_ZAP _IO('#', 3)
enum {
AMDTP_FORMAT_RAW,
AMDTP_FORMAT_IEC958_PCM,
......
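Why the AMDTP ioctl definitions changed form: _IOW() in <asm/ioctl.h> already applies sizeof() to its third argument, so the old sizeof-based definitions double-encoded the size. A short illustration (the _IOW expansion below paraphrases the standard kernel definition):

#define _IOW(type, nr, argtype) _IOC(_IOC_WRITE, (type), (nr), sizeof(argtype))

/* old form: the encoded size field ends up as
 * sizeof(sizeof(struct amdtp_ioctl)), i.e. sizeof(size_t),
 * not the structure size */
#define AMDTP_IOC_CHANNEL _IOW('#', 0, sizeof (struct amdtp_ioctl))

/* new form (see ieee1394-ioctl.h below): the size field encodes
 * sizeof(struct amdtp_ioctl), the standard interface */
#define AMDTP_IOC_CHANNEL _IOW('#', 0x00, struct amdtp_ioctl)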
......@@ -25,9 +25,8 @@ void dma_prog_region_init(struct dma_prog_region *prog)
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
{
/* round up to page size */
if(n_bytes % PAGE_SIZE)
n_bytes += PAGE_SIZE - (n_bytes & PAGE_SIZE);
n_bytes = round_up_to_page(n_bytes);
prog->n_pages = n_bytes / PAGE_SIZE;
prog->kvirt = pci_alloc_consistent(dev, prog->n_pages * PAGE_SIZE, &prog->bus_addr);
......@@ -47,7 +46,7 @@ void dma_prog_region_free(struct dma_prog_region *prog)
if(prog->kvirt) {
pci_free_consistent(prog->dev, prog->n_pages * PAGE_SIZE, prog->kvirt, prog->bus_addr);
}
prog->kvirt = NULL;
prog->dev = NULL;
prog->n_pages = 0;
......@@ -70,11 +69,10 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
unsigned int i, n_pages;
/* round up to page size */
if(n_bytes % PAGE_SIZE)
n_bytes += PAGE_SIZE - (n_bytes & PAGE_SIZE);
n_bytes = round_up_to_page(n_bytes);
n_pages = n_bytes / PAGE_SIZE;
dma->kvirt = vmalloc_32(n_pages * PAGE_SIZE);
if(!dma->kvirt) {
printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
......@@ -82,7 +80,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
}
dma->n_pages = n_pages;
/* Clear the ram out, no junk to the user */
memset(dma->kvirt, 0, n_pages * PAGE_SIZE);
......@@ -114,7 +112,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
dma->dev = dev;
dma->direction = direction;
return 0;
err:
......@@ -148,7 +146,7 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
{
int i;
unsigned long off = offset;
for(i = 0; i < dma->n_dma_pages; i++) {
if(off < sg_dma_len(&dma->sglist[i])) {
*rem = off;
......@@ -157,14 +155,14 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
off -= sg_dma_len(&dma->sglist[i]);
}
panic("dma_region_find: offset %lu beyond end of DMA mapping\n", offset);
}
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
{
unsigned long rem;
struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
return sg_dma_address(sg) + rem;
}
......@@ -176,10 +174,10 @@ void dma_region_sync(struct dma_region *dma, unsigned long offset, unsigned long
if(!len)
len = 1;
first = dma_region_find(dma, offset, &rem);
last = dma_region_find(dma, offset + len - 1, &rem);
pci_dma_sync_sg(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
}
......@@ -210,13 +208,13 @@ dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int wri
}
static struct vm_operations_struct dma_region_vm_ops = {
nopage: dma_region_pagefault,
.nopage = dma_region_pagefault,
};
int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
{
unsigned long size;
if(!dma->kvirt)
return -EINVAL;
......@@ -233,6 +231,6 @@ int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_st
vma->vm_private_data = dma;
vma->vm_file = file;
vma->vm_flags |= VM_RESERVED;
return 0;
}
......@@ -73,4 +73,12 @@ int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_s
relative to the beginning of the dma_region */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset);
/* round up a number of bytes to be a multiple of the PAGE_SIZE */
static inline unsigned long round_up_to_page(unsigned long len)
{
if(len % PAGE_SIZE)
len += PAGE_SIZE - (len % PAGE_SIZE);
return len;
}
#endif /* IEEE1394_DMA_H */
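The round_up_to_page() helper above consolidates the rounding that dma_prog_region_alloc() and dma_region_alloc() previously open-coded. A minimal sketch of the dma_region API as this patch uses it elsewhere (function and field names are taken from this file; the surrounding function is illustrative only):

static int example_dma_region(struct pci_dev *pdev)
{
	struct dma_region dma;
	dma_addr_t bus;

	dma_region_init(&dma);

	/* n_bytes is rounded up to a whole number of pages internally */
	if (dma_region_alloc(&dma, 3 * PAGE_SIZE + 1, pdev, PCI_DMA_FROMDEVICE))
		return -ENOMEM;

	/* translate an offset within the region to a bus address that
	 * can be written into a hardware DMA descriptor */
	bus = dma_region_offset_to_bus(&dma, 2048);

	/* ... hand 'bus' to the hardware, run the DMA, etc ... */

	dma_region_free(&dma);
	return 0;
}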
......@@ -918,6 +918,7 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
u64 chan_mask;
int retval = -EINVAL;
debug_printk( "dv1394: initialising %d\n", video->id );
if(init->api_version != DV1394_API_VERSION)
goto err;
......@@ -1186,6 +1187,7 @@ static void stop_dma(struct video_card *video)
if( (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
(reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) & (1 << 10)) ) {
/* still active */
debug_printk("dv1394: stop_dma: DMA not stopped yet\n" );
mb();
} else {
debug_printk("dv1394: stop_dma: DMA stopped safely after %d ms\n", i/10);
......@@ -1199,7 +1201,9 @@ static void stop_dma(struct video_card *video)
printk(KERN_ERR "dv1394: stop_dma: DMA still going after %d ms!\n", i/10);
}
}
else
debug_printk("dv1394: stop_dma: already stopped.\n");
spin_unlock_irqrestore(&video->spinlock, flags);
}
......@@ -1226,7 +1230,8 @@ static int do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
/* disable interrupts for IT context */
reg_write(video->ohci, OHCI1394_IsoXmitIntMaskClear, (1 << video->ohci_it_ctx));
clear_bit(video->ohci_it_ctx, &video->ohci->it_ctx_usage);
/* remove tasklet */
ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
debug_printk("dv1394: IT context %d released\n", video->ohci_it_ctx);
video->ohci_it_ctx = -1;
}
......@@ -1240,23 +1245,14 @@ static int do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
/* disable interrupts for IR context */
reg_write(video->ohci, OHCI1394_IsoRecvIntMaskClear, (1 << video->ohci_ir_ctx));
clear_bit(video->ohci_ir_ctx, &video->ohci->ir_ctx_usage);
/* remove tasklet */
ohci1394_unregister_iso_tasklet(video->ohci, &video->ir_tasklet);
debug_printk("dv1394: IR context %d released\n", video->ohci_ir_ctx);
video->ohci_ir_ctx = -1;
}
spin_unlock_irqrestore(&video->spinlock, flags);
/* remove tasklets */
if(video->ohci_it_ctx != -1) {
ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
video->ohci_it_ctx = -1;
}
if(video->ohci_ir_ctx != -1) {
ohci1394_unregister_iso_tasklet(video->ohci, &video->ir_tasklet);
video->ohci_ir_ctx = -1;
}
/* release the ISO channel */
if(video->channel != -1) {
u64 chan_mask;
......@@ -1612,7 +1608,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
switch(cmd)
{
case DV1394_SUBMIT_FRAMES: {
case DV1394_IOC_SUBMIT_FRAMES: {
unsigned int n_submit;
if( !video_card_initialized(video) ) {
......@@ -1666,7 +1662,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
break;
}
case DV1394_WAIT_FRAMES: {
case DV1394_IOC_WAIT_FRAMES: {
unsigned int n_wait;
if( !video_card_initialized(video) ) {
......@@ -1715,7 +1711,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
break;
}
case DV1394_RECEIVE_FRAMES: {
case DV1394_IOC_RECEIVE_FRAMES: {
unsigned int n_recv;
if( !video_card_initialized(video) ) {
......@@ -1748,7 +1744,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
break;
}
case DV1394_START_RECEIVE: {
case DV1394_IOC_START_RECEIVE: {
if( !video_card_initialized(video) ) {
ret = do_dv1394_init_default(video);
if(ret)
......@@ -1765,7 +1761,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
break;
}
case DV1394_INIT: {
case DV1394_IOC_INIT: {
struct dv1394_init init;
if(arg == (unsigned long) NULL) {
ret = do_dv1394_init_default(video);
......@@ -1779,12 +1775,12 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
break;
}
case DV1394_SHUTDOWN:
case DV1394_IOC_SHUTDOWN:
ret = do_dv1394_shutdown(video, 0);
break;
case DV1394_GET_STATUS: {
case DV1394_IOC_GET_STATUS: {
struct dv1394_status status;
if( !video_card_initialized(video) ) {
......@@ -2346,6 +2342,7 @@ static void ir_tasklet_func(unsigned long data)
dbc = (int) (p->cip_h1 >> 24);
if ( video->continuity_counter != -1 && dbc > ((video->continuity_counter + 1) % 256) )
{
printk(KERN_WARNING "dv1394: discontinuity detected, dropping all frames\n" );
video->dropped_frames += video->n_clear_frames + 1;
video->first_frame = 0;
video->n_clear_frames = 0;
......@@ -2364,9 +2361,8 @@ static void ir_tasklet_func(unsigned long data)
video->n_clear_frames++;
if (video->n_clear_frames > video->n_frames) {
video->dropped_frames++;
video->n_clear_frames--;
if (video->n_clear_frames < 0)
video->n_clear_frames = 0;
printk(KERN_WARNING "dv1394: dropped a frame during reception\n" );
video->n_clear_frames = video->n_frames-1;
video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
}
if (video->first_clear_frame == -1)
......@@ -2375,7 +2371,6 @@ static void ir_tasklet_func(unsigned long data)
/* get the next frame */
video->active_frame = (video->active_frame + 1) % video->n_frames;
f = video->frames[video->active_frame];
irq_printk(" frame received, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n",
video->active_frame, video->n_clear_frames, video->first_clear_frame);
}
......
......@@ -200,48 +200,7 @@
/* ioctl() commands */
enum {
/* I don't like using 0 as a valid ioctl() */
DV1394_INVALID = 0,
/* get the driver ready to transmit video.
pass a struct dv1394_init* as the parameter (see below),
or NULL to get default parameters */
DV1394_INIT,
/* stop transmitting video and free the ringbuffer */
DV1394_SHUTDOWN,
/* submit N new frames to be transmitted, where
the index of the first new frame is first_clear_buffer,
and the index of the last new frame is
(first_clear_buffer + N) % n_frames */
DV1394_SUBMIT_FRAMES,
/* block until N buffers are clear (pass N as the parameter)
Because we re-transmit the last frame on underrun, there
will at most be n_frames - 1 clear frames at any time */
DV1394_WAIT_FRAMES,
/* capture new frames that have been received, where
the index of the first new frame is first_clear_buffer,
and the index of the last new frame is
(first_clear_buffer + N) % n_frames */
DV1394_RECEIVE_FRAMES,
DV1394_START_RECEIVE,
/* pass a struct dv1394_status* as the parameter (see below) */
DV1394_GET_STATUS,
};
#include "ieee1394-ioctl.h"
enum pal_or_ntsc {
......
......@@ -77,7 +77,7 @@
printk(KERN_ERR fmt, ## args)
static char version[] __devinitdata =
"$Rev: 641 $ Ben Collins <bcollins@debian.org>";
"$Rev: 770 $ Ben Collins <bcollins@debian.org>";
/* Our ieee1394 highlevel driver */
#define ETHER1394_DRIVER_NAME "ether1394"
......@@ -368,6 +368,7 @@ static void ether1394_add_host (struct hpsb_host *host)
if (register_netdev (dev)) {
ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n");
kfree (dev);
kfree (hi);
return;
}
......
......@@ -164,17 +164,19 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, u64 start)
return retval;
}
void hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel)
{
if (channel > 63) {
HPSB_ERR("%s called with invalid channel", __FUNCTION__);
return;
return -EINVAL;
}
if (host->iso_listen_count[channel]++ == 0) {
host->driver->devctl(host, ISO_LISTEN_CHANNEL, channel);
return host->driver->devctl(host, ISO_LISTEN_CHANNEL, channel);
}
return 0;
}
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
......
......@@ -150,8 +150,8 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, u64 start);
* Enable or disable receiving a certain isochronous channel through the
* iso_receive op.
*/
void hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel);
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel);
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel);
......
......@@ -38,9 +38,15 @@ static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
return -1;
}
static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg)
{
return -1;
}
static struct hpsb_host_driver dummy_driver = {
.transmit_packet = dummy_transmit_packet,
.devctl = dummy_devctl
.devctl = dummy_devctl,
.isoctl = dummy_isoctl
};
/**
......@@ -63,9 +69,11 @@ int hpsb_ref_host(struct hpsb_host *host)
spin_lock_irqsave(&hosts_lock, flags);
list_for_each(lh, &hosts) {
if (host == list_entry(lh, struct hpsb_host, host_list)) {
host->driver->devctl(host, MODIFY_USAGE, 1);
host->refcount++;
retval = 1;
if (host->driver->devctl(host, MODIFY_USAGE, 1)) {
host->driver->devctl(host, MODIFY_USAGE, 1);
host->refcount++;
retval = 1;
}
break;
}
}
......
......@@ -108,19 +108,22 @@ enum devctl_cmd {
enum isoctl_cmd {
/* rawiso API - see iso.h for the meanings of these commands
* INIT = allocate resources
* START = begin transmission/reception (arg: cycle to start on)
* START = begin transmission/reception
* STOP = halt transmission/reception
* QUEUE/RELEASE = produce/consume packets (arg: # of packets)
* QUEUE/RELEASE = produce/consume packets
* SHUTDOWN = deallocate resources
*/
XMIT_INIT,
XMIT_START,
XMIT_STOP,
XMIT_QUEUE,
XMIT_SHUTDOWN,
RECV_INIT,
RECV_LISTEN_CHANNEL, /* multi-channel only */
RECV_UNLISTEN_CHANNEL, /* multi-channel only */
RECV_SET_CHANNEL_MASK, /* multi-channel only; arg is a *u64 */
RECV_START,
RECV_STOP,
RECV_RELEASE,
......@@ -170,11 +173,12 @@ struct hpsb_host_driver {
*/
int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
/* ISO transmission/reception functions. Return 0 on success, -1 on failure.
* If the low-level driver does not support the new ISO API, set isoctl to NULL.
/* ISO transmission/reception functions. Return 0 on success, -1
* (or -EXXX errno code) on failure. If the low-level driver does not
* support the new ISO API, set isoctl to NULL.
*/
int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, int arg);
int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg);
/* This function is mainly to redirect local CSR reads/locks to the iso
* management registers (bus manager id, bandwidth available, channels
* available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
......
/* Base file for all ieee1394 ioctl's. Linux-1394 has allocated base '#'
* with a range of 0x00-0x3f. */
#ifndef __IEEE1394_IOCTL_H
#define __IEEE1394_IOCTL_H
#include <asm/ioctl.h>
#include <asm/types.h>
/* AMDTP Gets 6 */
#define AMDTP_IOC_CHANNEL _IOW('#', 0x00, struct amdtp_ioctl)
#define AMDTP_IOC_PLUG _IOW('#', 0x01, struct amdtp_ioctl)
#define AMDTP_IOC_PING _IOW('#', 0x02, struct amdtp_ioctl)
#define AMDTP_IOC_ZAP _IO ('#', 0x03)
/* DV1394 Gets 10 */
/* Get the driver ready to transmit video. pass a struct dv1394_init* as
* the parameter (see below), or NULL to get default parameters */
#define DV1394_IOC_INIT _IOW('#', 0x06, struct dv1394_init)
/* Stop transmitting video and free the ringbuffer */
#define DV1394_IOC_SHUTDOWN _IO ('#', 0x07)
/* Submit N new frames to be transmitted, where the index of the first new
* frame is first_clear_buffer, and the index of the last new frame is
* (first_clear_buffer + N) % n_frames */
#define DV1394_IOC_SUBMIT_FRAMES _IO ('#', 0x08)
/* Block until N buffers are clear (pass N as the parameter) Because we
* re-transmit the last frame on underrun, there will at most be n_frames
* - 1 clear frames at any time */
#define DV1394_IOC_WAIT_FRAMES _IO ('#', 0x09)
/* Capture new frames that have been received, where the index of the
* first new frame is first_clear_buffer, and the index of the last new
* frame is (first_clear_buffer + N) % n_frames */
#define DV1394_IOC_RECEIVE_FRAMES _IO ('#', 0x0a)
/* Tell card to start receiving DMA */
#define DV1394_IOC_START_RECEIVE _IO ('#', 0x0b)
/* Pass a struct dv1394_status* as the parameter */
#define DV1394_IOC_GET_STATUS _IOR('#', 0x0c, struct dv1394_status)
/* Video1394 Gets 10 */
#define VIDEO1394_IOC_LISTEN_CHANNEL \
_IOWR('#', 0x10, struct video1394_mmap)
#define VIDEO1394_IOC_UNLISTEN_CHANNEL \
_IOW ('#', 0x11, int)
#define VIDEO1394_IOC_LISTEN_QUEUE_BUFFER \
_IOW ('#', 0x12, struct video1394_wait)
#define VIDEO1394_IOC_LISTEN_WAIT_BUFFER \
_IOWR('#', 0x13, struct video1394_wait)
#define VIDEO1394_IOC_TALK_CHANNEL \
_IOWR('#', 0x14, struct video1394_mmap)
#define VIDEO1394_IOC_UNTALK_CHANNEL \
_IOW ('#', 0x15, int)
#define VIDEO1394_IOC_TALK_QUEUE_BUFFER \
_IOW ('#', 0x16, sizeof (struct video1394_wait) + \
sizeof (struct video1394_queue_variable))
#define VIDEO1394_IOC_TALK_WAIT_BUFFER \
_IOW ('#', 0x17, struct video1394_wait)
#define VIDEO1394_IOC_LISTEN_POLL_BUFFER \
_IOWR('#', 0x18, struct video1394_wait)
/* Raw1394's ISO interface */
#define RAW1394_IOC_ISO_XMIT_INIT \
_IOW ('#', 0x1a, struct raw1394_iso_status)
#define RAW1394_IOC_ISO_RECV_INIT \
_IOWR('#', 0x1b, struct raw1394_iso_status)
#define RAW1394_IOC_ISO_RECV_START \
_IOC (_IOC_WRITE, '#', 0x1c, sizeof(int) * 3)
#define RAW1394_IOC_ISO_XMIT_START \
_IOC (_IOC_WRITE, '#', 0x1d, sizeof(int) * 2)
#define RAW1394_IOC_ISO_XMIT_RECV_STOP \
_IO ('#', 0x1e)
#define RAW1394_IOC_ISO_GET_STATUS \
_IOR ('#', 0x1f, struct raw1394_iso_status)
#define RAW1394_IOC_ISO_SHUTDOWN \
_IO ('#', 0x20)
#define RAW1394_IOC_ISO_QUEUE_ACTIVITY \
_IO ('#', 0x21)
#define RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL \
_IOW ('#', 0x22, unsigned char)
#define RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL \
_IOW ('#', 0x23, unsigned char)
#define RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK \
_IOW ('#', 0x24, u64)
#define RAW1394_IOC_ISO_RECV_PACKETS \
_IOW ('#', 0x25, struct raw1394_iso_packets)
#define RAW1394_IOC_ISO_RECV_RELEASE_PACKETS \
_IOW ('#', 0x26, unsigned int)
#define RAW1394_IOC_ISO_XMIT_PACKETS \
_IOW ('#', 0x27, struct raw1394_iso_packets)
#define RAW1394_IOC_ISO_XMIT_SYNC \
_IO ('#', 0x28)
#endif /* __IEEE1394_IOCTL_H */
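A hypothetical user-space sketch of the consolidated dv1394 interface, using only behavior documented above (the device path is an assumption, and struct dv1394_status is defined in dv1394.h):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int dv1394_example(void)
{
	struct dv1394_status status;
	int fd = open("/dev/dv1394", O_RDWR);	/* path is an assumption */

	if (fd < 0)
		return -1;

	/* a NULL argument requests the driver's default init parameters */
	if (ioctl(fd, DV1394_IOC_INIT, NULL) < 0 ||
	    ioctl(fd, DV1394_IOC_GET_STATUS, &status) < 0) {
		close(fd);
		return -1;
	}

	ioctl(fd, DV1394_IOC_SHUTDOWN);
	close(fd);
	return 0;
}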
......@@ -1000,7 +1000,7 @@ int ieee1394_register_chardev(int blocknum,
struct file_operations *file_ops)
{
int retval;
if( (blocknum < 0) || (blocknum > 15) )
return -EINVAL;
......@@ -1016,7 +1016,7 @@ int ieee1394_register_chardev(int blocknum,
/* block already taken */
retval = -EBUSY;
}
write_unlock(&ieee1394_chardevs_lock);
return retval;
......@@ -1027,14 +1027,14 @@ void ieee1394_unregister_chardev(int blocknum)
{
if( (blocknum < 0) || (blocknum > 15) )
return;
write_lock(&ieee1394_chardevs_lock);
if(ieee1394_chardevs[blocknum].file_ops) {
ieee1394_chardevs[blocknum].file_ops = NULL;
ieee1394_chardevs[blocknum].module = NULL;
}
write_unlock(&ieee1394_chardevs_lock);
}
......@@ -1055,7 +1055,7 @@ static int ieee1394_get_chardev(int blocknum,
{
int ret = 0;
if( (blocknum < 0) || (blocknum > 15) )
if ((blocknum < 0) || (blocknum > 15))
return ret;
read_lock(&ieee1394_chardevs_lock);
......@@ -1063,15 +1063,15 @@ static int ieee1394_get_chardev(int blocknum,
*module = ieee1394_chardevs[blocknum].module;
*file_ops = ieee1394_chardevs[blocknum].file_ops;
if(*file_ops == NULL)
if (*file_ops == NULL)
goto out;
if(!try_module_get(*module))
if (!try_module_get(*module))
goto out;
/* success! */
ret = 1;
out:
read_unlock(&ieee1394_chardevs_lock);
return ret;
......@@ -1101,11 +1101,11 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
reference count of whatever module file->f_op->owner points
to, immediately after this function returns.
*/
/* shift away lower four bits of the minor
to get the index of the ieee1394_driver
we want */
blocknum = (minor(inode->i_rdev) >> 4) & 0xF;
/* look up the driver */
......@@ -1126,14 +1126,14 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
if(retval == 0) {
/* If the open() succeeded, then ieee1394 will be left
with an extra module reference, so we discard it here.
The task-specific driver still has the extra
reference given to it by ieee1394_get_chardev().
This extra reference prevents the module from
unloading while the file is open, and will be
dropped by the VFS when the file is released.
*/
* with an extra module reference, so we discard it here.
*
* The task-specific driver still has the extra reference
* given to it by ieee1394_get_chardev(). This extra
* reference prevents the module from unloading while the
* file is open, and will be dropped by the VFS when the
* file is released. */
module_put(THIS_MODULE);
} else {
/* point the file's f_ops back to ieee1394. The VFS will then
......@@ -1142,11 +1142,10 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
file->f_op = &ieee1394_chardev_ops;
/* if the open() failed, then we need to drop the
extra reference we gave to the task-specific
driver */
/* If the open() failed, then we need to drop the extra
* reference we gave to the task-specific driver. */
module_put(module);
}
return retval;
......@@ -1199,10 +1198,10 @@ static void __exit ieee1394_cleanup(void)
kmem_cache_destroy(hpsb_packet_cache);
unregister_chrdev(IEEE1394_MAJOR, "ieee1394");
/* it's ok to pass a NULL devfs_handle to devfs_unregister */
devfs_unregister(ieee1394_devfs_handle);
remove_proc_entry("ieee1394", proc_bus);
}
......@@ -1298,10 +1297,16 @@ EXPORT_SYMBOL(hpsb_iso_xmit_init);
EXPORT_SYMBOL(hpsb_iso_recv_init);
EXPORT_SYMBOL(hpsb_iso_xmit_start);
EXPORT_SYMBOL(hpsb_iso_recv_start);
EXPORT_SYMBOL(hpsb_iso_recv_listen_channel);
EXPORT_SYMBOL(hpsb_iso_recv_unlisten_channel);
EXPORT_SYMBOL(hpsb_iso_recv_set_channel_mask);
EXPORT_SYMBOL(hpsb_iso_stop);
EXPORT_SYMBOL(hpsb_iso_shutdown);
EXPORT_SYMBOL(hpsb_iso_xmit_queue_packets);
EXPORT_SYMBOL(hpsb_iso_xmit_queue_packet);
EXPORT_SYMBOL(hpsb_iso_xmit_sync);
EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
EXPORT_SYMBOL(hpsb_iso_n_ready);
EXPORT_SYMBOL(hpsb_iso_packet_data);
EXPORT_SYMBOL(hpsb_iso_packet_info);
EXPORT_SYMBOL(hpsb_iso_packet_sent);
EXPORT_SYMBOL(hpsb_iso_packet_received);
EXPORT_SYMBOL(hpsb_iso_wake);
......@@ -17,7 +17,8 @@ void hpsb_iso_stop(struct hpsb_iso *iso)
if(!(iso->flags & HPSB_ISO_DRIVER_STARTED))
return;
iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ? XMIT_STOP : RECV_STOP, 0);
iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
XMIT_STOP : RECV_STOP, 0);
iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
}
......@@ -25,94 +26,84 @@ void hpsb_iso_shutdown(struct hpsb_iso *iso)
{
if(iso->flags & HPSB_ISO_DRIVER_INIT) {
hpsb_iso_stop(iso);
iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ? XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
iso->flags &= ~HPSB_ISO_DRIVER_INIT;
}
dma_region_free(&iso->buf);
dma_region_free(&iso->data_buf);
kfree(iso->infos);
kfree(iso);
}
static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type,
unsigned int data_buf_size,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int irq_interval,
void (*callback)(struct hpsb_iso*))
{
struct hpsb_iso *iso;
unsigned int packet_plus_info;
int dma_direction;
int iso_header_bytes;
const int info_bytes = sizeof(struct hpsb_iso_packet_info);
/* make sure driver supports the ISO API */
if(!host->driver->isoctl)
return NULL;
if(type == HPSB_ISO_RECV) {
/* when receiving, leave 8 extra bytes in front
of the data payload for the iso header */
iso_header_bytes = 8;
} else {
iso_header_bytes = 0;
}
/* sanitize parameters */
if(buf_packets < 2)
buf_packets = 2;
if(irq_interval < 1 || irq_interval > buf_packets / 2)
irq_interval = buf_packets / 2;
if(max_packet_size + info_bytes + iso_header_bytes > PAGE_SIZE)
if(channel < -1 || channel >= 64)
return NULL;
/* channel = -1 is OK for multi-channel recv but not for xmit */
if(type == HPSB_ISO_XMIT && channel < 0)
return NULL;
/* size of packet payload plus the per-packet info must be a power of 2
and at most equal to the page size */
for(packet_plus_info = 256; packet_plus_info < PAGE_SIZE; packet_plus_info *= 2) {
if(packet_plus_info >= (max_packet_size + info_bytes + iso_header_bytes)) {
break;
}
}
/* allocate and write the struct hpsb_iso */
iso = kmalloc(sizeof(*iso), SLAB_KERNEL);
if(!iso)
return NULL;
/* allocate ringbuffer of packet descriptors */
iso->infos = kmalloc(buf_packets * sizeof(struct hpsb_iso_packet_info), SLAB_KERNEL);
if(!iso->infos)
return NULL;
iso->type = type;
iso->host = host;
iso->hostdata = NULL;
iso->callback = callback;
init_waitqueue_head(&iso->waitq);
iso->channel = channel;
iso->irq_interval = irq_interval;
dma_region_init(&iso->buf);
dma_region_init(&iso->data_buf);
iso->buf_size = round_up_to_page(data_buf_size);
iso->buf_packets = buf_packets;
iso->buf_stride = packet_plus_info;
iso->max_packet_size = max_packet_size;
iso->packet_data_offset = iso_header_bytes;
iso->packet_info_offset = iso_header_bytes + max_packet_size;
iso->pkt_dma = 0;
iso->first_packet = 0;
spin_lock_init(&iso->lock);
if(iso->type == HPSB_ISO_XMIT) {
atomic_set(&iso->n_dma_packets, 0);
iso->n_ready_packets = iso->buf_packets;
dma_direction = PCI_DMA_TODEVICE;
} else {
atomic_set(&iso->n_dma_packets, iso->buf_packets);
iso->n_ready_packets = 0;
dma_direction = PCI_DMA_FROMDEVICE;
}
atomic_set(&iso->overflows, 0);
iso->flags = 0;
iso->prebuffer = 0;
/* allocate the packet buffer */
if(dma_region_alloc(&iso->buf, iso->buf_packets * iso->buf_stride,
host->pdev, dma_direction))
if(dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
goto err;
return iso;
......@@ -124,26 +115,33 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
int hpsb_iso_n_ready(struct hpsb_iso* iso)
{
return iso->buf_packets - atomic_read(&iso->n_dma_packets);
unsigned long flags;
int val;
spin_lock_irqsave(&iso->lock, flags);
val = iso->n_ready_packets;
spin_unlock_irqrestore(&iso->lock, flags);
return val;
}
struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int speed,
int irq_interval,
void (*callback)(struct hpsb_iso*))
{
struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
buf_packets, max_packet_size,
data_buf_size, buf_packets,
channel, irq_interval, callback);
if(!iso)
return NULL;
iso->speed = speed;
/* tell the driver to start working */
if(host->driver->isoctl(iso, XMIT_INIT, 0))
goto err;
......@@ -157,14 +155,14 @@ struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
}
struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int irq_interval,
void (*callback)(struct hpsb_iso*))
{
struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
buf_packets, max_packet_size,
data_buf_size, buf_packets,
channel, irq_interval, callback);
if(!iso)
return NULL;
......@@ -181,6 +179,27 @@ struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
return NULL;
}
int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
{
if(iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
return -EINVAL;
return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
}
int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
{
if(iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
return -EINVAL;
return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
}
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
if(iso->type != HPSB_ISO_RECV || iso->channel != -1)
return -EINVAL;
return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, (unsigned long) &mask);
}
static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
int retval = iso->host->driver->isoctl(iso, XMIT_START, cycle);
......@@ -195,11 +214,20 @@ int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
{
if(iso->type != HPSB_ISO_XMIT)
return -1;
if(iso->flags & HPSB_ISO_DRIVER_STARTED)
return 0;
if(prebuffer < 1)
if(cycle < -1)
cycle = -1;
else if(cycle >= 8000)
cycle %= 8000;
iso->xmit_cycle = cycle;
if(prebuffer < 0)
prebuffer = iso->buf_packets;
else if(prebuffer == 0)
prebuffer = 1;
if(prebuffer > iso->buf_packets)
......@@ -207,41 +235,39 @@ int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
iso->prebuffer = prebuffer;
if(cycle != -1) {
/* pre-fill info->cycle */
int pkt = iso->first_packet;
int c, i;
cycle %= 8000;
c = cycle;
for(i = 0; i < iso->buf_packets; i++) {
struct hpsb_iso_packet_info *info = hpsb_iso_packet_info(iso, pkt);
info->cycle = c;
c = (c+1) % 8000;
pkt = (pkt+1) % iso->buf_packets;
}
}
/* remember the starting cycle; DMA will commence from xmit_queue_packets() */
/* remember the starting cycle; DMA will commence from xmit_queue_packets()
once enough packets have been buffered */
iso->start_cycle = cycle;
return 0;
}
int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle)
int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
int retval = 0;
int isoctl_args[3];
if(iso->type != HPSB_ISO_RECV)
return -1;
if(iso->flags & HPSB_ISO_DRIVER_STARTED)
return 0;
retval = iso->host->driver->isoctl(iso, RECV_START, cycle);
if(cycle < -1)
cycle = -1;
else if(cycle >= 8000)
cycle %= 8000;
isoctl_args[0] = cycle;
if(tag_mask < 0)
/* match all tags */
tag_mask = 0xF;
isoctl_args[1] = tag_mask;
isoctl_args[2] = sync;
retval = iso->host->driver->isoctl(iso, RECV_START, (unsigned long) &isoctl_args[0]);
if(retval)
return retval;
......@@ -249,58 +275,162 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle)
return retval;
}
int hpsb_iso_xmit_queue_packets(struct hpsb_iso *iso, unsigned int n_packets)
/* check to make sure the user has not supplied bogus values of offset/len
that would cause the kernel to access memory outside the buffer */
static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
unsigned int offset, unsigned short len,
unsigned int *out_offset, unsigned short *out_len)
{
if(offset >= iso->buf_size)
return -EFAULT;
/* make sure the packet does not go beyond the end of the buffer */
if(offset + len > iso->buf_size)
return -EFAULT;
/* check for wrap-around */
if(offset + len < offset)
return -EFAULT;
/* now we can trust 'offset' and 'length' */
*out_offset = offset;
*out_len = len;
return 0;
}
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy)
{
int i, retval;
int pkt = iso->first_packet;
struct hpsb_iso_packet_info *info;
unsigned long flags;
int rv;
if(iso->type != HPSB_ISO_XMIT)
return -1;
/* check packet sizes for sanity */
for(i = 0; i < n_packets; i++) {
struct hpsb_iso_packet_info *info = hpsb_iso_packet_info(iso, pkt);
if(info->len > iso->max_packet_size) {
printk(KERN_ERR "hpsb_iso_xmit_queue_packets: packet too long (%u, max is %u)\n",
info->len, iso->max_packet_size);
return -EINVAL;
}
pkt = (pkt+1) % iso->buf_packets;
return -EINVAL;
/* is there space in the buffer? */
if(iso->n_ready_packets <= 0) {
return -EBUSY;
}
retval = iso->host->driver->isoctl(iso, XMIT_QUEUE, n_packets);
if(retval)
return retval;
info = &iso->infos[iso->first_packet];
/* check for bogus offset/length */
if(hpsb_iso_check_offset_len(iso, offset, len, &info->offset, &info->len))
return -EFAULT;
info->tag = tag;
info->sy = sy;
spin_lock_irqsave(&iso->lock, flags);
rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) info);
if(rv)
goto out;
/* increment cursors */
iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
iso->xmit_cycle = (iso->xmit_cycle+1) % 8000;
iso->n_ready_packets--;
if(iso->prebuffer != 0) {
iso->prebuffer -= n_packets;
iso->prebuffer--;
if(iso->prebuffer <= 0) {
iso->prebuffer = 0;
return do_iso_xmit_start(iso,
iso->start_cycle);
rv = do_iso_xmit_start(iso, iso->start_cycle);
}
}
return 0;
out:
spin_unlock_irqrestore(&iso->lock, flags);
return rv;
}
int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
{
if(iso->type != HPSB_ISO_XMIT)
return -EINVAL;
return wait_event_interruptible(iso->waitq, hpsb_iso_n_ready(iso) == iso->buf_packets);
}
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
{
unsigned long flags;
spin_lock_irqsave(&iso->lock, flags);
/* predict the cycle of the next packet to be queued */
/* jump ahead by the number of packets that are already buffered */
cycle += iso->buf_packets - iso->n_ready_packets;
cycle %= 8000;
iso->xmit_cycle = cycle;
iso->n_ready_packets++;
iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
if(iso->n_ready_packets == iso->buf_packets || error != 0) {
/* the buffer has run empty! */
atomic_inc(&iso->overflows);
}
spin_unlock_irqrestore(&iso->lock, flags);
}
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
u16 cycle, u8 channel, u8 tag, u8 sy)
{
unsigned long flags;
spin_lock_irqsave(&iso->lock, flags);
if(iso->n_ready_packets == iso->buf_packets) {
/* overflow! */
atomic_inc(&iso->overflows);
} else {
struct hpsb_iso_packet_info *info = &iso->infos[iso->pkt_dma];
info->offset = offset;
info->len = len;
info->cycle = cycle;
info->channel = channel;
info->tag = tag;
info->sy = sy;
iso->pkt_dma = (iso->pkt_dma+1) % iso->buf_packets;
iso->n_ready_packets++;
}
spin_unlock_irqrestore(&iso->lock, flags);
}
int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
{
unsigned long flags;
unsigned int i;
int rv = 0;
if(iso->type != HPSB_ISO_RECV)
return -1;
return iso->host->driver->isoctl(iso, RECV_RELEASE, n_packets);
}
unsigned char* hpsb_iso_packet_data(struct hpsb_iso *iso, unsigned int pkt)
{
return (iso->buf.kvirt + pkt * iso->buf_stride)
+ iso->packet_data_offset;
spin_lock_irqsave(&iso->lock, flags);
for(i = 0; i < n_packets; i++) {
rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
(unsigned long) &iso->infos[iso->first_packet]);
if(rv)
break;
iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
iso->n_ready_packets--;
}
spin_unlock_irqrestore(&iso->lock, flags);
return rv;
}
struct hpsb_iso_packet_info* hpsb_iso_packet_info(struct hpsb_iso *iso, unsigned int pkt)
void hpsb_iso_wake(struct hpsb_iso *iso)
{
return (struct hpsb_iso_packet_info*) ((iso->buf.kvirt + pkt * iso->buf_stride)
+ iso->packet_info_offset);
wake_up_interruptible(&iso->waitq);
if(iso->callback)
iso->callback(iso);
}
......@@ -17,25 +17,37 @@
/* high-level ISO interface */
/* per-packet data embedded in the ringbuffer */
/* This API sends and receives isochronous packets on a large,
virtually-contiguous kernel memory buffer. The buffer may be mapped
into a user-space process for zero-copy transmission and reception.
There are no explicit boundaries between packets in the buffer. A
packet may be transmitted or received at any location. However,
low-level drivers may impose certain restrictions on alignment or
size of packets. (e.g. in OHCI no packet may cross a page boundary,
and packets should be quadlet-aligned)
*/
/* Packet descriptor - the API maintains a ring buffer of these packet
descriptors in kernel memory (hpsb_iso.infos[]). */
struct hpsb_iso_packet_info {
unsigned short len;
unsigned short cycle;
unsigned char channel; /* recv only */
unsigned char tag;
unsigned char sy;
};
/* offset of data payload relative to the first byte of the buffer */
__u32 offset;
/*
* each packet in the ringbuffer consists of three things:
* 1. the packet's data payload (no isochronous header)
* 2. a struct hpsb_iso_packet_info
* 3. some empty space before the next packet
*
* packets are separated by hpsb_iso.buf_stride bytes
* an even number of packets fit on one page
* no packet can be larger than one page
*/
/* length of the data payload, in bytes (not including the isochronous header) */
__u16 len;
/* (recv only) the cycle number (mod 8000) on which the packet was received */
__u16 cycle;
/* (recv only) channel on which the packet was received */
__u8 channel;
/* 2-bit 'tag' and 4-bit 'sy' fields of the isochronous header */
__u8 tag;
__u8 sy;
};
enum hpsb_iso_type { HPSB_ISO_RECV = 0, HPSB_ISO_XMIT = 1 };
......@@ -45,48 +57,49 @@ struct hpsb_iso {
/* pointer to low-level driver and its private data */
struct hpsb_host *host;
void *hostdata;
/* function to be called (from interrupt context) when the iso status changes */
/* a function to be called (from interrupt context) after
outgoing packets have been sent, or incoming packets have
arrived */
void (*callback)(struct hpsb_iso*);
/* wait for buffer space */
wait_queue_head_t waitq;
int speed; /* SPEED_100, 200, or 400 */
int channel;
int channel; /* -1 if multichannel */
/* greatest # of packets between interrupts - controls
the maximum latency of the buffer */
int irq_interval;
/* the packet ringbuffer */
struct dma_region buf;
/* the buffer for packet data payloads */
struct dma_region data_buf;
/* size of data_buf, in bytes (always a multiple of PAGE_SIZE) */
unsigned int buf_size;
/* ringbuffer of packet descriptors in regular kernel memory */
struct hpsb_iso_packet_info *infos;
/* # of packets in the ringbuffer */
unsigned int buf_packets;
/* offset between successive packets, in bytes -
you can assume that this is a power of 2,
and less than or equal to the page size */
int buf_stride;
/* largest possible packet size, in bytes */
unsigned int max_packet_size;
/* offset relative to (buf.kvirt + N*buf_stride) at which
the data payload begins for packet N */
int packet_data_offset;
/* offset relative to (buf.kvirt + N*buf_stride) at which the
struct hpsb_iso_packet_info is stored for packet N */
int packet_info_offset;
/* protects packet cursors */
spinlock_t lock;
/* the index of the next packet that will be produced
or consumed by the user */
int first_packet;
/* number of packets owned by the low-level driver and
queued for transmission or reception.
this is related to the number of packets available
to the user process: n_ready = buf_packets - n_dma_packets */
atomic_t n_dma_packets;
/* the index of the next packet that will be transmitted
or received by the 1394 hardware */
int pkt_dma;
/* how many packets, starting at first_packet:
(transmit) are ready to be filled with data
(receive) contain received data */
int n_ready_packets;
/* how many times the buffer has overflowed or underflowed */
atomic_t overflows;
......@@ -99,8 +112,12 @@ struct hpsb_iso {
/* # of packets left to prebuffer (xmit only) */
int prebuffer;
/* starting cycle (xmit only) */
/* starting cycle for DMA (xmit only) */
int start_cycle;
/* cycle at which next packet will be transmitted,
-1 if not known */
int xmit_cycle;
};
/* functions available to high-level drivers (e.g. raw1394) */
......@@ -108,30 +125,40 @@ struct hpsb_iso {
/* allocate the buffer and DMA context */
struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int speed,
int irq_interval,
void (*callback)(struct hpsb_iso*));
/* note: if channel = -1, multi-channel receive is enabled */
struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int irq_interval,
void (*callback)(struct hpsb_iso*));
/* multi-channel only */
int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel);
int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);
/* start/stop DMA */
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle, int prebuffer);
int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle);
int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle, int tag_mask, int sync);
void hpsb_iso_stop(struct hpsb_iso *iso);
/* deallocate buffer and DMA context */
void hpsb_iso_shutdown(struct hpsb_iso *iso);
/* N packets have been written to the buffer; queue them for transmission */
int hpsb_iso_xmit_queue_packets(struct hpsb_iso *xmit, unsigned int n_packets);
/* queue a packet for transmission. 'offset' is relative to the beginning of the
DMA buffer, where the packet's data payload should already have been placed */
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy);
/* wait until all queued packets have been transmitted to the bus */
int hpsb_iso_xmit_sync(struct hpsb_iso *iso);
/* N packets have been read out of the buffer, re-use the buffer space */
int hpsb_iso_recv_release_packets(struct hpsb_iso *recv, unsigned int n_packets);
......@@ -139,10 +166,19 @@ int hpsb_iso_recv_release_packets(struct hpsb_iso *recv, unsigned int n_packets
/* returns # of packets ready to send or receive */
int hpsb_iso_n_ready(struct hpsb_iso *iso);
/* returns a pointer to the payload of packet 'pkt' */
unsigned char* hpsb_iso_packet_data(struct hpsb_iso *iso, unsigned int pkt);
/* the following are callbacks available to low-level drivers */
/* call after a packet has been transmitted to the bus (interrupt context is OK)
'cycle' is the _exact_ cycle the packet was sent on
'error' should be non-zero if some sort of error occurred when sending the packet
*/
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);
/* call after a packet has been received (interrupt context OK) */
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
u16 cycle, u8 channel, u8 tag, u8 sy);
/* returns a pointer to the info struct of packet 'pkt' */
struct hpsb_iso_packet_info* hpsb_iso_packet_info(struct hpsb_iso *iso, unsigned int pkt);
/* call to wake waiting processes after buffer space has opened up. */
void hpsb_iso_wake(struct hpsb_iso *iso);
#endif /* IEEE1394_ISO_H */
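A minimal sketch of the transmit half of this API from a high-level driver's point of view (the channel, sizes, and SPEED_400 are illustrative values; error handling is abbreviated):

static void iso_xmit_example(struct hpsb_host *host)
{
	struct hpsb_iso *iso;

	iso = hpsb_iso_xmit_init(host,
				 64 * 1024,  /* data_buf_size */
				 64,         /* buf_packets */
				 1024,       /* max_packet_size */
				 5,          /* channel */
				 SPEED_400,
				 16,         /* irq_interval */
				 NULL);      /* no callback */
	if (!iso)
		return;

	/* the payload must already be in the DMA buffer (e.g. written
	 * through a user-space mmap); queue it by offset and length */
	if (hpsb_iso_xmit_queue_packet(iso, 0 /*offset*/, 480 /*len*/,
				       0 /*tag*/, 0 /*sy*/) == 0) {
		hpsb_iso_xmit_start(iso, -1 /*any cycle*/, 1 /*prebuffer*/);
		hpsb_iso_xmit_sync(iso);  /* wait for the buffer to drain */
	}

	hpsb_iso_stop(iso);
	hpsb_iso_shutdown(iso);
}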
......@@ -1089,8 +1089,7 @@ static int read_businfo_block(struct hpsb_host *host, nodeid_t nodeid, unsigned
static void nodemgr_remove_node(struct node_entry *ne)
{
HPSB_DEBUG("%s removed: Node[" NODE_BUS_FMT "] GUID[%016Lx] [%s]",
(ne->host->node_id == ne->nodeid) ? "Host" : "Device",
HPSB_DEBUG("Device removed: Node[" NODE_BUS_FMT "] GUID[%016Lx] [%s]",
NODE_BUS_ARGS(ne->nodeid), (unsigned long long)ne->guid,
ne->vendor_name ?: "Unknown");
......
......@@ -160,7 +160,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
static char version[] __devinitdata =
"$Rev: 675 $ Ben Collins <bcollins@debian.org>";
"$Rev: 762 $ Ben Collins <bcollins@debian.org>";
/* Module Parameters */
MODULE_PARM(phys_dma,"i");
......@@ -170,6 +170,15 @@ static int phys_dma = 1;
static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);
static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
enum context_type type, int ctx, int num_desc,
int buf_size, int split_buf_size, int context_base);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
enum context_type type, int ctx, int num_desc,
int context_base);
static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
......@@ -311,7 +320,7 @@ static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
/* SelfID Ok, reset error counter. */
ohci->self_id_errors = 0;
size = ((self_id_count & 0x00001FFC) >> 2) - 1;
q++;
......@@ -373,12 +382,12 @@ static int run_context(struct ti_ohci *ohci, int reg, char *msg)
"Running dma failed because Node ID == 63");
return -1;
}
/* Run the dma context */
reg_write(ohci, reg, 0x8000);
if (msg) PRINT(KERN_DEBUG, ohci->id, "%s", msg);
return 0;
}
......@@ -415,10 +424,28 @@ static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
d->buf_ind = 0;
d->buf_offset = 0;
if(d->type == DMA_CTX_ISO) {
/* Clear contextControl */
reg_write(ohci, d->ctrlClear, 0xffffffff);
/* Set bufferFill, isochHeader, multichannel for IR context */
reg_write(ohci, d->ctrlSet, 0xd0000000);
/* Set the context match register to match on all tags */
reg_write(ohci, d->ctxtMatch, 0xf0000000);
/* Clear the multi channel mask high and low registers */
reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
/* Set up isoRecvIntMask to generate interrupts */
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
}
/* Tell the controller where the first AR program is */
reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
/* Run AR context */
/* Run context */
reg_write(ohci, d->ctrlSet, 0x00008000);
DBGMSG(ohci->id, "Receive DMA ctx=%d initialized", d->ctx);
......@@ -439,6 +466,11 @@ static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
INIT_LIST_HEAD(&d->fifo_list);
INIT_LIST_HEAD(&d->pending_list);
if(d->type == DMA_CTX_ISO) {
/* enable interrupts */
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
}
DBGMSG(ohci->id, "Transmit DMA ctx=%d initialized", d->ctx);
}
......@@ -450,7 +482,7 @@ static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
reg_write(ohci, reg, 0xffffffff);
tmp = reg_read(ohci, reg);
DBGMSG(ohci->id,"Iso contexts reg: %08x implemented: %08x", reg, tmp);
/* Count the number of contexts */
......@@ -515,12 +547,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
/* Don't accept phy packets into AR request context */
reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
/* Set bufferFill, isochHeader, multichannel for IR context */
reg_write(ohci, OHCI1394_IsoRcvContextControlSet, 0xd0000000);
/* Set the context match register to match on all tags */
reg_write(ohci, OHCI1394_IsoRcvContextMatch, 0xf0000000);
/* Clear the interrupt mask */
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
......@@ -529,10 +555,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
/* Clear the multi channel mask high and low registers */
reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
/* Initialize AR dma */
initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
......@@ -541,19 +563,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
initialize_dma_trm_ctx(&ohci->at_req_context);
initialize_dma_trm_ctx(&ohci->at_resp_context);
/* Initialize IR dma */
initialize_dma_rcv_ctx(&ohci->ir_context, 1);
/* Initialize IT dma */
initialize_dma_trm_ctx(&ohci->it_context);
/* Set up isoRecvIntMask to generate interrupts for context 0
(thanks to Michael Greger for seeing that I forgot this) */
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 0x00000001);
/* Set up isoXmitIntMask to generate interrupts for context 0 */
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 0x00000001);
/*
* Accept AT requests from all nodes. This probably
* will have to be controlled from the subsystem
......@@ -598,7 +607,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
}
/*
* Insert a packet in the AT DMA fifo and generate the DMA prg
* Insert a packet in the DMA fifo and generate the DMA prg
* FIXME: rewrite the program in order to accept packets crossing
* page boundaries.
* check also that a single dma descriptor doesn't cross a
......@@ -757,8 +766,8 @@ static void insert_packet(struct ti_ohci *ohci,
}
/*
* This function fills the AT FIFO with the (eventual) pending packets
* and runs or wakes up the AT DMA prg if necessary.
* This function fills the FIFO with the (eventual) pending packets
* and runs or wakes up the DMA prg if necessary.
*
* The function MUST be called with the d->lock held.
*/
......@@ -774,7 +783,7 @@ static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
idx = d->prg_ind;
z = (p->data_size) ? 3 : 2;
/* insert the packets into the at dma fifo */
/* insert the packets into the dma fifo */
while (d->free_prgs > 0 && !list_empty(&d->pending_list)) {
struct hpsb_packet *p = driver_packet(d->pending_list.next);
list_del(&p->driver_list);
......@@ -796,13 +805,15 @@ static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
/* Wake up the dma context if necessary */
if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
DBGMSG(ohci->id,"Waking transmit DMA ctx=%d",d->ctx);
reg_write(ohci, d->ctrlSet, 0x1000);
}
/* do this always, to avoid race condition */
reg_write(ohci, d->ctrlSet, 0x1000);
}
return 1;
}
/* Transmission of an async packet */
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
struct ti_ohci *ohci = host->hostdata;
......@@ -819,9 +830,33 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
/* Decide whether we have an iso, a request, or a response packet */
if (packet->type == hpsb_raw)
d = &ohci->at_req_context;
else if (packet->tcode == TCODE_ISO_DATA)
d = &ohci->it_context;
else if (packet->tcode & 0x02)
else if (packet->tcode == TCODE_ISO_DATA) {
/* The legacy IT DMA context is initialized on first
* use. However, the alloc cannot be run from
* interrupt context, so we bail out if that is the
* case. I don't see anyone sending ISO packets from
* interrupt context anyway... */
if(ohci->it_legacy_context.ohci == NULL) {
if(in_interrupt()) {
PRINT(KERN_ERR, ohci->id,
"legacy IT context cannot be initialized during interrupt");
return 0;
}
if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
DMA_CTX_ISO, 0, IT_NUM_DESC,
OHCI1394_IsoXmitContextBase) < 0) {
PRINT(KERN_ERR, ohci->id,
"error initializing legacy IT context");
return 0;
}
initialize_dma_trm_ctx(&ohci->it_legacy_context);
}
d = &ohci->it_legacy_context;
} else if (packet->tcode & 0x02)
d = &ohci->at_resp_context;
else
d = &ohci->at_req_context;
......@@ -829,7 +864,7 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
spin_lock_irqsave(&d->lock,flags);
list_add_tail(&packet->driver_list, &d->pending_list);
dma_trm_flush(ohci, d);
spin_unlock_irqrestore(&d->lock,flags);
......@@ -898,11 +933,11 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
case GET_CYCLE_COUNTER:
retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
break;
case SET_CYCLE_COUNTER:
reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
break;
case SET_BUS_ID:
PRINT(KERN_ERR, ohci->id, "devctl command SET_BUS_ID err");
break;
......@@ -932,12 +967,13 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
break;
case MODIFY_USAGE:
if (arg) {
MOD_INC_USE_COUNT;
} else {
MOD_DEC_USE_COUNT;
}
retval = 1;
if (arg) {
if (try_module_get(THIS_MODULE))
retval = 1;
} else {
module_put(THIS_MODULE);
retval = 1;
}
break;
case ISO_LISTEN_CHANNEL:
......@@ -962,8 +998,26 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
return -EFAULT;
}
/* activate the legacy IR context */
if(ohci->ir_legacy_context.ohci == NULL) {
if(alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
DMA_CTX_ISO, 0, IR_NUM_DESC,
IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
OHCI1394_IsoRcvContextBase) < 0) {
PRINT(KERN_ERR, ohci->id,
"%s: failed to allocate an IR context",
__FUNCTION__);
return -ENOMEM;
}
ohci->ir_legacy_channels = 0;
initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
DBGMSG(ohci->id, "ISO receive legacy context activated");
}
ohci->ISO_channel_usage |= mask;
ohci->ir_legacy_channels |= mask;
if (arg>31)
reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
......@@ -1000,6 +1054,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
}
ohci->ISO_channel_usage &= ~mask;
ohci->ir_legacy_channels &= ~mask;
if (arg>31)
reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
......@@ -1010,6 +1065,11 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
DBGMSG(ohci->id, "Listening disabled on channel %d", arg);
if(ohci->ir_legacy_channels == 0) {
free_dma_rcv_ctx(&ohci->ir_legacy_context);
DBGMSG(ohci->id, "ISO receive legacy context deactivated");
}
break;
}
default:
......@@ -1024,61 +1084,159 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
* rawiso ISO reception *
***********************************/
/*
We use either buffer-fill or packet-per-buffer DMA mode. The DMA
buffer is split into "blocks" (regions described by one DMA
descriptor). Each block must be one page or less in size, and
must not cross a page boundary.
There is one little wrinkle with buffer-fill mode: a packet that
starts in the final block may wrap around into the first block. But
the user API expects all packets to be contiguous. Our solution is
to keep the very last page of the DMA buffer in reserve - if a
packet spans the gap, we copy its tail into this page.
*/
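A sketch of the tail-copy fixup described above (an assumed helper, not necessarily how the driver implements it): the circular region excludes the reserved final page, and a packet that wraps past the end of the ring has its wrapped bytes copied into that reserve so the caller sees one contiguous packet.

static void fixup_wrapped_packet(struct hpsb_iso *iso,
				 unsigned int offset, unsigned int len)
{
	/* the ring is everything except the one-page reserve */
	unsigned int ring_size = iso->buf_size - PAGE_SIZE;

	if (offset + len > ring_size) {
		/* 'tail' bytes wrapped around to the start of the
		 * buffer; copy them just past the ring, into the
		 * reserved page, to restore contiguity */
		unsigned int tail = offset + len - ring_size;
		memcpy(iso->data_buf.kvirt + ring_size,
		       iso->data_buf.kvirt, tail);
	}
}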
struct ohci_iso_recv {
struct ti_ohci *ohci;
/* memory and PCI mapping for the DMA descriptors */
struct dma_prog_region prog;
struct ohci1394_iso_tasklet task;
int task_active;
/* index of next packet to arrive */
int pkt_dma;
enum { BUFFER_FILL_MODE,
PACKET_PER_BUFFER_MODE } dma_mode;
/* memory and PCI mapping for the DMA descriptors */
struct dma_prog_region prog;
struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
/* how many DMA blocks fit in the buffer */
unsigned int nblocks;
/* stride of DMA blocks */
unsigned int buf_stride;
/* number of blocks to batch between interrupts */
int block_irq_interval;
/* block that DMA will finish next */
int block_dma;
/* (buffer-fill only) block that the reader will release next */
int block_reader;
/* (buffer-fill only) bytes of buffer the reader has released,
less than one block */
int released_bytes;
/* (buffer-fill only) buffer offset at which the next packet will appear */
int dma_offset;
/* OHCI DMA context control registers */
u32 ContextControlSet;
u32 ContextControlClear;
u32 CommandPtr;
u32 ContextMatch;
};
static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_bufferfill_task(unsigned long data);
static void ohci_iso_recv_packetperbuf_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle);
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
struct ti_ohci *ohci = iso->host->hostdata;
struct ohci_iso_recv *recv;
unsigned int prog_size;
int ctx;
int ret = -ENOMEM;
recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
if(!recv)
return -ENOMEM;
iso->hostdata = recv;
recv->ohci = iso->host->hostdata;
recv->ohci = ohci;
recv->task_active = 0;
recv->pkt_dma = iso->first_packet;
dma_prog_region_init(&recv->prog);
recv->block = NULL;
/* use buffer-fill mode, unless irq_interval is 1
(note: multichannel requires buffer-fill) */
/* size of DMA program = one INPUT_LAST per packet in the buffer */
prog_size = sizeof(struct dma_cmd) * iso->buf_packets;
if(iso->irq_interval == 1 && iso->channel != -1) {
recv->dma_mode = PACKET_PER_BUFFER_MODE;
} else {
recv->dma_mode = BUFFER_FILL_MODE;
}
/* set nblocks, buf_stride, block_irq_interval */
if(recv->dma_mode == BUFFER_FILL_MODE) {
recv->buf_stride = PAGE_SIZE;
/* one block per page of data in the DMA buffer, minus the final guard page */
recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
if(recv->nblocks < 3) {
DBGMSG(ohci->id, "ohci_iso_recv_init: DMA buffer too small");
goto err;
}
/* iso->irq_interval is in packets - translate that to blocks */
/* (err, sort of... 1 is always the safest value) */
recv->block_irq_interval = iso->irq_interval / recv->nblocks;
if(recv->block_irq_interval < 1)
recv->block_irq_interval = 1;
else if(recv->block_irq_interval*4 > recv->nblocks)
recv->block_irq_interval = recv->nblocks/4;
} else {
int max_packet_size;
recv->nblocks = iso->buf_packets;
recv->block_irq_interval = 1;
/* choose a buffer stride */
/* must be a power of 2, and <= PAGE_SIZE */
max_packet_size = iso->buf_size / iso->buf_packets;
for(recv->buf_stride = 8; recv->buf_stride < max_packet_size;
recv->buf_stride *= 2);
if(dma_prog_region_alloc(&recv->prog, prog_size, recv->ohci->dev))
if(recv->buf_stride*iso->buf_packets > iso->buf_size ||
recv->buf_stride > PAGE_SIZE) {
/* this shouldn't happen, but anyway... */
DBGMSG(ohci->id, "ohci_iso_recv_init: problem choosing a buffer stride");
goto err;
}
}
recv->block_reader = 0;
recv->released_bytes = 0;
recv->block_dma = 0;
recv->dma_offset = 0;
/* size of DMA program = one descriptor per block */
if (dma_prog_region_alloc(&recv->prog,
sizeof(struct dma_cmd) * recv->nblocks,
recv->ohci->dev))
goto err;
recv->block = (struct dma_cmd*) recv->prog.kvirt;
ohci1394_init_iso_tasklet(&recv->task,
iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
OHCI_ISO_RECEIVE,
recv->dma_mode == BUFFER_FILL_MODE ?
ohci_iso_recv_bufferfill_task :
ohci_iso_recv_packetperbuf_task,
(unsigned long) iso);
if(ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
goto err;
recv->task_active = 1;
/* recv context registers are spaced 32 bytes apart */
......@@ -1088,12 +1246,22 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
if(iso->channel == -1) {
/* clear multi-channel selection mask */
reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
}
/* write the DMA program */
ohci_iso_recv_program(iso);
DBGMSG(ohci->id, "ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
" (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
recv->dma_mode == BUFFER_FILL_MODE ?
"buffer-fill" : "packet-per-buffer",
iso->buf_size/PAGE_SIZE, iso->buf_size,
recv->nblocks, recv->buf_stride, recv->block_irq_interval);
return 0;
err:
......@@ -1104,7 +1272,10 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
struct ohci_iso_recv *recv = iso->hostdata;
/* disable interrupts */
reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
/* halt DMA */
ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
......@@ -1114,100 +1285,139 @@ static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
struct ohci_iso_recv *recv = iso->hostdata;
if(recv->task_active) {
ohci_iso_recv_stop(iso);
ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
recv->task_active = 0;
}
dma_prog_region_free(&recv->prog);
kfree(recv);
iso->hostdata = NULL;
}
/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
struct ohci_iso_recv *recv = iso->hostdata;
int blk;
/* address of 'branch' field in previous DMA descriptor */
u32 *prev_branch = NULL;
for (blk = 0; blk < recv->nblocks; blk++) {
/* the DMA descriptor */
struct dma_cmd *cmd = &recv->block[blk];
/* offset of the DMA descriptor relative to the DMA prog buffer */
unsigned long prog_offset = blk * sizeof(struct dma_cmd);
/* offset of this packet's data within the DMA buffer */
unsigned long buf_offset = blk * recv->buf_stride;
if (recv->dma_mode == BUFFER_FILL_MODE) {
cmd->control = 2 << 28; /* INPUT_MORE */
} else {
cmd->control = 3 << 28; /* INPUT_LAST */
}
cmd->control |= 8 << 24; /* s = 1, update xferStatus and resCount */
/* interrupt on last block, and at intervals */
if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
cmd->control |= 3 << 20; /* want interrupt */
}
cmd->control |= 3 << 18; /* enable branch to address */
cmd->control |= recv->buf_stride;
cmd->address = dma_region_offset_to_bus(&iso->data_buf, buf_offset);
cmd->branchAddress = 0; /* filled in on next loop */
cmd->status = recv->buf_stride;
/* link the previous descriptor to this one */
if (prev_branch) {
*prev_branch = dma_prog_region_offset_to_bus(&recv->prog, prog_offset);
*prev_branch |= 1; /* set Z=1 */
}
prev_branch = &cmd->branchAddress;
}
/* the final descriptor's branch address and Z should be left at 0 */
}
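/* For reference, the control word assembled in the loop above could be
 * built by a helper like the following; this is an illustrative sketch only
 * (the function name is ours, not part of the driver): */
static inline u32 ir_input_descriptor_control(int last, int want_irq, u16 req_count)
{
	u32 c = (last ? 3 : 2) << 28;	/* INPUT_LAST vs. INPUT_MORE */
	c |= 8 << 24;			/* s=1: write back xferStatus and resCount */
	if (want_irq)
		c |= 3 << 20;		/* interrupt when this block completes */
	c |= 3 << 18;			/* always branch to branchAddress */
	c |= req_count;			/* bytes available in this block */
	return c;
}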
/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
struct ohci_iso_recv *recv = iso->hostdata;
int reg, i;
if(channel < 32) {
reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
i = channel;
} else {
reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
i = channel - 32;
}
reg_write(recv->ohci, reg, (1 << i));
/* issue a dummy read to force all PCI writes to be posted immediately */
mb();
reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
struct ohci_iso_recv *recv = iso->hostdata;
int i;
for(i = 0; i < 64; i++) {
if(mask & (1ULL << i)) {
if(i < 32)
reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
else
reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
} else {
if(i < 32)
reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
else
reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
}
}
/* issue a dummy read to force all PCI writes to be posted immediately */
mb();
reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
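/* Illustrative use of the mask interface (not from this patch): listen on
 * channels 5 and 40 only:
 *
 *	ohci_iso_recv_set_channel_mask(iso, (1ULL << 5) | (1ULL << 40));
 */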
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
struct ohci_iso_recv *recv = iso->hostdata;
u32 command, contextMatch;
reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
wmb();
/* always keep ISO headers */
command = (1 << 30);
if(recv->dma_mode == BUFFER_FILL_MODE)
command |= (1 << 31);
reg_write(recv->ohci, recv->ContextControlSet, command);
/* match on specified tags */
contextMatch = tag_mask << 28;
if(iso->channel == -1) {
/* enable multichannel reception */
reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
} else {
/* listen on channel */
contextMatch |= iso->channel;
}
if(cycle != -1) {
u32 seconds;
......@@ -1229,15 +1439,29 @@ static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle)
contextMatch |= cycle << 12;
}
if(sync != -1) {
/* set sync flag on first DMA descriptor */
struct dma_cmd *cmd = &recv->block[recv->block_dma];
cmd->control |= DMA_CTL_WAIT;
/* match sync field */
contextMatch |= (sync&0xf)<<8;
}
reg_write(recv->ohci, recv->ContextMatch, contextMatch);
/* address of first descriptor block */
command = dma_prog_region_offset_to_bus(&recv->prog,
recv->block_dma * sizeof(struct dma_cmd));
command |= 1; /* Z=1 */
reg_write(recv->ohci, recv->CommandPtr, command);
/* enable interrupts */
reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
wmb();
/* run */
reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
......@@ -1248,7 +1472,8 @@ static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle)
/* check RUN */
if(!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
PRINT(KERN_ERR, recv->ohci->id,
"Error starting IR DMA (ContextControl 0x%08x)\n",
reg_read(recv->ohci, recv->ContextControlSet));
return -1;
}
......@@ -1256,62 +1481,253 @@ static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle)
return 0;
}
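/* Summary of the contextMatch word programmed above: the tag mask sits in
 * bits 31-28, the cycle-match value is shifted in at bit 12, the sync field
 * occupies bits 11-8, and the channel number the low bits (unused in
 * multichannel mode, where the IRMultiChanMask registers select the
 * channels instead). */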
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
/* re-use the DMA descriptor for the block */
/* by linking the previous descriptor to it */
int next_i = block;
int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
struct dma_cmd *next = &recv->block[next_i];
struct dma_cmd *prev = &recv->block[prev_i];
/* 'next' becomes the new end of the DMA chain,
so disable branch and enable interrupt */
next->branchAddress = 0;
next->control |= 3 << 20;
/* disable interrupt on previous DMA descriptor, except at intervals */
if((prev_i % recv->block_irq_interval) == 0) {
prev->control |= 3 << 20; /* enable interrupt */
} else {
prev->control &= ~(3<<20); /* disable interrupt */
}
prev->branchAddress = dma_prog_region_offset_to_bus(&recv->prog,
sizeof(struct dma_cmd) * next_i) | 1;
wmb();
/* wake up DMA in case it fell asleep */
reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
struct hpsb_iso_packet_info *info)
{
int len;
/* release the memory where the packet was */
len = info->len;
/* add the wasted space for padding to 4 bytes */
if(len % 4)
len += 4 - (len % 4);
/* add 8 bytes for the OHCI DMA data format overhead */
len += 8;
recv->released_bytes += len;
/* have we released enough memory for one block? */
while(recv->released_bytes > recv->buf_stride) {
ohci_iso_recv_release_block(recv, recv->block_reader);
recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
recv->released_bytes -= recv->buf_stride;
}
}
static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
struct ohci_iso_recv *recv = iso->hostdata;
if(recv->dma_mode == BUFFER_FILL_MODE) {
ohci_iso_recv_bufferfill_release(recv, info);
} else {
ohci_iso_recv_release_block(recv, info - iso->infos);
}
}
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
int wake = 0;
int runaway = 0;
while(1) {
/* we expect the next parsable packet to begin at recv->dma_offset */
/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
unsigned int offset;
unsigned short len, cycle;
unsigned char channel, tag, sy;
unsigned char *p = iso->data_buf.kvirt;
unsigned int this_block = recv->dma_offset/recv->buf_stride;
/* don't loop indefinitely */
if(runaway++ > 100000) {
atomic_inc(&iso->overflows);
PRINT(KERN_ERR, recv->ohci->id,
"IR DMA error - Runaway during buffer parsing!\n");
break;
}
/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
if(this_block == recv->block_dma)
break;
wake = 1;
/* parse data length, tag, channel, and sy */
/* note: we keep our own local copies of 'len' and 'offset'
so the user can't mess with them by poking in the mmap area */
len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
if(len > 4096) {
PRINT(KERN_ERR, recv->ohci->id,
"IR DMA error - bogus 'len' value %u\n", len);
}
channel = p[recv->dma_offset+1] & 0x3F;
tag = p[recv->dma_offset+1] >> 6;
sy = p[recv->dma_offset+0] & 0xF;
/* advance to data payload */
recv->dma_offset += 4;
/* check for wrap-around */
if(recv->dma_offset >= recv->buf_stride*recv->nblocks) {
recv->dma_offset -= recv->buf_stride*recv->nblocks;
}
/* dma_offset now points to the first byte of the data payload */
offset = recv->dma_offset;
/* advance to xferStatus/timeStamp */
recv->dma_offset += len;
/* payload is padded to 4 bytes */
if(len % 4) {
recv->dma_offset += 4 - (len%4);
}
/* check for wrap-around */
if(recv->dma_offset >= recv->buf_stride*recv->nblocks) {
/* uh oh, the packet data wraps from the last
to the first DMA block - make the packet
contiguous by copying its "tail" into the
guard page */
int guard_off = recv->buf_stride*recv->nblocks;
int tail_len = len - (guard_off - offset);
if(tail_len > 0 && tail_len < recv->buf_stride) {
memcpy(iso->data_buf.kvirt + guard_off,
iso->data_buf.kvirt,
tail_len);
}
recv->dma_offset -= recv->buf_stride*recv->nblocks;
}
/* parse timestamp */
cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
cycle &= 0x1FFF;
/* advance to next packet */
recv->dma_offset += 4;
/* check for wrap-around */
if(recv->dma_offset >= recv->buf_stride*recv->nblocks) {
recv->dma_offset -= recv->buf_stride*recv->nblocks;
}
hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
}
if(wake)
hpsb_iso_wake(iso);
}
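/* Layout of each packet as parsed above (OHCI 1.1, sec 10.6.1.1, stored
 * little-endian in the buffer): a 4-byte header carrying sy in byte 0,
 * tag/channel in byte 1 and the data length in bytes 2-3; the payload,
 * padded to a quadlet boundary; then a 4-byte trailer whose low 13 bits are
 * the timestamp cycle. */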
static void ohci_iso_recv_bufferfill_task(unsigned long data)
{
struct hpsb_iso *iso = (struct hpsb_iso*) data;
struct ohci_iso_recv *recv = iso->hostdata;
int loop;
/* loop over all blocks */
for(loop = 0; loop < recv->nblocks; loop++) {
/* check block_dma to see if it's done */
struct dma_cmd *im = &recv->block[recv->block_dma];
/* check the DMA descriptor for new writes to xferStatus */
u16 xferstatus = im->status >> 16;
/* rescount is the number of bytes *remaining to be written* in the block */
u16 rescount = im->status & 0xFFFF;
unsigned char event = xferstatus & 0x1F;
if(!event) {
/* nothing has happened to this block yet */
break;
}
if(event != 0x11) {
atomic_inc(&iso->overflows);
PRINT(KERN_ERR, recv->ohci->id,
"IR DMA error - OHCI error code 0x%02x\n", event);
}
if(rescount != 0) {
/* the card is still writing to this block;
we can't touch it until it's done */
break;
}
/* OK, the block is finished... */
/* sync our view of the block */
dma_region_sync(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
/* reset the DMA descriptor */
im->status = recv->buf_stride;
/* advance block_dma */
recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
if((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
atomic_inc(&iso->overflows);
DBGMSG(recv->ohci->id, "ISO reception overflow - "
"ran out of DMA blocks");
}
}
/* parse any packets that have arrived */
ohci_iso_recv_bufferfill_parse(iso, recv);
}
static void ohci_iso_recv_packetperbuf_task(unsigned long data)
{
struct hpsb_iso *iso = (struct hpsb_iso*) data;
struct ohci_iso_recv *recv = iso->hostdata;
int count;
int wake = 0;
/* loop over the entire buffer */
for(count = 0; count < recv->nblocks; count++) {
u32 packet_len = 0;
/* pointer to the DMA descriptor */
struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
/* check the DMA descriptor for new writes to xferStatus */
u16 xferstatus = il->status >> 16;
......@@ -1329,7 +1745,7 @@ static void ohci_iso_recv_task(unsigned long data)
/* rescount is the number of bytes *remaining* in the packet buffer,
after the packet was written */
packet_len = recv->buf_stride - rescount;
} else if(event == 0x02) {
PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - packet too long for buffer\n");
......@@ -1338,42 +1754,42 @@ static void ohci_iso_recv_task(unsigned long data)
}
/* sync our view of the buffer */
dma_region_sync(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
/* record the per-packet info */
{
/* iso header is 8 bytes ahead of the data payload */
unsigned char *hdr;
unsigned int offset;
unsigned short cycle;
unsigned char channel, tag, sy;
offset = iso->pkt_dma * recv->buf_stride;
hdr = iso->data_buf.kvirt + offset;
/* skip iso header */
offset += 8;
packet_len -= 8;
cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
channel = hdr[5] & 0x3F;
tag = hdr[5] >> 6;
sy = hdr[4] & 0xF;
hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
}
/* reset the DMA descriptor */
il->status = recv->buf_stride;
wake = 1;
recv->block_dma = iso->pkt_dma;
}
out:
if(wake)
hpsb_iso_wake(iso);
}
......@@ -1386,7 +1802,6 @@ struct ohci_iso_xmit {
struct dma_prog_region prog;
struct ohci1394_iso_tasklet task;
int task_active;
u32 ContextControlSet;
u32 ContextControlClear;
......@@ -1415,7 +1830,7 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
unsigned int prog_size;
int ctx;
int ret = -ENOMEM;
xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
if(!xmit)
return -ENOMEM;
......@@ -1423,21 +1838,20 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
iso->hostdata = xmit;
xmit->ohci = iso->host->hostdata;
xmit->task_active = 0;
dma_prog_region_init(&xmit->prog);
prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
if(dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
goto err;
ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
ohci_iso_xmit_task, (unsigned long) iso);
if(ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
goto err;
xmit->task_active = 1;
/* xmit context registers are spaced 16 bytes apart */
......@@ -1446,11 +1860,6 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
return 0;
err:
......@@ -1461,7 +1870,17 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
struct ohci_iso_xmit *xmit = iso->hostdata;
/* disable interrupts */
reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
/* halt DMA */
if(ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
/* XXX the DMA context will lock up if you try to send too much data! */
PRINT(KERN_ERR, xmit->ohci->id,
"you probably exceeded the OHCI card's bandwidth limit - "
"reload the module and reduce xmit bandwidth");
}
}
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
......@@ -1469,16 +1888,11 @@ static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
struct ohci_iso_xmit *xmit = iso->hostdata;
if(xmit->task_active) {
ohci_iso_xmit_stop(iso);
ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
xmit->task_active = 0;
}
dma_prog_region_free(&xmit->prog);
kfree(xmit);
iso->hostdata = NULL;
......@@ -1493,9 +1907,10 @@ static void ohci_iso_xmit_task(unsigned long data)
/* check the whole buffer if necessary, starting at pkt_dma */
for(count = 0; count < iso->buf_packets; count++) {
int cycle;
/* DMA descriptor */
struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
/* check for new writes to xferStatus */
u16 xferstatus = cmd->output_last.status >> 16;
......@@ -1503,70 +1918,68 @@ static void ohci_iso_xmit_task(unsigned long data)
if(!event) {
/* packet hasn't been sent yet; we are done for now */
break;
}
if(event != 0x11)
PRINT(KERN_ERR, xmit->ohci->id,
"IT DMA error - OHCI error code 0x%02x\n", event);
/* at least one packet went out, so wake up the writer */
wake = 1;
/* parse cycle */
cycle = cmd->output_last.status & 0x1FFF;
/* tell the subsystem the packet has gone out */
hpsb_iso_packet_sent(iso, cycle, event != 0x11);
/* reset the DMA descriptor for next time */
cmd->output_last.status = 0;
}
if(wake)
hpsb_iso_wake(iso);
}
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
struct ohci_iso_xmit *xmit = iso->hostdata;
int next_i, prev_i;
struct iso_xmit_cmd *next, *prev;
unsigned int offset;
unsigned short len;
unsigned char tag, sy;
/* check that the packet doesn't cross a page boundary
(we could allow this if we added OUTPUT_MORE descriptor support) */
if(cross_bound(info->offset, info->len)) {
PRINT(KERN_ERR, xmit->ohci->id,
"rawiso xmit: packet %u crosses a page boundary",
iso->first_packet);
return -EINVAL;
}
offset = info->offset;
len = info->len;
tag = info->tag;
sy = info->sy;
/* sync up the card's view of the buffer */
dma_region_sync(&iso->data_buf, offset, len);
/* append first_packet to the DMA chain */
/* by linking the previous descriptor to it */
/* (next will become the new end of the DMA chain) */
next_i = iso->first_packet;
prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
memset(next, 0, sizeof(struct iso_xmit_cmd));
next->output_more_immediate.control = 0x02000008;
......@@ -1574,29 +1987,28 @@ static void ohci_iso_xmit_queue_one(struct hpsb_iso *iso)
/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
/* tcode = 0xA, and sy */
next->iso_hdr[0] = 0xA0 | (sy & 0xF);
/* tag and channel number */
next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
/* transmission speed */
next->iso_hdr[2] = iso->speed & 0x7;
/* payload size */
next->iso_hdr[6] = len & 0xFF;
next->iso_hdr[7] = len >> 8;
/* set up the OUTPUT_LAST */
next->output_last.control = 1 << 28;
next->output_last.control |= 1 << 27; /* update timeStamp */
next->output_last.control |= 3 << 20; /* want interrupt */
next->output_last.control |= 3 << 18; /* enable branch */
next->output_last.control |= len;
/* payload bus address */
next->output_last.address = dma_region_offset_to_bus(&iso->data_buf, offset);
/* leave branchAddress at zero for now */
/* re-write the previous DMA descriptor to chain to this one */
......@@ -1604,7 +2016,7 @@ static void ohci_iso_xmit_queue_one(struct hpsb_iso *iso)
/* set prev branch address to point to next (Z=3) */
prev->output_last.branchAddress =
dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3;
/* disable interrupt, unless required by the IRQ interval */
if(prev_i % iso->irq_interval) {
prev->output_last.control &= ~(3 << 20); /* no interrupt */
......@@ -1622,16 +2034,7 @@ static void ohci_iso_xmit_queue_one(struct hpsb_iso *iso)
mb();
reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
return 0;
}
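/* The 8-byte immediate header assembled above is the iso packet header the
 * link transmits, stored byte-wise: tcode 0xA plus sy in byte 0, tag and
 * channel in byte 1, speed in byte 2, and the 16-bit payload length split
 * across bytes 6 and 7 (low byte first). */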
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
......@@ -1644,7 +2047,7 @@ static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
/* address and length of first descriptor block (Z=3) */
reg_write(xmit->ohci, xmit->CommandPtr,
dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
/* cycle match */
if(cycle != -1) {
......@@ -1662,26 +2065,29 @@ static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
}
/* enable interrupts */
reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
/* run */
reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
mb();
/* wait 100 usec to give the card time to go active */
udelay(100);
/* check the RUN bit */
if(!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
PRINT(KERN_ERR, xmit->ohci->id, "Error starting IT DMA (ContextControl 0x%08x)\n",
reg_read(xmit->ohci, xmit->ContextControlSet));
return -1;
}
return 0;
}
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{
switch(cmd) {
case XMIT_INIT:
return ohci_iso_xmit_init(iso);
......@@ -1691,26 +2097,36 @@ static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, int arg)
ohci_iso_xmit_stop(iso);
return 0;
case XMIT_QUEUE:
return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
case XMIT_SHUTDOWN:
ohci_iso_xmit_shutdown(iso);
return 0;
case RECV_INIT:
return ohci_iso_recv_init(iso);
case RECV_START: {
int *args = (int*) arg;
return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
}
case RECV_STOP:
ohci_iso_recv_stop(iso);
return 0;
case RECV_RELEASE:
ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
return 0;
case RECV_SHUTDOWN:
ohci_iso_recv_shutdown(iso);
return 0;
case RECV_LISTEN_CHANNEL:
ohci_iso_recv_change_channel(iso, arg, 1);
return 0;
case RECV_UNLISTEN_CHANNEL:
ohci_iso_recv_change_channel(iso, arg, 0);
return 0;
case RECV_SET_CHANNEL_MASK:
ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
return 0;
default:
PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
cmd);
......@@ -1780,7 +2196,7 @@ static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
tasklet_schedule(&t->tasklet);
else if (rx_event & mask)
tasklet_schedule(&t->tasklet);
}
......@@ -1839,7 +2255,7 @@ static void ohci_irq_handler(int irq, void *dev_id,
for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
PRINT(KERN_ERR, ohci->id, "Iso Xmit %d Context died: "
"ctrl[%08x] cmdptr[%08x]", ctx,
reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
......@@ -1847,7 +2263,7 @@ static void ohci_irq_handler(int irq, void *dev_id,
for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
PRINT(KERN_ERR, ohci->id, "Iso Recv %d Context died: "
"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
......@@ -2008,8 +2424,8 @@ static void ohci_irq_handler(int irq, void *dev_id,
PRINT(KERN_ERR, ohci->id,
"SelfID received outside of bus reset sequence");
selfid_not_valid:
event &= ~OHCI1394_selfIDComplete;
}
/* Make sure we handle everything, just in case we accidentally
......@@ -2035,8 +2451,10 @@ static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
PRINT(KERN_INFO, ohci->id,
"Waking dma ctx=%d ... processing is probably too slow",
d->ctx);
}
/* do this always, to avoid race condition */
reg_write(ohci, d->ctrlSet, 0x1000);
}
#define cond_le32_to_cpu(data, noswap) \
......@@ -2222,7 +2640,7 @@ static void dma_trm_tasklet (unsigned long data)
struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
struct hpsb_packet *packet;
unsigned long flags;
u32 status, ack;
size_t datasize;
spin_lock_irqsave(&d->lock, flags);
......@@ -2231,25 +2649,16 @@ static void dma_trm_tasklet (unsigned long data)
packet = driver_packet(d->fifo_list.next);
datasize = packet->data_size;
if (datasize && packet->type != hpsb_raw)
status = le32_to_cpu(
d->prg_cpu[d->sent_ind]->end.status) >> 16;
else
status = le32_to_cpu(
d->prg_cpu[d->sent_ind]->begin.status) >> 16;
if (status == 0)
/* this packet hasn't been sent yet*/
break;
#ifdef OHCI1394_DEBUG
if (datasize)
DBGMSG(ohci->id,
......@@ -2261,7 +2670,7 @@ static void dma_trm_tasklet (unsigned long data)
>>4)&0xf,
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
>>10)&0x3f,
status&0x1f, (status>>5)&0x3,
le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])
>>16,
d->ctx);
......@@ -2275,13 +2684,64 @@ static void dma_trm_tasklet (unsigned long data)
>>4)&0xf,
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
>>10)&0x3f,
status&0x1f, (status>>5)&0x3,
le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
d->ctx);
#endif
if (status & 0x10) {
ack = status & 0xf;
} else {
switch (status & 0x1f) {
case EVT_NO_STATUS: /* that should never happen */
case EVT_RESERVED_A: /* that should never happen */
case EVT_LONG_PACKET: /* that should never happen */
PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
ack = ACKX_SEND_ERROR;
break;
case EVT_MISSING_ACK:
ack = ACKX_TIMEOUT;
break;
case EVT_UNDERRUN:
ack = ACKX_SEND_ERROR;
break;
case EVT_OVERRUN: /* that should never happen */
PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
ack = ACKX_SEND_ERROR;
break;
case EVT_DESCRIPTOR_READ:
case EVT_DATA_READ:
case EVT_DATA_WRITE:
ack = ACKX_SEND_ERROR;
break;
case EVT_BUS_RESET: /* that should never happen */
PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
ack = ACKX_SEND_ERROR;
break;
case EVT_TIMEOUT:
ack = ACKX_TIMEOUT;
break;
case EVT_TCODE_ERR:
ack = ACKX_SEND_ERROR;
break;
case EVT_RESERVED_B: /* that should never happen */
case EVT_RESERVED_C: /* that should never happen */
PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
ack = ACKX_SEND_ERROR;
break;
case EVT_UNKNOWN:
case EVT_FLUSHED:
ack = ACKX_SEND_ERROR;
break;
default:
PRINT(KERN_ERR, ohci->id, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
ack = ACKX_SEND_ERROR;
BUG();
}
}
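/* Net effect of the decode above: evt_missing_ack and evt_timeout are
 * reported to the core as ACKX_TIMEOUT, every other non-ack status becomes
 * ACKX_SEND_ERROR (with a warning for codes that should never reach the
 * transmit path), and the default arm BUG()s as a backstop. */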
list_del(&packet->driver_list);
hpsb_packet_sent(ohci->host, packet, ack);
if (datasize) {
pci_unmap_single(ohci->dev,
......@@ -2307,13 +2767,18 @@ static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
return;
DBGMSG(d->ohci->id, "Freeing dma_rcv_ctx %d", d->ctx);
if(d->ctrlClear) {
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
if (d->type == DMA_CTX_ISO) {
/* disable interrupts */
reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
} else {
tasklet_kill(&d->task);
}
}
if (d->buf_cpu) {
for (i=0; i<d->num_desc; i++)
......@@ -2358,9 +2823,9 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
d->buf_size = buf_size;
d->split_buf_size = split_buf_size;
d->ctrlSet = 0;
d->ctrlClear = 0;
d->cmdPtr = 0;
d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_KERNEL);
d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
......@@ -2428,17 +2893,30 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
spin_lock_init(&d->lock);
if (type == DMA_CTX_ISO) {
ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
OHCI_ISO_MULTICHANNEL_RECEIVE,
dma_rcv_tasklet, (unsigned long) d);
if (ohci1394_register_iso_tasklet(ohci,
&ohci->ir_legacy_tasklet) < 0) {
PRINT(KERN_ERR, ohci->id, "No IR DMA context available");
free_dma_rcv_ctx(d);
return -EBUSY;
}
/* the IR context can be assigned to any DMA context
* by ohci1394_register_iso_tasklet */
d->ctx = ohci->ir_legacy_tasklet.context;
d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
} else {
d->ctrlSet = context_base + OHCI1394_ContextControlSet;
d->ctrlClear = context_base + OHCI1394_ContextControlClear;
d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
}
return 0;
}
......@@ -2452,12 +2930,18 @@ static void free_dma_trm_ctx(struct dma_trm_ctx *d)
DBGMSG(d->ohci->id, "Freeing dma_trm_ctx %d", d->ctx);
if(d->ctrlClear) {
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
if (d->type == DMA_CTX_ISO) {
/* disable interrupts */
reg_write(d->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << d->ctx);
ohci1394_unregister_iso_tasklet(d->ohci,
&d->ohci->it_legacy_tasklet);
} else {
tasklet_kill(&d->task);
}
}
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
......@@ -2486,9 +2970,9 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
d->type = type;
d->ctx = ctx;
d->num_desc = num_desc;
d->ctrlSet = 0;
d->ctrlClear = 0;
d->cmdPtr = 0;
d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
GFP_KERNEL);
......@@ -2524,17 +3008,26 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
/* initialize tasklet */
if (type == DMA_CTX_ISO) {
ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
dma_trm_tasklet, (unsigned long) d);
if (ohci1394_register_iso_tasklet(ohci,
&ohci->it_legacy_tasklet) < 0) {
PRINT(KERN_ERR, ohci->id, "No IT DMA context available");
free_dma_trm_ctx(d);
return -EBUSY;
}
/* IT can be assigned to any context by register_iso_tasklet */
d->ctx = ohci->it_legacy_tasklet.context;
d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
} else {
d->ctrlSet = context_base + OHCI1394_ContextControlSet;
d->ctrlClear = context_base + OHCI1394_ContextControlClear;
d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
}
return 0;
}
......@@ -2739,7 +3232,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
struct hpsb_host *host;
struct ti_ohci *ohci; /* shortcut to currently handled device */
unsigned long ohci_base;
if (version_printed++ == 0)
PRINT_G(KERN_INFO, "%s", version);
......@@ -2815,7 +3308,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
&ohci->selfid_buf_bus);
OHCI_DMA_ALLOC("consistent selfid_buf");
if (ohci->selfid_buf_cpu == NULL)
FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
......@@ -2888,19 +3381,11 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->ISO_channel_usage = 0;
spin_lock_init(&ohci->IR_channel_lock);
/* the IR DMA context is allocated on-demand; mark it inactive */
ohci->ir_legacy_context.ohci = NULL;
/* same for the IT DMA context */
ohci->it_legacy_context.ohci = NULL;
if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
OHCI1394_DRIVER_NAME, ohci))
......@@ -2945,11 +3430,11 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
free_dma_trm_ctx(&ohci->at_resp_context);
/* Free IR dma */
free_dma_rcv_ctx(&ohci->ir_legacy_context);
/* Free IT dma */
free_dma_trm_ctx(&ohci->it_legacy_context);
case OHCI_INIT_HAVE_SELFID_BUFFER:
pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
ohci->selfid_buf_cpu,
......@@ -3025,7 +3510,7 @@ static struct pci_driver ohci1394_pci_driver = {
/* essentially the only purpose of this code is to allow another
module to hook into ohci's interrupt handler */
int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
{
int i=0;
......@@ -3037,11 +3522,15 @@ void ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
i++;
if (i>5000) {
PRINT(KERN_ERR, ohci->id,
"Runaway loop while stopping context: %s...", msg ? msg : "");
return 1;
}
mb();
udelay(10);
}
if (msg) PRINT(KERN_ERR, ohci->id, "%s: dma prg stopped", msg);
return 0;
}
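/* With the new return value a caller can react to a wedged DMA context,
 * e.g. (illustrative only, not from this patch):
 *
 *	if (ohci1394_stop_context(ohci, d->ctrlClear, "async send"))
 *		;	// active bit never cleared - context is locked up
 */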
void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
......@@ -3067,6 +3556,13 @@ int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
else {
n = ohci->nb_iso_rcv_ctx;
usage = &ohci->ir_ctx_usage;
/* only one receive context can be multichannel (OHCI sec 10.4.1) */
if(tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
if(test_and_set_bit(0, &ohci->ir_multichannel_used)) {
return r;
}
}
}
spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
......@@ -3080,7 +3576,7 @@ int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
}
spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
return r;
}
......@@ -3095,9 +3591,14 @@ void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
if (tasklet->type == OHCI_ISO_TRANSMIT)
clear_bit(tasklet->context, &ohci->it_ctx_usage);
else {
clear_bit(tasklet->context, &ohci->ir_ctx_usage);
if(tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
clear_bit(0, &ohci->ir_multichannel_used);
}
}
list_del(&tasklet->link);
spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
......
......@@ -109,6 +109,7 @@ struct dma_rcv_ctx {
int ctrlClear;
int ctrlSet;
int cmdPtr;
int ctxtMatch;
};
/* DMA transmit context */
......@@ -145,7 +146,8 @@ struct ohci1394_iso_tasklet {
struct tasklet_struct tasklet;
struct list_head link;
int context;
enum { OHCI_ISO_TRANSMIT, OHCI_ISO_RECEIVE,
OHCI_ISO_MULTICHANNEL_RECEIVE } type;
};
struct ti_ohci {
......@@ -170,7 +172,7 @@ struct ti_ohci {
/* dma buffer for self-id packets */
quadlet_t *selfid_buf_cpu;
dma_addr_t selfid_buf_bus;
/* buffer for csr config rom */
quadlet_t *csr_config_rom_cpu;
dma_addr_t csr_config_rom_bus;
......@@ -187,18 +189,28 @@ struct ti_ohci {
struct dma_trm_ctx at_req_context;
/* iso receive */
int nb_iso_rcv_ctx;
unsigned long ir_ctx_usage; /* use test_and_set_bit() for atomicity */
unsigned long ir_multichannel_used; /* ditto */
spinlock_t IR_channel_lock;
/* iso receive (legacy API) */
u64 ir_legacy_channels; /* note: this differs from ISO_channel_usage;
it only accounts for channels listened to
by the legacy API, so that we can know when
it is safe to free the legacy API context */
struct dma_rcv_ctx ir_legacy_context;
struct ohci1394_iso_tasklet ir_legacy_tasklet;
/* iso transmit */
int nb_iso_xmit_ctx;
unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */
/* iso transmit (legacy API) */
struct dma_trm_ctx it_legacy_context;
struct ohci1394_iso_tasklet it_legacy_tasklet;
u64 ISO_channel_usage;
/* IEEE-1394 part follows */
......@@ -213,10 +225,10 @@ struct ti_ohci {
/* Tasklets for iso receive and transmit, used by video1394,
* amdtp and dv1394 */
struct list_head iso_tasklet_list;
spinlock_t iso_tasklet_list_lock;
/* Swap the selfid buffer? */
unsigned int selfid_swap:1;
/* Some Apple chipset seem to swap incoming headers for us */
......@@ -385,7 +397,7 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
/* OHCI evt_* error types, table 3-2 of the OHCI 1.1 spec. */
#define EVT_NO_STATUS 0x0 /* No event status */
#define EVT_RESERVED_A 0x1 /* Reserved, not used !!! */
#define EVT_LONG_PACKET 0x2 /* The recv data was longer than the buf */
#define EVT_MISSING_ACK 0x3 /* A subaction gap was detected before an ack
arrived, or recv'd ack had a parity error */
......@@ -404,6 +416,17 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
16-bit host memory write */
#define EVT_BUS_RESET 0x9 /* Identifies a PHY packet in the recv buffer as
being a synthesized bus reset packet */
#define EVT_TIMEOUT 0xa /* Indicates that the asynchronous transmit response
packet expired and was not transmitted, or that an
IT DMA context experienced a skip processing overflow */
#define EVT_TCODE_ERR 0xb /* A bad tCode is associated with this packet.
The packet was flushed */
#define EVT_RESERVED_B 0xc /* Reserved, not used !!! */
#define EVT_RESERVED_C 0xd /* Reserved, not used !!! */
#define EVT_UNKNOWN 0xe /* An error condition has occurred that cannot be
represented by any other event codes defined herein. */
#define EVT_FLUSHED 0xf /* Sent by the link side of output FIFO when asynchronous
packets are being flushed due to a bus reset. */
#define OHCI1394_TCODE_PHY 0xE
......@@ -416,8 +439,8 @@ int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
struct ohci1394_iso_tasklet *tasklet);
/* returns zero if successful, one if DMA context is locked up */
int ohci1394_stop_context (struct ti_ohci *ohci, int reg, char *msg);
struct ti_ohci *ohci1394_get_struct(int card_num);
#endif
......@@ -27,6 +27,7 @@
* eeprom via i2c and storing it in config ROM
* Reworked code for initiating bus resets
* (long, short, with or without hold-off)
* Enhancements in async and iso send code
*/
#include <linux/config.h>
......@@ -127,23 +128,20 @@ static int bit_unreg(struct i2c_client *client)
}
static struct i2c_algo_bit_data bit_data = {
.setsda = bit_setsda,
.setscl = bit_setscl,
.getsda = bit_getsda,
.getscl = bit_getscl,
.udelay = 5,
.mdelay = 5,
.timeout = 100,
};
static struct i2c_adapter bit_ops = {
.name = "PCILynx I2C adapter",
.id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
.client_register = bit_reg,
.client_unregister = bit_unreg,
};
......@@ -476,7 +474,14 @@ static void send_next(struct ti_lynx *lynx, int what)
struct hpsb_packet *packet;
d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
if (!list_empty(&d->pcl_queue)) {
PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
BUG();
}
packet = driver_packet(d->queue.next);
list_del(&packet->driver_list);
list_add_tail(&packet->driver_list, &d->pcl_queue);
d->header_dma = pci_map_single(lynx->dev, packet->header,
packet->header_size, PCI_DMA_TODEVICE);
......@@ -490,6 +495,7 @@ static void send_next(struct ti_lynx *lynx, int what)
pcl.next = PCL_NEXT_INVALID;
pcl.async_error_next = PCL_NEXT_INVALID;
pcl.pcl_status = 0;
#ifdef __BIG_ENDIAN
pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
#else
......@@ -556,7 +562,7 @@ static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
spin_lock_irqsave(&d->queue_lock, flags);
list_add_tail(&packet->driver_list, &d->queue);
if (list_empty(&d->pcl_queue))
send_next(lynx, packet->type);
spin_unlock_irqrestore(&d->queue_lock, flags);
......@@ -748,7 +754,44 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
list_splice(&lynx->async.queue, &packet_list);
INIT_LIST_HEAD(&lynx->async.queue);
if (list_empty(&lynx->async.pcl_queue)) {
spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
} else {
struct ti_pcl pcl;
u32 ack;
struct hpsb_packet *packet;
PRINT(KERN_INFO, lynx->id, "cancelling async packet that was already in the PCL");
get_pcl(lynx, lynx->async.pcl, &pcl);
packet = driver_packet(lynx->async.pcl_queue.next);
list_del(&packet->driver_list);
pci_unmap_single(lynx->dev, lynx->async.header_dma,
packet->header_size, PCI_DMA_TODEVICE);
if (packet->data_size) {
pci_unmap_single(lynx->dev, lynx->async.data_dma,
packet->data_size, PCI_DMA_TODEVICE);
}
spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
ack = (pcl.pcl_status >> 15) & 0xf;
PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
} else {
ack = (pcl.pcl_status >> 15) & 0xf;
}
} else {
PRINT(KERN_INFO, lynx->id, "async packet was not completed");
ack = ACKX_ABORTED;
}
hpsb_packet_sent(host, packet, ack);
}
while (!list_empty(&packet_list)) {
packet = driver_packet(packet_list.next);
......@@ -759,13 +802,14 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
break;
case MODIFY_USAGE:
if (arg) {
if (try_module_get(THIS_MODULE))
retval = 1;
} else {
module_put(THIS_MODULE);
retval = 1;
}
break;
case ISO_LISTEN_CHANNEL:
......@@ -1021,20 +1065,12 @@ static ssize_t mem_read(struct file *file, char *buffer, size_t count,
ssize_t retval;
void *membase;
if (*offset != off) /* Check for EOF before we trust wrap */
return 0;
/* FIXME: Signed wrap is undefined in C - wants fixing up */
if (off + count < off)
return 0;
if ((off + count) > PCILYNX_MAX_MEMORY+1) {
count = PCILYNX_MAX_MEMORY+1 - off;
}
if (count == 0 || off > PCILYNX_MAX_MEMORY) {
return -ENOSPC;
}
switch (md->type) {
case rom:
......@@ -1145,11 +1181,11 @@ static void lynx_irq_handler(int irq, void *dev_id,
linkint = reg_read(lynx, LINK_INT_STATUS);
intmask = reg_read(lynx, PCI_INT_STATUS);
if (!(intmask & PCI_INT_INT_PEND)) return;
PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
linkint);
reg_write(lynx, LINK_INT_STATUS, linkint);
reg_write(lynx, PCI_INT_STATUS, intmask);
......@@ -1248,62 +1284,96 @@ static void lynx_irq_handler(int irq, void *dev_id,
}
if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
PRINTD(KERN_DEBUG, lynx->id, "async sent");
spin_lock(&lynx->async.queue_lock);
if (list_empty(&lynx->async.pcl_queue)) {
spin_unlock(&lynx->async.queue_lock);
PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
} else {
struct ti_pcl pcl;
u32 ack;
struct hpsb_packet *packet;
get_pcl(lynx, lynx->async.pcl, &pcl);
packet = driver_packet(lynx->async.pcl_queue.next);
list_del(&packet->driver_list);
pci_unmap_single(lynx->dev, lynx->async.header_dma,
packet->header_size, PCI_DMA_TODEVICE);
if (packet->data_size) {
pci_unmap_single(lynx->dev, lynx->async.data_dma,
packet->data_size, PCI_DMA_TODEVICE);
}
if (!list_empty(&lynx->async.queue)) {
send_next(lynx, hpsb_async);
}
spin_unlock(&lynx->async.queue_lock);
if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
ack = (pcl.pcl_status >> 15) & 0xf;
PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
} else {
ack = (pcl.pcl_status >> 15) & 0xf;
}
} else {
PRINT(KERN_INFO, lynx->id, "async packet was not completed");
ack = ACKX_SEND_ERROR;
}
hpsb_packet_sent(host, packet, ack);
}
}
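/* The pcl_status-to-ack decode above is repeated in the cancel path and in
 * the iso-send handler below (the cancel path reports ACKX_ABORTED when the
 * packet never completed). An equivalent helper, sketched here for
 * reference only - the name is ours, not the driver's: */
static u32 pcl_status_to_ack(u32 pcl_status, u32 incomplete_ack)
{
	u32 ack;

	if (!(pcl_status & DMA_CHAN_STAT_PKTCMPL))
		return incomplete_ack;	/* ACKX_SEND_ERROR or ACKX_ABORTED */

	ack = (pcl_status >> 15) & 0xf;
	if (pcl_status & DMA_CHAN_STAT_SPECIALACK)
		ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
	return ack;
}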
if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
PRINTD(KERN_DEBUG, lynx->id, "iso sent");
spin_lock(&lynx->iso_send.queue_lock);
if (list_empty(&lynx->iso_send.pcl_queue)) {
spin_unlock(&lynx->iso_send.queue_lock);
PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
} else {
struct ti_pcl pcl;
u32 ack;
struct hpsb_packet *packet;
get_pcl(lynx, lynx->iso_send.pcl, &pcl);
packet = driver_packet(lynx->iso_send.pcl_queue.next);
list_del(&packet->driver_list);
pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
packet->header_size, PCI_DMA_TODEVICE);
if (packet->data_size) {
pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
packet->data_size, PCI_DMA_TODEVICE);
}
if (!list_empty(&lynx->iso_send.queue)) {
send_next(lynx, hpsb_iso);
}
spin_unlock(&lynx->iso_send.queue_lock);
if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
ack = (pcl.pcl_status >> 15) & 0xf;
PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
} else {
ack = (pcl.pcl_status >> 15) & 0xf;
}
} else {
PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
ack = ACKX_SEND_ERROR;
}
hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
}
}
if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
......@@ -1605,7 +1675,9 @@ static int __devinit add_card(struct pci_dev *dev,
lynx->phy_reg0 = -1;
INIT_LIST_HEAD(&lynx->async.queue);
INIT_LIST_HEAD(&lynx->async.pcl_queue);
INIT_LIST_HEAD(&lynx->iso_send.queue);
INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
......
......@@ -95,6 +95,7 @@ struct ti_lynx {
struct lynx_send_data {
pcl_t pcl_start, pcl;
struct list_head queue;
struct list_head pcl_queue; /* this queue contains at most one packet */
spinlock_t queue_lock;
dma_addr_t header_dma, data_dma;
int channel;
......@@ -514,13 +515,13 @@ static inline void run_pcl(const struct ti_lynx *lynx, pcl_t pclid, int dmachan)
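/* The CRC words in the ROM below were previously left as zero; each
 * "CRC length, CRC" quadlet holds the IEEE-1212 CRC-16 of the quadlets that
 * follow it. For reference, the standard algorithm that produces the
 * hardcoded values (a sketch with a name of our choosing, not part of this
 * file): */
static u16 csr_crc16(const quadlet_t *data, int length)
{
	int shift;
	u32 sum, crc = 0;

	while (length--) {
		quadlet_t q = *data++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (q >> shift)) & 0x000f;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
		}
		crc &= 0xffff;
	}
	return crc;
}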
static quadlet_t lynx_csr_rom[] = {
/* bus info block offset (hex) */
_(0x04046aaf), /* info/CRC length, CRC 400 */
_(0x31333934), /* 1394 magic number 404 */
_(0xf064a000), /* misc. settings 408 */
_(0x08002850), /* vendor ID, chip ID high 40c */
_(0x0000ffff), /* chip ID low 410 */
/* root directory */
_(0x00095778), /* directory length, CRC 414 */
_(0x03080028), /* vendor ID (Texas Instr.) 418 */
_(0x81000008), /* offset to textual ID 41c */
_(0x0c000200), /* node capabilities 420 */
......@@ -530,8 +531,8 @@ static quadlet_t lynx_csr_rom[] = {
_(0x81000014), /* offset to textual ID 430 */
_(0x09000000), /* node hardware version 434 */
_(0x81000018), /* offset to textual ID 438 */
/* module vendor ID textual */
_(0x00070812), /* CRC length, CRC 43c */
_(0x00000000), /* 440 */
_(0x00000000), /* 444 */
_(0x54455841), /* "Texas Instruments" 448 */
......@@ -540,25 +541,25 @@ static quadlet_t lynx_csr_rom[] = {
_(0x4d454e54), /* 454 */
_(0x53000000), /* 458 */
/* node unique ID leaf */
_(0x00022ead), /* CRC length, CRC 45c */
_(0x08002850), /* vendor ID, chip ID high 460 */
_(0x0000ffff), /* chip ID low 464 */
/* module dependent info */
_(0x0005d837), /* CRC length, CRC 468 */
_(0x81000012), /* offset to module textual ID 46c */
_(0x81000017), /* textual descriptor 470 */
_(0x39010000), /* SRAM size 474 */
_(0x3a010000), /* AUXRAM size 478 */
_(0x3b000000), /* AUX device 47c */
/* module textual ID */
_(0x000594df), /* CRC length, CRC 480 */
_(0x00000000), /* 484 */
_(0x00000000), /* 488 */
_(0x54534231), /* "TSB12LV21" 48c */
_(0x324c5632), /* 490 */
_(0x31000000), /* 494 */
/* part number */
_(0x00060000), /* CRC length, CRC 498 */
_(0x00068405), /* CRC length, CRC 498 */
_(0x00000000), /* 49c */
_(0x00000000), /* 4a0 */
_(0x39383036), /* "9806000-0001" 4a4 */
......@@ -566,14 +567,14 @@ static quadlet_t lynx_csr_rom[] = {
_(0x30303031), /* 4ac */
_(0x20000001), /* 4b0 */
/* module hardware version textual */
_(0x00050000), /* CRC length, CRC 4b4 */
_(0x00056501), /* CRC length, CRC 4b4 */
_(0x00000000), /* 4b8 */
_(0x00000000), /* 4bc */
_(0x5453424b), /* "TSBKPCITST" 4c0 */
_(0x50434954), /* 4c4 */
_(0x53540000), /* 4c8 */
/* node hardware version textual */
_(0x00050000), /* CRC length, CRC 4d0 */
_(0x0005d805), /* CRC length, CRC 4d0 */
_(0x00000000), /* 4d4 */
_(0x00000000), /* 4d8 */
_(0x54534232), /* "TSB21LV03" 4dc */
......
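The ROM hunks above fill in real CRC-16 values (0x6aaf, 0x5778, ...) where the table used to carry zero placeholders. For reference, a sketch of the IEEE 1212 config-ROM CRC these values come from, computed 4 bits per step over big-endian quadlets (the helper name here is illustrative; the ieee1394 core carries an equivalent):
/* Sketch: serial CRC-16 over 'length' quadlets of a config ROM
 * block, as defined by IEEE 1212 for the blocks patched above. */
static u16 csr_crc16(const u32 *data, int length)
{
        int shift;
        u32 crc = 0, sum, q;

        for (; length > 0; length--) {
                q = *data++;
                for (shift = 28; shift >= 0; shift -= 4) {
                        sum = ((crc >> 12) ^ (q >> shift)) & 0xf;
                        crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
                }
                crc &= 0xffff;
        }
        return crc;
}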
#ifndef IEEE1394_RAW1394_PRIVATE_H
#define IEEE1394_RAW1394_PRIVATE_H
/* header for definitions that are private to the raw1394 driver
and not visible to user-space */
#define RAW1394_DEVICE_MAJOR 171
#define RAW1394_DEVICE_NAME "raw1394"
struct iso_block_store {
atomic_t refcount;
size_t data_size;
quadlet_t data[0];
};
enum raw1394_iso_state { RAW1394_ISO_INACTIVE = 0,
RAW1394_ISO_RECV = 1,
RAW1394_ISO_XMIT = 2 };
struct file_info {
struct list_head list;
enum { opened, initialized, connected } state;
unsigned int protocol_version;
struct hpsb_host *host;
struct list_head req_pending;
struct list_head req_complete;
struct semaphore complete_sem;
spinlock_t reqlists_lock;
wait_queue_head_t poll_wait_complete;
struct list_head addr_list;
u8 *fcp_buffer;
/* old ISO API */
u64 listen_channels;
quadlet_t *iso_buffer;
size_t iso_buffer_length;
u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */
/* new rawiso API */
enum raw1394_iso_state iso_state;
struct hpsb_iso *iso_handle;
};
struct arm_addr {
struct list_head addr_list; /* file_info list */
u64 start, end;
u64 arm_tag;
u8 access_rights;
u8 notification_options;
u8 client_transactions;
u64 recvb;
u16 rec_length;
u8 *addr_space_buffer; /* accessed by read/write/lock */
};
struct pending_request {
struct list_head list;
struct file_info *file_info;
struct hpsb_packet *packet;
struct iso_block_store *ibs;
quadlet_t *data;
int free_data;
struct raw1394_request req;
};
struct host_info {
struct list_head list;
struct hpsb_host *host;
struct list_head file_info_list;
};
#endif /* IEEE1394_RAW1394_PRIVATE_H */
......@@ -50,7 +50,7 @@
#include "iso.h"
#include "ieee1394_transactions.h"
#include "raw1394.h"
#include "raw1394-private.h"
#if BITS_PER_LONG == 64
#define int2ptr(x) ((void *)x)
......@@ -558,7 +558,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
lh = lh->next;
}
hi = list_entry(lh, struct host_info, list);
hpsb_ref_host(hi->host);
hpsb_ref_host(hi->host); // XXX Need to handle failure case
list_add_tail(&fi->list, &hi->file_info_list);
fi->host = hi->host;
fi->state = connected;
......@@ -603,10 +603,13 @@ static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
if (fi->listen_channels & (1ULL << channel)) {
req->req.error = RAW1394_ERROR_ALREADY;
} else {
fi->listen_channels |= 1ULL << channel;
hpsb_listen_channel(hl_handle, fi->host, channel);
fi->iso_buffer = int2ptr(req->req.recvb);
fi->iso_buffer_length = req->req.length;
if(hpsb_listen_channel(hl_handle, fi->host, channel)) {
req->req.error = RAW1394_ERROR_ALREADY;
} else {
fi->listen_channels |= 1ULL << channel;
fi->iso_buffer = int2ptr(req->req.recvb);
fi->iso_buffer_length = req->req.length;
}
}
} else {
/* deallocate channel (one's complement neg) req.misc */
......@@ -2004,7 +2007,7 @@ static inline int __rawiso_event_in_queue(struct file_info *fi)
{
struct list_head *lh;
struct pending_request *req;
list_for_each(lh, &fi->req_complete) {
req = list_entry(lh, struct pending_request, list);
if(req->req.type == RAW1394_REQ_RAWISO_ACTIVITY) {
......@@ -2015,59 +2018,63 @@ static inline int __rawiso_event_in_queue(struct file_info *fi)
return 0;
}
/* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */
static void queue_rawiso_event(struct file_info *fi)
{
unsigned long flags;
spin_lock_irqsave(&fi->reqlists_lock, flags);
/* only one ISO activity event may be in the queue */
if(!__rawiso_event_in_queue(fi)) {
struct pending_request *req = __alloc_pending_request(SLAB_ATOMIC);
if(req) {
req->file_info = fi;
req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
req->req.generation = get_hpsb_generation(fi->host);
__queue_complete_req(req);
} else {
/* on allocation failure, signal an overflow */
if(fi->iso_handle) {
atomic_inc(&fi->iso_handle->overflows);
}
}
}
spin_unlock_irqrestore(&fi->reqlists_lock, flags);
}
static void rawiso_activity_cb(struct hpsb_iso *iso)
{
unsigned long host_flags;
unsigned long flags;
struct list_head *lh;
struct host_info *hi;
spin_lock_irqsave(&host_info_lock, host_flags);
spin_lock_irqsave(&host_info_lock, flags);
hi = find_host_info(iso->host);
if (hi != NULL) {
list_for_each(lh, &hi->file_info_list) {
unsigned long reqlist_flags;
struct file_info *fi = list_entry(lh, struct file_info, list);
spin_lock_irqsave(&fi->reqlists_lock, reqlist_flags);
/* only one ISO activity event may be in the queue */
if(!__rawiso_event_in_queue(fi)) {
struct pending_request *req = __alloc_pending_request(SLAB_ATOMIC);
if(req) {
req->file_info = fi;
req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
req->req.generation = get_hpsb_generation(iso->host);
__queue_complete_req(req);
} else {
/* on allocation failure, signal an overflow */
if(fi->iso_handle) {
atomic_inc(&fi->iso_handle->overflows);
}
}
}
spin_unlock_irqrestore(&fi->reqlists_lock, reqlist_flags);
if(fi->iso_handle == iso)
queue_rawiso_event(fi);
}
}
spin_unlock_irqrestore(&host_info_lock, host_flags);
spin_unlock_irqrestore(&host_info_lock, flags);
}
/* helper function - gather all the kernel iso status bits for returning to user-space */
static void raw1394_iso_fill_status(struct hpsb_iso *iso, struct raw1394_iso_status *stat)
{
stat->config.data_buf_size = iso->buf_size;
stat->config.buf_packets = iso->buf_packets;
stat->config.max_packet_size = iso->max_packet_size;
stat->config.channel = iso->channel;
stat->config.speed = iso->speed;
stat->config.irq_interval = iso->irq_interval;
stat->buf_stride = iso->buf_stride;
stat->packet_data_offset = iso->packet_data_offset;
stat->packet_info_offset = iso->packet_info_offset;
stat->first_packet = iso->first_packet;
stat->n_packets = hpsb_iso_n_ready(iso);
stat->overflows = atomic_read(&iso->overflows);
stat->xmit_cycle = iso->xmit_cycle;
}
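This is the kernel half of RAW1394_IOC_ISO_GET_STATUS. A hypothetical user-space counterpart, touching only fields visible in this diff:
#include <stdio.h>
#include <sys/ioctl.h>
#include "raw1394.h"   /* struct raw1394_iso_status, RAW1394_IOC_* */

/* Hypothetical sketch: read back the status of an already
 * initialized iso context on fd (an open /dev/raw1394). */
static void dump_iso_status(int fd)
{
        struct raw1394_iso_status stat;

        if (ioctl(fd, RAW1394_IOC_ISO_GET_STATUS, &stat) == 0)
                printf("%u packets ready, %u overflows, xmit_cycle %d\n",
                       (unsigned) stat.n_packets,
                       (unsigned) stat.overflows,
                       (int) stat.xmit_cycle);
}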
static int raw1394_iso_xmit_init(struct file_info *fi, void *uaddr)
......@@ -2076,10 +2083,10 @@ static int raw1394_iso_xmit_init(struct file_info *fi, void *uaddr)
if(copy_from_user(&stat, uaddr, sizeof(stat)))
return -EFAULT;
fi->iso_handle = hpsb_iso_xmit_init(fi->host,
stat.config.data_buf_size,
stat.config.buf_packets,
stat.config.max_packet_size,
stat.config.channel,
stat.config.speed,
stat.config.irq_interval,
......@@ -2088,14 +2095,14 @@ static int raw1394_iso_xmit_init(struct file_info *fi, void *uaddr)
return -ENOMEM;
fi->iso_state = RAW1394_ISO_XMIT;
raw1394_iso_fill_status(fi->iso_handle, &stat);
if(copy_to_user(uaddr, &stat, sizeof(stat)))
return -EFAULT;
/* queue an event to get things started */
rawiso_activity_cb(fi->iso_handle);
return 0;
}
......@@ -2105,10 +2112,10 @@ static int raw1394_iso_recv_init(struct file_info *fi, void *uaddr)
if(copy_from_user(&stat, uaddr, sizeof(stat)))
return -EFAULT;
fi->iso_handle = hpsb_iso_recv_init(fi->host,
stat.config.data_buf_size,
stat.config.buf_packets,
stat.config.max_packet_size,
stat.config.channel,
stat.config.irq_interval,
rawiso_activity_cb);
......@@ -2116,7 +2123,7 @@ static int raw1394_iso_recv_init(struct file_info *fi, void *uaddr)
return -ENOMEM;
fi->iso_state = RAW1394_ISO_RECV;
raw1394_iso_fill_status(fi->iso_handle, &stat);
if(copy_to_user(uaddr, &stat, sizeof(stat)))
return -EFAULT;
......@@ -2134,7 +2141,72 @@ static int raw1394_iso_get_status(struct file_info *fi, void *uaddr)
/* reset overflow counter */
atomic_set(&iso->overflows, 0);
return 0;
}
/* copy N packet_infos out of the ringbuffer into user-supplied array */
static int raw1394_iso_recv_packets(struct file_info *fi, void *uaddr)
{
struct raw1394_iso_packets upackets;
unsigned int packet = fi->iso_handle->first_packet;
int i;
if(copy_from_user(&upackets, uaddr, sizeof(upackets)))
return -EFAULT;
if(upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
return -EINVAL;
/* ensure user-supplied buffer is accessible and big enough */
if(verify_area(VERIFY_WRITE, upackets.infos,
upackets.n_packets * sizeof(struct raw1394_iso_packet_info)))
return -EFAULT;
/* copy the packet_infos out */
for(i = 0; i < upackets.n_packets; i++) {
if(__copy_to_user(&upackets.infos[i],
&fi->iso_handle->infos[packet],
sizeof(struct raw1394_iso_packet_info)))
return -EFAULT;
packet = (packet + 1) % fi->iso_handle->buf_packets;
}
return 0;
}
/* copy N packet_infos from user to ringbuffer, and queue them for transmission */
static int raw1394_iso_send_packets(struct file_info *fi, void *uaddr)
{
struct raw1394_iso_packets upackets;
int i, rv;
if(copy_from_user(&upackets, uaddr, sizeof(upackets)))
return -EFAULT;
if(upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
return -EINVAL;
/* ensure user-supplied buffer is accessible and big enough */
if(verify_area(VERIFY_READ, upackets.infos,
upackets.n_packets * sizeof(struct raw1394_iso_packet_info)))
return -EFAULT;
/* copy the infos structs in and queue the packets */
for(i = 0; i < upackets.n_packets; i++) {
struct raw1394_iso_packet_info info;
if(__copy_from_user(&info, &upackets.infos[i],
sizeof(struct raw1394_iso_packet_info)))
return -EFAULT;
rv = hpsb_iso_xmit_queue_packet(fi->iso_handle, info.offset,
info.len, info.tag, info.sy);
if(rv)
return rv;
}
return 0;
}
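Together with mmap() on the data buffer, these two handlers define the user-space fast path. A hypothetical consumer loop for the receive side (same headers as the previous sketch; handle_payload() is a stand-in):
/* Hypothetical sketch: fd is an open /dev/raw1394 with a running
 * receive context, buf the mmap()ed ringbuffer. Fetch the ready
 * packet_infos, read the payloads at their offsets, release them. */
static void drain_packets(int fd, unsigned char *buf)
{
        struct raw1394_iso_status stat;
        struct raw1394_iso_packet_info infos[64];
        struct raw1394_iso_packets up;
        unsigned int i, n;

        if (ioctl(fd, RAW1394_IOC_ISO_GET_STATUS, &stat) < 0)
                return;
        n = stat.n_packets < 64 ? stat.n_packets : 64;

        up.n_packets = n;
        up.infos = infos;
        if (n == 0 || ioctl(fd, RAW1394_IOC_ISO_RECV_PACKETS, &up) < 0)
                return;
        for (i = 0; i < n; i++)
                handle_payload(buf + infos[i].offset, infos[i].len);
        ioctl(fd, RAW1394_IOC_ISO_RECV_RELEASE_PACKETS, n);
}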
......@@ -2142,7 +2214,7 @@ static void raw1394_iso_shutdown(struct file_info *fi)
{
if(fi->iso_handle)
hpsb_iso_shutdown(fi->iso_handle);
fi->iso_handle = NULL;
fi->iso_state = RAW1394_ISO_INACTIVE;
}
......@@ -2155,20 +2227,20 @@ static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
if(fi->iso_state == RAW1394_ISO_INACTIVE)
return -EINVAL;
return dma_region_mmap(&fi->iso_handle->buf, file, vma);
return dma_region_mmap(&fi->iso_handle->data_buf, file, vma);
}
/* ioctl is only used for rawiso operations */
static int raw1394_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
struct file_info *fi = file->private_data;
switch(fi->iso_state) {
case RAW1394_ISO_INACTIVE:
switch(cmd) {
case RAW1394_ISO_XMIT_INIT:
case RAW1394_IOC_ISO_XMIT_INIT:
return raw1394_iso_xmit_init(fi, (void*) arg);
case RAW1394_ISO_RECV_INIT:
case RAW1394_IOC_ISO_RECV_INIT:
return raw1394_iso_recv_init(fi, (void*) arg);
default:
break;
......@@ -2176,51 +2248,71 @@ static int raw1394_ioctl(struct inode *inode, struct file *file, unsigned int cm
break;
case RAW1394_ISO_RECV:
switch(cmd) {
case RAW1394_ISO_RECV_START:
return hpsb_iso_recv_start(fi->iso_handle, arg);
case RAW1394_ISO_STOP:
case RAW1394_IOC_ISO_RECV_START: {
/* copy args from user-space */
int args[3];
if(copy_from_user(&args[0], (void*) arg, sizeof(args)))
return -EFAULT;
return hpsb_iso_recv_start(fi->iso_handle, args[0], args[1], args[2]);
}
case RAW1394_IOC_ISO_XMIT_RECV_STOP:
hpsb_iso_stop(fi->iso_handle);
return 0;
case RAW1394_ISO_GET_STATUS:
case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
return hpsb_iso_recv_listen_channel(fi->iso_handle, arg);
case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
return hpsb_iso_recv_unlisten_channel(fi->iso_handle, arg);
case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK: {
/* copy the u64 from user-space */
u64 mask;
if(copy_from_user(&mask, (void*) arg, sizeof(mask)))
return -EFAULT;
return hpsb_iso_recv_set_channel_mask(fi->iso_handle, mask);
}
case RAW1394_IOC_ISO_GET_STATUS:
return raw1394_iso_get_status(fi, (void*) arg);
case RAW1394_ISO_PRODUCE_CONSUME:
case RAW1394_IOC_ISO_RECV_PACKETS:
return raw1394_iso_recv_packets(fi, (void*) arg);
case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
return hpsb_iso_recv_release_packets(fi->iso_handle, arg);
case RAW1394_ISO_SHUTDOWN:
case RAW1394_IOC_ISO_SHUTDOWN:
raw1394_iso_shutdown(fi);
return 0;
case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
queue_rawiso_event(fi);
return 0;
}
break;
case RAW1394_ISO_XMIT:
switch(cmd) {
case RAW1394_ISO_XMIT_START: {
case RAW1394_IOC_ISO_XMIT_START: {
/* copy two ints from user-space */
int args[2];
if(copy_from_user(&args[0], (void*) arg, sizeof(args)))
return -EFAULT;
return hpsb_iso_xmit_start(fi->iso_handle, args[0], args[1]);
}
case RAW1394_ISO_STOP:
case RAW1394_IOC_ISO_XMIT_SYNC:
return hpsb_iso_xmit_sync(fi->iso_handle);
case RAW1394_IOC_ISO_XMIT_RECV_STOP:
hpsb_iso_stop(fi->iso_handle);
return 0;
case RAW1394_ISO_GET_STATUS:
case RAW1394_IOC_ISO_GET_STATUS:
return raw1394_iso_get_status(fi, (void*) arg);
case RAW1394_ISO_PRODUCE_CONSUME:
return hpsb_iso_xmit_queue_packets(fi->iso_handle, arg);
case RAW1394_ISO_SHUTDOWN:
case RAW1394_IOC_ISO_XMIT_PACKETS:
return raw1394_iso_send_packets(fi, (void*) arg);
case RAW1394_IOC_ISO_SHUTDOWN:
raw1394_iso_shutdown(fi);
return 0;
case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
queue_rawiso_event(fi);
return 0;
}
break;
default:
break;
}
return -EINVAL;
}
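On the transmit side the start arguments are now marshalled through copy_from_user() instead of being crammed into the long. A hypothetical user-space call, using the -1 "don't care" convention this API already uses for irq_interval:
/* Hypothetical sketch: start transmitting on any cycle (-1) after
 * prebuffering 8 packets; matches the int[2] layout read above. */
int args[2] = { -1, 8 };        /* { starting cycle, prebuffer } */

if (ioctl(fd, RAW1394_IOC_ISO_XMIT_START, args) < 0)
        perror("RAW1394_IOC_ISO_XMIT_START");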
......@@ -2236,7 +2328,7 @@ static unsigned int raw1394_poll(struct file *file, poll_table *pt)
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_irq(&fi->reqlists_lock);
return mask;
}
......@@ -2286,7 +2378,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
if(fi->iso_state != RAW1394_ISO_INACTIVE)
raw1394_iso_shutdown(fi);
for (i = 0; i < 64; i++) {
if (fi->listen_channels & (1ULL << i)) {
hpsb_unlisten_channel(hl_handle, fi->host, i);
......
#ifndef IEEE1394_RAW1394_H
#define IEEE1394_RAW1394_H
#define RAW1394_DEVICE_MAJOR 171
#define RAW1394_DEVICE_NAME "raw1394"
/* header for the raw1394 API that is exported to user-space */
#define RAW1394_KERNELAPI_VERSION 4
......@@ -94,21 +93,21 @@ struct raw1394_khost_list {
};
typedef struct arm_request {
nodeid_t destination_nodeid;
nodeid_t source_nodeid;
nodeaddr_t destination_offset;
u8 tlabel;
u8 tcode;
u_int8_t extended_transaction_code;
u_int32_t generation;
arm_length_t buffer_length;
byte_t *buffer;
__u16 destination_nodeid;
__u16 source_nodeid;
__u64 destination_offset;
__u8 tlabel;
__u8 tcode;
__u8 extended_transaction_code;
__u32 generation;
__u16 buffer_length;
__u8 *buffer;
} *arm_request_t;
typedef struct arm_response {
int response_code;
arm_length_t buffer_length;
byte_t *buffer;
__s32 response_code;
__u16 buffer_length;
__u8 *buffer;
} *arm_response_t;
typedef struct arm_request_response {
......@@ -117,133 +116,59 @@ typedef struct arm_request_response {
} *arm_request_response_t;
/* rawiso API */
/* ioctls */
#define RAW1394_ISO_XMIT_INIT 1 /* arg: raw1394_iso_status* */
#define RAW1394_ISO_RECV_INIT 2 /* arg: raw1394_iso_status* */
#define RAW1394_ISO_RECV_START 3 /* arg: int, starting cycle */
#define RAW1394_ISO_XMIT_START 8 /* arg: int[2], { starting cycle, prebuffer } */
#define RAW1394_ISO_STOP 4
#define RAW1394_ISO_GET_STATUS 5 /* arg: raw1394_iso_status* */
#define RAW1394_ISO_PRODUCE_CONSUME 6 /* arg: int, # of packets */
#define RAW1394_ISO_SHUTDOWN 7
#include "ieee1394-ioctl.h"
/* per-packet metadata embedded in the ringbuffer */
/* must be identical to hpsb_iso_packet_info in iso.h! */
struct raw1394_iso_packet_info {
unsigned short len;
unsigned short cycle;
unsigned char channel; /* recv only */
unsigned char tag;
unsigned char sy;
__u32 offset;
__u16 len;
__u16 cycle; /* recv only */
__u8 channel; /* recv only */
__u8 tag;
__u8 sy;
};
/* argument for RAW1394_ISO_RECV/XMIT_PACKETS ioctls */
struct raw1394_iso_packets {
__u32 n_packets;
struct raw1394_iso_packet_info *infos;
};
struct raw1394_iso_config {
unsigned int buf_packets;
unsigned int max_packet_size;
int channel;
int speed; /* xmit only */
int irq_interval;
/* size of packet data buffer, in bytes (will be rounded up to PAGE_SIZE) */
__u32 data_buf_size;
/* # of packets to buffer */
__u32 buf_packets;
/* iso channel (set to -1 for multi-channel recv) */
__s32 channel;
/* xmit only - iso transmission speed */
__u8 speed;
/* max. latency of buffer, in packets (-1 if you don't care) */
__s32 irq_interval;
};
/* argument to RAW1394_ISO_XMIT/RECV_INIT and RAW1394_ISO_GET_STATUS */
struct raw1394_iso_status {
/* current settings */
struct raw1394_iso_config config;
/* byte offset between successive packets in the buffer */
int buf_stride;
/* byte offset of data payload within each packet */
int packet_data_offset;
/* byte offset of struct iso_packet_info within each packet */
int packet_info_offset;
/* index of next packet to fill with data (ISO transmission)
or next packet containing data received (ISO reception) */
unsigned int first_packet;
/* number of packets waiting to be filled with data (ISO transmission)
or containing data received (ISO reception) */
unsigned int n_packets;
__u32 n_packets;
/* approximate number of packets dropped due to overflow or
underflow of the packet buffer (a value of zero guarantees
that no packets have been dropped) */
unsigned int overflows;
};
__u32 overflows;
#ifdef __KERNEL__
struct iso_block_store {
atomic_t refcount;
size_t data_size;
quadlet_t data[0];
/* cycle number at which next packet will be transmitted;
-1 if not known */
__s16 xmit_cycle;
};
enum raw1394_iso_state { RAW1394_ISO_INACTIVE = 0,
RAW1394_ISO_RECV = 1,
RAW1394_ISO_XMIT = 2 };
struct file_info {
struct list_head list;
enum { opened, initialized, connected } state;
unsigned int protocol_version;
struct hpsb_host *host;
struct list_head req_pending;
struct list_head req_complete;
struct semaphore complete_sem;
spinlock_t reqlists_lock;
wait_queue_head_t poll_wait_complete;
struct list_head addr_list;
u8 *fcp_buffer;
/* old ISO API */
u64 listen_channels;
quadlet_t *iso_buffer;
size_t iso_buffer_length;
u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */
/* new rawiso API */
enum raw1394_iso_state iso_state;
struct hpsb_iso *iso_handle;
};
struct arm_addr {
struct list_head addr_list; /* file_info list */
u64 start, end;
u64 arm_tag;
u8 access_rights;
u8 notification_options;
u8 client_transactions;
u64 recvb;
u16 rec_length;
u8 *addr_space_buffer; /* accessed by read/write/lock */
};
struct pending_request {
struct list_head list;
struct file_info *file_info;
struct hpsb_packet *packet;
struct iso_block_store *ibs;
quadlet_t *data;
int free_data;
struct raw1394_request req;
};
struct host_info {
struct list_head list;
struct hpsb_host *host;
struct list_head file_info_list;
};
#endif /* __KERNEL__ */
#endif /* IEEE1394_RAW1394_H */
......@@ -296,6 +296,10 @@
* returns the dead bit in status. Thanks to Chandan (chandan@toad.net) for this one.
* 04/27/02 - Fix sbp2 login problem on SMP systems, enable real spinlocks by default. (JSG)
* 06/09/02 - Don't force 36-byte SCSI inquiry, but leave in a define for badly behaved devices. (JSG)
* 02/04/03 - Fixed an SMP deadlock (don't hold sbp2_command_lock while calling sbp2scsi_complete_command).
* Also save/restore irq flags in sbp2scsi_complete_command - Sancho Dauskardt <sda@bdit.de>
* 02/06/03 - Removed spinlock debugging; use kernel stuff instead (sda)
*
*/
......@@ -346,7 +350,7 @@
#include "sbp2.h"
static char version[] __devinitdata =
"$Rev: 697 $ James Goodwin <jamesg@filanet.com>";
"$Rev: 779 $ James Goodwin <jamesg@filanet.com>";
/*
* Module load parameter definitions
......@@ -488,19 +492,6 @@ static u32 global_outstanding_dmas = 0;
#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
/*
* Spinlock debugging stuff.
*/
#define SBP2_USE_REAL_SPINLOCKS
#ifdef SBP2_USE_REAL_SPINLOCKS
#define sbp2_spin_lock(lock, flags) spin_lock_irqsave(lock, flags)
#define sbp2_spin_unlock(lock, flags) spin_unlock_irqrestore(lock, flags);
static spinlock_t sbp2_host_info_lock = SPIN_LOCK_UNLOCKED;
#else
#define sbp2_spin_lock(lock, flags) do {save_flags(flags); cli();} while (0)
#define sbp2_spin_unlock(lock, flags) do {restore_flags(flags);} while (0)
#endif
/*
* Globals
......@@ -512,6 +503,8 @@ static u8 sbp2_speedto_maxrec[] = { 0x7, 0x8, 0x9 };
static LIST_HEAD(sbp2_host_info_list);
static spinlock_t sbp2_host_info_lock = SPIN_LOCK_UNLOCKED;
static struct hpsb_highlevel *sbp2_hl_handle = NULL;
static struct hpsb_highlevel_ops sbp2_hl_ops = {
......@@ -686,12 +679,12 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
orbs = sbp2_serialize_io ? 2 : SBP2_MAX_COMMAND_ORBS;
sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
for (i = 0; i < orbs; i++) {
command = (struct sbp2_command_info *)
kmalloc(sizeof(struct sbp2_command_info), GFP_KERNEL);
if (!command) {
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return(-ENOMEM);
}
memset(command, '\0', sizeof(struct sbp2_command_info));
......@@ -708,7 +701,7 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
INIT_LIST_HEAD(&command->list);
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
}
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return 0;
}
......@@ -722,7 +715,7 @@ static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_
struct sbp2_command_info *command;
unsigned long flags;
sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) {
command = list_entry(lh, struct sbp2_command_info, list);
......@@ -740,7 +733,7 @@ static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_
kfree(command);
}
}
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return;
}
......@@ -755,17 +748,17 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
struct sbp2_command_info *command;
unsigned long flags;
sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
list_for_each(lh, &scsi_id->sbp2_command_orb_inuse) {
command = list_entry(lh, struct sbp2_command_info, list);
if (command->command_orb_dma == orb) {
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return (command);
}
}
}
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
......@@ -782,17 +775,17 @@ static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_
struct sbp2_command_info *command;
unsigned long flags;
sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
list_for_each(lh, &scsi_id->sbp2_command_orb_inuse) {
command = list_entry(lh, struct sbp2_command_info, list);
if (command->Current_SCpnt == SCpnt) {
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return (command);
}
}
}
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return(NULL);
}
......@@ -809,7 +802,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
struct sbp2_command_info *command = NULL;
unsigned long flags;
sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
lh = scsi_id->sbp2_command_orb_completed.next;
list_del(lh);
......@@ -820,7 +813,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
} else {
SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
}
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return (command);
}
......@@ -865,11 +858,11 @@ static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_i
{
unsigned long flags;
sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
list_del(&command->list);
sbp2util_free_command_dma(command);
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
}
......@@ -992,9 +985,9 @@ static void sbp2_update(struct unit_directory *ud)
/* Complete any pending commands with busy (so they get
* retried) and remove them from our queue
*/
sbp2_spin_lock(&hi->sbp2_command_lock, flags);
spin_lock_irqsave(&hi->sbp2_command_lock, flags);
sbp2scsi_complete_all_commands(hi, scsi_id, DID_BUS_BUSY);
sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
spin_unlock_irqrestore(&hi->sbp2_command_lock, flags);
}
/*
......@@ -1024,9 +1017,9 @@ static void sbp2_add_host(struct hpsb_host *host)
hi->host = host;
hi->sbp2_command_lock = SPIN_LOCK_UNLOCKED;
sbp2_spin_lock(&sbp2_host_info_lock, flags);
spin_lock_irqsave(&sbp2_host_info_lock, flags);
list_add_tail(&hi->list, &sbp2_host_info_list);
sbp2_spin_unlock(&sbp2_host_info_lock, flags);
spin_unlock_irqrestore(&sbp2_host_info_lock, flags);
/* Register our host with the SCSI stack. */
hi->scsi_host = scsi_register (&scsi_driver_template, sizeof(void *));
......@@ -1085,7 +1078,7 @@ static void sbp2_remove_host(struct hpsb_host *host)
SBP2_DEBUG("sbp2_remove_host");
sbp2_spin_lock(&sbp2_host_info_lock, flags);
spin_lock_irqsave(&sbp2_host_info_lock, flags);
hi = sbp2_find_host_info(host);
if (hi != NULL) {
......@@ -1095,7 +1088,7 @@ static void sbp2_remove_host(struct hpsb_host *host)
else
SBP2_ERR("attempt to remove unknown host %p", host);
sbp2_spin_unlock(&sbp2_host_info_lock, flags);
spin_unlock_irqrestore(&sbp2_host_info_lock, flags);
}
/*
......@@ -2521,16 +2514,16 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
return(RCODE_ADDRESS_ERROR);
}
sbp2_spin_lock(&sbp2_host_info_lock, flags);
spin_lock_irqsave(&sbp2_host_info_lock, flags);
hi = sbp2_find_host_info(host);
sbp2_spin_unlock(&sbp2_host_info_lock, flags);
spin_unlock_irqrestore(&sbp2_host_info_lock, flags);
if (!hi) {
SBP2_ERR("host info is NULL - this is bad!");
return(RCODE_ADDRESS_ERROR);
}
sbp2_spin_lock(&hi->sbp2_command_lock, flags);
spin_lock_irqsave(&hi->sbp2_command_lock, flags);
/*
* Find our scsi_id structure by looking at the status fifo address written to by
......@@ -2541,7 +2534,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
if (!scsi_id) {
SBP2_ERR("scsi_id is NULL - device is gone?");
sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
spin_unlock_irqrestore(&hi->sbp2_command_lock, flags);
return(RCODE_ADDRESS_ERROR);
}
......@@ -2605,13 +2598,6 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
}
SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
/*
* Complete the SCSI command
*/
SBP2_DEBUG("Completing SCSI command");
sbp2scsi_complete_command(hi, scsi_id, scsi_status, SCpnt, command->Current_done);
SBP2_ORB_DEBUG("command orb completed");
}
/*
......@@ -2635,7 +2621,23 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
}
}
sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
spin_unlock_irqrestore(&hi->sbp2_command_lock, flags);
if (SCpnt) {
/*
* Complete the SCSI command.
*
* Only do it after we've released the sbp2_command_lock,
* as it might otherwise deadlock with the
* io_request_lock (in sbp2scsi_queuecommand).
*/
SBP2_DEBUG("Completing SCSI command");
sbp2scsi_complete_command(hi, scsi_id, scsi_status, SCpnt, command->Current_done);
SBP2_ORB_DEBUG("command orb completed");
}
return(RCODE_COMPLETE);
}
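Reduced to its essentials, the fix is the classic "complete outside the lock" pattern (a sketch; the ORB lookup is condensed relative to the code above):
/* Sketch: never call into the SCSI midlayer while holding
 * sbp2_command_lock -- queuecommand runs under io_request_lock and
 * takes sbp2_command_lock, so completing under it can deadlock. */
spin_lock_irqsave(&hi->sbp2_command_lock, flags);
command = sbp2util_find_command_for_orb(scsi_id, orb_dma); /* as above */
SCpnt = command ? command->Current_SCpnt : NULL;
spin_unlock_irqrestore(&hi->sbp2_command_lock, flags);

if (SCpnt)
        sbp2scsi_complete_command(hi, scsi_id, scsi_status,
                                  SCpnt, command->Current_done);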
......@@ -2715,12 +2717,12 @@ static int sbp2scsi_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
/*
* Try and send our SCSI command
*/
sbp2_spin_lock(&hi->sbp2_command_lock, flags);
spin_lock_irqsave(&hi->sbp2_command_lock, flags);
if (sbp2_send_command(hi, scsi_id, SCpnt, done)) {
SBP2_ERR("Error sending SCSI command");
sbp2scsi_complete_command(hi, scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT, SCpnt, done);
}
sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
spin_unlock_irqrestore(&hi->sbp2_command_lock, flags);
return(0);
}
......@@ -2761,10 +2763,14 @@ static void sbp2scsi_complete_all_commands(struct sbp2scsi_host_info *hi,
/*
* This function is called in order to complete a regular SBP-2 command.
*
* This can be called in interrupt context.
*/
static void sbp2scsi_complete_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id, u32 scsi_status,
Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
{
unsigned long flags;
SBP2_DEBUG("sbp2scsi_complete_command");
/*
......@@ -2864,13 +2870,13 @@ static void sbp2scsi_complete_command(struct sbp2scsi_host_info *hi, struct scsi
* Tell scsi stack that we're done with this command
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
spin_lock_irq(&io_request_lock);
spin_lock_irqsave(&io_request_lock,flags);
done (SCpnt);
spin_unlock_irq(&io_request_lock);
spin_unlock_irqrestore(&io_request_lock,flags);
#else
spin_lock_irq(hi->scsi_host->host_lock);
spin_lock_irqsave(hi->scsi_host->host_lock,flags);
done (SCpnt);
spin_unlock_irq(hi->scsi_host->host_lock);
spin_unlock_irqrestore(hi->scsi_host->host_lock,flags);
#endif
return;
......@@ -2896,7 +2902,7 @@ static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
* Right now, just return any matching command structures
* to the free pool.
*/
sbp2_spin_lock(&hi->sbp2_command_lock, flags);
spin_lock_irqsave(&hi->sbp2_command_lock, flags);
command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
if (command) {
SBP2_DEBUG("Found command to abort");
......@@ -2921,7 +2927,7 @@ static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
*/
sbp2_agent_reset(hi, scsi_id, 0);
sbp2scsi_complete_all_commands(hi, scsi_id, DID_BUS_BUSY);
sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
spin_unlock_irqrestore(&hi->sbp2_command_lock, flags);
}
return(SUCCESS);
......
......@@ -50,6 +50,7 @@
#include "ieee1394_core.h"
#include "highlevel.h"
#include "video1394.h"
#include "dma.h"
#include "ohci1394.h"
......@@ -94,9 +95,14 @@ struct dma_iso_ctx {
unsigned int packet_size;
unsigned int left_size;
unsigned int nb_cmd;
unsigned char *buf;
struct dma_region dma;
struct dma_prog_region *prg_reg;
struct dma_cmd **ir_prg;
struct it_dma_prg **it_prg;
unsigned int *buffer_status;
struct timeval *buffer_time; /* time when the buffer was received */
unsigned int *last_used_cmd; /* For ISO Transmit with
......@@ -159,74 +165,6 @@ static spinlock_t video1394_cards_lock = SPIN_LOCK_UNLOCKED;
static devfs_handle_t devfs_handle;
static struct hpsb_highlevel *hl_handle = NULL;
/* Code taken from bttv.c */
/*******************************/
/* Memory management functions */
/*******************************/
static inline unsigned long kvirt_to_bus(unsigned long adr)
{
unsigned long kva, ret;
kva = (unsigned long) page_address(vmalloc_to_page((void *)adr));
kva |= adr & (PAGE_SIZE-1); /* restore the offset */
ret = virt_to_bus((void *)kva);
return ret;
}
/* Here we want the physical address of the memory.
* This is used when initializing the contents of the area.
*/
static inline unsigned long kvirt_to_pa(unsigned long adr)
{
unsigned long kva, ret;
kva = (unsigned long) page_address(vmalloc_to_page((void *)adr));
kva |= adr & (PAGE_SIZE-1); /* restore the offset */
ret = __pa(kva);
return ret;
}
static void * rvmalloc(unsigned long size)
{
void * mem;
unsigned long adr;
size=PAGE_ALIGN(size);
mem=vmalloc_32(size);
if (mem)
{
memset(mem, 0, size); /* Clear the ram out,
no junk to the user */
adr=(unsigned long) mem;
while (size > 0)
{
mem_map_reserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
}
return mem;
}
static void rvfree(void * mem, unsigned long size)
{
unsigned long adr;
if (mem)
{
adr=(unsigned long) mem;
while ((long) size > 0)
{
mem_map_unreserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
vfree(mem);
}
}
/* End of code taken from bttv.c */
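Everything the removed bttv helpers provided is now covered by the dma.h API introduced by this patch. A minimal lifecycle sketch using only calls visible in this diff (variable names follow the surrounding driver):
/* Sketch: dma_region lifecycle as video1394 now uses it.
 * dma_region_alloc() page-aligns, zeroes and pci-maps the buffer;
 * dma_region_offset_to_bus() replaces kvirt_to_bus(). */
struct dma_region dma;
dma_addr_t bus_addr;

dma_region_init(&dma);
if (dma_region_alloc(&dma, num_desc * buf_size, ohci->dev,
                     PCI_DMA_BIDIRECTIONAL))
        return -ENOMEM;

/* device view of buffer n; the CPU view is dma.kvirt + n * buf_size */
bus_addr = dma_region_offset_to_bus(&dma, n * buf_size);

dma_region_free(&dma);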
static int free_dma_iso_ctx(struct dma_iso_ctx *d)
{
......@@ -238,20 +176,19 @@ static int free_dma_iso_ctx(struct dma_iso_ctx *d)
if (d->iso_tasklet.link.next != NULL)
ohci1394_unregister_iso_tasklet(d->ohci, &d->iso_tasklet);
if (d->buf)
rvfree((void *)d->buf, d->num_desc * d->buf_size);
dma_region_free(&d->dma);
if (d->ir_prg) {
for (i=0;i<d->num_desc;i++)
if (d->ir_prg[i]) kfree(d->ir_prg[i]);
kfree(d->ir_prg);
if (d->prg_reg) {
for (i = 0; i < d->num_desc; i++)
dma_prog_region_free(&d->prg_reg[i]);
kfree(d->prg_reg);
}
if (d->it_prg) {
for (i=0;i<d->num_desc;i++)
if (d->it_prg[i]) kfree(d->it_prg[i]);
if (d->ir_prg)
kfree(d->ir_prg);
if (d->it_prg)
kfree(d->it_prg);
}
if (d->buffer_status)
kfree(d->buffer_status);
......@@ -291,18 +228,17 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
d->frame_size = buf_size;
d->buf_size = PAGE_ALIGN(buf_size);
d->last_buffer = -1;
d->buf = NULL;
d->ir_prg = NULL;
init_waitqueue_head(&d->waitq);
d->buf = rvmalloc(d->num_desc * d->buf_size);
/* Init the regions for easy cleanup */
dma_region_init(&d->dma);
if (d->buf == NULL) {
if (dma_region_alloc(&d->dma, d->num_desc * d->buf_size, ohci->dev,
PCI_DMA_BIDIRECTIONAL)) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
free_dma_iso_ctx(d);
return NULL;
}
memset(d->buf, 0, d->num_desc * d->buf_size);
if (type == OHCI_ISO_RECEIVE)
ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
......@@ -321,6 +257,17 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
}
d->ctx = d->iso_tasklet.context;
d->prg_reg = kmalloc(d->num_desc * sizeof(struct dma_prog_region),
GFP_KERNEL);
if (d->prg_reg == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate ir prg regs");
free_dma_iso_ctx(d);
return NULL;
}
/* Makes for easier cleanup */
for (i = 0; i < d->num_desc; i++)
dma_prog_region_init(&d->prg_reg[i]);
if (type == OHCI_ISO_RECEIVE) {
d->ctrlSet = OHCI1394_IsoRcvContextControlSet+32*d->ctx;
d->ctrlClear = OHCI1394_IsoRcvContextControlClear+32*d->ctx;
......@@ -331,31 +278,27 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
GFP_KERNEL);
if (d->ir_prg == NULL) {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma ir prg");
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma ir prg");
free_dma_iso_ctx(d);
return NULL;
}
memset(d->ir_prg, 0, d->num_desc * sizeof(struct dma_cmd *));
d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
d->left_size = (d->frame_size % PAGE_SIZE) ?
d->frame_size % PAGE_SIZE : PAGE_SIZE;
for (i=0;i<d->num_desc;i++) {
d->ir_prg[i] = kmalloc(d->nb_cmd *
sizeof(struct dma_cmd),
GFP_KERNEL);
if (d->ir_prg[i] == NULL) {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma ir prg");
for (i = 0;i < d->num_desc; i++) {
if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
sizeof(struct dma_cmd), ohci->dev)) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma ir prg");
free_dma_iso_ctx(d);
return NULL;
}
d->ir_prg[i] = (struct dma_cmd *)d->prg_reg[i].kvirt;
}
}
else { /* OHCI_ISO_TRANSMIT */
} else { /* OHCI_ISO_TRANSMIT */
d->ctrlSet = OHCI1394_IsoXmitContextControlSet+16*d->ctx;
d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
......@@ -386,20 +329,17 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
if (d->frame_size % d->packet_size) {
d->nb_cmd++;
d->left_size = d->frame_size % d->packet_size;
}
else
} else
d->left_size = d->packet_size;
for (i=0;i<d->num_desc;i++) {
d->it_prg[i] = kmalloc(d->nb_cmd *
sizeof(struct it_dma_prg),
GFP_KERNEL);
if (d->it_prg[i] == NULL) {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma it prg");
for (i = 0; i < d->num_desc; i++) {
if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
sizeof(struct it_dma_prg), ohci->dev)) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma it prg");
free_dma_iso_ctx(d);
return NULL;
}
d->it_prg[i] = (struct it_dma_prg *)d->prg_reg[i].kvirt;
}
}
......@@ -452,7 +392,7 @@ static void reset_ir_status(struct dma_iso_ctx *d, int n)
int i;
d->ir_prg[n][0].status = cpu_to_le32(4);
d->ir_prg[n][1].status = cpu_to_le32(PAGE_SIZE-4);
for (i=2;i<d->nb_cmd-1;i++)
for (i = 2; i < d->nb_cmd - 1; i++)
d->ir_prg[n][i].status = cpu_to_le32(PAGE_SIZE);
d->ir_prg[n][i].status = cpu_to_le32(d->left_size);
}
......@@ -460,7 +400,8 @@ static void reset_ir_status(struct dma_iso_ctx *d, int n)
static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
{
struct dma_cmd *ir_prg = d->ir_prg[n];
unsigned long buf = (unsigned long)d->buf+n*d->buf_size;
struct dma_prog_region *ir_reg = &d->prg_reg[n];
unsigned long buf = (unsigned long)d->dma.kvirt + n * d->buf_size;
int i;
/* the first descriptor will read only 4 bytes */
......@@ -471,31 +412,37 @@ static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
if (flags & VIDEO1394_SYNC_FRAMES)
ir_prg[0].control |= cpu_to_le32(DMA_CTL_WAIT);
ir_prg[0].address = cpu_to_le32(kvirt_to_bus(buf));
ir_prg[0].branchAddress = cpu_to_le32((virt_to_bus(&(ir_prg[1].control))
& 0xfffffff0) | 0x1);
ir_prg[0].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, buf -
(unsigned long)d->dma.kvirt));
ir_prg[0].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
1 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
/* the second descriptor will read PAGE_SIZE-4 bytes */
ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | (PAGE_SIZE-4));
ir_prg[1].address = cpu_to_le32(kvirt_to_bus(buf+4));
ir_prg[1].branchAddress = cpu_to_le32((virt_to_bus(&(ir_prg[2].control))
& 0xfffffff0) | 0x1);
ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf + 4) -
(unsigned long)d->dma.kvirt));
ir_prg[1].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
2 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
for (i=2;i<d->nb_cmd-1;i++) {
ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | PAGE_SIZE);
ir_prg[i].address = cpu_to_le32(kvirt_to_bus(buf+(i-1)*PAGE_SIZE));
ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
(buf+(i-1)*PAGE_SIZE) -
(unsigned long)d->dma.kvirt));
ir_prg[i].branchAddress =
cpu_to_le32((virt_to_bus(&(ir_prg[i+1].control))
& 0xfffffff0) | 0x1);
ir_prg[i].branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
(i + 1) * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
}
/* the last descriptor will generate an interrupt */
ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
ir_prg[i].address = cpu_to_le32(kvirt_to_bus(buf+(i-1)*PAGE_SIZE));
ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
(buf+(i-1)*PAGE_SIZE) -
(unsigned long)d->dma.kvirt));
}
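All branch addresses are now computed with dma_prog_region_offset_to_bus(). Since a prog region is a single pci_alloc_consistent() block, the helper presumably reduces to an add, unlike the scatterlist walk a dma_region needs; a sketch assuming the dma.h layout shown earlier in this commit:
/* Sketch: bus address of an offset inside a physically contiguous
 * dma_prog_region -- no per-page lookup required. */
static inline dma_addr_t
dma_prog_region_offset_to_bus(struct dma_prog_region *prog,
                              unsigned long offset)
{
        return prog->bus_addr + offset;
}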
static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
......@@ -574,7 +521,7 @@ void wakeup_dma_ir_ctx(unsigned long l)
static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
int n)
{
unsigned char* buf = d->buf + n * d->buf_size;
unsigned char* buf = d->dma.kvirt + n * d->buf_size;
u32 cycleTimer;
u32 timeStamp;
......@@ -603,7 +550,7 @@ static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
if (n == -1) {
return;
}
buf = d->buf + n * d->buf_size;
buf = d->dma.kvirt + n * d->buf_size;
timeStamp += (d->last_used_cmd[n] << 12) & 0xffff;
......@@ -650,7 +597,8 @@ void wakeup_dma_it_ctx(unsigned long l)
static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
{
struct it_dma_prg *it_prg = d->it_prg[n];
unsigned long buf = (unsigned long)d->buf+n*d->buf_size;
struct dma_prog_region *it_reg = &d->prg_reg[n];
unsigned long buf = (unsigned long)d->dma.kvirt + n * d->buf_size;
int i;
d->last_used_cmd[n] = d->nb_cmd - 1;
for (i=0;i<d->nb_cmd;i++) {
......@@ -674,18 +622,18 @@ static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST |
DMA_CTL_BRANCH);
it_prg[i].end.address =
cpu_to_le32(kvirt_to_bus(buf+i*d->packet_size));
cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf+i*d->packet_size) -
(unsigned long)d->dma.kvirt));
if (i<d->nb_cmd-1) {
it_prg[i].end.control |= cpu_to_le32(d->packet_size);
it_prg[i].begin.branchAddress =
cpu_to_le32((virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3);
cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
it_prg[i].end.branchAddress =
cpu_to_le32((virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3);
}
else {
cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
} else {
/* the last prg generates an interrupt */
it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
DMA_CTL_IRQ | d->left_size);
......@@ -694,15 +642,6 @@ static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
it_prg[i].end.branchAddress = 0;
}
it_prg[i].end.status = 0;
#if 0
printk("%d:%d: %08x-%08x ctrl %08x brch %08x d0 %08x d1 %08x\n",n,i,
virt_to_bus(&(it_prg[i].begin.control)),
virt_to_bus(&(it_prg[i].end.control)),
it_prg[i].end.control,
it_prg[i].end.branchAddress,
it_prg[i].data[0], it_prg[i].data[1]);
#endif
}
}
......@@ -711,6 +650,7 @@ static void initialize_dma_it_prg_var_packet_queue(
struct ti_ohci *ohci)
{
struct it_dma_prg *it_prg = d->it_prg[n];
struct dma_prog_region *it_reg = &d->prg_reg[n];
int i;
#if 0
......@@ -732,12 +672,12 @@ static void initialize_dma_it_prg_var_packet_queue(
if (i < d->nb_cmd-1 && packet_sizes[i+1] != 0) {
it_prg[i].end.control |= cpu_to_le32(size);
it_prg[i].begin.branchAddress =
cpu_to_le32((virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3);
it_prg[i].end.branchAddress =
cpu_to_le32((virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3);
it_prg[i].begin.branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
it_prg[i].end.branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
} else {
/* the last prg generates an interrupt */
it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
......@@ -769,37 +709,6 @@ static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag,
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1<<d->ctx);
}
static int do_iso_mmap(struct ti_ohci *ohci, struct dma_iso_ctx *d,
struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
unsigned long page, pos;
if (size > d->num_desc * d->buf_size) {
PRINT(KERN_ERR, ohci->id,
"iso context %d buf size is different from mmap size",
d->ctx);
return -EINVAL;
}
if (!d->buf) {
PRINT(KERN_ERR, ohci->id,
"iso context %d is not allocated", d->ctx);
return -EINVAL;
}
pos = (unsigned long) d->buf;
while (size > 0) {
page = kvirt_to_pa(pos);
if (remap_page_range_1394(vma, start, page, PAGE_SIZE, PAGE_SHARED))
return -EAGAIN;
start += PAGE_SIZE;
pos += PAGE_SIZE;
size -= PAGE_SIZE;
}
return 0;
}
static int video1394_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
......@@ -810,8 +719,8 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
switch(cmd)
{
case VIDEO1394_LISTEN_CHANNEL:
case VIDEO1394_TALK_CHANNEL:
case VIDEO1394_IOC_LISTEN_CHANNEL:
case VIDEO1394_IOC_TALK_CHANNEL:
{
struct video1394_mmap v;
u64 mask;
......@@ -870,7 +779,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
}
if (cmd == VIDEO1394_LISTEN_CHANNEL) {
if (cmd == VIDEO1394_IOC_LISTEN_CHANNEL) {
d = alloc_dma_iso_ctx(ohci, OHCI_ISO_RECEIVE,
v.nb_buffers, v.buf_size,
v.channel, 0);
......@@ -920,8 +829,8 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return 0;
}
case VIDEO1394_UNLISTEN_CHANNEL:
case VIDEO1394_UNTALK_CHANNEL:
case VIDEO1394_IOC_UNLISTEN_CHANNEL:
case VIDEO1394_IOC_UNTALK_CHANNEL:
{
int channel;
u64 mask;
......@@ -945,7 +854,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
/* Mark this channel as unused */
ohci->ISO_channel_usage &= ~mask;
if (cmd == VIDEO1394_UNLISTEN_CHANNEL)
if (cmd == VIDEO1394_IOC_UNLISTEN_CHANNEL)
d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, channel);
else
d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, channel);
......@@ -957,7 +866,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return 0;
}
case VIDEO1394_LISTEN_QUEUE_BUFFER:
case VIDEO1394_IOC_LISTEN_QUEUE_BUFFER:
{
struct video1394_wait v;
struct dma_iso_ctx *d;
......@@ -984,10 +893,10 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
if (d->last_buffer>=0)
d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress =
cpu_to_le32((virt_to_bus(&(d->ir_prg[v.buffer][0].control))
& 0xfffffff0) | 0x1);
if (d->last_buffer>=0)
d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0)
& 0xfffffff0) | 0x1);
d->last_buffer = v.buffer;
......@@ -1000,9 +909,9 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
DBGMSG(ohci->id, "Starting iso DMA ctx=%d",d->ctx);
/* Tell the controller where the first program is */
reg_write(ohci, d->cmdPtr,
virt_to_bus(&(d->ir_prg[v.buffer][0]))|0x1);
reg_write(ohci, d->cmdPtr,
dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0) | 0x1);
/* Run IR context */
reg_write(ohci, d->ctrlSet, 0x8000);
}
......@@ -1017,8 +926,8 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return 0;
}
case VIDEO1394_LISTEN_WAIT_BUFFER:
case VIDEO1394_LISTEN_POLL_BUFFER:
case VIDEO1394_IOC_LISTEN_WAIT_BUFFER:
case VIDEO1394_IOC_LISTEN_POLL_BUFFER:
{
struct video1394_wait v;
struct dma_iso_ctx *d;
......@@ -1045,7 +954,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
break;
case VIDEO1394_BUFFER_QUEUED:
if (cmd == VIDEO1394_LISTEN_POLL_BUFFER) {
if (cmd == VIDEO1394_IOC_LISTEN_POLL_BUFFER) {
/* for polling, return error code EINTR */
spin_unlock_irqrestore(&d->lock, flags);
return -EINTR;
......@@ -1099,7 +1008,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return 0;
}
case VIDEO1394_TALK_QUEUE_BUFFER:
case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
{
struct video1394_wait v;
struct video1394_queue_variable qv;
......@@ -1142,18 +1051,16 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
if (d->last_buffer>=0) {
if (d->last_buffer >= 0) {
d->it_prg[d->last_buffer]
[ d->last_used_cmd[d->last_buffer]
].end.branchAddress =
cpu_to_le32((virt_to_bus(&(d->it_prg[v.buffer][0].begin.control))
& 0xfffffff0) | 0x3);
[ d->last_used_cmd[d->last_buffer] ].end.branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer],
0) & 0xfffffff0) | 0x3);
d->it_prg[d->last_buffer]
[d->last_used_cmd[d->last_buffer]
].begin.branchAddress =
cpu_to_le32((virt_to_bus(&(d->it_prg[v.buffer][0].begin.control))
& 0xfffffff0) | 0x3);
[ d->last_used_cmd[d->last_buffer] ].begin.branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer],
0) & 0xfffffff0) | 0x3);
d->next_buffer[d->last_buffer] = v.buffer;
}
d->last_buffer = v.buffer;
......@@ -1170,9 +1077,9 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
put_timestamp(ohci, d, d->last_buffer);
/* Tell the controller where the first program is */
reg_write(ohci, d->cmdPtr,
virt_to_bus(&(d->it_prg[v.buffer][0]))|0x3);
reg_write(ohci, d->cmdPtr,
dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0) | 0x3);
/* Run IT context */
reg_write(ohci, d->ctrlSet, 0x8000);
}
......@@ -1189,7 +1096,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return 0;
}
case VIDEO1394_TALK_WAIT_BUFFER:
case VIDEO1394_IOC_TALK_WAIT_BUFFER:
{
struct video1394_wait v;
struct dma_iso_ctx *d;
......@@ -1254,7 +1161,7 @@ int video1394_mmap(struct file *file, struct vm_area_struct *vma)
if (ctx->current_ctx == NULL) {
PRINT(KERN_ERR, ctx->video->ohci->id, "Current iso context not set");
} else
res = do_iso_mmap(ctx->video->ohci, ctx->current_ctx, vma);
res = dma_region_mmap(&ctx->current_ctx->dma, file, vma);
unlock_kernel();
return res;
......
......@@ -21,6 +21,8 @@
#ifndef _VIDEO_1394_H
#define _VIDEO_1394_H
#include "ieee1394-ioctl.h"
#define VIDEO1394_DRIVER_NAME "video1394"
#define VIDEO1394_MAX_SIZE 0x4000000
......@@ -31,18 +33,6 @@ enum {
VIDEO1394_BUFFER_READY
};
enum {
VIDEO1394_LISTEN_CHANNEL = 0,
VIDEO1394_UNLISTEN_CHANNEL,
VIDEO1394_LISTEN_QUEUE_BUFFER,
VIDEO1394_LISTEN_WAIT_BUFFER, // wait until buffer is ready
VIDEO1394_TALK_CHANNEL,
VIDEO1394_UNTALK_CHANNEL,
VIDEO1394_TALK_QUEUE_BUFFER,
VIDEO1394_TALK_WAIT_BUFFER,
VIDEO1394_LISTEN_POLL_BUFFER // return immediately with -EINTR if not ready
};
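As with raw1394 and amdtp, the numbers move into ieee1394-ioctl.h as proper _IO* encodings. A sketch of their likely shape (the magic character and ordinals are assumptions, not the real assignments):
/* Sketch only: the actual values are centralized in ieee1394-ioctl.h. */
#define VIDEO1394_IOC_LISTEN_CHANNEL      _IOWR('#', 0x10, struct video1394_mmap)
#define VIDEO1394_IOC_UNLISTEN_CHANNEL    _IOW('#', 0x11, int)
#define VIDEO1394_IOC_LISTEN_QUEUE_BUFFER _IOW('#', 0x12, struct video1394_wait)
#define VIDEO1394_IOC_LISTEN_WAIT_BUFFER  _IOWR('#', 0x13, struct video1394_wait)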
#define VIDEO1394_SYNC_FRAMES 0x00000001
#define VIDEO1394_INCLUDE_ISO_HEADERS 0x00000002
#define VIDEO1394_VARIABLE_PACKET_SIZE 0x00000004
......