Commit 959a9729 authored by Ben Collins, committed by Linus Torvalds

[PATCH] IEEE-1394 updates

Synchronizes with our SVN repo. Merged in all changes from your tree.
parent c9c13c7b
......@@ -32,7 +32,7 @@
* That is outside the scope of this driver, and furthermore it is not
* really standardized yet.
*
* The Audio and Music Data Tranmission Protocol is avaiable at
* The Audio and Music Data Transmission Protocol is available at
*
* http://www.1394ta.org/Download/Technology/Specifications/2001/AM20Final-jf2.pdf
*
......@@ -46,8 +46,6 @@
*
* - Fix DMA stop after bus reset!
*
* - Implement poll.
*
* - Clean up iso context handling in ohci1394.
*
*
......@@ -75,6 +73,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
......@@ -89,9 +88,13 @@
#define FMT_AMDTP 0x10
#define FDF_AM824 0x00
#define FDF_SFC_32KHZ 0x00 /* 32kHz */
#define FDF_SFC_44K1HZ 0x01 /* 44.1kHz */
#define FDF_SFC_48KHZ 0x02 /* 44.1kHz */
#define FDF_SFC_32KHZ 0x00
#define FDF_SFC_44K1HZ 0x01
#define FDF_SFC_48KHZ 0x02
#define FDF_SFC_88K2HZ 0x03
#define FDF_SFC_96KHZ 0x04
#define FDF_SFC_176K4HZ 0x05
#define FDF_SFC_192KHZ 0x06
struct descriptor_block {
struct output_more_immediate {
......@@ -113,15 +116,70 @@ struct descriptor_block {
struct packet {
struct descriptor_block *db;
dma_addr_t db_bus;
quadlet_t *payload;
struct iso_packet *payload;
dma_addr_t payload_bus;
};
#include <asm/byteorder.h>
#if defined __BIG_ENDIAN_BITFIELD
struct iso_packet {
/* First quadlet */
unsigned int dbs : 8;
unsigned int eoh0 : 2;
unsigned int sid : 6;
unsigned int dbc : 8;
unsigned int fn : 2;
unsigned int qpc : 3;
unsigned int sph : 1;
unsigned int reserved : 2;
/* Second quadlet */
unsigned int fdf : 8;
unsigned int eoh1 : 2;
unsigned int fmt : 6;
unsigned int syt : 16;
quadlet_t data[0];
};
#elif defined __LITTLE_ENDIAN_BITFIELD
struct iso_packet {
/* First quadlet */
unsigned int sid : 6;
unsigned int eoh0 : 2;
unsigned int dbs : 8;
unsigned int reserved : 2;
unsigned int sph : 1;
unsigned int qpc : 3;
unsigned int fn : 2;
unsigned int dbc : 8;
/* Second quadlet */
unsigned int fmt : 6;
unsigned int eoh1 : 2;
unsigned int fdf : 8;
unsigned int syt : 16;
quadlet_t data[0];
};
#else
#error Unknown bitfield type
#endif
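/* Both bitfield variants above describe the same two-quadlet CIP
 * header that precedes the audio data. On the wire (big-endian) the
 * first quadlet is eoh0:sid:dbs:fn:qpc:sph:reserved:dbc and the second
 * is eoh1:fmt:fdf:syt - the same values the previous code built by
 * hand as (node_id << 24) | (dimension << 16) | dbc and
 * (1 << 31) | (FMT_AMDTP << 24) | (fdf << 16) | syt. */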
struct fraction {
int integer;
int numerator;
int denominator;
int counter;
};
#define PACKET_LIST_SIZE 256
......@@ -148,6 +206,8 @@ struct stream {
int rate;
int dimension;
int fdf;
int mode;
int sample_format;
struct cmp_pcr *opcr;
/* Input samples are copied here. */
......@@ -157,7 +217,7 @@ struct stream {
unsigned char dbc;
struct packet_list *current_packet_list;
int current_packet;
struct fraction packet_size_fraction;
struct fraction ready_samples, samples_per_cycle;
/* We use these to generate control bits when we are packing
* iec958 data.
......@@ -176,8 +236,7 @@ struct stream {
* written back in the dma programs.
*/
atomic_t cycle_count, cycle_count2;
int cycle_offset;
struct fraction syt_fraction;
struct fraction cycle_offset, ticks_per_syt_offset;
int syt_interval;
int stale_count;
......@@ -192,7 +251,7 @@ struct stream {
struct list_head free_packet_lists;
wait_queue_head_t packet_list_wait;
spinlock_t packet_list_lock;
int iso_context;
struct ohci1394_iso_tasklet iso_tasklet;
struct pci_pool *descriptor_pool, *packet_pool;
/* Streams at a host controller are chained through this field. */
......@@ -220,23 +279,6 @@ static spinlock_t host_list_lock = SPIN_LOCK_UNLOCKED;
#define OHCI1394_CONTEXT_DEAD 0x00000800
#define OHCI1394_CONTEXT_ACTIVE 0x00000400
static inline int ohci1394_alloc_it_ctx(struct ti_ohci *ohci)
{
int i;
for (i = 0; i < ohci->nb_iso_xmit_ctx; i++)
if (!test_and_set_bit(i, &ohci->it_ctx_usage))
return i;
return -EBUSY;
}
static inline void ohci1394_free_it_ctx(struct ti_ohci *ohci, int ctx)
{
clear_bit(ctx, &ohci->it_ctx_usage);
}
void ohci1394_start_it_ctx(struct ti_ohci *ohci, int ctx,
dma_addr_t first_cmd, int z, int cycle_match)
{
......@@ -255,7 +297,7 @@ void ohci1394_wake_it_ctx(struct ti_ohci *ohci, int ctx)
OHCI1394_CONTEXT_WAKE);
}
void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx)
void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx, int synchronous)
{
u32 control;
int wait;
......@@ -265,13 +307,15 @@ void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx)
OHCI1394_CONTEXT_RUN);
wmb();
for (wait = 0; wait < 5; wait++) {
control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
break;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
if (synchronous) {
for (wait = 0; wait < 5; wait++) {
control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
break;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
}
}
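/* The synchronous argument lets callers choose whether to wait for the
 * context to actually go inactive: stream_free() passes 1 so the DMA
 * programs can be torn down safely afterwards, while plug_update()
 * passes 0, presumably because the CMP plug callback must not sleep. */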
......@@ -297,6 +341,27 @@ static struct packet_list *stream_get_free_packet_list(struct stream *s)
return pl;
}
static void stream_start_dma(struct stream *s, struct packet_list *pl)
{
u32 syt_cycle, cycle_count, start_cycle;
cycle_count = reg_read(s->host->host->hostdata,
OHCI1394_IsochronousCycleTimer) >> 12;
syt_cycle = (pl->last_cycle_count - PACKET_LIST_SIZE + 1) & 0x0f;
/* We program the DMA controller to start transmission at
* least 17 cycles from now - this happens when the lower four
* bits of cycle_count are 0x0f and syt_cycle is 0; in that
* case the start cycle is cycle_count - 15 + 32. */
start_cycle = (cycle_count & ~0x0f) + 32 + syt_cycle;
if ((start_cycle & 0x1fff) >= 8000)
start_cycle = start_cycle - 8000 + 0x2000;
ohci1394_start_it_ctx(s->host->ohci, s->iso_tasklet.context,
pl->packets[0].db_bus, 3,
start_cycle & 0x7fff);
}
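/* Worked example: with cycle_count == 1000 (low four bits 0x8) and
 * syt_cycle == 5, start_cycle == 992 + 32 + 5 == 1029, i.e. the
 * context starts 29 cycles ahead; over all combinations the lead time
 * stays between 17 and 47 cycles. The 8000/0x2000 adjustment keeps the
 * cycle number within 0..7999 and carries into the bits above it in
 * the cycleMatch value passed to ohci1394_start_it_ctx(). */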
static void stream_put_dma_packet_list(struct stream *s,
struct packet_list *pl)
{
......@@ -315,26 +380,16 @@ static void stream_put_dma_packet_list(struct stream *s,
if (pl->link.prev != &s->dma_packet_lists) {
struct packet *last = &prev->packets[PACKET_LIST_SIZE - 1];
last->db->payload_desc.branch = pl->packets[0].db_bus | 3;
ohci1394_wake_it_ctx(s->host->ohci, s->iso_context);
}
else {
u32 syt, cycle_count;
cycle_count = reg_read(s->host->host->hostdata,
OHCI1394_IsochronousCycleTimer) >> 12;
syt = (pl->packets[0].payload[1] >> 12) & 0x0f;
cycle_count = (cycle_count & ~0x0f) + 32 + syt;
if ((cycle_count & 0x1fff) >= 8000)
cycle_count = cycle_count - 8000 + 0x2000;
ohci1394_start_it_ctx(s->host->ohci, s->iso_context,
pl->packets[0].db_bus, 3,
cycle_count & 0x7fff);
last->db->header_desc.skip = pl->packets[0].db_bus | 3;
ohci1394_wake_it_ctx(s->host->ohci, s->iso_tasklet.context);
}
else
stream_start_dma(s, pl);
}
static void stream_shift_packet_lists(struct stream *s)
static void stream_shift_packet_lists(unsigned long l)
{
struct stream *s = (struct stream *) l;
struct packet_list *pl;
struct packet *last;
int diff;
......@@ -430,36 +485,42 @@ static void fraction_init(struct fraction *f, int numerator, int denominator)
f->integer = numerator / denominator;
f->numerator = numerator % denominator;
f->denominator = denominator;
f->counter = 0;
}
static int fraction_next_size(struct fraction *f)
static __inline__ void fraction_add(struct fraction *dst,
struct fraction *src1,
struct fraction *src2)
{
return f->integer + ((f->counter + f->numerator) / f->denominator);
/* assert: src1->denominator == src2->denominator */
int sum, denom;
/* We use these two local variables to allow gcc to optimize
* the division and the modulo into only one division. */
sum = src1->numerator + src2->numerator;
denom = src1->denominator;
dst->integer = src1->integer + src2->integer + sum / denom;
dst->numerator = sum % denom;
dst->denominator = denom;
}
static void fraction_inc(struct fraction *f)
static __inline__ void fraction_sub_int(struct fraction *dst,
struct fraction *src, int integer)
{
f->counter = (f->counter + f->numerator) % f->denominator;
dst->integer = src->integer - integer;
dst->numerator = src->numerator;
dst->denominator = src->denominator;
}
static void amdtp_irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent, void *data)
static __inline__ int fraction_floor(struct fraction *frac)
{
struct amdtp_host *host = data;
struct list_head *lh;
struct stream *s = NULL;
spin_lock(&host->stream_list_lock);
list_for_each(lh, &host->stream_list) {
s = list_entry(lh, struct stream, link);
if (isoXmitIntEvent & (1 << s->iso_context))
break;
}
spin_unlock(&host->stream_list_lock);
return frac->integer;
}
if (s != NULL)
stream_shift_packet_lists(s);
static __inline__ int fraction_ceil(struct fraction *frac)
{
return frac->integer + (frac->numerator > 0 ? 1 : 0);
}
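/* A worked example of how these helpers are used below: for a 44100Hz
 * stream, stream_configure() calls
 * fraction_init(&s->samples_per_cycle, 44100, 8000), which gives
 * integer == 5, numerator == 4100, denominator == 8000, i.e. 5 41/80
 * samples per 125us cycle. Repeatedly fraction_add()ing this into
 * ready_samples and taking fraction_floor() produces the 5, 6, 5, ...
 * packet sizes that add up to exactly 44100 samples per 8000 cycles,
 * while fraction_ceil() of the same value (6) is what
 * stream_alloc_packet_lists() uses to size the largest packet in
 * non-blocking mode. */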
void packet_initialize(struct packet *p, struct packet *next)
......@@ -473,18 +534,19 @@ void packet_initialize(struct packet *p, struct packet *next)
p->db->header_desc.control =
DMA_CTL_OUTPUT_MORE | DMA_CTL_IMMEDIATE | 8;
p->db->header_desc.skip = 0;
if (next) {
p->db->payload_desc.control =
DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
p->db->payload_desc.branch = next->db_bus | 3;
p->db->header_desc.skip = next->db_bus | 3;
}
else {
p->db->payload_desc.control =
DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH |
DMA_CTL_UPDATE | DMA_CTL_IRQ;
p->db->payload_desc.branch = 0;
p->db->header_desc.skip = 0;
}
p->db->payload_desc.data_address = p->payload_bus;
p->db->payload_desc.status = 0;
......@@ -633,17 +695,33 @@ static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
}
}
static void fill_payload_le16(struct stream *s, quadlet_t *data, int nevents)
{
quadlet_t *event, sample, bits;
unsigned char *p;
int i, j;
for (i = 0, event = data; i < nevents; i++) {
for (j = 0; j < s->dimension; j++) {
p = buffer_get_bytes(s->input, 2);
sample = (p[1] << 16) | (p[0] << 8);
bits = get_header_bits(s, j, sample);
event[j] = cpu_to_be32((bits << 24) | sample);
}
event += s->dimension;
if (++s->iec958_frame_count == 192)
s->iec958_frame_count = 0;
}
}
static void fill_packet(struct stream *s, struct packet *packet, int nevents)
{
int size, node_id, i, j;
quadlet_t *event;
unsigned char *p;
u32 control, sample, bits;
int syt_index, syt, next;
int syt_index, syt, size;
u32 control;
size = (nevents * s->dimension + 2) * sizeof(quadlet_t);
node_id = s->host->host->node_id & 0x3f;
/* Update DMA descriptors */
packet->db->payload_desc.status = 0;
......@@ -656,41 +734,45 @@ static void fill_packet(struct stream *s, struct packet *packet, int nevents)
(s->iso_channel << 8) | (TCODE_ISO_DATA << 4);
packet->db->header_desc.header[1] = size << 16;
/* Fill cip header */
syt_index = s->dbc & (s->syt_interval - 1);
if (syt_index == 0 || syt_index + nevents > s->syt_interval) {
/* Calculate synchronization timestamp (syt). First we
* determine syt_index, that is, the index in the packet of
* the sample for which the timestamp is valid. */
syt_index = (s->syt_interval - s->dbc) & (s->syt_interval - 1);
if (syt_index < nevents) {
syt = ((atomic_read(&s->cycle_count) << 12) |
s->cycle_offset) & 0xffff;
next = fraction_next_size(&s->syt_fraction) + s->cycle_offset;
s->cycle_offset.integer) & 0xffff;
fraction_add(&s->cycle_offset,
&s->cycle_offset, &s->ticks_per_syt_offset);
/* This next addition should be modulo 8000 (0x1f40),
* but we only use the lower 4 bits of cycle_count, and
* 8000 is a multiple of 16, so we don't need the modulo. */
atomic_add(next / 3072, &s->cycle_count);
s->cycle_offset = next % 3072;
fraction_inc(&s->syt_fraction);
atomic_add(s->cycle_offset.integer / 3072, &s->cycle_count);
s->cycle_offset.integer %= 3072;
}
else {
else
syt = 0xffff;
next = 0;
}
atomic_inc(&s->cycle_count2);
packet->payload[0] = cpu_to_be32((node_id << 24) | (s->dimension << 16) | s->dbc);
packet->payload[1] = cpu_to_be32((1 << 31) | (FMT_AMDTP << 24) | (s->fdf << 16) | syt);
/* Fill payload */
for (i = 0, event = &packet->payload[2]; i < nevents; i++) {
for (j = 0; j < s->dimension; j++) {
p = buffer_get_bytes(s->input, 2);
sample = (p[1] << 16) | (p[0] << 8);
bits = get_header_bits(s, j, sample);
event[j] = cpu_to_be32((bits << 24) | sample);
}
event += s->dimension;
if (++s->iec958_frame_count == 192)
s->iec958_frame_count = 0;
atomic_inc(&s->cycle_count2);
/* Fill cip header */
packet->payload->eoh0 = 0;
packet->payload->sid = s->host->host->node_id & 0x3f;
packet->payload->dbs = s->dimension;
packet->payload->fn = 0;
packet->payload->qpc = 0;
packet->payload->sph = 0;
packet->payload->reserved = 0;
packet->payload->dbc = s->dbc;
packet->payload->eoh1 = 2;
packet->payload->fmt = FMT_AMDTP;
packet->payload->fdf = s->fdf;
packet->payload->syt = cpu_to_be16(syt);
switch (s->sample_format) {
case AMDTP_INPUT_LE16:
fill_payload_le16(s, packet->payload->data, nevents);
break;
}
s->dbc += nevents;
......@@ -700,13 +782,44 @@ static void stream_flush(struct stream *s)
{
struct packet *p;
int nevents;
struct fraction next;
/* The AMDTP specifies two transmission modes: blocking and
* non-blocking. In blocking mode you always transfer
* syt_interval or zero samples, whereas in non-blocking mode
* you send as many samples as you have available at transfer
* time.
*
* The fraction samples_per_cycle specifies the number of
* samples that become available per cycle. We add this to
* the fraction ready_samples, which specifies the number of
* leftover samples from the previous transmission. The sum,
* stored in the fraction next, specifies the number of
* samples available for transmission, and from this we
* determine the number of samples to actually transmit.
*/
while (1) {
fraction_add(&next, &s->ready_samples, &s->samples_per_cycle);
if (s->mode == AMDTP_MODE_BLOCKING) {
if (fraction_floor(&next) >= s->syt_interval)
nevents = s->syt_interval;
else
nevents = 0;
}
else
nevents = fraction_floor(&next);
p = stream_current_packet(s);
if (s->input->length < nevents * s->dimension * 2 || p == NULL)
break;
while (nevents = fraction_next_size(&s->packet_size_fraction),
p = stream_current_packet(s),
nevents * s->dimension * 2 <= s->input->length && p != NULL) {
fill_packet(s, p, nevents);
fraction_inc(&s->packet_size_fraction);
stream_queue_packet(s);
/* Now that we have successfully queued the packet for
* transmission, we update the fraction ready_samples. */
fraction_sub_int(&s->ready_samples, &next, nevents);
}
}
......@@ -714,9 +827,10 @@ static int stream_alloc_packet_lists(struct stream *s)
{
int max_nevents, max_packet_size, i;
max_nevents = s->packet_size_fraction.integer;
if (s->packet_size_fraction.numerator > 0)
max_nevents++;
if (s->mode == AMDTP_MODE_BLOCKING)
max_nevents = s->syt_interval;
else
max_nevents = fraction_ceil(&s->samples_per_cycle);
max_packet_size = max_nevents * s->dimension * 4 + 8;
s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
......@@ -763,15 +877,20 @@ static void plug_update(struct cmp_pcr *plug, void *data)
plug->p2p_count, plug->channel);
s->iso_channel = plug->channel;
if (plug->p2p_count > 0) {
/* start streaming */
struct packet_list *pl;
pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
stream_start_dma(s, pl);
}
else {
/* stop streaming */
ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 0);
}
}
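/* A p2p_count transition to non-zero means a CMP peer has connected to
 * our output plug, so transmission is (re)started from the head of the
 * queued dma_packet_lists; when the count drops back to zero the IT
 * context is stopped without waiting for it to go inactive
 * (synchronous == 0). */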
static int stream_configure(struct stream *s, int cmd, struct amdtp_ioctl *cfg)
{
const int transfer_delay = 9000;
if (cfg->format <= AMDTP_FORMAT_IEC958_AC3)
s->format = cfg->format;
else
......@@ -782,32 +901,59 @@ static int stream_configure(struct stream *s, int cmd, struct amdtp_ioctl *cfg)
s->syt_interval = 8;
s->fdf = FDF_SFC_32KHZ;
s->iec958_rate_code = 0x0c;
s->rate = cfg->rate;
break;
case 44100:
s->syt_interval = 8;
s->fdf = FDF_SFC_44K1HZ;
s->iec958_rate_code = 0x00;
s->rate = cfg->rate;
break;
case 48000:
s->syt_interval = 8;
s->fdf = FDF_SFC_48KHZ;
s->iec958_rate_code = 0x04;
s->rate = cfg->rate;
break;
case 88200:
s->syt_interval = 16;
s->fdf = FDF_SFC_88K2HZ;
s->iec958_rate_code = 0x00;
break;
case 96000:
s->syt_interval = 16;
s->fdf = FDF_SFC_96KHZ;
s->iec958_rate_code = 0x00;
break;
case 176400:
s->syt_interval = 32;
s->fdf = FDF_SFC_176K4HZ;
s->iec958_rate_code = 0x00;
break;
case 192000:
s->syt_interval = 32;
s->fdf = FDF_SFC_192KHZ;
s->iec958_rate_code = 0x00;
break;
default:
return -EINVAL;
}
fraction_init(&s->packet_size_fraction, s->rate, 8000);
s->rate = cfg->rate;
fraction_init(&s->samples_per_cycle, s->rate, 8000);
fraction_init(&s->ready_samples, 0, 8000);
/* The syt_fraction is initialized to the number of ticks
* between syt_interval events. The number of ticks per
/* The ticks_per_syt_offset is initialized to the number of
* ticks between syt_interval events. The number of ticks per
* second is 24.576e6, so the number of ticks between
* syt_interval events is 24.576e6 * syt_interval / rate.
*/
fraction_init(&s->syt_fraction, 24576000 * s->syt_interval, s->rate);
fraction_init(&s->ticks_per_syt_offset,
24576000 * s->syt_interval, s->rate);
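/* For a 44100Hz stream with syt_interval == 8 this works out to
 * 24576000 * 8 / 44100 ticks, i.e. integer == 4458 with a remainder
 * of 10200/44100 - about 4458.23 ticks of the 24.576MHz clock
 * between successive syt timestamps. */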
fraction_init(&s->cycle_offset, (transfer_delay % 3072) * s->rate, s->rate);
atomic_set(&s->cycle_count, transfer_delay / 3072);
atomic_set(&s->cycle_count2, 0);
s->mode = cfg->mode;
s->sample_format = AMDTP_INPUT_LE16;
/* When using the AM824 raw subformat we can stream signals of
* any dimension. The IEC958 subformat, however, only
......@@ -858,7 +1004,6 @@ struct stream *stream_alloc(struct amdtp_host *host)
{
struct stream *s;
unsigned long flags;
const int transfer_delay = 8651; /* approx 352 us */
s = kmalloc(sizeof(struct stream), SLAB_KERNEL);
if (s == NULL)
......@@ -873,10 +1018,6 @@ struct stream *stream_alloc(struct amdtp_host *host)
return NULL;
}
s->cycle_offset = transfer_delay % 3072;
atomic_set(&s->cycle_count, transfer_delay / 3072);
atomic_set(&s->cycle_count2, 0);
s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
sizeof(struct descriptor_block),
16, 0, SLAB_KERNEL);
......@@ -892,8 +1033,11 @@ struct stream *stream_alloc(struct amdtp_host *host)
init_waitqueue_head(&s->packet_list_wait);
spin_lock_init(&s->packet_list_lock);
s->iso_context = ohci1394_alloc_it_ctx(host->ohci);
if (s->iso_context < 0) {
ohci1394_init_iso_tasklet(&s->iso_tasklet, OHCI_ISO_TRANSMIT,
stream_shift_packet_lists,
(unsigned long) s);
if (ohci1394_register_iso_tasklet(host->ohci, &s->iso_tasklet) < 0) {
pci_pool_destroy(s->descriptor_pool);
kfree(s->input);
kfree(s);
......@@ -920,8 +1064,8 @@ void stream_free(struct stream *s)
wait_event_interruptible(s->packet_list_wait,
list_empty(&s->dma_packet_lists));
ohci1394_stop_it_ctx(s->host->ohci, s->iso_context);
ohci1394_free_it_ctx(s->host->ohci, s->iso_context);
ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 1);
ohci1394_unregister_iso_tasklet(s->host->ohci, &s->iso_tasklet);
if (s->opcr != NULL)
cmp_unregister_opcr(s->host->host, s->opcr);
......@@ -969,8 +1113,13 @@ static ssize_t amdtp_write(struct file *file, const char *buffer, size_t count,
stream_flush(s);
if (s->current_packet_list == NULL &&
wait_event_interruptible(s->packet_list_wait,
if (s->current_packet_list != NULL)
continue;
if (file->f_flags & O_NONBLOCK)
return i + length > 0 ? i + length : -EAGAIN;
if (wait_event_interruptible(s->packet_list_wait,
!list_empty(&s->free_packet_lists)))
return -EINTR;
}
......@@ -983,7 +1132,6 @@ static int amdtp_ioctl(struct inode *inode, struct file *file,
{
struct stream *s = file->private_data;
struct amdtp_ioctl cfg;
int new;
switch(cmd)
{
......@@ -994,23 +1142,23 @@ static int amdtp_ioctl(struct inode *inode, struct file *file,
else
return stream_configure(s, cmd, &cfg);
case AMDTP_IOC_PING:
HPSB_INFO("ping: offsetting timpestamps %ld ticks", arg);
new = s->cycle_offset + arg;
s->cycle_offset = new % 3072;
atomic_add(new / 3072, &s->cycle_count);
return 0;
case AMDTP_IOC_ZAP:
while (MOD_IN_USE)
MOD_DEC_USE_COUNT;
return 0;
default:
return -EINVAL;
}
}
static unsigned int amdtp_poll(struct file *file, poll_table *pt)
{
struct stream *s = file->private_data;
poll_wait(file, &s->packet_list_wait, pt);
if (!list_empty(&s->free_packet_lists))
return POLLOUT | POLLWRNORM;
else
return 0;
}
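/* Together with the O_NONBLOCK handling in amdtp_write() above, this
 * lets a writer open the device non-blocking, poll() until
 * POLLOUT|POLLWRNORM is reported (i.e. a free packet list is
 * available), and treat a short write or -EAGAIN simply as a cue to
 * poll again. */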
static int amdtp_open(struct inode *inode, struct file *file)
{
struct amdtp_host *host;
......@@ -1044,11 +1192,12 @@ static int amdtp_release(struct inode *inode, struct file *file)
static struct file_operations amdtp_fops =
{
owner: THIS_MODULE,
write: amdtp_write,
ioctl: amdtp_ioctl,
open: amdtp_open,
release: amdtp_release
.owner = THIS_MODULE,
.write = amdtp_write,
.poll = amdtp_poll,
.ioctl = amdtp_ioctl,
.open = amdtp_open,
.release = amdtp_release
};
/* IEEE1394 Subsystem functions */
......@@ -1057,7 +1206,8 @@ static void amdtp_add_host(struct hpsb_host *host)
{
struct amdtp_host *ah;
/* FIXME: check it's an ohci host. */
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME) != 0)
return;
ah = kmalloc(sizeof *ah, SLAB_KERNEL);
ah->host = host;
......@@ -1068,8 +1218,6 @@ static void amdtp_add_host(struct hpsb_host *host)
spin_lock_irq(&host_list_lock);
list_add_tail(&ah->link, &host_list);
spin_unlock_irq(&host_list_lock);
ohci1394_hook_irq(ah->ohci, amdtp_irq_handler, ah);
}
static void amdtp_remove_host(struct hpsb_host *host)
......@@ -1088,7 +1236,6 @@ static void amdtp_remove_host(struct hpsb_host *host)
if (lh != &host_list) {
ah = list_entry(lh, struct amdtp_host, link);
ohci1394_unhook_irq(ah->ohci, amdtp_irq_handler, ah);
kfree(ah);
}
else
......@@ -1096,8 +1243,8 @@ static void amdtp_remove_host(struct hpsb_host *host)
}
static struct hpsb_highlevel_ops amdtp_highlevel_ops = {
add_host: amdtp_add_host,
remove_host: amdtp_remove_host,
.add_host = amdtp_add_host,
.remove_host = amdtp_remove_host,
};
/* Module interface */
......
......@@ -19,13 +19,24 @@
* AMDTP_FORMAT_IEC958_AC3 will transmit the samples with the data bit
* set, suitable for transmitting compressed AC-3 audio.
*
* The rate field specifies the transmission rate; supported values are
* AMDTP_RATE_32KHZ, AMDTP_RATE_44K1HZ and AMDTP_RATE_48KHZ.
* The rate field specifies the transmission rate; supported values
* are 32000, 44100, 48000, 88200, 96000, 176400 and 192000.
*
* The dimension field specifies the dimension of the signal, that is,
* the number of audio channels. Only AMDTP_FORMAT_RAW supports
* settings greater than 2.
*
* The mode field specifies which transmission mode to use. The AMDTP
* specifies two different transmission modes: blocking and
* non-blocking. The blocking transmission mode always sends a fixed
* number of samples, typically 8, 16 or 32. To exactly match the
* transmission rate, the driver alternates between sending empty and
* non-empty packets. In non-blocking mode, the driver transmits
* packets that are as small as possible. For example, for a
* transmission rate of 44100Hz, the driver should send 5 41/80 samples
* in every cycle, but this is not possible, so instead the driver
* alternates between sending 5 and 6 samples.
*
* The last thing to specify is either the isochronous channel to use
* or the output plug to connect to. If you know what channel the
* destination device will listen on, you can specify the channel
......@@ -60,15 +71,20 @@ enum {
};
enum {
AMDTP_RATE_32KHZ,
AMDTP_RATE_44K1HZ,
AMDTP_RATE_48KHZ,
AMDTP_MODE_BLOCKING,
AMDTP_MODE_NON_BLOCKING,
};
enum {
AMDTP_INPUT_LE16,
AMDTP_INPUT_BE16,
};
struct amdtp_ioctl {
__u32 format;
__u32 rate;
__u32 dimension;
__u32 mode;
union { __u32 channel; __u32 plug; } u;
};
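/* A minimal sketch of configuring a transmit stream from user space;
 * the device path and the AMDTP_IOC_CHANNEL request name used here are
 * assumptions for illustration, not definitions from this header:
 *
 *	struct amdtp_ioctl cfg = {
 *		.format    = AMDTP_FORMAT_RAW,
 *		.rate      = 44100,
 *		.dimension = 2,
 *		.mode      = AMDTP_MODE_NON_BLOCKING,
 *		.u.channel = 63,
 *	};
 *	int fd = open("/dev/amdtp", O_WRONLY | O_NONBLOCK);
 *
 *	if (fd >= 0 && ioctl(fd, AMDTP_IOC_CHANNEL, &cfg) == 0)
 *		write(fd, samples, nbytes);	poll() for POLLOUT on -EAGAIN
 */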
......
......@@ -299,14 +299,14 @@ static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
static struct hpsb_highlevel_ops cmp_highlevel_ops = {
add_host: cmp_add_host,
remove_host: cmp_remove_host,
host_reset: cmp_host_reset,
.add_host = cmp_add_host,
.remove_host = cmp_remove_host,
.host_reset = cmp_host_reset,
};
static struct hpsb_address_ops pcr_ops = {
read: pcr_read,
lock: pcr_lock,
.read = pcr_read,
.lock = pcr_lock,
};
/* Module interface */
......
......@@ -70,7 +70,7 @@ static void add_host(struct hpsb_host *host)
{
host->csr.lock = SPIN_LOCK_UNLOCKED;
host->csr.rom_size = host->ops->get_rom(host, &host->csr.rom);
host->csr.rom_size = host->driver->get_rom(host, &host->csr.rom);
host->csr.state = 0;
host->csr.node_ids = 0;
......@@ -152,7 +152,7 @@ static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
case CSR_CYCLE_TIME:
oldcycle = host->csr.cycle_time;
host->csr.cycle_time =
host->ops->devctl(host, GET_CYCLE_COUNTER, 0);
host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
if (oldcycle > host->csr.cycle_time) {
/* cycle time wrapped around */
......@@ -163,7 +163,7 @@ static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
case CSR_BUS_TIME:
oldcycle = host->csr.cycle_time;
host->csr.cycle_time =
host->ops->devctl(host, GET_CYCLE_COUNTER, 0);
host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
if (oldcycle > host->csr.cycle_time) {
/* cycle time wrapped around */
......@@ -181,32 +181,32 @@ static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
return RCODE_ADDRESS_ERROR;
case CSR_BUS_MANAGER_ID:
if (host->ops->hw_csr_reg)
ret = host->ops->hw_csr_reg(host, 0, 0, 0);
if (host->driver->hw_csr_reg)
ret = host->driver->hw_csr_reg(host, 0, 0, 0);
else
ret = host->csr.bus_manager_id;
*(buf++) = cpu_to_be32(ret);
out;
case CSR_BANDWIDTH_AVAILABLE:
if (host->ops->hw_csr_reg)
ret = host->ops->hw_csr_reg(host, 1, 0, 0);
if (host->driver->hw_csr_reg)
ret = host->driver->hw_csr_reg(host, 1, 0, 0);
else
ret = host->csr.bandwidth_available;
*(buf++) = cpu_to_be32(ret);
out;
case CSR_CHANNELS_AVAILABLE_HI:
if (host->ops->hw_csr_reg)
ret = host->ops->hw_csr_reg(host, 2, 0, 0);
if (host->driver->hw_csr_reg)
ret = host->driver->hw_csr_reg(host, 2, 0, 0);
else
ret = host->csr.channels_available_hi;
*(buf++) = cpu_to_be32(ret);
out;
case CSR_CHANNELS_AVAILABLE_LO:
if (host->ops->hw_csr_reg)
ret = host->ops->hw_csr_reg(host, 3, 0, 0);
if (host->driver->hw_csr_reg)
ret = host->driver->hw_csr_reg(host, 3, 0, 0);
else
ret = host->csr.channels_available_lo;
......@@ -244,7 +244,7 @@ static int write_regs(struct hpsb_host *host, int nodeid, int destid,
host->csr.node_ids &= NODE_MASK << 16;
host->csr.node_ids |= be32_to_cpu(*(data++)) & (BUS_MASK << 16);
host->node_id = host->csr.node_ids >> 16;
host->ops->devctl(host, SET_BUS_ID, host->node_id >> 6);
host->driver->devctl(host, SET_BUS_ID, host->node_id >> 6);
out;
case CSR_RESET_START:
......@@ -269,7 +269,7 @@ static int write_regs(struct hpsb_host *host, int nodeid, int destid,
case CSR_CYCLE_TIME:
/* should only be set by cycle start packet, automatically */
host->csr.cycle_time = be32_to_cpu(*data);
host->ops->devctl(host, SET_CYCLE_COUNTER,
host->driver->devctl(host, SET_CYCLE_COUNTER,
be32_to_cpu(*(data++)));
out;
case CSR_BUS_TIME:
......@@ -318,10 +318,10 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
data = be32_to_cpu(data);
arg = be32_to_cpu(arg);
if (host->ops->hw_csr_reg) {
if (host->driver->hw_csr_reg) {
quadlet_t old;
old = host->ops->
old = host->driver->
hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
data, arg);
......@@ -402,23 +402,23 @@ static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
static struct hpsb_highlevel_ops csr_ops = {
add_host: add_host,
host_reset: host_reset,
.add_host = add_host,
.host_reset = host_reset,
};
static struct hpsb_address_ops map_ops = {
read: read_maps,
.read = read_maps,
};
static struct hpsb_address_ops fcp_ops = {
write: write_fcp,
.write = write_fcp,
};
static struct hpsb_address_ops reg_ops = {
read: read_regs,
write: write_regs,
lock: lock_regs,
.read = read_regs,
.write = write_regs,
.lock = lock_regs,
};
static struct hpsb_highlevel *hl;
......
......@@ -27,6 +27,7 @@
#define _DV_1394_PRIVATE_H
#include "ieee1394.h"
#include "ohci1394.h"
#include <linux/pci.h>
#include <asm/scatterlist.h>
......@@ -91,20 +92,20 @@ static inline void fill_output_more_immediate(struct output_more_immediate *omi,
unsigned char sync_tag,
unsigned int payload_size)
{
omi->q[0] = 0x02000000 | 8 ; /* OUTPUT_MORE_IMMEDIATE; 8 is the size of the IT header */
omi->q[0] = cpu_to_le32(0x02000000 | 8); /* OUTPUT_MORE_IMMEDIATE; 8 is the size of the IT header */
omi->q[1] = 0;
omi->q[2] = 0;
omi->q[3] = 0;
/* IT packet header */
omi->q[4] = (0x0 << 16) /* DMA_SPEED_100 */
| (tag << 14)
| (channel << 8)
| (TCODE_ISO_DATA << 4)
| (sync_tag);
omi->q[5] = payload_size << 16;
omi->q[5] |= (0x7F << 8) | 0xA0; /* reserved field; mimic behavior of my Sony DSR-40 */
omi->q[4] = cpu_to_le32( (0x0 << 16) /* DMA_SPEED_100 */
| (tag << 14)
| (channel << 8)
| (TCODE_ISO_DATA << 4)
| (sync_tag) );
/* reserved field; mimic behavior of my Sony DSR-40 */
omi->q[5] = cpu_to_le32((payload_size << 16) | (0x7F << 8) | 0xA0);
omi->q[6] = 0;
omi->q[7] = 0;
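/* The cpu_to_le32() conversions in these fill_* helpers matter because
 * OHCI DMA descriptors are little-endian in host memory; the previous
 * plain stores only happened to work on little-endian machines. */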
......@@ -114,10 +115,8 @@ static inline void fill_output_more(struct output_more *om,
unsigned int data_size,
unsigned long data_phys_addr)
{
om->q[0] = 0; /* OUTPUT_MORE */
om->q[0] |= data_size;
om->q[1] = data_phys_addr;
om->q[0] = cpu_to_le32(data_size);
om->q[1] = cpu_to_le32(data_phys_addr);
om->q[2] = 0;
om->q[3] = 0;
}
......@@ -128,19 +127,20 @@ static inline void fill_output_last(struct output_last *ol,
unsigned int data_size,
unsigned long data_phys_addr)
{
ol->q[0] = 0;
ol->q[0] |= 1 << 28; /* OUTPUT_LAST */
u32 temp = 0;
temp |= 1 << 28; /* OUTPUT_LAST */
if(want_timestamp) /* controller will update timestamp at DMA time */
ol->q[0] |= 1 << 27;
temp |= 1 << 27;
if(want_interrupt)
ol->q[0] |= 3 << 20;
temp |= 3 << 20;
ol->q[0] |= 3 << 18; /* must take branch */
ol->q[0] |= data_size;
ol->q[1] = data_phys_addr;
temp |= 3 << 18; /* must take branch */
temp |= data_size;
ol->q[0] = cpu_to_le32(temp);
ol->q[1] = cpu_to_le32(data_phys_addr);
ol->q[2] = 0;
ol->q[3] = 0;
}
......@@ -152,15 +152,16 @@ static inline void fill_input_more(struct input_more *im,
unsigned int data_size,
unsigned long data_phys_addr)
{
im->q[0] = 2 << 28; /* INPUT_MORE */
im->q[0] |= 8 << 24; /* s = 1, update xferStatus and resCount */
u32 temp = 2 << 28; /* INPUT_MORE */
temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
if (want_interrupt)
im->q[0] |= 0 << 20; /* interrupts, i=0 in packet-per-buffer mode */
im->q[0] |= 0x0 << 16; /* disable branch to address for packet-per-buffer mode */
temp |= 0 << 20; /* interrupts, i=0 in packet-per-buffer mode */
temp |= 0x0 << 16; /* disable branch to address for packet-per-buffer mode */
/* disable wait on sync field, not used in DV :-( */
im->q[0] |= data_size;
temp |= data_size;
im->q[1] = data_phys_addr;
im->q[0] = cpu_to_le32(temp);
im->q[1] = cpu_to_le32(data_phys_addr);
im->q[2] = 0; /* branchAddress and Z not use in packet-per-buffer mode */
im->q[3] = 0; /* xferStatus & resCount, resCount must be initialize to data_size */
}
......@@ -169,16 +170,17 @@ static inline void fill_input_last(struct input_last *il,
unsigned int data_size,
unsigned long data_phys_addr)
{
il->q[0] = 3 << 28; /* INPUT_LAST */
il->q[0] |= 8 << 24; /* s = 1, update xferStatus and resCount */
il->q[0] |= 3 << 20; /* enable interrupts */
il->q[0] |= 0xC << 16; /* enable branch to address */
u32 temp = 3 << 28; /* INPUT_LAST */
temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
temp |= 3 << 20; /* enable interrupts */
temp |= 0xC << 16; /* enable branch to address */
/* disable wait on sync field, not used in DV :-( */
il->q[0] |= data_size;
temp |= data_size;
il->q[1] = data_phys_addr;
il->q[2] = 1; /* branchAddress (filled in later) and Z = 1 descriptor in next block */
il->q[3] = data_size; /* xferStatus & resCount, resCount must be initialize to data_size */
il->q[0] = cpu_to_le32(temp);
il->q[1] = cpu_to_le32(data_phys_addr);
il->q[2] = cpu_to_le32(1); /* branchAddress (filled in later) and Z = 1 descriptor in next block */
il->q[3] = cpu_to_le32(data_size); /* xferStatus & resCount, resCount must be initialize to data_size */
}
......@@ -434,6 +436,7 @@ struct video_card {
/* OHCI card IT DMA context number, -1 if not in use */
int ohci_it_ctx;
struct ohci1394_iso_tasklet it_tasklet;
/* register offsets for current IT DMA context, 0 if not in use */
u32 ohci_IsoXmitContextControlSet;
......@@ -441,6 +444,7 @@ struct video_card {
u32 ohci_IsoXmitCommandPtr;
/* OHCI card IR DMA context number, -1 if not in use */
struct ohci1394_iso_tasklet ir_tasklet;
int ohci_ir_ctx;
/* register offsets for current IR DMA context, 0 if not in use */
......@@ -476,6 +480,9 @@ struct video_card {
immediately. This is safe because the interrupt handler will never
advance active_frame onto a frame that is not READY (and the spinlock
must be held while marking a frame READY).
spinlock is also used to protect ohci_it_ctx and ohci_ir_ctx,
which can be accessed from both process and interrupt context
*/
spinlock_t spinlock;
......@@ -602,7 +609,7 @@ static int do_dv1394_shutdown(struct video_card *video, int free_user_buf);
calibrated against a Sony DSR-40 DVCAM deck */
#define CIP_N_NTSC 68000000
#define CIP_D_NTSC 1000000000
#define CIP_D_NTSC 1068000000
#define CIP_N_PAL 1
#define CIP_D_PAL 16
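/* With the accumulator in frame_prepare() (an empty packet is sent
 * whenever cip_accum > cip_d - cip_n, a data packet otherwise), these
 * constants give an empty:data packet ratio of cip_n : (cip_d - cip_n).
 * For NTSC that is 68000000 : 1000000000 = 17 : 250, i.e. 250 data
 * packets plus 17 empty packets = 267 cycles per frame, matching the
 * 8000 * 1001 / 30000 ~= 266.93 cycles available per 29.97Hz frame;
 * for PAL, 1 : 15 gives 300 + 20 = 320 cycles per 25Hz frame. */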
......
......@@ -159,9 +159,8 @@ static inline void flush_pci_write(struct ti_ohci *ohci)
reg_read(ohci, OHCI1394_IsochronousCycleTimer);
}
static void irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent, void *data);
static void it_tasklet_func(unsigned long data);
static void ir_tasklet_func(unsigned long data);
/* GLOBAL DATA */
......@@ -188,42 +187,29 @@ static inline struct video_card* file_to_video_card(struct file *file)
}
/* Taken from bttv.c */
/*******************************/
/* Memory management functions */
/*******************************/
/* note: we no longer use mem_map_reserve, because it causes a memory
leak, and setting vma->vm_flags to VM_RESERVED should be sufficient
to pin the pages in memory anyway. */
static void * rvmalloc(unsigned long size)
{
void * mem;
unsigned long adr;
size = PAGE_ALIGN(size);
mem=vmalloc_32(size);
if (mem) {
mem = vmalloc_32(size);
if(mem)
memset(mem, 0, size); /* Clear the ram out,
no junk to the user */
adr=(unsigned long) mem;
while (size > 0) {
mem_map_reserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
}
return mem;
}
static void rvfree(void * mem, unsigned long size)
{
unsigned long adr;
if (mem) {
adr=(unsigned long) mem;
while (size > 0) {
mem_map_unreserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
vfree(mem);
}
}
......@@ -382,10 +368,10 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* is this an empty packet? */
if(video->cip_accum > video->cip_d) {
if(video->cip_accum > (video->cip_d - video->cip_n)) {
empty_packet = 1;
payload_size = 8;
video->cip_accum -= video->cip_d;
video->cip_accum -= (video->cip_d - video->cip_n);
} else {
payload_size = 488;
video->cip_accum += video->cip_n;
......@@ -594,7 +580,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* note: we are not linked into the active DMA chain yet */
if(last_branch_address) {
*(last_branch_address) = block_dma | n_descriptors;
*(last_branch_address) = cpu_to_le32(block_dma | n_descriptors);
}
last_branch_address = branch_address;
......@@ -606,7 +592,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* when we first assemble a new frame, set the final branch
to loop back up to the top */
*(f->frame_end_branch) = f->descriptor_pool_dma | f->first_n_descriptors;
*(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
/* make the latest version of the frame buffer visible to the PCI card */
......@@ -643,15 +629,18 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* if DMA is already active, we are almost done */
/* just link us onto the active DMA chain */
if(video->frames[last_frame]->frame_end_branch) {
u32 temp;
/* point the previous frame's tail to this frame's head */
*(video->frames[last_frame]->frame_end_branch) = f->descriptor_pool_dma | f->first_n_descriptors;
*(video->frames[last_frame]->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
/* this write MUST precede the next one, or we could silently drop frames */
wmb();
/* disable the want_status semaphore on the last packet */
*(video->frames[last_frame]->frame_end_branch - 2) &= 0xF7CFFFFF;
temp = le32_to_cpu(*(video->frames[last_frame]->frame_end_branch - 2));
temp &= 0xF7CFFFFF;
*(video->frames[last_frame]->frame_end_branch - 2) = cpu_to_le32(temp);
/* flush these writes to memory ASAP */
flush_pci_write(video->ohci);
......@@ -914,14 +903,14 @@ static void receive_packets(struct video_card *video, struct frame *f)
last_branch_address = f->frame_end_branch;
if (last_branch_address)
*(last_branch_address) = block_dma | 1; /* set Z=1 */
*(last_branch_address) = cpu_to_le32(block_dma | 1); /* set Z=1 */
f->frame_end_branch = &(block->u.in.il.q[2]);
}
/* loop tail to head */
if (f->frame_end_branch)
*(f->frame_end_branch) = f->descriptor_pool_dma | 1; /* set Z=1 */
*(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | 1); /* set Z=1 */
spin_unlock_irqrestore(&video->spinlock, irq_flags);
......@@ -992,6 +981,7 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
do_dv1394_shutdown(video, 0);
/* try to claim the ISO channel */
spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
if(video->ohci->ISO_channel_usage & chan_mask) {
......@@ -1004,43 +994,75 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
video->channel = init->channel;
/* initialize misc. fields of video */
video->n_frames = init->n_frames;
video->pal_or_ntsc = init->format;
/* find and claim DMA contexts on the OHCI card */
video->cip_accum = 0;
video->continuity_counter = 0;
video->active_frame = -1;
video->first_clear_frame = 0;
video->n_clear_frames = video->n_frames;
video->dropped_frames = 0;
video->write_off = 0;
video->first_run = 1;
video->current_packet = -1;
video->first_frame = 0;
if(video->pal_or_ntsc == DV1394_NTSC) {
video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_NTSC;
video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_NTSC;
video->frame_size = DV1394_NTSC_FRAME_SIZE;
} else {
video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_PAL;
video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_PAL;
video->frame_size = DV1394_PAL_FRAME_SIZE;
}
video->syt_offset = init->syt_offset;
if(video->ohci_it_ctx == -1) {
/* find and claim DMA contexts on the OHCI card */
for(i = 0; i < video->ohci->nb_iso_xmit_ctx; i++) {
/* XXX this should be the last step of initialization, since the interrupt
handler uses ohci_i*_ctx to indicate whether or not it is safe to touch
frames. I'm not making this change quite yet, since it would be better
to clean up the init/shutdown process first.*/
if(! test_and_set_bit(i, &video->ohci->it_ctx_usage)) {
video->ohci_it_ctx = i;
debug_printk("dv1394: claimed IT DMA context %d\n", video->ohci_it_ctx);
break;
}
}
if(i == video->ohci->nb_iso_xmit_ctx) {
if(video->ohci_it_ctx == -1) {
ohci1394_init_iso_tasklet(&video->it_tasklet, OHCI_ISO_TRANSMIT,
it_tasklet_func, (unsigned long) video);
if (ohci1394_register_iso_tasklet(video->ohci, &video->it_tasklet) < 0) {
printk(KERN_ERR "dv1394: could not find an available IT DMA context\n");
retval = -EBUSY;
goto err_ctx;
}
else {
video->ohci_it_ctx = video->it_tasklet.context;
debug_printk("dv1394: claimed IT DMA context %d\n", video->ohci_it_ctx);
}
}
if(video->ohci_ir_ctx == -1) {
for(i = 0; i < video->ohci->nb_iso_rcv_ctx; i++) {
ohci1394_init_iso_tasklet(&video->ir_tasklet, OHCI_ISO_RECEIVE,
ir_tasklet_func, (unsigned long) video);
if(! test_and_set_bit(i, &video->ohci->ir_ctx_usage)) {
video->ohci_ir_ctx = i;
debug_printk("dv1394: claimed IR DMA context %d\n", video->ohci_ir_ctx);
break;
}
}
if(i == video->ohci->nb_iso_rcv_ctx) {
if (ohci1394_register_iso_tasklet(video->ohci, &video->ir_tasklet) < 0) {
printk(KERN_ERR "dv1394: could not find an available IR DMA context\n");
retval = -EBUSY;
goto err_ctx;
}
else {
video->ohci_ir_ctx = video->ir_tasklet.context;
debug_printk("dv1394: claimed IR DMA context %d\n", video->ohci_ir_ctx);
}
}
......@@ -1055,38 +1077,8 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
}
}
/* initialize misc. fields of video */
video->n_frames = init->n_frames;
video->pal_or_ntsc = init->format;
video->cip_accum = 0;
video->continuity_counter = 0;
video->active_frame = -1;
video->first_clear_frame = 0;
video->n_clear_frames = video->n_frames;
video->dropped_frames = 0;
video->write_off = 0;
video->first_run = 1;
video->current_packet = -1;
video->first_frame = 0;
if(video->pal_or_ntsc == DV1394_NTSC) {
video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_NTSC;
video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_NTSC;
video->frame_size = DV1394_NTSC_FRAME_SIZE;
} else {
video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_PAL;
video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_PAL;
video->frame_size = DV1394_PAL_FRAME_SIZE;
}
video->syt_offset = init->syt_offset;
if(video->user_buf == NULL) {
unsigned int i;
......@@ -1224,11 +1216,11 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
err_ctx:
if(video->ohci_it_ctx != -1) {
clear_bit(video->ohci_it_ctx, &video->ohci->it_ctx_usage);
ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
video->ohci_it_ctx = -1;
}
if(video->ohci_ir_ctx != -1) {
clear_bit(video->ohci_ir_ctx, &video->ohci->ir_ctx_usage);
ohci1394_unregister_iso_tasklet(video->ohci, &video->ir_tasklet);
video->ohci_ir_ctx = -1;
}
......@@ -1313,25 +1305,14 @@ static void stop_dma(struct video_card *video)
static int do_dv1394_shutdown(struct video_card *video, int free_user_buf)
{
int i;
unsigned long flags;
debug_printk("dv1394: shutdown...\n");
/* stop DMA if in progress */
stop_dma(video);
/* release the ISO channel */
if(video->channel != -1) {
u64 chan_mask;
unsigned long flags;
chan_mask = (u64)1 << video->channel;
spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
video->ohci->ISO_channel_usage &= ~(chan_mask);
spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
video->channel = -1;
}
spin_lock_irqsave(&video->spinlock, flags);
/* release the DMA contexts */
if(video->ohci_it_ctx != -1) {
......@@ -1361,6 +1342,22 @@ static int do_dv1394_shutdown(struct video_card *video, int free_user_buf)
video->ohci_ir_ctx = -1;
}
spin_unlock_irqrestore(&video->spinlock, flags);
/* release the ISO channel */
if(video->channel != -1) {
u64 chan_mask;
unsigned long flags;
chan_mask = (u64)1 << video->channel;
spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
video->ohci->ISO_channel_usage &= ~(chan_mask);
spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
video->channel = -1;
}
/* free the frame structs */
for(i = 0; i < DV1394_MAX_FRAMES; i++) {
if(video->frames[i])
......@@ -1467,7 +1464,7 @@ static struct page * dv1394_nopage(struct vm_area_struct * area, unsigned long a
}
static struct vm_operations_struct dv1394_vm_ops = {
nopage: dv1394_nopage
.nopage = dv1394_nopage
};
/*
......@@ -2315,12 +2312,13 @@ void dv1394_procfs_del( char *name)
/*** DEVICE DRIVER HANDLERS ************************************************/
static void irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent, void *data)
static void it_tasklet_func(unsigned long data)
{
int wake = 0;
struct video_card *video = (struct video_card*) data;
spin_lock(&video->spinlock);
irq_printk("INTERRUPT! Video = %08lx Iso event Recv: %08x Xmit: %08x\n",
(unsigned long) video, isoRecvIntEvent, isoXmitIntEvent);
irq_printk("ContextControl = %08x, CommandPtr = %08x\n",
......@@ -2330,13 +2328,12 @@ static void irq_handler(int card, quadlet_t isoRecvIntEvent,
if( (video->ohci_it_ctx != -1) &&
(isoXmitIntEvent & (1 << video->ohci_it_ctx)) &&
(reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
struct frame *f;
unsigned int frame, i;
spin_lock(&video->spinlock);
if(video->active_frame == -1)
frame = 0;
else
......@@ -2392,13 +2389,15 @@ static void irq_handler(int card, quadlet_t isoRecvIntEvent,
/* see if we need to set the timestamp for the next frame */
if( *(f->mid_frame_timestamp) ) {
struct frame *next_frame;
u32 ts_cyc, ts_off;
u32 begin_ts, ts_cyc, ts_off;
*(f->mid_frame_timestamp) = 0;
begin_ts = le32_to_cpu(*(f->frame_begin_timestamp));
irq_printk(" MIDDLE - first packet was sent at cycle %4u (%2u), assigned timestamp was (%2u) %4u\n",
*(f->frame_begin_timestamp) & 0x1FFF, *(f->frame_begin_timestamp) & 0xF,
f->assigned_timestamp >> 12, f->assigned_timestamp & 0xFFF);
begin_ts & 0x1FFF, begin_ts & 0xF,
f->assigned_timestamp >> 12, f->assigned_timestamp & 0xFFF);
/* prepare next frame and assign timestamp */
next_frame = video->frames[ (frame+1) % video->n_frames ];
......@@ -2412,7 +2411,7 @@ static void irq_handler(int card, quadlet_t isoRecvIntEvent,
/* set the timestamp to the timestamp of the last frame sent,
plus the length of the last frame sent, plus the syt latency */
ts_cyc = *(f->frame_begin_timestamp) & 0xF;
ts_cyc = begin_ts & 0xF;
/* advance one frame, plus syt latency (typically 2-3) */
ts_cyc += f->n_packets + video->syt_offset ;
......@@ -2446,30 +2445,41 @@ static void irq_handler(int card, quadlet_t isoRecvIntEvent,
} /* for(each frame) */
}
spin_unlock(&video->spinlock);
spin_unlock(&video->spinlock);
if(wake) {
kill_fasync(&video->fasync, SIGIO, POLL_OUT);
} /* end XMIT portion */
/* wake readers/writers/ioctl'ers */
wake_up_interruptible(&video->waitq);
}
}
/***** RECEIVE INTERRUPT and DMA ACTIVE *****/
static void ir_tasklet_func(unsigned long data)
{
int wake = 0;
struct video_card *video = (struct video_card*) data;
else if( (video->ohci_ir_ctx != -1) &&
(isoRecvIntEvent & (1 << video->ohci_ir_ctx)) &&
(reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) ) {
if( (video->ohci_ir_ctx != -1) &&
(reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) ) {
int sof=0; /* start-of-frame flag */
struct frame *f;
u16 packet_length, packet_time;
spin_lock(&video->spinlock);
packet_length = le16_to_cpu(video->packet_buffer[video->current_packet].data_length);
packet_time = le16_to_cpu(video->packet_buffer[video->current_packet].timestamp);
irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet,
video->packet_buffer[video->current_packet].timestamp, video->packet_buffer[video->current_packet].data_length,
packet_time, packet_length,
video->packet_buffer[video->current_packet].data[0], video->packet_buffer[video->current_packet].data[1]);
f = video->frames[video->active_frame];
/* exclude empty packet */
if (video->packet_buffer[video->current_packet].data_length > 8) {
if (packet_length > 8) {
/* check for start of frame */
sof = (video->packet_buffer[video->current_packet].data[0] == 0x1f &&
......@@ -2524,22 +2534,15 @@ static void irq_handler(int card, quadlet_t isoRecvIntEvent,
/* advance packet_buffer cursor */
video->current_packet = (video->current_packet + 1) % MAX_PACKET_BUFFER;
spin_unlock(&video->spinlock);
wake = 1; /* why the hell not? */
} /* receive interrupt */
spin_unlock(&video->spinlock);
if(wake) {
/* send SIGIO */
if(isoRecvIntEvent & (1))
kill_fasync(&video->fasync, SIGIO, POLL_IN);
if(isoXmitIntEvent & (1))
kill_fasync(&video->fasync, SIGIO, POLL_OUT);
kill_fasync(&video->fasync, SIGIO, POLL_IN);
/* wake readers/writers/ioctl'ers */
wake_up_interruptible(&video->waitq);
}
......@@ -2547,15 +2550,15 @@ static void irq_handler(int card, quadlet_t isoRecvIntEvent,
static struct file_operations dv1394_fops=
{
owner: THIS_MODULE,
poll: dv1394_poll,
ioctl: dv1394_ioctl,
mmap: dv1394_mmap,
open: dv1394_open,
write: dv1394_write,
read: dv1394_read,
release: dv1394_release,
fasync: dv1394_fasync,
.owner = THIS_MODULE,
.poll = dv1394_poll,
.ioctl = dv1394_ioctl,
.mmap = dv1394_mmap,
.open = dv1394_open,
.write = dv1394_write,
.read = dv1394_read,
.release = dv1394_release,
.fasync = dv1394_fasync,
};
......@@ -2572,12 +2575,15 @@ dv1394_devfs_find( char *name)
list_for_each(lh, &dv1394_devfs) {
p = list_entry(lh, struct dv1394_devfs_entry, list);
if(!strncmp(p->name, name, sizeof(p->name))) {
spin_unlock( &dv1394_devfs_lock);
return p;
goto found;
}
}
}
return NULL;
p = NULL;
found:
spin_unlock( &dv1394_devfs_lock);
return p;
}
static int dv1394_devfs_add_entry(struct video_card *video)
......@@ -2704,12 +2710,6 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
memset(video, 0, sizeof(struct video_card));
if (ohci1394_hook_irq(ohci, irq_handler, (void*) video) != 0) {
printk(KERN_ERR "dv1394: ohci1394_hook_irq() failed\n");
goto err_free;
}
video->ohci = ohci;
/* lower 2 bits of id indicate which of four "plugs"
per host */
......@@ -2763,9 +2763,11 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
if (format == DV1394_NTSC)
video->id |= mode;
else video->id |= 2 + mode;
#ifdef CONFIG_DEVFS_FS
if (dv1394_devfs_add_entry(video) < 0)
goto err_free;
#endif
debug_printk("dv1394: dv1394_init() OK on ID %d\n", video->id);
......@@ -2783,12 +2785,13 @@ static void dv1394_un_init(struct video_card *video)
/* obviously nobody has the driver open at this point */
do_dv1394_shutdown(video, 1);
ohci1394_unhook_irq(video->ohci, irq_handler, (void*) video);
snprintf(buf, sizeof(buf), "dv/host%d/%s/%s", (video->id >> 2),
(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
(video->mode == MODE_RECEIVE ? "in" : "out")
);
#ifdef CONFIG_DEVFS_FS
dv1394_devfs_del(buf);
#endif
#ifdef CONFIG_PROC_FS
dv1394_procfs_del(buf);
#endif
......@@ -2825,12 +2828,14 @@ static void dv1394_remove_host (struct hpsb_host *host)
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
n = (video->id >> 2);
#ifdef CONFIG_DEVFS_FS
snprintf(buf, sizeof(buf), "dv/host%d/NTSC", n);
dv1394_devfs_del(buf);
snprintf(buf, sizeof(buf), "dv/host%d/PAL", n);
dv1394_devfs_del(buf);
snprintf(buf, sizeof(buf), "dv/host%d", n);
dv1394_devfs_del(buf);
#endif
#ifdef CONFIG_PROC_FS
snprintf(buf, sizeof(buf), "dv/host%d/NTSC", n);
......@@ -2867,6 +2872,7 @@ static void dv1394_add_host (struct hpsb_host *host)
}
#endif
#ifdef CONFIG_DEVFS_FS
devfs_entry = dv1394_devfs_find("dv");
if (devfs_entry != NULL) {
snprintf(buf, sizeof(buf), "host%d", ohci->id);
......@@ -2874,6 +2880,7 @@ static void dv1394_add_host (struct hpsb_host *host)
dv1394_devfs_add_dir("NTSC", devfs_entry, NULL);
dv1394_devfs_add_dir("PAL", devfs_entry, NULL);
}
#endif
dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
......@@ -2915,12 +2922,13 @@ static void dv1394_host_reset(struct hpsb_host *host)
if(!video)
return;
spin_lock_irqsave(&video->spinlock, flags);
/* check IT context */
if(video->ohci_it_ctx != -1) {
u32 ctx;
spin_lock_irqsave(&video->spinlock, flags);
ctx = reg_read(video->ohci, video->ohci_IsoXmitContextControlSet);
/* if(RUN but not ACTIVE) */
......@@ -2951,16 +2959,12 @@ static void dv1394_host_reset(struct hpsb_host *host)
reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
}
spin_unlock_irqrestore(&video->spinlock, flags);
}
/* check IR context */
if(video->ohci_ir_ctx != -1) {
u32 ctx;
spin_lock_irqsave(&video->spinlock, flags);
ctx = reg_read(video->ohci, video->ohci_IsoRcvContextControlSet);
/* if(RUN but not ACTIVE) */
......@@ -2992,18 +2996,18 @@ static void dv1394_host_reset(struct hpsb_host *host)
reg_read(video->ohci, video->ohci_IsoRcvContextControlSet),
reg_read(video->ohci, video->ohci_IsoRcvCommandPtr));
}
spin_unlock_irqrestore(&video->spinlock, flags);
}
spin_unlock_irqrestore(&video->spinlock, flags);
/* wake readers/writers/ioctl'ers */
wake_up_interruptible(&video->waitq);
}
static struct hpsb_highlevel_ops hl_ops = {
add_host: dv1394_add_host,
remove_host: dv1394_remove_host,
host_reset: dv1394_host_reset,
.add_host = dv1394_add_host,
.remove_host = dv1394_remove_host,
.host_reset = dv1394_host_reset,
};
......@@ -3018,7 +3022,9 @@ static void __exit dv1394_exit_module(void)
{
hpsb_unregister_highlevel (hl_handle);
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
#ifdef CONFIG_DEVFS_FS
dv1394_devfs_del("dv");
#endif
#ifdef CONFIG_PROC_FS
dv1394_procfs_del("dv");
#endif
......@@ -3032,17 +3038,21 @@ static int __init dv1394_init_module(void)
return -EIO;
}
#ifdef CONFIG_DEVFS_FS
if (dv1394_devfs_add_dir("dv", NULL, NULL) < 0) {
printk(KERN_ERR "dv1394: unable to create /dev/ieee1394/dv\n");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
return -ENOMEM;
}
#endif
#ifdef CONFIG_PROC_FS
if (dv1394_procfs_add_dir("dv",NULL,NULL) < 0) {
printk(KERN_ERR "dv1394: unable to create /proc/bus/ieee1394/dv\n");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
#ifdef CONFIG_DEVFS_FS
dv1394_devfs_del("dv");
#endif
return -ENOMEM;
}
#endif
......@@ -3051,7 +3061,9 @@ static int __init dv1394_init_module(void)
if (hl_handle == NULL) {
printk(KERN_ERR "dv1394: hpsb_register_highlevel failed\n");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
#ifdef CONFIG_DEVFS_FS
dv1394_devfs_del("dv");
#endif
#ifdef CONFIG_PROC_FS
dv1394_procfs_del("dv");
#endif
......
......@@ -77,7 +77,7 @@
printk(KERN_ERR fmt, ## args)
static char version[] __devinitdata =
"eth1394.c:v0.50 15/Jul/01 Ben Collins <bcollins@debian.org>";
"$Rev: 546 $ Ben Collins <bcollins@debian.org>";
/* Our ieee1394 highlevel driver */
#define ETHER1394_DRIVER_NAME "ether1394"
......@@ -99,6 +99,7 @@ static int hdr_type_len[] = {
MODULE_AUTHOR("Ben Collins (bcollins@debian.org)");
MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)");
MODULE_LICENSE("GPL");
/* Find our host_info struct for a given host pointer. Must be called
* under spinlock. */
......@@ -276,7 +277,7 @@ static inline void ether1394_register_limits (int nodeid, unsigned char max_rec,
static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
{
int flags;
unsigned long flags;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
int phy_id = priv->host->node_id & NODE_MASK;
......@@ -477,7 +478,7 @@ static inline unsigned short ether1394_parse_encap (struct sk_buff *skb, struct
* use of some of the fields, since they tell us a little bit
* about the sending machine. */
if (hdr->uf.ether_type == __constant_htons (ETH_P_ARP)) {
int flags;
unsigned long flags;
u16 phy_id = srcid & NODE_MASK;
struct eth1394_priv *priv =
(struct eth1394_priv *)dev->priv;
......@@ -525,7 +526,7 @@ static int ether1394_write (struct hpsb_host *host, int srcid, int destid,
{
struct sk_buff *skb;
char *buf = (char *)data;
int flags;
unsigned long flags;
struct net_device *dev = ether1394_find_dev (host);
struct eth1394_priv *priv;
......@@ -596,7 +597,7 @@ static void hpsb_write_sched (void *__ptask)
struct sk_buff *skb = ptask->skb;
struct net_device *dev = ptask->skb->dev;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
int flags;
unsigned long flags;
/* Statistics */
spin_lock_irqsave (&priv->lock, flags);
......@@ -627,7 +628,8 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
int kmflags = in_interrupt () ? GFP_ATOMIC : GFP_KERNEL;
struct ethhdr *eth;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
int proto, flags;
int proto;
unsigned long flags;
nodeid_t dest_node;
u64 addr;
struct packet_task *ptask = NULL;
......@@ -702,14 +704,14 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
/* Function for incoming 1394 packets */
static struct hpsb_address_ops addr_ops = {
write: ether1394_write,
.write = ether1394_write,
};
/* Ieee1394 highlevel driver functions */
static struct hpsb_highlevel_ops hl_ops = {
add_host: ether1394_add_host,
remove_host: ether1394_remove_host,
host_reset: ether1394_host_reset,
.add_host = ether1394_add_host,
.remove_host = ether1394_remove_host,
.host_reset = ether1394_host_reset,
};
static int __init ether1394_init_module (void)
......
......@@ -139,7 +139,7 @@ void hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
}
if (host->iso_listen_count[channel]++ == 0) {
host->ops->devctl(host, ISO_LISTEN_CHANNEL, channel);
host->driver->devctl(host, ISO_LISTEN_CHANNEL, channel);
}
}
......@@ -152,7 +152,7 @@ void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
}
if (--host->iso_listen_count[channel] == 0) {
host->ops->devctl(host, ISO_UNLISTEN_CHANNEL, channel);
host->driver->devctl(host, ISO_UNLISTEN_CHANNEL, channel);
}
}
......
......@@ -38,9 +38,9 @@ static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
return -1;
}
static struct hpsb_host_operations dummy_ops = {
transmit_packet: dummy_transmit_packet,
devctl: dummy_devctl
static struct hpsb_host_driver dummy_driver = {
.transmit_packet = dummy_transmit_packet,
.devctl = dummy_devctl
};
/**
......@@ -63,7 +63,7 @@ int hpsb_ref_host(struct hpsb_host *host)
spin_lock_irqsave(&hosts_lock, flags);
list_for_each(lh, &hosts) {
if (host == list_entry(lh, struct hpsb_host, host_list)) {
host->ops->devctl(host, MODIFY_USAGE, 1);
host->driver->devctl(host, MODIFY_USAGE, 1);
host->refcount++;
retval = 1;
break;
......@@ -87,7 +87,7 @@ void hpsb_unref_host(struct hpsb_host *host)
{
unsigned long flags;
host->ops->devctl(host, MODIFY_USAGE, 0);
host->driver->devctl(host, MODIFY_USAGE, 0);
spin_lock_irqsave(&hosts_lock, flags);
host->refcount--;
......@@ -128,7 +128,6 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra)
h->hostdata = h + 1;
h->driver = drv;
h->ops = drv->ops;
h->refcount = 1;
INIT_LIST_HEAD(&h->pending_packets);
......@@ -152,63 +151,26 @@ void hpsb_add_host(struct hpsb_host *host)
unsigned long flags;
spin_lock_irqsave(&hosts_lock, flags);
host->driver->number_of_hosts++;
list_add_tail(&host->driver_list, &host->driver->hosts);
list_add_tail(&host->host_list, &hosts);
spin_unlock_irqrestore(&hosts_lock, flags);
highlevel_add_host(host);
host->ops->devctl(host, RESET_BUS, 0);
host->driver->devctl(host, RESET_BUS, 0);
}
void hpsb_remove_host(struct hpsb_host *host)
{
struct hpsb_host_driver *drv = host->driver;
unsigned long flags;
host->is_shutdown = 1;
host->ops = &dummy_ops;
host->driver = &dummy_driver;
highlevel_remove_host(host);
spin_lock_irqsave(&hosts_lock, flags);
list_del(&host->driver_list);
list_del(&host->host_list);
drv->number_of_hosts--;
spin_unlock_irqrestore(&hosts_lock, flags);
}
struct hpsb_host_driver *hpsb_register_lowlevel(struct hpsb_host_operations *op,
const char *name)
{
struct hpsb_host_driver *drv;
drv = kmalloc(sizeof(struct hpsb_host_driver), SLAB_KERNEL);
if (!drv) return NULL;
INIT_LIST_HEAD(&drv->list);
INIT_LIST_HEAD(&drv->hosts);
drv->number_of_hosts = 0;
drv->name = name;
drv->ops = op;
spin_lock(&host_drivers_lock);
list_add_tail(&drv->list, &host_drivers);
spin_unlock(&host_drivers_lock);
return drv;
}
void hpsb_unregister_lowlevel(struct hpsb_host_driver *drv)
{
spin_lock(&host_drivers_lock);
list_del(&drv->list);
spin_unlock(&host_drivers_lock);
kfree(drv);
}
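Editorial note, not part of the patch: with hpsb_register_lowlevel() and hpsb_unregister_lowlevel() removed, a host driver now declares a static struct hpsb_host_driver and passes it directly to hpsb_alloc_host(), as ohci1394 and pcilynx do later in this patch. A minimal sketch of the new flow, with hypothetical foo1394 names:

static int foo1394_transmit(struct hpsb_host *host, struct hpsb_packet *p);
static int foo1394_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg);

static struct hpsb_host_driver foo1394_driver = {
	.name            = "foo1394",
	.transmit_packet = foo1394_transmit,
	.devctl          = foo1394_devctl,
};

/* Probe-time setup replaces the old register_lowlevel() call. */
static int foo1394_setup(size_t extra)
{
	struct hpsb_host *host = hpsb_alloc_host(&foo1394_driver, extra);

	if (!host)
		return -ENOMEM;
	/* hardware init would go here */
	hpsb_add_host(host);	/* makes the host visible to the core */
	return 0;
}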
/*
* This function calls the given function for every host currently registered.
*/
......
......@@ -15,7 +15,6 @@ struct hpsb_packet;
struct hpsb_host {
struct list_head host_list;
struct hpsb_host_operations *ops;
void *hostdata;
atomic_t generation;
......@@ -59,7 +58,6 @@ struct hpsb_host {
struct csr_control csr;
struct hpsb_host_driver *driver;
struct list_head driver_list;
struct pci_dev *pdev;
};
......@@ -113,7 +111,9 @@ enum reset_types {
SHORT_RESET
};
struct hpsb_host_operations {
struct hpsb_host_driver {
const char *name;
/* This function must store a pointer to the configuration ROM into the
* location referenced to by pointer and return the size of the ROM. It
* may not fail. If any allocation is required, it must be done
......@@ -149,18 +149,6 @@ struct hpsb_host_operations {
quadlet_t data, quadlet_t compare);
};
struct hpsb_host_driver {
struct list_head list;
struct list_head hosts;
int number_of_hosts;
const char *name;
struct hpsb_host_operations *ops;
};
/* core internal use */
void register_builtin_lowlevels(void);
......@@ -184,8 +172,4 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra);
void hpsb_add_host(struct hpsb_host *host);
void hpsb_remove_host(struct hpsb_host *h);
struct hpsb_host_driver *hpsb_register_lowlevel(struct hpsb_host_operations *op,
const char *name);
void hpsb_unregister_lowlevel(struct hpsb_host_driver *drv);
#endif /* _IEEE1394_HOSTS_H */
......@@ -141,7 +141,7 @@ void free_hpsb_packet(struct hpsb_packet *packet)
int hpsb_reset_bus(struct hpsb_host *host, int type)
{
if (!host->in_bus_reset) {
host->ops->devctl(host, RESET_BUS, type);
host->driver->devctl(host, RESET_BUS, type);
return 0;
} else {
return 1;
......@@ -354,7 +354,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
}
host->reset_retries = 0;
if (isroot) host->ops->devctl(host, ACT_CYCLE_MASTER, 1);
if (isroot) host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
atomic_inc(&host->generation);
host->in_bus_reset = 0;
highlevel_host_reset(host);
......@@ -440,7 +440,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
}
#endif
return host->ops->transmit_packet(host, packet);
return host->driver->transmit_packet(host, packet);
}
static void send_packet_nocare(struct hpsb_packet *packet)
......@@ -736,7 +736,7 @@ void abort_requests(struct hpsb_host *host)
struct list_head *lh;
LIST_HEAD(llist);
host->ops->devctl(host, CANCEL_REQUESTS, 0);
host->driver->devctl(host, CANCEL_REQUESTS, 0);
spin_lock_irqsave(&host->pending_pkt_lock, flags);
list_splice(&host->pending_packets, &llist);
......@@ -810,8 +810,8 @@ static rwlock_t ieee1394_chardevs_lock = RW_LOCK_UNLOCKED;
static int ieee1394_dispatch_open(struct inode *inode, struct file *file);
static struct file_operations ieee1394_chardev_ops = {
owner: THIS_MODULE,
open: ieee1394_dispatch_open,
.owner = THIS_MODULE,

.open = ieee1394_dispatch_open,
};
devfs_handle_t ieee1394_devfs_handle;
......@@ -861,19 +861,57 @@ void ieee1394_unregister_chardev(int blocknum)
write_unlock(&ieee1394_chardevs_lock);
}
/*
ieee1394_get_chardev() - look up and acquire a character device
driver that has previously registered using ieee1394_register_chardev()
On success, returns 1 and sets module and file_ops to the driver.
The module will have an incremented reference count.
On failure, returns 0.
The module will NOT have an incremented reference count.
*/
static int ieee1394_get_chardev(int blocknum,
struct module **module,
struct file_operations **file_ops)
{
int ret = 0;
if( (blocknum < 0) || (blocknum > 15) )
return ret;
read_lock(&ieee1394_chardevs_lock);
*module = ieee1394_chardevs[blocknum].module;
*file_ops = ieee1394_chardevs[blocknum].file_ops;
if(*file_ops == NULL)
goto out;
/* don't need try_inc_mod_count if the driver is non-modular */
if(*module && (try_inc_mod_count(*module) == 0))
goto out;
/* success! */
ret = 1;
out:
read_unlock(&ieee1394_chardevs_lock);
return ret;
}
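Editorial note, not part of the patch: a hypothetical caller of ieee1394_get_chardev(), shown only to make the reference-count contract above concrete. A successful lookup leaves the caller owning one module reference, which must either be dropped explicitly or handed off, as ieee1394_dispatch_open() does below by pointing file->f_op at the driver:

/* Illustration only: balance the reference taken by ieee1394_get_chardev(). */
static void example_use_chardev(int blocknum)
{
	struct module *module;
	struct file_operations *fops;

	if (ieee1394_get_chardev(blocknum, &module, &fops) == 0)
		return;			/* nothing registered at this block */

	/* ... use fops ... */

	if (module)
		__MOD_DEC_USE_COUNT(module);	/* drop the acquired reference */
}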
/* the point of entry for open() on any ieee1394 character device */
static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
{
struct file_operations *file_ops;
struct module *module;
int blocknum;
int retval = -ENODEV;
int retval;
/*
Maintaining correct module reference counts is tricky here!
For Linux v2.4 and later:
The key thing to remember is that the VFS increments the
reference count of ieee1394 before it calls
ieee1394_dispatch_open().
......@@ -886,16 +924,7 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
If the open() fails, then the VFS will drop the
reference count of whatever module file->f_op->owner points
to, immediately after this function returns.
The comments below refer to the 2.4 case, since the 2.2
case is trivial.
*/
#define INCREF(mod_) do { struct module *mod = (struct module*) mod_; \
if(mod != NULL) __MOD_INC_USE_COUNT(mod); } while(0)
#define DECREF(mod_) do { struct module *mod = (struct module*) mod_; \
if(mod != NULL) __MOD_DEC_USE_COUNT(mod); } while(0)
/* shift away lower four bits of the minor
to get the index of the ieee1394_driver
......@@ -903,20 +932,10 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
blocknum = (minor(inode->i_rdev) >> 4) & 0xF;
/* printk("ieee1394_dispatch_open(%d)", blocknum); */
/* look up the driver */
read_lock(&ieee1394_chardevs_lock);
module = ieee1394_chardevs[blocknum].module;
/* bump the reference count of the driver that
will receive the open() */
INCREF(module);
file_ops = ieee1394_chardevs[blocknum].file_ops;
read_unlock(&ieee1394_chardevs_lock);
if(file_ops == NULL) {
DECREF(module);
goto out_fail;
}
if(ieee1394_get_chardev(blocknum, &module, &file_ops) == 0)
return -ENODEV;
/* redirect all subsequent requests to the driver's
own file_operations */
......@@ -928,42 +947,42 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
/* follow through with the open() */
retval = file_ops->open(inode, file);
if(retval) {
if(retval == 0) {
/* if the open() failed, then we need to drop the
extra reference we gave to the task-specific
driver */
DECREF(module);
goto out_fail;
/* If the open() succeeded, then ieee1394 will be left
with an extra module reference, so we discard it here.
The task-specific driver still has the extra
reference given to it by ieee1394_get_chardev().
This extra reference prevents the module from
unloading while the file is open, and will be
dropped by the VFS when the file is released.
*/
} else {
/* if the open() succeeded, then ieee1394 will be left
with an extra module reference, so we discard it here.*/
if(THIS_MODULE)
__MOD_DEC_USE_COUNT((struct module*) THIS_MODULE);
/* note that if ieee1394 is compiled into the kernel,
THIS_MODULE will be (void*) NULL, hence the if and
the cast are necessary */
DECREF(THIS_MODULE);
} else {
/* the task-specific driver still has the extra
reference we gave it. This extra reference prevents
the module from unloading while the file is open,
and will be dropped by the VFS when the file is
released. */
/* if the open() failed, then we need to drop the
extra reference we gave to the task-specific
driver */
return 0;
}
out_fail:
/* point the file's f_ops back to ieee1394. The VFS will then
decrement ieee1394's reference count immediately after this
function returns. */
if(module)
__MOD_DEC_USE_COUNT(module);
file->f_op = &ieee1394_chardev_ops;
return retval;
/* point the file's f_ops back to ieee1394. The VFS will then
decrement ieee1394's reference count immediately after this
function returns. */
file->f_op = &ieee1394_chardev_ops;
}
#undef INCREF
#undef DECREF
return retval;
}
struct proc_dir_entry *ieee1394_procfs_entry;
......@@ -1024,8 +1043,6 @@ module_init(ieee1394_init);
module_exit(ieee1394_cleanup);
/* Exported symbols */
EXPORT_SYMBOL(hpsb_register_lowlevel);
EXPORT_SYMBOL(hpsb_unregister_lowlevel);
EXPORT_SYMBOL(hpsb_alloc_host);
EXPORT_SYMBOL(hpsb_add_host);
EXPORT_SYMBOL(hpsb_remove_host);
......
......@@ -16,7 +16,7 @@ struct hpsb_packet {
struct list_head list;
/* This can be used for host driver internal linking. */
struct hpsb_packet *xnext;
struct list_head driver_list;
nodeid_t node_id;
......@@ -77,6 +77,10 @@ struct hpsb_packet {
quadlet_t embedded_header[5];
};
static inline struct hpsb_packet *driver_packet(struct list_head *l)
{
return list_entry(l, struct hpsb_packet, driver_list);
}
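Editorial note, not part of the patch: the new driver_list member and the driver_packet() helper let host drivers keep their per-context queues as ordinary list_heads instead of hand-rolled xnext chains. A small sketch of the queue and dequeue pattern that ohci1394 and pcilynx adopt below (function names are illustrative):

static void example_queue_packet(struct list_head *queue,
				 struct hpsb_packet *packet)
{
	list_add_tail(&packet->driver_list, queue);
}

static struct hpsb_packet *example_dequeue_packet(struct list_head *queue)
{
	struct hpsb_packet *packet;

	if (list_empty(queue))
		return NULL;

	packet = driver_packet(queue->next);	/* oldest queued packet */
	list_del(&packet->driver_list);
	return packet;
}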
void abort_timedouts(struct hpsb_host *host);
void abort_requests(struct hpsb_host *host);
......
......@@ -45,18 +45,21 @@
* a way that's easy to parse by the protocol interface.
*/
static LIST_HEAD(node_list);
static rwlock_t node_lock = RW_LOCK_UNLOCKED;
/* The nodemgr maintains a number of data structures: the node list,
* the driver list, unit directory list and the host info list. The
* first three lists are accessed from process context only: /proc
* readers, insmod and rmmod, and the nodemgr thread. Access to these
* lists is serialized by means of the nodemgr_serialize mutex, which
* must be taken before accessing the structures and released
* afterwards. The host info list is only accessed during insmod,
* rmmod and from interrupt context, and always only for a short period of
* time, so a spinlock is used to protect this list.
*/
static DECLARE_MUTEX(nodemgr_serialize);
static LIST_HEAD(node_list);
static LIST_HEAD(driver_list);
static rwlock_t driver_lock = RW_LOCK_UNLOCKED;
/* The rwlock unit_directory_lock is always held when manipulating the
* global unit_directory_list, but this also protects access to the
* lists of unit directories stored in the protocol drivers.
*/
static LIST_HEAD(unit_directory_list);
static rwlock_t unit_directory_lock = RW_LOCK_UNLOCKED;
static LIST_HEAD(host_info_list);
static spinlock_t host_info_lock = SPIN_LOCK_UNLOCKED;
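Editorial note, not part of the patch: an illustrative reading of the locking scheme described in the comment above, mirroring what hpsb_guid_get_entry() does further down. Process-context users bracket list accesses with the nodemgr_serialize semaphore, while only the host info list needs the spinlock:

/* Illustration only: process-context lookup under nodemgr_serialize. */
static struct node_entry *example_lookup_by_guid(u64 guid)
{
	struct node_entry *ne;

	down(&nodemgr_serialize);	/* may sleep; process context only */
	ne = find_entry_by_guid(guid);
	up(&nodemgr_serialize);

	return ne;
}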
......@@ -84,6 +87,9 @@ static int raw1394_read_proc(char *page, char **start, off_t off,
int len;
char *out = page;
if (down_interruptible(&nodemgr_serialize))
return -EINTR;
list_for_each(lh, &node_list) {
struct list_head *l;
int ud_count = 0;
......@@ -127,16 +133,24 @@ static int raw1394_read_proc(char *page, char **start, off_t off,
/* Now the unit directories */
list_for_each (l, &ne->unit_directories) {
struct unit_directory *ud = list_entry (l, struct unit_directory, node_list);
int printed = 0; /* small hack */
PUTF(" Unit Directory %d:\n", ud_count++);
if (ud->flags & UNIT_DIRECTORY_VENDOR_ID)
if (ud->flags & UNIT_DIRECTORY_VENDOR_ID) {
PUTF(" Vendor/Model ID: %s [%06x]",
ud->vendor_name ?: "Unknown", ud->vendor_id);
else if (ud->flags & UNIT_DIRECTORY_MODEL_ID) /* Have to put something */
PUTF(" Vendor/Model ID: %s [%06x]",
ne->vendor_name ?: "Unknown", ne->vendor_id);
if (ud->flags & UNIT_DIRECTORY_MODEL_ID)
printed = 1;
}
if (ud->flags & UNIT_DIRECTORY_MODEL_ID) {
if (!printed)
PUTF(" Vendor/Model ID: %s [%06x]",
ne->vendor_name ?: "Unknown", ne->vendor_id);
PUTF(" / %s [%06x]", ud->model_name ?: "Unknown", ud->model_id);
PUTF("\n");
printed = 1;
}
if (printed)
PUTF("\n");
if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
PUTF(" Software Specifier ID: %06x\n", ud->specifier_id);
if (ud->flags & UNIT_DIRECTORY_VERSION)
......@@ -148,6 +162,8 @@ static int raw1394_read_proc(char *page, char **start, off_t off,
}
up(&nodemgr_serialize);
len = out - page;
len -= off;
if (len < count) {
......@@ -306,7 +322,6 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, quadlet_t busoption
nodeid_t nodeid, unsigned int generation)
{
struct node_entry *ne;
unsigned long flags;
ne = nodemgr_scan_root_directory (host, nodeid, generation);
if (!ne) return NULL;
......@@ -318,9 +333,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, quadlet_t busoption
ne->guid = guid;
ne->generation = generation;
write_lock_irqsave(&node_lock, flags);
list_add_tail(&ne->list, &node_list);
write_unlock_irqrestore(&node_lock, flags);
nodemgr_process_config_rom (ne, busoptions);
......@@ -758,11 +771,9 @@ static void nodemgr_release_unit_directory(struct unit_directory *ud)
void hpsb_release_unit_directory(struct unit_directory *ud)
{
unsigned long flags;
write_lock_irqsave(&unit_directory_lock, flags);
down(&nodemgr_serialize);
nodemgr_release_unit_directory(ud);
write_unlock_irqrestore(&unit_directory_lock, flags);
up(&nodemgr_serialize);
}
static void nodemgr_free_unit_directories(struct node_entry *ne)
......@@ -852,13 +863,12 @@ int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
{
struct unit_directory *ud;
struct list_head *lh;
unsigned long flags;
write_lock_irqsave(&driver_lock, flags);
if (down_interruptible(&nodemgr_serialize))
return -EINTR;
list_add_tail(&driver->list, &driver_list);
write_unlock_irqrestore(&driver_lock, flags);
write_lock_irqsave(&unit_directory_lock, flags);
INIT_LIST_HEAD(&driver->unit_directories);
lh = unit_directory_list.next;
while (lh != &unit_directory_list) {
......@@ -867,7 +877,8 @@ int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
if (nodemgr_match_driver(driver, ud) && driver->probe(ud) == 0)
nodemgr_claim_unit_directory(ud, driver);
}
write_unlock_irqrestore(&unit_directory_lock, flags);
up(&nodemgr_serialize);
/*
* Right now registration always succeeds, but maybe we should
......@@ -881,13 +892,10 @@ void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
{
struct list_head *lh;
struct unit_directory *ud;
unsigned long flags;
write_lock_irqsave(&driver_lock, flags);
list_del(&driver->list);
write_unlock_irqrestore(&driver_lock, flags);
down(&nodemgr_serialize);
write_lock_irqsave(&unit_directory_lock, flags);
list_del(&driver->list);
lh = driver->unit_directories.next;
while (lh != &driver->unit_directories) {
ud = list_entry(lh, struct unit_directory, driver_list);
......@@ -896,14 +904,13 @@ void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
ud->driver->disconnect(ud);
nodemgr_release_unit_directory(ud);
}
write_unlock_irqrestore(&unit_directory_lock, flags);
up(&nodemgr_serialize);
}
static void nodemgr_process_config_rom(struct node_entry *ne,
quadlet_t busoptions)
{
unsigned long flags;
ne->busopt.irmc = (busoptions >> 31) & 1;
ne->busopt.cmc = (busoptions >> 30) & 1;
ne->busopt.isc = (busoptions >> 29) & 1;
......@@ -929,11 +936,9 @@ static void nodemgr_process_config_rom(struct node_entry *ne,
* thing. If this was a new device, the call to
* nodemgr_disconnect_drivers is a no-op and all is well.
*/
write_lock_irqsave(&unit_directory_lock, flags);
nodemgr_free_unit_directories(ne);
nodemgr_process_root_directory(ne);
nodemgr_bind_drivers(ne);
write_unlock_irqrestore(&unit_directory_lock, flags);
}
/*
......@@ -1032,16 +1037,12 @@ static int read_businfo_block(struct hpsb_host *host, nodeid_t nodeid, unsigned
static void nodemgr_remove_node(struct node_entry *ne)
{
unsigned long flags;
HPSB_DEBUG("%s removed: Node[" NODE_BUS_FMT "] GUID[%016Lx] [%s]",
(ne->host->node_id == ne->nodeid) ? "Host" : "Device",
NODE_BUS_ARGS(ne->nodeid), (unsigned long long)ne->guid,
ne->vendor_name ?: "Unknown");
write_lock_irqsave(&unit_directory_lock, flags);
nodemgr_free_unit_directories(ne);
write_unlock_irqrestore(&unit_directory_lock, flags);
list_del(&ne->list);
kfree(ne);
......@@ -1077,7 +1078,7 @@ static void nodemgr_node_probe_one(struct hpsb_host *host,
}
guid = ((u64)buffer[3] << 32) | buffer[4];
ne = hpsb_guid_get_entry(guid);
ne = find_entry_by_guid(guid);
if (!ne)
nodemgr_create_node(guid, buffer[2], host, nodeid, generation);
......@@ -1089,13 +1090,11 @@ static void nodemgr_node_probe_one(struct hpsb_host *host,
static void nodemgr_node_probe_cleanup(struct hpsb_host *host, unsigned int generation)
{
unsigned long flags;
struct list_head *lh, *next;
struct node_entry *ne;
/* Now check to see if we have any nodes that aren't referenced
* any longer. */
write_lock_irqsave(&node_lock, flags);
list_for_each_safe(lh, next, &node_list) {
ne = list_entry(lh, struct node_entry, list);
......@@ -1110,7 +1109,6 @@ static void nodemgr_node_probe_cleanup(struct hpsb_host *host, unsigned int gene
if (ne->generation != generation)
nodemgr_remove_node(ne);
}
write_unlock_irqrestore(&node_lock, flags);
return;
}
......@@ -1169,12 +1167,14 @@ static int nodemgr_host_thread(void *__hi)
daemonize();
strcpy(current->comm, "knodemgrd");
/* Sit and wait for a signal to probe the nodes on the bus. This
* happens when we get a bus reset. */
while (!down_interruptible(&hi->reset_sem))
while (!down_interruptible(&hi->reset_sem) &&
!down_interruptible(&nodemgr_serialize)) {
nodemgr_node_probe(hi->host);
up(&nodemgr_serialize);
}
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
HPSB_DEBUG ("NodeMgr: Exiting thread for %s", hi->host->driver->name);
#endif
......@@ -1184,24 +1184,22 @@ static int nodemgr_host_thread(void *__hi)
struct node_entry *hpsb_guid_get_entry(u64 guid)
{
unsigned long flags;
struct node_entry *ne;
read_lock_irqsave(&node_lock, flags);
down(&nodemgr_serialize);
ne = find_entry_by_guid(guid);
read_unlock_irqrestore(&node_lock, flags);
up(&nodemgr_serialize);
return ne;
}
struct node_entry *hpsb_nodeid_get_entry(nodeid_t nodeid)
{
unsigned long flags;
struct node_entry *ne;
read_lock_irqsave(&node_lock, flags);
down(&nodemgr_serialize);
ne = find_entry_by_nodeid(nodeid);
read_unlock_irqrestore(&node_lock, flags);
up(&nodemgr_serialize);
return ne;
}
......@@ -1309,17 +1307,14 @@ static void nodemgr_host_reset(struct hpsb_host *host)
}
}
if (hi == NULL) {
HPSB_ERR ("NodeMgr: could not process reset of non-existent host");
goto done_reset_host;
}
if (hi != NULL) {
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
HPSB_DEBUG ("NodeMgr: Processing host reset for %s", host->driver->name);
HPSB_DEBUG ("NodeMgr: Processing host reset for %s", host->driver->name);
#endif
up(&hi->reset_sem);
up(&hi->reset_sem);
} else
HPSB_ERR ("NodeMgr: could not process reset of non-existent host");
done_reset_host:
spin_unlock_irqrestore (&host_info_lock, flags);
return;
......@@ -1341,38 +1336,39 @@ static void nodemgr_remove_host(struct hpsb_host *host)
break;
}
}
if (!hi)
HPSB_ERR ("NodeMgr: host %s does not exist, cannot remove",
host->driver->name);
spin_unlock_irqrestore (&host_info_lock, flags);
if (hi) {
if (hi->pid >= 0) {
kill_proc(hi->pid, SIGTERM, 1);
wait_for_completion(&hi->exited);
}
kfree(hi);
}
else
HPSB_ERR("NodeMgr: host %s does not exist, cannot remove",
host->driver->name);
down(&nodemgr_serialize);
/* Even if we fail the host_info part, remove all the node
* entries. */
write_lock_irqsave(&node_lock, flags);
list_for_each_safe(lh, next, &node_list) {
ne = list_entry(lh, struct node_entry, list);
if (ne->host == host)
nodemgr_remove_node(ne);
}
write_unlock_irqrestore(&node_lock, flags);
if (hi) {
if (hi->pid >= 0) {
kill_proc(hi->pid, SIGTERM, 1);
wait_for_completion(&hi->exited);
}
kfree(hi);
}
up(&nodemgr_serialize);
return;
}
static struct hpsb_highlevel_ops nodemgr_ops = {
add_host: nodemgr_add_host,
host_reset: nodemgr_host_reset,
remove_host: nodemgr_remove_host,
.add_host = nodemgr_add_host,
.host_reset = nodemgr_host_reset,
.remove_host = nodemgr_remove_host,
};
static struct hpsb_highlevel *hl;
......
......@@ -154,7 +154,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
static char version[] __devinitdata =
"$Rev: 504 $ Ben Collins <bcollins@debian.org>";
"$Rev: 555 $ Ben Collins <bcollins@debian.org>";
/* Module Parameters */
MODULE_PARM(attempt_root,"i");
......@@ -170,10 +170,6 @@ static void dma_trm_reset(struct dma_trm_ctx *d);
static void ohci1394_pci_remove(struct pci_dev *pdev);
static inline void ohci1394_run_irq_hooks(struct ti_ohci *ohci,
quadlet_t isoRecvEvent,
quadlet_t isoXmitEvent);
#ifndef __LITTLE_ENDIAN
/* Swap a series of quads inplace. */
static __inline__ void block_swab32(quadlet_t *data, size_t size) {
......@@ -443,10 +439,8 @@ static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
d->sent_ind = 0;
d->free_prgs = d->num_desc;
d->branchAddrPtr = NULL;
d->fifo_first = NULL;
d->fifo_last = NULL;
d->pending_first = NULL;
d->pending_last = NULL;
INIT_LIST_HEAD(&d->fifo_list);
INIT_LIST_HEAD(&d->pending_list);
DBGMSG(ohci->id, "Transmit DMA ctx=%d initialized", d->ctx);
}
......@@ -477,34 +471,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
{
quadlet_t buf;
/* Start off with a soft reset, to clear everything to a sane
* state. */
ohci_soft_reset(ohci);
/* Now enable LPS, which we need in order to start accessing
* most of the registers. In fact, on some cards (ALI M5251),
* accessing registers in the SClk domain without LPS enabled
* will lock up the machine. Wait 50msec to make sure we have
* full link enabled. */
reg_write(ohci, OHCI1394_HCControlSet, 0x00080000);
mdelay(50);
/* Determine the number of available IR and IT contexts. */
ohci->nb_iso_rcv_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
DBGMSG(ohci->id, "%d iso receive contexts available",
ohci->nb_iso_rcv_ctx);
ohci->nb_iso_xmit_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
DBGMSG(ohci->id, "%d iso transmit contexts available",
ohci->nb_iso_xmit_ctx);
/* Set the usage bits for non-existent contexts so they can't
* be allocated */
ohci->ir_ctx_usage |= ~0 << ohci->nb_iso_rcv_ctx;
ohci->it_ctx_usage |= ~0 << ohci->nb_iso_xmit_ctx;
spin_lock_init(&ohci->phy_reg_lock);
spin_lock_init(&ohci->event_lock);
......@@ -571,18 +537,18 @@ static void ohci_initialize(struct ti_ohci *ohci)
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
/* Initialize AR dma */
initialize_dma_rcv_ctx(ohci->ar_req_context, 0);
initialize_dma_rcv_ctx(ohci->ar_resp_context, 0);
initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
/* Initialize AT dma */
initialize_dma_trm_ctx(ohci->at_req_context);
initialize_dma_trm_ctx(ohci->at_resp_context);
initialize_dma_trm_ctx(&ohci->at_req_context);
initialize_dma_trm_ctx(&ohci->at_resp_context);
/* Initialize IR dma */
initialize_dma_rcv_ctx(ohci->ir_context, 1);
initialize_dma_rcv_ctx(&ohci->ir_context, 1);
/* Initialize IT dma */
initialize_dma_trm_ctx(ohci->it_context);
initialize_dma_trm_ctx(&ohci->it_context);
/* Set up isoRecvIntMask to generate interrupts for context 0
(thanks to Michael Greger for seeing that I forgot this) */
......@@ -789,13 +755,7 @@ static void insert_packet(struct ti_ohci *ohci,
d->free_prgs--;
/* queue the packet in the appropriate context queue */
if (d->fifo_last) {
d->fifo_last->xnext = packet;
d->fifo_last = packet;
} else {
d->fifo_first = packet;
d->fifo_last = packet;
}
list_add_tail(&packet->driver_list, &d->fifo_list);
d->prg_ind = (d->prg_ind+1)%d->num_desc;
}
......@@ -807,22 +767,24 @@ static void insert_packet(struct ti_ohci *ohci,
*/
static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
struct hpsb_packet *p;
int idx,z;
if (d->pending_first == NULL || d->free_prgs == 0)
if (list_empty(&d->pending_list) || d->free_prgs == 0)
return 0;
p = driver_packet(d->pending_list.next);
idx = d->prg_ind;
z = (d->pending_first->data_size) ? 3 : 2;
z = (p->data_size) ? 3 : 2;
/* insert the packets into the at dma fifo */
while (d->free_prgs>0 && d->pending_first) {
insert_packet(ohci, d, d->pending_first);
d->pending_first = d->pending_first->xnext;
while (d->free_prgs > 0 && !list_empty(&d->pending_list)) {
struct hpsb_packet *p = driver_packet(d->pending_list.next);
list_del(&p->driver_list);
insert_packet(ohci, d, p);
}
if (d->pending_first == NULL)
d->pending_last = NULL;
else
if (d->free_prgs == 0)
PRINT(KERN_INFO, ohci->id,
"Transmit DMA FIFO ctx=%d is full... waiting",d->ctx);
......@@ -857,25 +819,16 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
packet->data_size);
return 0;
}
packet->xnext = NULL;
/* Decide whether we have an iso, a request, or a response packet */
tcode = (packet->header[0]>>4)&0xf;
if (tcode == TCODE_ISO_DATA) d = ohci->it_context;
else if (tcode & 0x02) d = ohci->at_resp_context;
else d = ohci->at_req_context;
if (tcode == TCODE_ISO_DATA) d = &ohci->it_context;
else if (tcode & 0x02) d = &ohci->at_resp_context;
else d = &ohci->at_req_context;
spin_lock_irqsave(&d->lock,flags);
/* queue the packet for later insertion into the dma fifo */
if (d->pending_last) {
d->pending_last->xnext = packet;
d->pending_last = packet;
}
else {
d->pending_first = packet;
d->pending_last = packet;
}
list_add_tail(&packet->driver_list, &d->pending_list);
dma_trm_flush(ohci, d);
......@@ -929,8 +882,8 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
case CANCEL_REQUESTS:
DBGMSG(ohci->id, "Cancel request received");
dma_trm_reset(ohci->at_req_context);
dma_trm_reset(ohci->at_resp_context);
dma_trm_reset(&ohci->at_req_context);
dma_trm_reset(&ohci->at_resp_context);
break;
case MODIFY_USAGE:
......@@ -1033,46 +986,62 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
static void dma_trm_reset(struct dma_trm_ctx *d)
{
struct ti_ohci *ohci;
unsigned long flags;
struct hpsb_packet *nextpacket;
LIST_HEAD(packet_list);
if (d==NULL) {
PRINT_G(KERN_ERR, "dma_trm_reset called with NULL arg");
return;
}
ohci = (struct ti_ohci *)(d->ohci);
ohci1394_stop_context(ohci, d->ctrlClear, NULL);
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
spin_lock_irqsave(&d->lock,flags);
/* Lock the context, reset it and release it. Move the packets
* that were pending in the context to packet_list and free
* them after releasing the lock. */
/* Is there still any packet pending in the fifo ? */
while(d->fifo_first) {
PRINT(KERN_INFO, ohci->id,
"AT dma reset ctx=%d, aborting transmission",
d->ctx);
nextpacket = d->fifo_first->xnext;
hpsb_packet_sent(ohci->host, d->fifo_first, ACKX_ABORTED);
d->fifo_first = nextpacket;
}
d->fifo_first = d->fifo_last = NULL;
spin_lock_irqsave(&d->lock, flags);
/* is there still any packet pending ? */
while(d->pending_first) {
PRINT(KERN_INFO, ohci->id,
"AT dma reset ctx=%d, aborting transmission",
d->ctx);
nextpacket = d->pending_first->xnext;
hpsb_packet_sent(ohci->host, d->pending_first,
ACKX_ABORTED);
d->pending_first = nextpacket;
}
d->pending_first = d->pending_last = NULL;
d->branchAddrPtr=NULL;
list_splice(&d->fifo_list, &packet_list);
list_splice(&d->pending_list, &packet_list);
INIT_LIST_HEAD(&d->fifo_list);
INIT_LIST_HEAD(&d->pending_list);
d->branchAddrPtr = NULL;
d->sent_ind = d->prg_ind;
d->free_prgs = d->num_desc;
spin_unlock_irqrestore(&d->lock,flags);
spin_unlock_irqrestore(&d->lock, flags);
/* Now process subsystem callbacks for the packets from the
* context. */
while (!list_empty(&packet_list)) {
struct hpsb_packet *p = driver_packet(packet_list.next);
PRINT(KERN_INFO, d->ohci->id,
"AT dma reset ctx=%d, aborting transmission", d->ctx);
list_del(&p->driver_list);
hpsb_packet_sent(d->ohci->host, p, ACKX_ABORTED);
}
}
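Editorial note, not part of the patch: the reworked dma_trm_reset() follows a common pattern, detach the queued packets under the spinlock, then run the hpsb_packet_sent() callbacks with the lock dropped. A generic sketch of that pattern (illustrative names):

static void example_abort_queue(spinlock_t *lock, struct list_head *queue,
				struct hpsb_host *host)
{
	unsigned long flags;
	LIST_HEAD(aborted);

	spin_lock_irqsave(lock, flags);
	list_splice(queue, &aborted);
	INIT_LIST_HEAD(queue);		/* leave the context queue empty */
	spin_unlock_irqrestore(lock, flags);

	while (!list_empty(&aborted)) {
		struct hpsb_packet *p = driver_packet(aborted.next);

		list_del(&p->driver_list);
		hpsb_packet_sent(host, p, ACKX_ABORTED);	/* complete towards the core */
	}
}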
static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
quadlet_t rx_event,
quadlet_t tx_event)
{
struct list_head *lh;
struct ohci1394_iso_tasklet *t;
unsigned long mask;
spin_lock(&ohci->iso_tasklet_list_lock);
list_for_each(lh, &ohci->iso_tasklet_list) {
t = list_entry(lh, struct ohci1394_iso_tasklet, link);
mask = 1 << t->context;
if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
tasklet_schedule(&t->tasklet);
if (t->type == OHCI_ISO_RECEIVE && rx_event & mask)
tasklet_schedule(&t->tasklet);
}
spin_unlock(&ohci->iso_tasklet_list_lock);
}
static void ohci_irq_handler(int irq, void *dev_id,
......@@ -1143,7 +1112,7 @@ static void ohci_irq_handler(int irq, void *dev_id,
* we get sent acks before response packets. This sucks mainly
* because it halts the interrupt handler. */
if (event & OHCI1394_reqTxComplete) {
struct dma_trm_ctx *d = ohci->at_req_context;
struct dma_trm_ctx *d = &ohci->at_req_context;
DBGMSG(ohci->id, "Got reqTxComplete interrupt "
"status=0x%08X", reg_read(ohci, d->ctrlSet));
if (reg_read(ohci, d->ctrlSet) & 0x800)
......@@ -1154,7 +1123,7 @@ static void ohci_irq_handler(int irq, void *dev_id,
event &= ~OHCI1394_reqTxComplete;
}
if (event & OHCI1394_respTxComplete) {
struct dma_trm_ctx *d = ohci->at_resp_context;
struct dma_trm_ctx *d = &ohci->at_resp_context;
DBGMSG(ohci->id, "Got respTxComplete interrupt "
"status=0x%08X", reg_read(ohci, d->ctrlSet));
if (reg_read(ohci, d->ctrlSet) & 0x800)
......@@ -1165,7 +1134,7 @@ static void ohci_irq_handler(int irq, void *dev_id,
event &= ~OHCI1394_respTxComplete;
}
if (event & OHCI1394_RQPkt) {
struct dma_rcv_ctx *d = ohci->ar_req_context;
struct dma_rcv_ctx *d = &ohci->ar_req_context;
DBGMSG(ohci->id, "Got RQPkt interrupt status=0x%08X",
reg_read(ohci, d->ctrlSet));
if (reg_read(ohci, d->ctrlSet) & 0x800)
......@@ -1175,7 +1144,7 @@ static void ohci_irq_handler(int irq, void *dev_id,
event &= ~OHCI1394_RQPkt;
}
if (event & OHCI1394_RSPkt) {
struct dma_rcv_ctx *d = ohci->ar_resp_context;
struct dma_rcv_ctx *d = &ohci->ar_resp_context;
DBGMSG(ohci->id, "Got RSPkt interrupt status=0x%08X",
reg_read(ohci, d->ctrlSet));
if (reg_read(ohci, d->ctrlSet) & 0x800)
......@@ -1185,46 +1154,19 @@ static void ohci_irq_handler(int irq, void *dev_id,
event &= ~OHCI1394_RSPkt;
}
if (event & OHCI1394_isochRx) {
quadlet_t isoRecvIntEvent;
struct dma_rcv_ctx *d = ohci->ir_context;
isoRecvIntEvent =
reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
reg_write(ohci, OHCI1394_IsoRecvIntEventClear,
isoRecvIntEvent);
DBGMSG(ohci->id, "Got isochRx interrupt "
"status=0x%08X isoRecvIntEvent=%08x",
reg_read(ohci, d->ctrlSet), isoRecvIntEvent);
if (isoRecvIntEvent & 0x1) {
if (reg_read(ohci, d->ctrlSet) & 0x800)
ohci1394_stop_context(ohci, d->ctrlClear,
"isochRx");
else
tasklet_schedule(&d->task);
}
ohci1394_run_irq_hooks(ohci, isoRecvIntEvent, 0);
quadlet_t rx_event;
rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
ohci_schedule_iso_tasklets(ohci, rx_event, 0);
event &= ~OHCI1394_isochRx;
}
if (event & OHCI1394_isochTx) {
quadlet_t isoXmitIntEvent;
struct dma_trm_ctx *d = ohci->it_context;
isoXmitIntEvent =
reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear,
isoXmitIntEvent);
DBGMSG(ohci->id, "Got isochTx interrupt "
"status=0x%08x isoXmitIntEvent=%08x",
reg_read(ohci, d->ctrlSet), isoXmitIntEvent);
ohci1394_run_irq_hooks(ohci, 0, isoXmitIntEvent);
if (isoXmitIntEvent & 0x1) {
if (reg_read(ohci, d->ctrlSet) & 0x800)
ohci1394_stop_context(ohci, d->ctrlClear, "isochTx");
else
tasklet_schedule(&d->task);
}
quadlet_t tx_event;
tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
ohci_schedule_iso_tasklets(ohci, 0, tx_event);
event &= ~OHCI1394_isochTx;
}
if (event & OHCI1394_selfIDComplete) {
......@@ -1507,25 +1449,16 @@ static void dma_trm_tasklet (unsigned long data)
{
struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
struct hpsb_packet *packet, *nextpacket;
struct hpsb_packet *packet;
unsigned long flags;
u32 ack;
size_t datasize;
spin_lock_irqsave(&d->lock, flags);
if (d->fifo_first == NULL) {
#if 0
ohci1394_stop_context(ohci, d->ctrlClear,
"Packet sent ack received but queue is empty");
#endif
spin_unlock_irqrestore(&d->lock, flags);
return;
}
while (d->fifo_first) {
packet = d->fifo_first;
datasize = d->fifo_first->data_size;
while (!list_empty(&d->fifo_list)) {
packet = driver_packet(d->fifo_list.next);
datasize = packet->data_size;
if (datasize && packet->type != hpsb_raw)
ack = le32_to_cpu(
d->prg_cpu[d->sent_ind]->end.status) >> 16;
......@@ -1576,7 +1509,7 @@ static void dma_trm_tasklet (unsigned long data)
d->ctx);
#endif
nextpacket = packet->xnext;
list_del(&packet->driver_list);
hpsb_packet_sent(ohci->host, packet, ack & 0xf);
if (datasize) {
......@@ -1588,90 +1521,64 @@ static void dma_trm_tasklet (unsigned long data)
d->sent_ind = (d->sent_ind+1)%d->num_desc;
d->free_prgs++;
d->fifo_first = nextpacket;
}
if (d->fifo_first == NULL)
d->fifo_last = NULL;
dma_trm_flush(ohci, d);
spin_unlock_irqrestore(&d->lock, flags);
}
static int free_dma_rcv_ctx(struct dma_rcv_ctx **d)
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
int i;
struct ti_ohci *ohci;
if (*d==NULL) return -1;
ohci = (struct ti_ohci *)(*d)->ohci;
if (d->ohci == NULL)
return;
DBGMSG(ohci->id, "Freeing dma_rcv_ctx %d",(*d)->ctx);
DBGMSG(d->ohci->id, "Freeing dma_rcv_ctx %d", d->ctx);
ohci1394_stop_context(ohci, (*d)->ctrlClear, NULL);
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
tasklet_kill(&(*d)->task);
if (d->type == DMA_CTX_ISO)
ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_tasklet);
else
tasklet_kill(&d->task);
if ((*d)->buf_cpu) {
for (i=0; i<(*d)->num_desc; i++)
if ((*d)->buf_cpu[i] && (*d)->buf_bus[i]) {
if (d->buf_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->buf_cpu[i] && d->buf_bus[i]) {
pci_free_consistent(
ohci->dev, (*d)->buf_size,
(*d)->buf_cpu[i], (*d)->buf_bus[i]);
d->ohci->dev, d->buf_size,
d->buf_cpu[i], d->buf_bus[i]);
OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
}
kfree((*d)->buf_cpu);
kfree((*d)->buf_bus);
kfree(d->buf_cpu);
kfree(d->buf_bus);
}
if ((*d)->prg_cpu) {
for (i=0; i<(*d)->num_desc; i++)
if ((*d)->prg_cpu[i] && (*d)->prg_bus[i]) {
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->prg_cpu[i] && d->prg_bus[i]) {
pci_free_consistent(
ohci->dev, sizeof(struct dma_cmd),
(*d)->prg_cpu[i], (*d)->prg_bus[i]);
d->ohci->dev, sizeof(struct dma_cmd),
d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
}
kfree((*d)->prg_cpu);
kfree((*d)->prg_bus);
kfree(d->prg_cpu);
kfree(d->prg_bus);
}
if ((*d)->spb) kfree((*d)->spb);
if (d->spb) kfree(d->spb);
/* clear ISO context usage bit */
if ((*d)->type == DMA_CTX_ISO) {
clear_bit((*d)->ctx, &ohci->ir_ctx_usage);
}
kfree(*d);
*d = NULL;
return 0;
/* Mark this context as freed. */
d->ohci = NULL;
}
static struct dma_rcv_ctx *
alloc_dma_rcv_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num_desc,
static int
alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
enum context_type type, int ctx, int num_desc,
int buf_size, int split_buf_size, int context_base)
{
struct dma_rcv_ctx *d;
int i;
if (type == DMA_CTX_ISO) {
/* try to claim the ISO context usage bit */
if (test_and_set_bit(ctx, &ohci->ir_ctx_usage)) {
PRINT(KERN_ERR, ohci->id, "IR DMA context %d is not available", ctx);
return NULL;
}
}
d = kmalloc(sizeof(struct dma_rcv_ctx), GFP_KERNEL);
if (d == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma_rcv_ctx");
return NULL;
}
memset (d, 0, sizeof (struct dma_rcv_ctx));
d->ohci = ohci;
d->type = type;
d->ctx = ctx;
......@@ -1689,8 +1596,8 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num
if (d->buf_cpu == NULL || d->buf_bus == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
free_dma_rcv_ctx(&d);
return NULL;
free_dma_rcv_ctx(d);
return -ENOMEM;
}
memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
......@@ -1701,8 +1608,8 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num
if (d->prg_cpu == NULL || d->prg_bus == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma prg");
free_dma_rcv_ctx(&d);
return NULL;
free_dma_rcv_ctx(d);
return -ENOMEM;
}
memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
......@@ -1711,8 +1618,8 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num
if (d->spb == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate split buffer");
free_dma_rcv_ctx(&d);
return NULL;
free_dma_rcv_ctx(d);
return -ENOMEM;
}
for (i=0; i<d->num_desc; i++) {
......@@ -1726,8 +1633,8 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num
} else {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma buffer");
free_dma_rcv_ctx(&d);
return NULL;
free_dma_rcv_ctx(d);
return -ENOMEM;
}
......@@ -1741,80 +1648,68 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num
} else {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate dma prg");
free_dma_rcv_ctx(&d);
return NULL;
free_dma_rcv_ctx(d);
return -ENOMEM;
}
}
spin_lock_init(&d->lock);
/* initialize tasklet */
tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long)d);
if (type == DMA_CTX_ISO) {
ohci1394_init_iso_tasklet(&ohci->ir_tasklet, OHCI_ISO_RECEIVE,
dma_rcv_tasklet, (unsigned long) d);
if (ohci1394_register_iso_tasklet(ohci,
&ohci->ir_tasklet) < 0) {
PRINT(KERN_ERR, ohci->id, "No IR DMA context available");
free_dma_rcv_ctx(d);
return -EBUSY;
}
}
else
tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
return d;
return 0;
}
static int free_dma_trm_ctx(struct dma_trm_ctx **d)
static void free_dma_trm_ctx(struct dma_trm_ctx *d)
{
struct ti_ohci *ohci;
int i;
if (*d==NULL) return -1;
ohci = (struct ti_ohci *)(*d)->ohci;
if (d->ohci == NULL)
return;
DBGMSG(ohci->id, "Freeing dma_trm_ctx %d",(*d)->ctx);
DBGMSG(d->ohci->id, "Freeing dma_trm_ctx %d", d->ctx);
ohci1394_stop_context(ohci, (*d)->ctrlClear, NULL);
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
tasklet_kill(&(*d)->task);
if (d->type == DMA_CTX_ISO)
ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->it_tasklet);
else
tasklet_kill(&d->task);
if ((*d)->prg_cpu) {
for (i=0; i<(*d)->num_desc; i++)
if ((*d)->prg_cpu[i] && (*d)->prg_bus[i]) {
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->prg_cpu[i] && d->prg_bus[i]) {
pci_free_consistent(
ohci->dev, sizeof(struct at_dma_prg),
(*d)->prg_cpu[i], (*d)->prg_bus[i]);
d->ohci->dev, sizeof(struct at_dma_prg),
d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("consistent dma_trm prg[%d]", i);
}
kfree((*d)->prg_cpu);
kfree((*d)->prg_bus);
}
/* clear the ISO context usage bit */
if ((*d)->type == DMA_CTX_ISO) {
clear_bit((*d)->ctx, &ohci->it_ctx_usage);
kfree(d->prg_cpu);
kfree(d->prg_bus);
}
kfree(*d);
*d = NULL;
return 0;
/* Mark this context as freed. */
d->ohci = NULL;
}
static struct dma_trm_ctx *
alloc_dma_trm_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num_desc,
static int
alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
enum context_type type, int ctx, int num_desc,
int context_base)
{
struct dma_trm_ctx *d;
int i;
if (type == DMA_CTX_ISO) {
/* try to claim the ISO context usage bit */
if (test_and_set_bit(ctx, &ohci->it_ctx_usage)) {
PRINT(KERN_ERR, ohci->id, "IT DMA context %d is not available", ctx);
return NULL;
}
}
d = kmalloc(sizeof(struct dma_trm_ctx), GFP_KERNEL);
if (d == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma_trm_ctx");
return NULL;
}
memset (d, 0, sizeof (struct dma_trm_ctx));
d->ohci = ohci;
d->type = type;
d->ctx = ctx;
......@@ -1829,8 +1724,8 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num
if (d->prg_cpu == NULL || d->prg_bus == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate at dma prg");
free_dma_trm_ctx(&d);
return NULL;
free_dma_trm_ctx(d);
return -ENOMEM;
}
memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
......@@ -1846,17 +1741,28 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, enum context_type type, int ctx, int num
} else {
PRINT(KERN_ERR, ohci->id,
"Failed to allocate at dma prg");
free_dma_trm_ctx(&d);
return NULL;
free_dma_trm_ctx(d);
return -ENOMEM;
}
}
spin_lock_init(&d->lock);
/* initialize bottom handler */
tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
/* initialize tasklet */
if (type == DMA_CTX_ISO) {
ohci1394_init_iso_tasklet(&ohci->it_tasklet, OHCI_ISO_TRANSMIT,
dma_trm_tasklet, (unsigned long) d);
if (ohci1394_register_iso_tasklet(ohci,
&ohci->it_tasklet) < 0) {
PRINT(KERN_ERR, ohci->id, "No IT DMA context available");
free_dma_trm_ctx(d);
return -EBUSY;
}
}
else
tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
return d;
return 0;
}
static u16 ohci_crc16 (u32 *ptr, int length)
......@@ -2028,15 +1934,14 @@ static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
return reg_read(ohci, OHCI1394_CSRData);
}
static struct hpsb_host_operations ohci1394_ops = {
get_rom: ohci_get_rom,
transmit_packet: ohci_transmit,
devctl: ohci_devctl,
hw_csr_reg: ohci_hw_csr_reg,
static struct hpsb_host_driver ohci1394_driver = {
.name = OHCI1394_DRIVER_NAME,
.get_rom = ohci_get_rom,
.transmit_packet = ohci_transmit,
.devctl = ohci_devctl,
.hw_csr_reg = ohci_hw_csr_reg,
};
static struct hpsb_host_driver *ohci1394_driver;
/***********************************
......@@ -2059,7 +1964,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
struct hpsb_host *host;
struct ti_ohci *ohci; /* shortcut to currently handled device */
unsigned long ohci_base;
int i;
if (version_printed++ == 0)
PRINT_G(KERN_INFO, "%s", version);
......@@ -2069,7 +1973,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
card_id_counter++);
pci_set_master(dev);
host = hpsb_alloc_host(ohci1394_driver, sizeof(struct ti_ohci));
host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci));
if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
ohci = host->hostdata;
......@@ -2151,68 +2055,78 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
/* AR DMA request context allocation */
ohci->ar_req_context =
alloc_dma_rcv_ctx(ohci, DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
OHCI1394_AsReqRcvContextBase);
if (ohci->ar_req_context == NULL)
if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
OHCI1394_AsReqRcvContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate AR Req context");
/* AR DMA response context allocation */
ohci->ar_resp_context =
alloc_dma_rcv_ctx(ohci, DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
OHCI1394_AsRspRcvContextBase);
if (ohci->ar_resp_context == NULL)
if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
OHCI1394_AsRspRcvContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate AR Resp context");
/* AT DMA request context */
ohci->at_req_context =
alloc_dma_trm_ctx(ohci, DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
OHCI1394_AsReqTrContextBase);
if (ohci->at_req_context == NULL)
if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
OHCI1394_AsReqTrContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate AT Req context");
/* AT DMA response context */
ohci->at_resp_context =
alloc_dma_trm_ctx(ohci, DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
OHCI1394_AsRspTrContextBase);
if (ohci->at_resp_context == NULL)
if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
OHCI1394_AsRspTrContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate AT Resp context");
ohci->ir_ctx_usage = 0;
ohci->it_ctx_usage = 0;
/* IR DMA context */
ohci->ir_context =
alloc_dma_rcv_ctx(ohci, DMA_CTX_ISO, 0, IR_NUM_DESC,
IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
OHCI1394_IsoRcvContextBase);
/* Start off with a soft reset, to clear everything to a sane
* state. */
ohci_soft_reset(ohci);
if (ohci->ir_context == NULL)
FAIL(-ENOMEM, "Failed to allocate IR context");
/* Now enable LPS, which we need in order to start accessing
* most of the registers. In fact, on some cards (ALI M5251),
* accessing registers in the SClk domain without LPS enabled
* will lock up the machine. Wait 50msec to make sure we have
* full link enabled. */
reg_write(ohci, OHCI1394_HCControlSet, 0x00080000);
mdelay(50);
/* IT DMA context allocation */
ohci->it_context =
alloc_dma_trm_ctx(ohci, DMA_CTX_ISO, 0, IT_NUM_DESC,
OHCI1394_IsoXmitContextBase);
/* Determine the number of available IR and IT contexts. */
ohci->nb_iso_rcv_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
DBGMSG(ohci->id, "%d iso receive contexts available",
ohci->nb_iso_rcv_ctx);
if (ohci->it_context == NULL)
FAIL(-ENOMEM, "Failed to allocate IT context");
ohci->nb_iso_xmit_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
DBGMSG(ohci->id, "%d iso transmit contexts available",
ohci->nb_iso_xmit_ctx);
/* Set the usage bits for non-existent contexts so they can't
* be allocated */
ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
INIT_LIST_HEAD(&ohci->iso_tasklet_list);
spin_lock_init(&ohci->iso_tasklet_list_lock);
ohci->ISO_channel_usage = 0;
spin_lock_init(&ohci->IR_channel_lock);
for(i = 0; i < OHCI1394_MAX_IRQ_HOOKS; i++) {
ohci->irq_hooks[i].irq_handler = NULL;
ohci->irq_hooks[i].data = NULL;
}
/* IR DMA context */
if (alloc_dma_rcv_ctx(ohci, &ohci->ir_context,
DMA_CTX_ISO, 0, IR_NUM_DESC,
IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
OHCI1394_IsoRcvContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate IR context");
/* IT DMA context allocation */
if (alloc_dma_trm_ctx(ohci, &ohci->it_context,
DMA_CTX_ISO, 0, IT_NUM_DESC,
OHCI1394_IsoXmitContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate IT context");
if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
OHCI1394_DRIVER_NAME, ohci))
FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
......@@ -2308,12 +2222,12 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
static struct pci_device_id ohci1394_pci_tbl[] __devinitdata = {
{
class: PCI_CLASS_FIREWIRE_OHCI,
class_mask: 0x00ffffff,
vendor: PCI_ANY_ID,
device: PCI_ANY_ID,
subvendor: PCI_ANY_ID,
subdevice: PCI_ANY_ID,
.class = PCI_CLASS_FIREWIRE_OHCI,
.class_mask = 0x00ffffff,
.vendor = PCI_ANY_ID,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ 0, },
};
......@@ -2321,10 +2235,10 @@ static struct pci_device_id ohci1394_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
static struct pci_driver ohci1394_pci_driver = {
name: OHCI1394_DRIVER_NAME,
id_table: ohci1394_pci_tbl,
probe: ohci1394_pci_probe,
remove: ohci1394_pci_remove,
.name = OHCI1394_DRIVER_NAME,
.id_table = ohci1394_pci_tbl,
.probe = ohci1394_pci_probe,
.remove = ohci1394_pci_remove,
};
......@@ -2355,65 +2269,69 @@ void ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
if (msg) PRINT(KERN_ERR, ohci->id, "%s: dma prg stopped", msg);
}
static inline void ohci1394_run_irq_hooks(struct ti_ohci *ohci,
quadlet_t isoRecvEvent,
quadlet_t isoXmitEvent)
void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
void (*func)(unsigned long), unsigned long data)
{
int i;
for(i = 0; i < OHCI1394_MAX_IRQ_HOOKS; i++) {
if(ohci->irq_hooks[i].irq_handler != NULL) {
ohci->irq_hooks[i].irq_handler(ohci->id, isoRecvEvent, isoXmitEvent,
ohci->irq_hooks[i].data);
}
}
tasklet_init(&tasklet->tasklet, func, data);
tasklet->type = type;
/* We init the tasklet->link field, so we can list_del() it
* without worrying whether it was added to the list or not. */
INIT_LIST_HEAD(&tasklet->link);
}
int ohci1394_hook_irq(struct ti_ohci *ohci,
void (*irq_handler) (int, quadlet_t, quadlet_t, void *),
void *data)
int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
struct ohci1394_iso_tasklet *tasklet)
{
int i;
/* find a free slot */
for(i = 0; i < OHCI1394_MAX_IRQ_HOOKS; i++) {
if(ohci->irq_hooks[i].irq_handler == NULL)
break;
unsigned long flags, *usage;
int n, i, r = -EBUSY;
if (tasklet->type == OHCI_ISO_TRANSMIT) {
n = ohci->nb_iso_xmit_ctx;
usage = &ohci->it_ctx_usage;
}
else {
n = ohci->nb_iso_rcv_ctx;
usage = &ohci->ir_ctx_usage;
}
if(i >= OHCI1394_MAX_IRQ_HOOKS)
return -EBUSY;
spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
ohci->irq_hooks[i].irq_handler = irq_handler;
ohci->irq_hooks[i].data = data;
for (i = 0; i < n; i++)
if (!test_and_set_bit(i, usage)) {
tasklet->context = i;
list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
r = 0;
break;
}
/* ohci1394 will never be unloaded while an IRQ hook is
in use, because the user must reference this symbol */
spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
return 0;
return r;
}
void ohci1394_unhook_irq(struct ti_ohci *ohci,
void (*irq_handler) (int, quadlet_t, quadlet_t, void *),
void *data)
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
struct ohci1394_iso_tasklet *tasklet)
{
int i;
for(i = 0; i < OHCI1394_MAX_IRQ_HOOKS; i++) {
if( (ohci->irq_hooks[i].irq_handler == irq_handler) &&
(ohci->irq_hooks[i].data == data) )
break;
}
if(i < OHCI1394_MAX_IRQ_HOOKS) {
ohci->irq_hooks[i].irq_handler = NULL;
ohci->irq_hooks[i].data = NULL;
}
unsigned long flags;
tasklet_kill(&tasklet->tasklet);
spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
if (tasklet->type == OHCI_ISO_TRANSMIT)
clear_bit(tasklet->context, &ohci->it_ctx_usage);
else
clear_bit(tasklet->context, &ohci->ir_ctx_usage);
list_del(&tasklet->link);
spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_hook_irq);
EXPORT_SYMBOL(ohci1394_unhook_irq);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
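Editorial note, not part of the patch: a hypothetical consumer-side sketch of the new interface that replaces the IRQ hooks, along the lines of what video1394, amdtp or dv1394 would do (the handler and function names are illustrative):

static void my_iso_recv_handler(unsigned long data)
{
	/* runs in tasklet context whenever the claimed IR DMA context
	 * raises an isochRx interrupt */
}

static int example_claim_ir_context(struct ti_ohci *ohci,
				    struct ohci1394_iso_tasklet *t,
				    unsigned long data)
{
	ohci1394_init_iso_tasklet(t, OHCI_ISO_RECEIVE,
				  my_iso_recv_handler, data);

	if (ohci1394_register_iso_tasklet(ohci, t) < 0)
		return -EBUSY;		/* no free IR DMA context */

	/* t->context now holds the allocated context number; release it
	 * later with ohci1394_unregister_iso_tasklet(ohci, t). */
	return 0;
}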
/***********************************
......@@ -2427,27 +2345,11 @@ MODULE_LICENSE("GPL");
static void __exit ohci1394_cleanup (void)
{
pci_unregister_driver(&ohci1394_pci_driver);
hpsb_unregister_lowlevel(ohci1394_driver);
}
static int __init ohci1394_init(void)
{
int ret;
ohci1394_driver = hpsb_register_lowlevel(&ohci1394_ops,
OHCI1394_DRIVER_NAME);
if (!ohci1394_driver) {
PRINT_G(KERN_ERR, "hpsb_register_lowlevel failed");
return -ENOMEM;
}
ret = pci_module_init(&ohci1394_pci_driver);
if (ret < 0) {
PRINT_G(KERN_ERR, "pci_module_init failed");
hpsb_unregister_lowlevel(ohci1394_driver);
return ret;
}
return ret;
return pci_module_init(&ohci1394_pci_driver);
}
module_init(ohci1394_init);
......
......@@ -127,12 +127,10 @@ struct dma_trm_ctx {
quadlet_t *branchAddrPtr;
/* list of packets inserted in the AT FIFO */
struct hpsb_packet *fifo_first;
struct hpsb_packet *fifo_last;
struct list_head fifo_list;
/* list of pending packets to be inserted in the AT FIFO */
struct hpsb_packet *pending_first;
struct hpsb_packet *pending_last;
struct list_head pending_list;
spinlock_t lock;
struct tasklet_struct task;
......@@ -141,6 +139,13 @@ struct dma_trm_ctx {
int cmdPtr;
};
struct ohci1394_iso_tasklet {
struct tasklet_struct tasklet;
struct list_head link;
int context;
enum { OHCI_ISO_TRANSMIT, OHCI_ISO_RECEIVE } type;
};
struct ti_ohci {
int id; /* sequential card number */
......@@ -172,21 +177,23 @@ struct ti_ohci {
unsigned int max_packet_size;
/* async receive */
struct dma_rcv_ctx *ar_resp_context;
struct dma_rcv_ctx *ar_req_context;
struct dma_rcv_ctx ar_resp_context;
struct dma_rcv_ctx ar_req_context;
/* async transmit */
struct dma_trm_ctx *at_resp_context;
struct dma_trm_ctx *at_req_context;
struct dma_trm_ctx at_resp_context;
struct dma_trm_ctx at_req_context;
/* iso receive */
struct dma_rcv_ctx *ir_context;
struct dma_rcv_ctx ir_context;
struct ohci1394_iso_tasklet ir_tasklet;
spinlock_t IR_channel_lock;
int nb_iso_rcv_ctx;
unsigned long ir_ctx_usage; /* use test_and_set_bit() for atomicity */
/* iso transmit */
struct dma_trm_ctx *it_context;
struct dma_trm_ctx it_context;
struct ohci1394_iso_tasklet it_tasklet;
int nb_iso_xmit_ctx;
unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */
......@@ -202,16 +209,12 @@ struct ti_ohci {
int self_id_errors;
/* IRQ hooks, for video1394 and dv1394 */
/* Tasklets for iso receive and transmit, used by video1394,
* amdtp and dv1394 */
#define OHCI1394_MAX_IRQ_HOOKS 16
struct list_head iso_tasklet_list;
spinlock_t iso_tasklet_list_lock;
struct ohci1394_irq_hook {
void (*irq_handler) (int card, quadlet_t isoRecvEvent,
quadlet_t isoXmitEvent, void *data);
void *data;
} irq_hooks[OHCI1394_MAX_IRQ_HOOKS];
/* Swap the selfid buffer? */
unsigned int selfid_swap:1;
/* Some Apple chipset seem to swap incoming headers for us */
......@@ -399,15 +402,17 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
#define OHCI1394_TCODE_PHY 0xE
void ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg);
struct ti_ohci *ohci1394_get_struct(int card_num);
void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet,
int type,
void (*func)(unsigned long),
unsigned long data);
int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
struct ohci1394_iso_tasklet *tasklet);
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
struct ohci1394_iso_tasklet *tasklet);
int ohci1394_hook_irq(struct ti_ohci *ohci,
void (*irq_handler) (int, quadlet_t, quadlet_t, void *),
void *data);
void ohci1394_stop_context (struct ti_ohci *ohci, int reg, char *msg);
struct ti_ohci *ohci1394_get_struct(int card_num);
void ohci1394_unhook_irq(struct ti_ohci *ohci,
void (*irq_handler) (int, quadlet_t, quadlet_t, void *),
void *data);
#endif
......@@ -65,7 +65,7 @@ MODULE_PARM_DESC(skip_eeprom, "Do not try to read bus info block from serial eep
static int skip_eeprom = 0;
static struct hpsb_host_driver *lynx_driver;
static struct hpsb_host_driver lynx_driver;
static unsigned int card_id;
......@@ -466,7 +466,7 @@ static void send_next(struct ti_lynx *lynx, int what)
struct hpsb_packet *packet;
d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
packet = d->queue;
packet = driver_packet(d->queue.next);
d->header_dma = pci_map_single(lynx->dev, packet->header,
packet->header_size, PCI_DMA_TODEVICE);
......@@ -538,7 +538,6 @@ static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
return 0;
}
packet->xnext = NULL;
if (packet->tcode == TCODE_WRITEQ
|| packet->tcode == TCODE_READQ_RESPONSE) {
cpu_to_be32s(&packet->header[3]);
......@@ -546,14 +545,9 @@ static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
spin_lock_irqsave(&d->queue_lock, flags);
if (d->queue == NULL) {
d->queue = packet;
d->queue_last = packet;
list_add_tail(&packet->driver_list, &d->queue);
if (d->queue.next == &packet->driver_list)
send_next(lynx, packet->type);
} else {
d->queue_last->xnext = packet;
d->queue_last = packet;
}
spin_unlock_irqrestore(&d->queue_lock, flags);
......@@ -566,7 +560,8 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
struct ti_lynx *lynx = host->hostdata;
int retval = 0;
struct hpsb_packet *packet, *lastpacket;
struct hpsb_packet *packet;
LIST_HEAD(packet_list);
unsigned long flags;
switch (cmd) {
......@@ -620,16 +615,16 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
spin_lock_irqsave(&lynx->async.queue_lock, flags);
reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
packet = lynx->async.queue;
lynx->async.queue = NULL;
list_splice(&lynx->async.queue, &packet_list);
INIT_LIST_HEAD(&lynx->async.queue);
spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
while (packet != NULL) {
lastpacket = packet;
packet = packet->xnext;
hpsb_packet_sent(host, lastpacket, ACKX_ABORTED);
}
while (!list_empty(&packet_list)) {
packet = driver_packet(packet_list.next);
list_del(&packet->driver_list);
hpsb_packet_sent(host, packet, ACKX_ABORTED);
}
break;
......@@ -693,13 +688,13 @@ static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
static struct file_operations aux_ops = {
owner: THIS_MODULE,
read: mem_read,
write: mem_write,
poll: aux_poll,
llseek: mem_llseek,
open: mem_open,
release: mem_release,
.owner = THIS_MODULE,
.read = mem_read,
.write = mem_write,
.poll = aux_poll,
.llseek = mem_llseek,
.open = mem_open,
.release = mem_release,
};
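This aux_ops change is one instance of a conversion applied throughout the patch: the gcc-specific "field: value" initializer extension is replaced with standard C99 designated initializers. A minimal illustration, with a made-up struct tag purely for show (mem_read is the file's own helper):

struct example_ops {
	struct module *owner;
	ssize_t (*read)(struct file *, char *, size_t, loff_t *);
};

/* gcc extension being removed:  { owner: THIS_MODULE, read: mem_read }  */
/* C99 form being introduced: */
static struct example_ops example_demo = {
	.owner = THIS_MODULE,
	.read  = mem_read,
};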
......@@ -896,6 +891,12 @@ static ssize_t mem_read(struct file *file, char *buffer, size_t count,
ssize_t retval;
void *membase;
if (*offset != off) /* Check for EOF before we trust wrap */
return 0;
if (off + count > off)
return 0;
if ((off + count) > PCILYNX_MAX_MEMORY + 1) {
count = PCILYNX_MAX_MEMORY + 1 - off;
}
......@@ -1122,8 +1123,9 @@ static void lynx_irq_handler(int irq, void *dev_id,
spin_lock(&lynx->async.queue_lock);
ack = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_SEND));
packet = lynx->async.queue;
lynx->async.queue = packet->xnext;
packet = driver_packet(lynx->async.queue.next);
list_del(&packet->driver_list);
pci_unmap_single(lynx->dev, lynx->async.header_dma,
packet->header_size, PCI_DMA_TODEVICE);
......@@ -1132,7 +1134,7 @@ static void lynx_irq_handler(int irq, void *dev_id,
packet->data_size, PCI_DMA_TODEVICE);
}
if (lynx->async.queue != NULL) {
if (!list_empty(&lynx->async.queue)) {
send_next(lynx, hpsb_async);
}
......@@ -1154,8 +1156,8 @@ static void lynx_irq_handler(int irq, void *dev_id,
spin_lock(&lynx->iso_send.queue_lock);
packet = lynx->iso_send.queue;
lynx->iso_send.queue = packet->xnext;
packet = driver_packet(lynx->iso_send.queue.next);
list_del(&packet->driver_list);
pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
packet->header_size, PCI_DMA_TODEVICE);
......@@ -1164,7 +1166,7 @@ static void lynx_irq_handler(int irq, void *dev_id,
packet->data_size, PCI_DMA_TODEVICE);
}
if (lynx->iso_send.queue != NULL) {
if (!list_empty(&lynx->iso_send.queue)) {
send_next(lynx, hpsb_iso);
}
......@@ -1327,7 +1329,7 @@ static int __devinit add_card(struct pci_dev *dev,
error = -ENOMEM;
host = hpsb_alloc_host(lynx_driver, sizeof(struct ti_lynx));
host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx));
if (!host) FAIL("failed to allocate control structure memory");
lynx = host->hostdata;
......@@ -1471,7 +1473,8 @@ static int __devinit add_card(struct pci_dev *dev,
lynx->selfid_size = -1;
lynx->phy_reg0 = -1;
lynx->async.queue = NULL;
INIT_LIST_HEAD(&lynx->async.queue);
INIT_LIST_HEAD(&lynx->iso_send.queue);
pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
......@@ -1682,25 +1685,26 @@ static size_t get_lynx_rom(struct hpsb_host *host, const quadlet_t **ptr)
static struct pci_device_id pci_table[] __devinitdata = {
{
vendor: PCI_VENDOR_ID_TI,
device: PCI_DEVICE_ID_TI_PCILYNX,
subvendor: PCI_ANY_ID,
subdevice: PCI_ANY_ID,
.vendor = PCI_VENDOR_ID_TI,
.device = PCI_DEVICE_ID_TI_PCILYNX,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ } /* Terminating entry */
};
static struct pci_driver lynx_pci_driver = {
name: PCILYNX_DRIVER_NAME,
id_table: pci_table,
probe: add_card,
remove: __devexit_p(remove_card),
.name = PCILYNX_DRIVER_NAME,
.id_table = pci_table,
.probe = add_card,
.remove = __devexit_p(remove_card),
};
static struct hpsb_host_operations lynx_ops = {
get_rom: get_lynx_rom,
transmit_packet: lynx_transmit,
devctl: lynx_devctl,
static struct hpsb_host_driver lynx_driver = {
.name = PCILYNX_DRIVER_NAME,
.get_rom = get_lynx_rom,
.transmit_packet = lynx_transmit,
.devctl = lynx_devctl,
};
MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
......@@ -1721,22 +1725,14 @@ static int __init pcilynx_init(void)
}
#endif
lynx_driver = hpsb_register_lowlevel(&lynx_ops, PCILYNX_DRIVER_NAME);
if (!lynx_driver) {
ret = -ENOMEM;
goto free_char_dev;
}
ret = pci_module_init(&lynx_pci_driver);
if (ret < 0) {
PRINT_G(KERN_ERR, "PCI module init failed");
goto unregister_lowlevel;
goto free_char_dev;
}
return 0;
unregister_lowlevel:
hpsb_unregister_lowlevel(lynx_driver);
free_char_dev:
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
......@@ -1748,7 +1744,6 @@ static int __init pcilynx_init(void)
static void __exit pcilynx_cleanup(void)
{
pci_unregister_driver(&lynx_pci_driver);
hpsb_unregister_lowlevel(lynx_driver);
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
......
......@@ -94,7 +94,7 @@ struct ti_lynx {
struct lynx_send_data {
pcl_t pcl_start, pcl;
struct hpsb_packet *queue, *queue_last;
struct list_head queue;
spinlock_t queue_lock;
dma_addr_t header_dma, data_dma;
int channel;
......
......@@ -985,20 +985,20 @@ static int raw1394_release(struct inode *inode, struct file *file)
}
static struct hpsb_highlevel_ops hl_ops = {
add_host: add_host,
remove_host: remove_host,
host_reset: host_reset,
iso_receive: iso_receive,
fcp_request: fcp_request,
.add_host = add_host,
.remove_host = remove_host,
.host_reset = host_reset,
.iso_receive = iso_receive,
.fcp_request = fcp_request,
};
static struct file_operations file_ops = {
owner: THIS_MODULE,
read: raw1394_read,
write: raw1394_write,
poll: raw1394_poll,
open: raw1394_open,
release: raw1394_release,
.owner = THIS_MODULE,
.read = raw1394_read,
.write = raw1394_write,
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
};
static int __init init_raw1394(void)
......
......@@ -320,6 +320,7 @@
#include <linux/blk.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/blk.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/io.h>
......@@ -349,7 +350,7 @@
#include "sbp2.h"
static char version[] __devinitdata =
"$Rev: 507 $ James Goodwin <jamesg@filanet.com>";
"$Rev: 545 $ James Goodwin <jamesg@filanet.com>";
/*
* Module load parameter definitions
......@@ -421,16 +422,25 @@ MODULE_PARM(sbp2_exclusive_login,"i");
MODULE_PARM_DESC(sbp2_exclusive_login, "Exclusive login to sbp2 device (default = 1)");
static int sbp2_exclusive_login = 1;
/*
* SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on if your sbp2 device
* is not properly handling the SCSI inquiry command. This hack makes the inquiry look more
* like a typical MS Windows inquiry.
*/
MODULE_PARM(sbp2_force_inquiry_hack,"i");
MODULE_PARM_DESC(sbp2_force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
static int sbp2_force_inquiry_hack = 0;
/*
* Export information about protocols/devices supported by this driver.
*/
static struct ieee1394_device_id sbp2_id_table[] = {
{
match_flags: IEEE1394_MATCH_SPECIFIER_ID |
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
specifier_id: SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
version: SBP2_SW_VERSION_ENTRY & 0xffffff
.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
.version = SBP2_SW_VERSION_ENTRY & 0xffffff
},
{ }
};
......@@ -504,14 +514,6 @@ static spinlock_t sbp2_host_info_lock = SPIN_LOCK_UNLOCKED;
#define sbp2_spin_unlock(lock, flags) do {restore_flags(flags);} while (0)
#endif
/*
* SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on if your sbp2 device
* is not properly handling the SCSI inquiry command. This hack makes the inquiry look more
* like a typical MS Windows inquiry.
*/
/* #define SBP2_FORCE_36_BYTE_INQUIRY */
/*
* Globals
*/
......@@ -525,29 +527,35 @@ static LIST_HEAD(sbp2_host_info_list);
static struct hpsb_highlevel *sbp2_hl_handle = NULL;
static struct hpsb_highlevel_ops sbp2_hl_ops = {
add_host: sbp2_add_host,
remove_host: sbp2_remove_host,
.add_host = sbp2_add_host,
.remove_host = sbp2_remove_host,
};
static struct hpsb_address_ops sbp2_ops = {
write: sbp2_handle_status_write
.write = sbp2_handle_status_write
};
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
static struct hpsb_address_ops sbp2_physdma_ops = {
read: sbp2_handle_physdma_read,
write: sbp2_handle_physdma_write,
.read = sbp2_handle_physdma_read,
.write = sbp2_handle_physdma_write,
};
#endif
static struct hpsb_protocol_driver sbp2_driver = {
name: "SBP2 Driver",
id_table: sbp2_id_table,
probe: sbp2_probe,
disconnect: sbp2_disconnect,
update: sbp2_update
.name = "SBP2 Driver",
.id_table = sbp2_id_table,
.probe = sbp2_probe,
.disconnect = sbp2_disconnect,
.update = sbp2_update
};
/* List of device firmware revisions that require a forced 36 byte inquiry.
* Note the final 0x0 must be there to denote the end of the list. */
static u32 sbp2_broken_inquiry_list[] = {
0x00002800, /* Stefan Richter <richtest@bauwesen.tu-cottbus.de> */
0x0
};
/**************************************
* General utility functions
......@@ -1903,27 +1911,58 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id)
break;
case SBP2_FIRMWARE_REVISION_KEY:
/*
* Firmware revision (used to find broken
* devices). If the vendor id is 0xa0b8
* (Symbios vendor id), then we have a
* bridge with 128KB max transfer size
* limitation.
*/
/* Firmware revision */
scsi_id->sbp2_firmware_revision
= CONFIG_ROM_VALUE(ud->quadlets[i]);
SBP2_DEBUG("sbp2_firmware_revision = %x",
(unsigned int) scsi_id->sbp2_firmware_revision);
if ((scsi_id->sbp2_firmware_revision & 0xffff00) ==
SBP2_128KB_BROKEN_FIRMWARE) {
SBP2_WARN("warning: Bridge chipset supports 128KB max transfer size");
}
break;
default:
break;
}
}
/* This is the start of our broken device checking. We try to hack
* around oddities and known defects. */
scsi_id->workarounds = 0x0;
/* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
* bridge with 128KB max transfer size limitation. For sanity, we
* only voice this when the current sbp2_max_sectors setting
* exceeds the 128k limit. By default, that is not the case.
*
* It would be really nice if we could detect this before the scsi
* host gets initialized. That way we can down-force the
* sbp2_max_sectors to account for it. That is not currently
* possible. */
if ((scsi_id->sbp2_firmware_revision & 0xffff00) ==
SBP2_128KB_BROKEN_FIRMWARE &&
(sbp2_max_sectors * 512) > (128 * 1024)) {
SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
NODE_BUS_ARGS(scsi_id->ne->nodeid));
SBP2_WARN("WARNING: Current sbp2_max_sectors setting is larger than 128KB (%d sectors)!",
sbp2_max_sectors);
scsi_id->workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
}
/* Check for a blacklisted set of devices that require us to force
* a 36 byte host inquiry. This can be overridden as a module param
* (to force all hosts).
*
* XXX If this does not detect your firmware as being defective,
* but using the sbp2_force_inquiry_hack allows your device to
* work, please submit the value of your firmware revision to the
* linux1394-devel mailing list. */
for (i = 0; sbp2_broken_inquiry_list[i]; i++) {
if ((scsi_id->sbp2_firmware_revision & 0xffff00) ==
sbp2_broken_inquiry_list[i]) {
SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
NODE_BUS_ARGS(scsi_id->ne->nodeid));
scsi_id->workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
break; // No need to continue.
}
}
}
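One detail worth calling out in the checks above: both the 128KB test and the blacklist loop mask the firmware revision with 0xffff00 before comparing, so a single sbp2_broken_inquiry_list entry such as 0x00002800 covers the whole 0x002800-0x0028ff range. A small helper making that rule explicit; it is not part of the patch, and the function name is hypothetical:

static int sbp2_firmware_is_blacklisted(u32 firmware_revision)
{
	int i;

	/* compare only the upper bits; the low byte of the revision is
	 * treated as insignificant */
	for (i = 0; sbp2_broken_inquiry_list[i]; i++)
		if ((firmware_revision & 0xffff00) == sbp2_broken_inquiry_list[i])
			return 1;

	return 0;
}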
/*
......@@ -2400,11 +2439,10 @@ static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_insta
* reject this inquiry command. Fix the request_bufflen.
*/
if (*cmd == INQUIRY) {
#ifdef SBP2_FORCE_36_BYTE_INQUIRY
request_bufflen = cmd[4] = 0x24;
#else
request_bufflen = cmd[4];
#endif
if (sbp2_force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK)
request_bufflen = cmd[4] = 0x24;
else
request_bufflen = cmd[4];
}
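A brief usage note on the knob tested above, added here rather than taken from the patch:

/* Usage note: besides the per-device blacklist flag, the clamp above can
 * be forced for all devices at load time via the new module parameter,
 * e.g.:
 *
 *     modprobe sbp2 sbp2_force_inquiry_hack=1
 */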
/*
......@@ -3096,7 +3134,11 @@ static int sbp2scsi_reset (Scsi_Cmnd *SCpnt)
/*
* Called by scsi stack to get bios parameters (used by fdisk, and at boot).
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28)
static int sbp2scsi_biosparam (Scsi_Disk *disk, kdev_t dev, int geom[])
#else
static int sbp2scsi_biosparam (Scsi_Disk *disk, struct block_device *dev, int geom[])
#endif
{
int heads, sectors, cylinders;
......@@ -3132,7 +3174,14 @@ static int sbp2scsi_detect (Scsi_Host_Template *tpnt)
* host controller currently registered, and for each of those
* we register a scsi host with the scsi stack.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
spin_unlock_irq(&io_request_lock);
sbp2_init();
spin_lock_irq(&io_request_lock);
#else
sbp2_init();
#endif
/* We return the number of hosts registered. */
return scsi_driver_template.present;
......@@ -3178,23 +3227,23 @@ MODULE_LICENSE("GPL");
/* SCSI host template */
static Scsi_Host_Template scsi_driver_template = {
name: "IEEE-1394 SBP-2 protocol driver",
info: sbp2scsi_info,
detect: sbp2scsi_detect,
queuecommand: sbp2scsi_queuecommand,
eh_abort_handler: sbp2scsi_abort,
eh_device_reset_handler:sbp2scsi_reset,
eh_bus_reset_handler: sbp2scsi_reset,
eh_host_reset_handler: sbp2scsi_reset,
bios_param: sbp2scsi_biosparam,
this_id: -1,
sg_tablesize: SBP2_MAX_SG_ELEMENTS,
use_clustering: SBP2_CLUSTERING,
.name = "IEEE-1394 SBP-2 protocol driver",
.info = sbp2scsi_info,
.detect = sbp2scsi_detect,
.queuecommand = sbp2scsi_queuecommand,
.eh_abort_handler = sbp2scsi_abort,
.eh_device_reset_handler = sbp2scsi_reset,
.eh_bus_reset_handler = sbp2scsi_reset,
.eh_host_reset_handler = sbp2scsi_reset,
.bios_param = sbp2scsi_biosparam,
.this_id = -1,
.sg_tablesize = SBP2_MAX_SG_ELEMENTS,
.use_clustering = SBP2_CLUSTERING,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
use_new_eh_code: TRUE,
.use_new_eh_code = TRUE,
#endif
emulated: 1,
proc_name: SBP2_DEVICE_NAME,
.emulated = 1,
.proc_name = SBP2_DEVICE_NAME,
};
static int sbp2_module_init(void)
......
......@@ -357,6 +357,10 @@ struct sbp2_command_info {
};
/* A list of flags for detected oddities and brokenness. */
#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
/*
* Information needed on a per scsi id basis (one for each sbp2 device)
*/
......@@ -412,6 +416,9 @@ struct scsi_id_instance_data {
/* Node entry, as retrieved from NodeMgr entries */
struct node_entry *ne;
/* Device specific workarounds/brokenness */
u32 workarounds;
};
/*
......@@ -542,7 +549,11 @@ static int sbp2_max_speed_and_size(struct sbp2scsi_host_info *hi, struct scsi_id
static int sbp2scsi_detect (Scsi_Host_Template *tpnt);
static const char *sbp2scsi_info (struct Scsi_Host *host);
void sbp2scsi_setup(char *str, int *ints);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28)
static int sbp2scsi_biosparam (Scsi_Disk *disk, kdev_t dev, int geom[]);
#else
static int sbp2scsi_biosparam (Scsi_Disk *disk, struct block_device *dev, int geom[]);
#endif
static int sbp2scsi_abort (Scsi_Cmnd *SCpnt);
static int sbp2scsi_reset (Scsi_Cmnd *SCpnt);
static int sbp2scsi_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
......
......@@ -56,8 +56,6 @@
#include "ohci1394.h"
#define ISO_CHANNELS 64
#define ISO_RECEIVE 0
#define ISO_TRANSMIT 1
#ifndef virt_to_page
#define virt_to_page(x) MAP_NR(x)
......@@ -84,9 +82,10 @@ struct it_dma_prg {
struct dma_iso_ctx {
struct ti_ohci *ohci;
int type; /* ISO_TRANSMIT or ISO_RECEIVE */
int ctx;
int type; /* OHCI_ISO_TRANSMIT or OHCI_ISO_RECEIVE */
struct ohci1394_iso_tasklet iso_tasklet;
int channel;
int ctx;
int last_buffer;
int * next_buffer; /* For ISO Transmit of video packets
to write the correct SYT field
......@@ -153,8 +152,8 @@ printk(level "video1394: " fmt "\n" , ## args)
#define PRINT(level, card, fmt, args...) \
printk(level "video1394_%d: " fmt "\n" , card , ## args)
static void irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent, void *data);
void wakeup_dma_ir_ctx(unsigned long l);
void wakeup_dma_it_ctx(unsigned long l);
static LIST_HEAD(video1394_cards);
static spinlock_t video1394_cards_lock = SPIN_LOCK_UNLOCKED;
......@@ -234,12 +233,12 @@ static void rvfree(void * mem, unsigned long size)
static int free_dma_iso_ctx(struct dma_iso_ctx *d)
{
int i;
unsigned long *usage;
DBGMSG(d->ohci->id, "Freeing dma_iso_ctx %d", d->ctx);
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
ohci1394_unhook_irq(d->ohci, irq_handler, d);
if (d->iso_tasklet.link.next != NULL)
ohci1394_unregister_iso_tasklet(d->ohci, &d->iso_tasklet);
if (d->buf)
rvfree((void *)d->buf, d->num_desc * d->buf_size);
......@@ -265,11 +264,6 @@ static int free_dma_iso_ctx(struct dma_iso_ctx *d)
if (d->next_buffer)
kfree(d->next_buffer);
usage = (d->type == ISO_RECEIVE) ? &d->ohci->ir_ctx_usage :
&d->ohci->it_ctx_usage;
/* clear the ISO context usage bit */
clear_bit(d->ctx, usage);
list_del(&d->link);
kfree(d);
......@@ -281,55 +275,28 @@ static struct dma_iso_ctx *
alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
int buf_size, int channel, unsigned int packet_size)
{
struct dma_iso_ctx *d=NULL;
struct dma_iso_ctx *d;
int i;
unsigned long *usage = (type == ISO_RECEIVE) ? &ohci->ir_ctx_usage :
&ohci->it_ctx_usage;
/* try to claim the ISO context usage bit */
for (i = 0; i < ohci->nb_iso_rcv_ctx; i++) {
if (!test_and_set_bit(i, usage)) {
PRINT(KERN_ERR, ohci->id, "Free iso ctx %d found", i);
break;
}
}
if (i == ohci->nb_iso_rcv_ctx) {
PRINT(KERN_ERR, ohci->id, "No DMA contexts available");
return NULL;
}
d = (struct dma_iso_ctx *)kmalloc(sizeof(struct dma_iso_ctx),
GFP_KERNEL);
if (d==NULL) {
d = kmalloc(sizeof(struct dma_iso_ctx), GFP_KERNEL);
if (d == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma_iso_ctx");
return NULL;
}
memset(d, 0, sizeof(struct dma_iso_ctx));
memset(d, 0, sizeof *d);
d->ohci = (void *)ohci;
d->ohci = ohci;
d->type = type;
d->ctx = i;
d->channel = channel;
d->num_desc = num_desc;
d->frame_size = buf_size;
if (buf_size%PAGE_SIZE)
d->buf_size = buf_size + PAGE_SIZE - (buf_size%PAGE_SIZE);
else
d->buf_size = buf_size;
d->buf_size = PAGE_ALIGN(buf_size);
d->last_buffer = -1;
d->buf = NULL;
d->ir_prg = NULL;
init_waitqueue_head(&d->waitq);
if (ohci1394_hook_irq(ohci, irq_handler, d) != 0) {
PRINT(KERN_ERR, ohci->id, "ohci1394_hook_irq() failed");
free_dma_iso_ctx(d);
return NULL;
}
d->buf = rvmalloc(d->num_desc * d->buf_size);
if (d->buf == NULL) {
......@@ -339,7 +306,24 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
}
memset(d->buf, 0, d->num_desc * d->buf_size);
if (type == ISO_RECEIVE) {
if (type == OHCI_ISO_RECEIVE)
ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
wakeup_dma_ir_ctx,
(unsigned long) d);
else
ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
wakeup_dma_it_ctx,
(unsigned long) d);
if (ohci1394_register_iso_tasklet(ohci, &d->iso_tasklet) < 0) {
PRINT(KERN_ERR, ohci->id, "no free iso %s contexts",
type == OHCI_ISO_RECEIVE ? "receive" : "transmit");
free_dma_iso_ctx(d);
return NULL;
}
d->ctx = d->iso_tasklet.context;
if (type == OHCI_ISO_RECEIVE) {
d->ctrlSet = OHCI1394_IsoRcvContextControlSet+32*d->ctx;
d->ctrlClear = OHCI1394_IsoRcvContextControlClear+32*d->ctx;
d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
......@@ -359,7 +343,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
d->left_size = (d->frame_size % PAGE_SIZE) ?
d->frame_size % PAGE_SIZE : PAGE_SIZE;
for (i=0;i<d->num_desc;i++) {
d->ir_prg[i] = kmalloc(d->nb_cmd *
sizeof(struct dma_cmd),
......@@ -371,8 +355,9 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
return NULL;
}
}
}
else { /* ISO_TRANSMIT */
else { /* OHCI_ISO_TRANSMIT */
d->ctrlSet = OHCI1394_IsoXmitContextControlSet+16*d->ctx;
d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
......@@ -458,7 +443,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
PRINT(KERN_INFO, ohci->id, "Iso %s DMA: %d buffers "
"of size %d allocated for a frame size %d, each with %d prgs",
(type==ISO_RECEIVE) ? "receive" : "transmit",
(type == OHCI_ISO_RECEIVE) ? "receive" : "transmit",
d->num_desc, d->buf_size, d->frame_size, d->nb_cmd);
return d;
......@@ -563,18 +548,14 @@ find_ctx(struct list_head *list, int type, int channel)
return NULL;
}
int wakeup_dma_ir_ctx(struct ti_ohci *ohci, struct dma_iso_ctx *d)
void wakeup_dma_ir_ctx(unsigned long l)
{
struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
int i;
if (d==NULL) {
PRINT(KERN_ERR, ohci->id, "Iso receive event received but "
"context not allocated");
return -EFAULT;
}
spin_lock(&d->lock);
for (i=0;i<d->num_desc;i++) {
for (i = 0; i < d->num_desc; i++) {
if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
reset_ir_status(d, i);
d->buffer_status[i] = VIDEO1394_BUFFER_READY;
......@@ -585,9 +566,11 @@ int wakeup_dma_ir_ctx(struct ti_ohci *ohci, struct dma_iso_ctx *d)
#endif
}
}
spin_unlock(&d->lock);
if (waitqueue_active(&d->waitq)) wake_up_interruptible(&d->waitq);
return 0;
if (waitqueue_active(&d->waitq))
wake_up_interruptible(&d->waitq);
}
static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
......@@ -642,29 +625,28 @@ static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
#endif
}
int wakeup_dma_it_ctx(struct ti_ohci *ohci, struct dma_iso_ctx *d)
void wakeup_dma_it_ctx(unsigned long l)
{
struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
struct ti_ohci *ohci = d->ohci;
int i;
if (d==NULL) {
PRINT(KERN_ERR, ohci->id, "Iso transmit event received but "
"context not allocated");
return -EFAULT;
}
spin_lock(&d->lock);
for (i=0;i<d->num_desc;i++) {
if (d->it_prg[i][d->last_used_cmd[i]].end.status&
cpu_to_le32(0xFFFF0000)) {
for (i = 0; i < d->num_desc; i++) {
if (d->it_prg[i][d->last_used_cmd[i]].end.status &
cpu_to_le32(0xFFFF0000)) {
int next = d->next_buffer[i];
put_timestamp(ohci, d, next);
d->it_prg[i][d->last_used_cmd[i]].end.status = 0;
d->buffer_status[i] = VIDEO1394_BUFFER_READY;
}
}
spin_unlock(&d->lock);
if (waitqueue_active(&d->waitq)) wake_up_interruptible(&d->waitq);
return 0;
if (waitqueue_active(&d->waitq))
wake_up_interruptible(&d->waitq);
}
static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
......@@ -871,13 +853,13 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
}
ohci->ISO_channel_usage |= mask;
if (v.buf_size<=0) {
if (v.buf_size == 0 || v.buf_size > VIDEO1394_MAX_SIZE) {
PRINT(KERN_ERR, ohci->id,
"Invalid %d length buffer requested",v.buf_size);
return -EFAULT;
}
if (v.nb_buffers<=0) {
if (v.nb_buffers == 0 || v.nb_buffers > VIDEO1394_MAX_SIZE) {
PRINT(KERN_ERR, ohci->id,
"Invalid %d buffers requested",v.nb_buffers);
return -EFAULT;
......@@ -891,7 +873,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
}
if (cmd == VIDEO1394_LISTEN_CHANNEL) {
d = alloc_dma_iso_ctx(ohci, ISO_RECEIVE,
d = alloc_dma_iso_ctx(ohci, OHCI_ISO_RECEIVE,
v.nb_buffers, v.buf_size,
v.channel, 0);
......@@ -912,7 +894,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d->ctx, v.channel);
}
else {
d = alloc_dma_iso_ctx(ohci, ISO_TRANSMIT,
d = alloc_dma_iso_ctx(ohci, OHCI_ISO_TRANSMIT,
v.nb_buffers, v.buf_size,
v.channel, v.packet_size);
......@@ -966,9 +948,9 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
ohci->ISO_channel_usage &= ~mask;
if (cmd == VIDEO1394_UNLISTEN_CHANNEL)
d = find_ctx(&ctx->context_list, ISO_RECEIVE, channel);
d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, channel);
else
d = find_ctx(&ctx->context_list, ISO_TRANSMIT, channel);
d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, channel);
if (d == NULL) return -EFAULT;
PRINT(KERN_INFO, ohci->id, "Iso context %d "
......@@ -985,7 +967,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
if(copy_from_user(&v, (void *)arg, sizeof(v)))
return -EFAULT;
d = find_ctx(&ctx->context_list, ISO_RECEIVE, v.channel);
d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
......@@ -1047,7 +1029,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
if(copy_from_user(&v, (void *)arg, sizeof(v)))
return -EFAULT;
d = find_ctx(&ctx->context_list, ISO_RECEIVE, v.channel);
d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
......@@ -1128,7 +1110,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
if(copy_from_user(&v, (void *)arg, sizeof(v)))
return -EFAULT;
d = find_ctx(&ctx->context_list, ISO_TRANSMIT, v.channel);
d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
......@@ -1217,7 +1199,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
if(copy_from_user(&v, (void *)arg, sizeof(v)))
return -EFAULT;
d = find_ctx(&ctx->context_list, ISO_TRANSMIT, v.channel);
d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
......@@ -1340,7 +1322,7 @@ static int video1394_release(struct inode *inode, struct file *file)
ohci->ISO_channel_usage &= ~mask;
PRINT(KERN_INFO, ohci->id, "On release: Iso %s context "
"%d stop listening on channel %d",
d->type == ISO_RECEIVE ? "receive" : "transmit",
d->type == OHCI_ISO_RECEIVE ? "receive" : "transmit",
d->ctx, d->channel);
free_dma_iso_ctx(d);
}
......@@ -1352,27 +1334,13 @@ static int video1394_release(struct inode *inode, struct file *file)
return 0;
}
static void irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent, void *data)
{
struct dma_iso_ctx *d = (struct dma_iso_ctx *) data;
DBGMSG(card, "Iso event Recv: %08x Xmit: %08x",
isoRecvIntEvent, isoXmitIntEvent);
if (d->type == ISO_RECEIVE && isoRecvIntEvent & (1 << d->ctx))
wakeup_dma_ir_ctx(d->ohci, d);
if (d->type == ISO_TRANSMIT && isoXmitIntEvent & (1 << d->ctx))
wakeup_dma_it_ctx(d->ohci, d);
}
static struct file_operations video1394_fops=
{
owner: THIS_MODULE,
ioctl: video1394_ioctl,
mmap: video1394_mmap,
open: video1394_open,
release: video1394_release
.owner = THIS_MODULE,
.ioctl = video1394_ioctl,
.mmap = video1394_mmap,
.open = video1394_open,
.release = video1394_release
};
static int video1394_init(struct ti_ohci *ohci)
......@@ -1460,8 +1428,8 @@ static void video1394_add_host (struct hpsb_host *host)
}
static struct hpsb_highlevel_ops hl_ops = {
add_host: video1394_add_host,
remove_host: video1394_remove_host,
.add_host = video1394_add_host,
.remove_host = video1394_remove_host,
};
MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
......