Commit 5f4e925a authored by Duane Griffin, committed by Greg Kroah-Hartman

Staging: b3dfg: fixups and improvements

- Added support for cable plug/unplug detection.
- Improvements to error handling.
- Switch to the pci_* DMA API.
- Removed set_num_buffers functionality.
- Locking review.
- Unconditionally disable transmission when releasing device.
Signed-off-by: Justin Bronder <jsbronder@brontes3d.com>
Cc: Duane Griffin <duaneg@dghda.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 44c88924
...@@ -34,52 +34,52 @@ ...@@ -34,52 +34,52 @@
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
/* TODO: /* TODO:
* locking
* queue/wait buffer presents filltime results for each frame? * queue/wait buffer presents filltime results for each frame?
* counting of dropped frames * counting of dropped frames
* review endianness * review endianness
*/ */
#ifdef DEBUG static unsigned int b3dfg_nbuf = 2;
#define dbg(msg...) printk(msg)
#else module_param_named(buffer_count, b3dfg_nbuf, uint, 0444);
#define dbg(msg...)
#endif MODULE_PARM_DESC(buffer_count, "Number of buffers (min 2, default 2)\n");
MODULE_AUTHOR("Daniel Drake <ddrake@brontes3d.com>");
MODULE_DESCRIPTION("Brontes frame grabber driver");
MODULE_LICENSE("GPL");
#define DRIVER_NAME "b3dfg" #define DRIVER_NAME "b3dfg"
#define PFX DRIVER_NAME ": "
#define B3DFG_MAX_DEVS 4 #define B3DFG_MAX_DEVS 4
#define B3DFG_NR_TRIPLET_BUFFERS 4
#define B3DFG_NR_FRAME_BUFFERS (B3DFG_NR_TRIPLET_BUFFERS * 3)
#define B3DFG_FRAMES_PER_BUFFER 3 #define B3DFG_FRAMES_PER_BUFFER 3
#define B3DFG_BAR_REGS 0 #define B3DFG_BAR_REGS 0
#define B3DFG_REGS_LENGTH 0x10000 #define B3DFG_REGS_LENGTH 0x10000
#define B3DFG_IOC_MAGIC 0xb3 /* dfg :-) */ #define B3DFG_IOC_MAGIC 0xb3 /* dfg :-) */
#define B3DFG_IOCGFRMSZ _IOR(B3DFG_IOC_MAGIC, 1, int) #define B3DFG_IOCGFRMSZ _IOR(B3DFG_IOC_MAGIC, 1, int)
#define B3DFG_IOCTNUMBUFS _IO(B3DFG_IOC_MAGIC, 2) #define B3DFG_IOCTNUMBUFS _IO(B3DFG_IOC_MAGIC, 2)
#define B3DFG_IOCTTRANS _IO(B3DFG_IOC_MAGIC, 3) #define B3DFG_IOCTTRANS _IO(B3DFG_IOC_MAGIC, 3)
#define B3DFG_IOCTQUEUEBUF _IO(B3DFG_IOC_MAGIC, 4) #define B3DFG_IOCTQUEUEBUF _IO(B3DFG_IOC_MAGIC, 4)
#define B3DFG_IOCTPOLLBUF _IOWR(B3DFG_IOC_MAGIC, 5, struct b3dfg_poll) #define B3DFG_IOCTPOLLBUF _IOWR(B3DFG_IOC_MAGIC, 5, struct b3dfg_poll)
#define B3DFG_IOCTWAITBUF _IOWR(B3DFG_IOC_MAGIC, 6, struct b3dfg_wait) #define B3DFG_IOCTWAITBUF _IOWR(B3DFG_IOC_MAGIC, 6, struct b3dfg_wait)
#define B3DFG_IOCGWANDSTAT _IOR(B3DFG_IOC_MAGIC, 7, int) #define B3DFG_IOCGWANDSTAT _IOR(B3DFG_IOC_MAGIC, 7, int)
enum { enum {
/* number of 4kb pages per frame */ /* number of 4kb pages per frame */
B3D_REG_FRM_SIZE = 0x0, B3D_REG_FRM_SIZE = 0x0,
/* bit 0: set to enable interrupts */ /* bit 0: set to enable interrupts
* bit 1: set to enable cable status change interrupts */
B3D_REG_HW_CTRL = 0x4, B3D_REG_HW_CTRL = 0x4,
/* bit 0-1 - 1-based ID of next pending frame transfer (0 = nothing pending) /* bit 0-1 - 1-based ID of next pending frame transfer (0 = none)
* bit 2 indicates the previous DMA transfer has completed * bit 2 indicates the previous DMA transfer has completed
* bit 3 indicates wand cable status change
* bit 8:15 - counter of number of discarded triplets */ * bit 8:15 - counter of number of discarded triplets */
B3D_REG_DMA_STS = 0x8, B3D_REG_DMA_STS = 0x8,
...@@ -110,41 +110,48 @@ enum b3dfg_buffer_state { ...@@ -110,41 +110,48 @@ enum b3dfg_buffer_state {
struct b3dfg_buffer { struct b3dfg_buffer {
unsigned char *frame[B3DFG_FRAMES_PER_BUFFER]; unsigned char *frame[B3DFG_FRAMES_PER_BUFFER];
u8 state;
struct list_head list; struct list_head list;
u8 state;
}; };
struct b3dfg_dev { struct b3dfg_dev {
/* no protection needed: all finalized at initialization time */ /* no protection needed: all finalized at initialization time */
struct pci_dev *pdev; struct pci_dev *pdev;
struct cdev chardev; struct cdev chardev;
struct class_device *classdev; struct class_device *classdev;
void __iomem *regs; void __iomem *regs;
unsigned int frame_size; unsigned int frame_size;
/* we want to serialize some ioctl operations */ /*
struct mutex ioctl_mutex; * Protects buffer state, including buffer_queue, triplet_ready,
* cur_dma_frame_idx & cur_dma_frame_addr.
/* preallocated frame buffers */ */
unsigned char *frame_buffer[B3DFG_NR_FRAME_BUFFERS];
/* buffers_lock protects num_buffers, buffers, buffer_queue */
spinlock_t buffer_lock; spinlock_t buffer_lock;
int num_buffers;
struct b3dfg_buffer *buffers; struct b3dfg_buffer *buffers;
struct list_head buffer_queue; struct list_head buffer_queue;
wait_queue_head_t buffer_waitqueue; /* Last frame in triplet transferred (-1 if none). */
int cur_dma_frame_idx;
atomic_t mapping_count; /* Current frame's address for DMA. */
dma_addr_t cur_dma_frame_addr;
/*
* Protects cstate_tstamp.
* Nests inside buffer_lock.
*/
spinlock_t cstate_lock;
unsigned long cstate_tstamp;
/*
* Protects triplets_dropped.
* Nests inside buffers_lock.
*/
spinlock_t triplets_dropped_lock; spinlock_t triplets_dropped_lock;
unsigned int triplets_dropped; unsigned int triplets_dropped;
/* FIXME: we need some locking here. this could be accessed in parallel wait_queue_head_t buffer_waitqueue;
* from the queue_buffer ioctl and the interrupt handler. */
int cur_dma_frame_idx;
dma_addr_t cur_dma_frame_addr;
unsigned int transmission_enabled:1; unsigned int transmission_enabled:1;
unsigned int triplet_ready:1; unsigned int triplet_ready:1;
...@@ -159,11 +166,13 @@ static const struct pci_device_id b3dfg_ids[] __devinitdata = { ...@@ -159,11 +166,13 @@ static const struct pci_device_id b3dfg_ids[] __devinitdata = {
{ PCI_DEVICE(0x0b3d, 0x0001) }, { PCI_DEVICE(0x0b3d, 0x0001) },
/* FIXME: remove this ID once all boards have been moved to 0xb3d. /* FIXME: remove this ID once all boards have been moved to 0xb3d.
* this is Eureka's vendor ID that we borrowed before we bought our own. */ * Eureka's vendor ID that we borrowed before we bought our own. */
{ PCI_DEVICE(0x1901, 0x0001) }, { PCI_DEVICE(0x1901, 0x0001) },
{ }, { },
}; };
MODULE_DEVICE_TABLE(pci, b3dfg_ids);
/***** user-visible types *****/ /***** user-visible types *****/
struct b3dfg_poll { struct b3dfg_poll {
...@@ -191,145 +200,61 @@ static void b3dfg_write32(struct b3dfg_dev *fgdev, u16 reg, u32 value) ...@@ -191,145 +200,61 @@ static void b3dfg_write32(struct b3dfg_dev *fgdev, u16 reg, u32 value)
/**** buffer management ****/ /**** buffer management ****/
/* program EC220 for transfer of a specific frame */ /*
static void setup_frame_transfer(struct b3dfg_dev *fgdev, * Program EC220 for transfer of a specific frame.
struct b3dfg_buffer *buf, int frame, int acknowledge) * Called with buffer_lock held.
*/
static int setup_frame_transfer(struct b3dfg_dev *fgdev,
struct b3dfg_buffer *buf, int frame)
{ {
unsigned char *frm_addr; unsigned char *frm_addr;
dma_addr_t frm_addr_dma; dma_addr_t frm_addr_dma;
struct device *dev = &fgdev->pdev->dev; unsigned int frm_size = fgdev->frame_size;
unsigned int frame_size = fgdev->frame_size;
unsigned char dma_sts = 0xd;
frm_addr = buf->frame[frame]; frm_addr = buf->frame[frame];
frm_addr_dma = dma_map_single(dev, frm_addr, frame_size, DMA_FROM_DEVICE); frm_addr_dma = pci_map_single(fgdev->pdev, frm_addr,
frm_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(frm_addr_dma))
return -ENOMEM;
fgdev->cur_dma_frame_addr = frm_addr_dma; fgdev->cur_dma_frame_addr = frm_addr_dma;
fgdev->cur_dma_frame_idx = frame; fgdev->cur_dma_frame_idx = frame;
b3dfg_write32(fgdev, B3D_REG_EC220_DMA_ADDR, cpu_to_le32(frm_addr_dma)); b3dfg_write32(fgdev, B3D_REG_EC220_DMA_ADDR, cpu_to_le32(frm_addr_dma));
b3dfg_write32(fgdev, B3D_REG_EC220_TRF_SIZE, cpu_to_le32(frame_size >> 2)); b3dfg_write32(fgdev, B3D_REG_EC220_TRF_SIZE, cpu_to_le32(frm_size >> 2));
if (likely(acknowledge))
dma_sts |= 0x2;
b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0xf); b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0xf);
}
/* retrieve a buffer pointer from a buffer index. also checks that the
* requested buffer actually exists. buffer_lock should be held by caller */
static inline struct b3dfg_buffer *buffer_from_idx(struct b3dfg_dev *fgdev,
int idx)
{
if (unlikely(idx >= fgdev->num_buffers))
return NULL;
return &fgdev->buffers[idx];
}
/* caller should hold buffer lock */ return 0;
static void free_all_buffers(struct b3dfg_dev *fgdev)
{
kfree(fgdev->buffers);
fgdev->buffers = NULL;
fgdev->num_buffers = 0;
} }
/* Caller should hold buffer lock */
static void dequeue_all_buffers(struct b3dfg_dev *fgdev) static void dequeue_all_buffers(struct b3dfg_dev *fgdev)
{ {
int i; int i;
for (i = 0; i < fgdev->num_buffers; i++) { for (i = 0; i < b3dfg_nbuf; i++) {
struct b3dfg_buffer *buf = &fgdev->buffers[i]; struct b3dfg_buffer *buf = &fgdev->buffers[i];
buf->state = B3DFG_BUFFER_POLLED; buf->state = B3DFG_BUFFER_POLLED;
list_del_init(&buf->list); list_del_init(&buf->list);
} }
} }
/* initialize a buffer: allocate its frames, set default values */
static void init_buffer(struct b3dfg_dev *fgdev, struct b3dfg_buffer *buf,
int idx)
{
unsigned int addr_offset = idx * B3DFG_FRAMES_PER_BUFFER;
int i;
memset(buf, 0, sizeof(struct b3dfg_buffer));
for (i = 0; i < B3DFG_FRAMES_PER_BUFFER; i++)
buf->frame[i] = fgdev->frame_buffer[addr_offset + i];
INIT_LIST_HEAD(&buf->list);
}
/* adjust the number of buffers, growing or shrinking the pool appropriately. */
static int set_num_buffers(struct b3dfg_dev *fgdev, int num_buffers)
{
int i;
struct b3dfg_buffer *buffers;
unsigned long flags;
printk(KERN_INFO PFX "set %d buffers\n", num_buffers);
if (fgdev->transmission_enabled) {
printk(KERN_ERR PFX
"cannot set buffer count while transmission is enabled\n");
return -EBUSY;
}
if (atomic_read(&fgdev->mapping_count) > 0) {
printk(KERN_ERR PFX
"cannot set buffer count while memory mappings are active\n");
return -EBUSY;
}
if (num_buffers > B3DFG_NR_TRIPLET_BUFFERS) {
printk(KERN_ERR PFX "limited to %d triplet buffers\n",
B3DFG_NR_TRIPLET_BUFFERS);
return -E2BIG;
}
spin_lock_irqsave(&fgdev->buffer_lock, flags);
if (num_buffers == fgdev->num_buffers) {
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
return 0;
}
/* free all buffers then allocate new ones */
dequeue_all_buffers(fgdev);
free_all_buffers(fgdev);
/* must unlock to allocate GFP_KERNEL memory */
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
if (num_buffers == 0)
return 0;
buffers = kmalloc(num_buffers * sizeof(struct b3dfg_buffer),
GFP_KERNEL);
if (!buffers)
return -ENOMEM;
for (i = 0; i < num_buffers; i++)
init_buffer(fgdev, &buffers[i], i);
spin_lock_irqsave(&fgdev->buffer_lock, flags);
fgdev->buffers = buffers;
fgdev->num_buffers = num_buffers;
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
return 0;
}
/* queue a buffer to receive data */ /* queue a buffer to receive data */
static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx) static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx)
{ {
struct device *dev = &fgdev->pdev->dev;
struct b3dfg_buffer *buf; struct b3dfg_buffer *buf;
unsigned long flags; unsigned long flags;
int r = 0; int r = 0;
spin_lock_irqsave(&fgdev->buffer_lock, flags); spin_lock_irqsave(&fgdev->buffer_lock, flags);
buf = buffer_from_idx(fgdev, bufidx); if (bufidx < 0 || bufidx >= b3dfg_nbuf) {
if (unlikely(!buf)) {
r = -ENOENT; r = -ENOENT;
goto out; goto out;
} }
buf = &fgdev->buffers[bufidx];
if (unlikely(buf->state == B3DFG_BUFFER_PENDING)) { if (unlikely(buf->state == B3DFG_BUFFER_PENDING)) {
printk(KERN_ERR PFX "buffer %d is already queued", bufidx); dev_dbg(dev, "buffer %d is already queued\n", bufidx);
r = -EINVAL; r = -EINVAL;
goto out; goto out;
} }
...@@ -338,9 +263,11 @@ static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx) ...@@ -338,9 +263,11 @@ static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx)
list_add_tail(&buf->list, &fgdev->buffer_queue); list_add_tail(&buf->list, &fgdev->buffer_queue);
if (fgdev->transmission_enabled && fgdev->triplet_ready) { if (fgdev->transmission_enabled && fgdev->triplet_ready) {
dbg("triplet is ready, so pushing immediately\n"); dev_dbg(dev, "triplet is ready, pushing immediately\n");
fgdev->triplet_ready = 0; fgdev->triplet_ready = 0;
setup_frame_transfer(fgdev, buf, 0, 0); r = setup_frame_transfer(fgdev, buf, 0);
if (r)
dev_err(dev, "unable to map DMA buffer\n");
} }
out: out:
...@@ -352,139 +279,159 @@ static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx) ...@@ -352,139 +279,159 @@ static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx)
* 0 otherwise */ * 0 otherwise */
static int poll_buffer(struct b3dfg_dev *fgdev, void __user *arg) static int poll_buffer(struct b3dfg_dev *fgdev, void __user *arg)
{ {
struct device *dev = &fgdev->pdev->dev;
struct b3dfg_poll p; struct b3dfg_poll p;
struct b3dfg_buffer *buf; struct b3dfg_buffer *buf;
unsigned long flags; unsigned long flags;
int r = 1; int r = 1;
int arg_out = 0;
if (copy_from_user(&p, arg, sizeof(p))) if (copy_from_user(&p, arg, sizeof(p)))
return -EFAULT; return -EFAULT;
if (unlikely(!fgdev->transmission_enabled)) { if (unlikely(!fgdev->transmission_enabled)) {
printk(KERN_ERR PFX dev_dbg(dev, "cannot poll, transmission disabled\n");
"cannot poll buffers when transmission is disabled\n");
return -EINVAL; return -EINVAL;
} }
spin_lock_irqsave(&fgdev->buffer_lock, flags); if (p.buffer_idx < 0 || p.buffer_idx >= b3dfg_nbuf)
buf = buffer_from_idx(fgdev, p.buffer_idx); return -ENOENT;
if (unlikely(!buf)) {
r = -ENOENT;
goto out;
}
if (buf->state != B3DFG_BUFFER_POPULATED) { buf = &fgdev->buffers[p.buffer_idx];
r = 0;
goto out; spin_lock_irqsave(&fgdev->buffer_lock, flags);
}
if (likely(buf->state == B3DFG_BUFFER_POPULATED)) { if (likely(buf->state == B3DFG_BUFFER_POPULATED)) {
arg_out = 1;
buf->state = B3DFG_BUFFER_POLLED; buf->state = B3DFG_BUFFER_POLLED;
/* IRQs already disabled by spin_lock_irqsave above. */
spin_lock(&fgdev->triplets_dropped_lock); spin_lock(&fgdev->triplets_dropped_lock);
p.triplets_dropped = fgdev->triplets_dropped; p.triplets_dropped = fgdev->triplets_dropped;
fgdev->triplets_dropped = 0; fgdev->triplets_dropped = 0;
spin_unlock(&fgdev->triplets_dropped_lock); spin_unlock(&fgdev->triplets_dropped_lock);
if (copy_to_user(arg, &p, sizeof(p))) } else {
r = -EFAULT; r = 0;
} }
out:
spin_unlock_irqrestore(&fgdev->buffer_lock, flags); spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
if (arg_out && copy_to_user(arg, &p, sizeof(p)))
r = -EFAULT;
return r; return r;
} }
static u8 buffer_state(struct b3dfg_dev *fgdev, struct b3dfg_buffer *buf) static unsigned long get_cstate_change(struct b3dfg_dev *fgdev)
{
unsigned long flags, when;
spin_lock_irqsave(&fgdev->cstate_lock, flags);
when = fgdev->cstate_tstamp;
spin_unlock_irqrestore(&fgdev->cstate_lock, flags);
return when;
}
static int is_event_ready(struct b3dfg_dev *fgdev, struct b3dfg_buffer *buf,
unsigned long when)
{ {
int result;
unsigned long flags; unsigned long flags;
u8 state;
spin_lock_irqsave(&fgdev->buffer_lock, flags); spin_lock_irqsave(&fgdev->buffer_lock, flags);
state = buf->state; spin_lock(&fgdev->cstate_lock);
result = (!fgdev->transmission_enabled ||
buf->state == B3DFG_BUFFER_POPULATED ||
when != fgdev->cstate_tstamp);
spin_unlock(&fgdev->cstate_lock);
spin_unlock_irqrestore(&fgdev->buffer_lock, flags); spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
return state;
return result;
} }
/* sleep until a specific buffer becomes populated */ /* sleep until a specific buffer becomes populated */
static int wait_buffer(struct b3dfg_dev *fgdev, void __user *arg) static int wait_buffer(struct b3dfg_dev *fgdev, void __user *arg)
{ {
struct device *dev = &fgdev->pdev->dev;
struct b3dfg_wait w; struct b3dfg_wait w;
struct b3dfg_buffer *buf; struct b3dfg_buffer *buf;
unsigned long flags; unsigned long flags, when;
int r; int r;
if (copy_from_user(&w, arg, sizeof(w))) if (copy_from_user(&w, arg, sizeof(w)))
return -EFAULT; return -EFAULT;
if (unlikely(!fgdev->transmission_enabled)) { if (!fgdev->transmission_enabled) {
printk(KERN_ERR PFX dev_dbg(dev, "cannot wait, transmission disabled\n");
"cannot wait on buffers when transmission is disabled\n");
return -EINVAL; return -EINVAL;
} }
if (w.buffer_idx < 0 || w.buffer_idx >= b3dfg_nbuf)
return -ENOENT;
buf = &fgdev->buffers[w.buffer_idx];
spin_lock_irqsave(&fgdev->buffer_lock, flags); spin_lock_irqsave(&fgdev->buffer_lock, flags);
buf = buffer_from_idx(fgdev, w.buffer_idx);
if (unlikely(!buf)) {
r = -ENOENT;
goto out;
}
if (buf->state == B3DFG_BUFFER_POPULATED) { if (buf->state == B3DFG_BUFFER_POPULATED) {
r = 0; r = w.timeout;
goto out_triplets_dropped; goto out_triplets_dropped;
} }
spin_unlock_irqrestore(&fgdev->buffer_lock, flags); spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
/* FIXME: what prevents the buffer going away at this time? */
when = get_cstate_change(fgdev);
if (w.timeout > 0) { if (w.timeout > 0) {
r = wait_event_interruptible_timeout(fgdev->buffer_waitqueue, r = wait_event_interruptible_timeout(fgdev->buffer_waitqueue,
buffer_state(fgdev, buf) == B3DFG_BUFFER_POPULATED, is_event_ready(fgdev, buf, when),
(w.timeout * HZ) / 1000); (w.timeout * HZ) / 1000);
if (unlikely(r < 0)) if (unlikely(r < 0))
return r; goto out;
else if (unlikely(buffer_state(fgdev, buf)
!= B3DFG_BUFFER_POPULATED))
return -ETIMEDOUT;
w.timeout = r * 1000 / HZ; w.timeout = r * 1000 / HZ;
} else { } else {
r = wait_event_interruptible(fgdev->buffer_waitqueue, r = wait_event_interruptible(fgdev->buffer_waitqueue,
buffer_state(fgdev, buf) == B3DFG_BUFFER_POPULATED); is_event_ready(fgdev, buf, when));
if (unlikely(r))
return -ERESTARTSYS; if (unlikely(r)) {
r = -ERESTARTSYS;
goto out;
}
}
/* TODO: Inform the user via field(s) in w? */
if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev)) {
r = -EINVAL;
goto out;
} }
spin_lock_irqsave(&fgdev->buffer_lock, flags); spin_lock_irqsave(&fgdev->buffer_lock, flags);
/* FIXME: rediscover buffer? it might have changed during the unlocked
* time */ if (buf->state != B3DFG_BUFFER_POPULATED) {
r = -ETIMEDOUT;
goto out_unlock;
}
buf->state = B3DFG_BUFFER_POLLED; buf->state = B3DFG_BUFFER_POLLED;
out_triplets_dropped: out_triplets_dropped:
/* IRQs already disabled by spin_lock_irqsave above. */
spin_lock(&fgdev->triplets_dropped_lock); spin_lock(&fgdev->triplets_dropped_lock);
w.triplets_dropped = fgdev->triplets_dropped; w.triplets_dropped = fgdev->triplets_dropped;
fgdev->triplets_dropped = 0; fgdev->triplets_dropped = 0;
spin_unlock(&fgdev->triplets_dropped_lock); spin_unlock(&fgdev->triplets_dropped_lock);
out_unlock:
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
if (copy_to_user(arg, &w, sizeof(w))) if (copy_to_user(arg, &w, sizeof(w)))
r = -EFAULT; r = -EFAULT;
out: out:
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
return r; return r;
} }
/**** virtual memory mapping ****/ /* mmap page fault handler */
static void b3dfg_vma_open(struct vm_area_struct *vma)
{
struct b3dfg_dev *fgdev = vma->vm_file->private_data;
atomic_inc(&fgdev->mapping_count);
}
static void b3dfg_vma_close(struct vm_area_struct *vma)
{
struct b3dfg_dev *fgdev = vma->vm_file->private_data;
atomic_dec(&fgdev->mapping_count);
}
/* page fault handler */
static unsigned long b3dfg_vma_nopfn(struct vm_area_struct *vma, static unsigned long b3dfg_vma_nopfn(struct vm_area_struct *vma,
unsigned long address) unsigned long address)
{ {
...@@ -492,7 +439,6 @@ static unsigned long b3dfg_vma_nopfn(struct vm_area_struct *vma, ...@@ -492,7 +439,6 @@ static unsigned long b3dfg_vma_nopfn(struct vm_area_struct *vma,
unsigned long off = address - vma->vm_start; unsigned long off = address - vma->vm_start;
unsigned int frame_size = fgdev->frame_size; unsigned int frame_size = fgdev->frame_size;
unsigned int buf_size = frame_size * B3DFG_FRAMES_PER_BUFFER; unsigned int buf_size = frame_size * B3DFG_FRAMES_PER_BUFFER;
unsigned long flags;
unsigned char *addr; unsigned char *addr;
/* determine which buffer the offset lies within */ /* determine which buffer the offset lies within */
...@@ -505,29 +451,24 @@ static unsigned long b3dfg_vma_nopfn(struct vm_area_struct *vma, ...@@ -505,29 +451,24 @@ static unsigned long b3dfg_vma_nopfn(struct vm_area_struct *vma,
/* and the offset into the frame */ /* and the offset into the frame */
unsigned int frm_off = buf_off % frame_size; unsigned int frm_off = buf_off % frame_size;
spin_lock_irqsave(&fgdev->buffer_lock, flags); if (unlikely(buf_idx >= b3dfg_nbuf))
if (unlikely(buf_idx > fgdev->num_buffers)) {
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
return NOPFN_SIGBUS; return NOPFN_SIGBUS;
}
addr = fgdev->buffers[buf_idx].frame[frm_idx] + frm_off; addr = fgdev->buffers[buf_idx].frame[frm_idx] + frm_off;
vm_insert_pfn(vma, vma->vm_start + off, vm_insert_pfn(vma, vma->vm_start + off,
virt_to_phys(addr) >> PAGE_SHIFT); virt_to_phys(addr) >> PAGE_SHIFT);
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
return NOPFN_REFAULT; return NOPFN_REFAULT;
} }
static struct vm_operations_struct b3dfg_vm_ops = { static struct vm_operations_struct b3dfg_vm_ops = {
.open = b3dfg_vma_open,
.close = b3dfg_vma_close,
.nopfn = b3dfg_vma_nopfn, .nopfn = b3dfg_vma_nopfn,
}; };
static int get_wand_status(struct b3dfg_dev *fgdev, int __user *arg) static int get_wand_status(struct b3dfg_dev *fgdev, int __user *arg)
{ {
u32 wndstat = b3dfg_read32(fgdev, B3D_REG_WAND_STS); u32 wndstat = b3dfg_read32(fgdev, B3D_REG_WAND_STS);
dbg("wand status %x\n", wndstat); dev_dbg(&fgdev->pdev->dev, "wand status %x\n", wndstat);
return __put_user(wndstat & 0x1, arg); return __put_user(wndstat & 0x1, arg);
} }
...@@ -535,47 +476,63 @@ static int enable_transmission(struct b3dfg_dev *fgdev) ...@@ -535,47 +476,63 @@ static int enable_transmission(struct b3dfg_dev *fgdev)
{ {
u16 command; u16 command;
unsigned long flags; unsigned long flags;
struct device *dev = &fgdev->pdev->dev;
dev_dbg(dev, "enable transmission\n");
printk(KERN_INFO PFX "enable transmission\n"); /* check the cable is plugged in. */
if (!b3dfg_read32(fgdev, B3D_REG_WAND_STS)) {
dev_dbg(dev, "cannot start transmission without wand\n");
return -EINVAL;
}
/* check we're a bus master */ /*
* Check we're a bus master.
* TODO: I think we can remove this having added the pci_set_master call
*/
pci_read_config_word(fgdev->pdev, PCI_COMMAND, &command); pci_read_config_word(fgdev->pdev, PCI_COMMAND, &command);
if (!(command & PCI_COMMAND_MASTER)) { if (!(command & PCI_COMMAND_MASTER)) {
printk(KERN_ERR PFX "not a bus master, force-enabling\n"); dev_err(dev, "not a bus master, force-enabling\n");
/* FIXME: why did we lose it in the first place? */
pci_write_config_word(fgdev->pdev, PCI_COMMAND, pci_write_config_word(fgdev->pdev, PCI_COMMAND,
command | PCI_COMMAND_MASTER); command | PCI_COMMAND_MASTER);
} }
spin_lock_irqsave(&fgdev->buffer_lock, flags); spin_lock_irqsave(&fgdev->buffer_lock, flags);
if (fgdev->num_buffers == 0) {
/* Handle racing enable_transmission calls. */
if (fgdev->transmission_enabled) {
spin_unlock_irqrestore(&fgdev->buffer_lock, flags); spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
printk(KERN_ERR PFX "cannot start transmission to 0 buffers\n"); goto out;
return -EINVAL;
} }
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
spin_lock_irqsave(&fgdev->triplets_dropped_lock, flags); spin_lock(&fgdev->triplets_dropped_lock);
fgdev->triplets_dropped = 0; fgdev->triplets_dropped = 0;
spin_unlock_irqrestore(&fgdev->triplets_dropped_lock, flags); spin_unlock(&fgdev->triplets_dropped_lock);
fgdev->triplet_ready = 0; fgdev->triplet_ready = 0;
fgdev->transmission_enabled = 1;
fgdev->cur_dma_frame_idx = -1; fgdev->cur_dma_frame_idx = -1;
b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 1); fgdev->transmission_enabled = 1;
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
/* Enable DMA and cable status interrupts. */
b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0x03);
out:
return 0; return 0;
} }
static void disable_transmission(struct b3dfg_dev *fgdev) static void disable_transmission(struct b3dfg_dev *fgdev)
{ {
struct device *dev = &fgdev->pdev->dev;
unsigned long flags; unsigned long flags;
u32 tmp; u32 tmp;
printk(KERN_INFO PFX "disable transmission\n"); dev_dbg(dev, "disable transmission\n");
/* guarantee that no more interrupts will be serviced */ /* guarantee that no more interrupts will be serviced */
spin_lock_irqsave(&fgdev->buffer_lock, flags);
fgdev->transmission_enabled = 0; fgdev->transmission_enabled = 0;
synchronize_irq(fgdev->pdev->irq);
b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0); b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);
...@@ -583,130 +540,238 @@ static void disable_transmission(struct b3dfg_dev *fgdev) ...@@ -583,130 +540,238 @@ static void disable_transmission(struct b3dfg_dev *fgdev)
* hitting ctrl+c and seeing this message is useful for determining * hitting ctrl+c and seeing this message is useful for determining
* the state of the board. */ * the state of the board. */
tmp = b3dfg_read32(fgdev, B3D_REG_DMA_STS); tmp = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
dbg("brontes DMA_STS reads %x after TX stopped\n", tmp); dev_dbg(dev, "DMA_STS reads %x after TX stopped\n", tmp);
spin_lock_irqsave(&fgdev->buffer_lock, flags);
dequeue_all_buffers(fgdev); dequeue_all_buffers(fgdev);
spin_unlock_irqrestore(&fgdev->buffer_lock, flags); spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
wake_up_interruptible(&fgdev->buffer_waitqueue);
} }
static int set_transmission(struct b3dfg_dev *fgdev, int enabled) static int set_transmission(struct b3dfg_dev *fgdev, int enabled)
{ {
int res = 0;
if (enabled && !fgdev->transmission_enabled) if (enabled && !fgdev->transmission_enabled)
return enable_transmission(fgdev); res = enable_transmission(fgdev);
else if (!enabled && fgdev->transmission_enabled) else if (!enabled && fgdev->transmission_enabled)
disable_transmission(fgdev); disable_transmission(fgdev);
return 0;
return res;
}
/* Called in interrupt context. */
static void handle_cstate_unplug(struct b3dfg_dev *fgdev)
{
/* Disable all interrupts. */
b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);
/* Stop transmission. */
spin_lock(&fgdev->buffer_lock);
fgdev->transmission_enabled = 0;
fgdev->cur_dma_frame_idx = -1;
fgdev->triplet_ready = 0;
if (fgdev->cur_dma_frame_addr) {
pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
fgdev->frame_size, PCI_DMA_FROMDEVICE);
fgdev->cur_dma_frame_addr = 0;
}
dequeue_all_buffers(fgdev);
spin_unlock(&fgdev->buffer_lock);
}
/* Called in interrupt context. */
static void handle_cstate_change(struct b3dfg_dev *fgdev)
{
u32 cstate = b3dfg_read32(fgdev, B3D_REG_WAND_STS);
unsigned long when;
struct device *dev = &fgdev->pdev->dev;
dev_dbg(dev, "cable state change: %u\n", cstate);
/*
* When the wand is unplugged we reset our state. The hardware will
* have done the same internally.
*
* Note we should never see a cable *plugged* event, as interrupts
* should only be enabled when transmitting, which requires the cable
* to be plugged. If we do see one it probably means the cable has been
* unplugged and re-plugged very rapidly. Possibly because it has a
* broken wire and is momentarily losing contact.
*
* TODO: At the moment if you plug in the cable then enable transmission
* the hardware will raise a couple of spurious interrupts, so
* just ignore them for now.
*
* Once the hardware is fixed we should complain and treat it as an
* unplug. Or at least track how frequently it is happening and do
* so if too many come in.
*/
if (cstate) {
dev_warn(dev, "ignoring unexpected plug event\n");
return;
}
handle_cstate_unplug(fgdev);
/*
* Record cable state change timestamp & wake anyone waiting
* on a cable state change. Be paranoid about ensuring events
* are not missed if we somehow get two interrupts in a jiffy.
*/
spin_lock(&fgdev->cstate_lock);
when = jiffies_64;
if (when <= fgdev->cstate_tstamp)
when = fgdev->cstate_tstamp + 1;
fgdev->cstate_tstamp = when;
wake_up_interruptible(&fgdev->buffer_waitqueue);
spin_unlock(&fgdev->cstate_lock);
}
/* Called with buffer_lock held. */
static void transfer_complete(struct b3dfg_dev *fgdev)
{
struct b3dfg_buffer *buf;
struct device *dev = &fgdev->pdev->dev;
pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
fgdev->frame_size, PCI_DMA_FROMDEVICE);
fgdev->cur_dma_frame_addr = 0;
buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);
if (buf) {
dev_dbg(dev, "handle frame completion\n");
if (fgdev->cur_dma_frame_idx == B3DFG_FRAMES_PER_BUFFER - 1) {
/* last frame of that triplet completed */
dev_dbg(dev, "triplet completed\n");
buf->state = B3DFG_BUFFER_POPULATED;
list_del_init(&buf->list);
wake_up_interruptible(&fgdev->buffer_waitqueue);
}
} else {
dev_err(dev, "got frame but no buffer!\n");
}
}
/*
* Called with buffer_lock held.
*
* Note that idx is the (1-based) *next* frame to be transferred, while
* cur_dma_frame_idx is the (0-based) *last* frame to have been transferred (or
* -1 if none). Thus there should be a difference of 2 between them.
*/
static bool setup_next_frame_transfer(struct b3dfg_dev *fgdev, int idx)
{
struct b3dfg_buffer *buf;
struct device *dev = &fgdev->pdev->dev;
bool need_ack = 1;
dev_dbg(dev, "program DMA transfer for next frame: %d\n", idx);
buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);
if (buf) {
if (idx == fgdev->cur_dma_frame_idx + 2) {
if (setup_frame_transfer(fgdev, buf, idx - 1))
dev_err(dev, "unable to map DMA buffer\n");
need_ack = 0;
} else {
dev_err(dev, "frame mismatch, got %d, expected %d\n",
idx, fgdev->cur_dma_frame_idx + 2);
/* FIXME: handle dropped triplets here */
}
} else {
dev_err(dev, "cannot setup DMA, no buffer\n");
}
return need_ack;
} }
static irqreturn_t b3dfg_intr(int irq, void *dev_id) static irqreturn_t b3dfg_intr(int irq, void *dev_id)
{ {
struct b3dfg_dev *fgdev = dev_id; struct b3dfg_dev *fgdev = dev_id;
struct device *dev; struct device *dev = &fgdev->pdev->dev;
struct b3dfg_buffer *buf = NULL;
unsigned int frame_size;
u32 sts; u32 sts;
u8 dropped; u8 dropped;
int next_trf; bool need_ack = 1;
int need_ack = 1; irqreturn_t res = IRQ_HANDLED;
if (unlikely(!fgdev->transmission_enabled)) { sts = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
printk("ignore interrupt, TX disabled\n"); if (unlikely(sts == 0)) {
/* FIXME should return IRQ_NONE when we are stable */ dev_warn(dev, "ignore interrupt, DMA status is 0\n");
res = IRQ_NONE;
goto out; goto out;
} }
sts = b3dfg_read32(fgdev, B3D_REG_DMA_STS); if (unlikely(!fgdev->transmission_enabled)) {
if (unlikely(sts == 0)) { dev_warn(dev, "ignore interrupt, TX disabled\n");
printk("ignore interrupt, brontes DMA status is 0\n"); res = IRQ_HANDLED;
/* FIXME should return IRQ_NONE when we are stable */
goto out; goto out;
} }
/* Handle dropped frames, as reported by the hardware. */
dropped = (sts >> 8) & 0xff; dropped = (sts >> 8) & 0xff;
dbg(KERN_INFO PFX "got intr, brontes DMASTS=%08x (dropped=%d comp=%d next_trf=%d)\n", sts, dropped, !!(sts & 0x4), sts & 0x3); dev_dbg(dev, "intr: DMA_STS=%08x (drop=%d comp=%d next=%d)\n",
sts, dropped, !!(sts & 0x4), sts & 0x3);
if (unlikely(dropped > 0)) { if (unlikely(dropped > 0)) {
spin_lock(&fgdev->triplets_dropped_lock); spin_lock(&fgdev->triplets_dropped_lock);
fgdev->triplets_dropped += dropped; fgdev->triplets_dropped += dropped;
spin_unlock(&fgdev->triplets_dropped_lock); spin_unlock(&fgdev->triplets_dropped_lock);
} }
dev = &fgdev->pdev->dev; /* Handle a cable state change (i.e. the wand being unplugged). */
frame_size = fgdev->frame_size; if (sts & 0x08) {
handle_cstate_change(fgdev);
goto out;
}
spin_lock(&fgdev->buffer_lock); spin_lock(&fgdev->buffer_lock);
if (unlikely(list_empty(&fgdev->buffer_queue))) { if (unlikely(list_empty(&fgdev->buffer_queue))) {
/* FIXME need more sanity checking here */ /* FIXME need more sanity checking here */
dbg("driver has no buffer ready --> cannot program any more transfers\n"); dev_info(dev, "buffer not ready for next transfer\n");
fgdev->triplet_ready = 1; fgdev->triplet_ready = 1;
goto out_unlock; goto out_unlock;
} }
next_trf = sts & 0x3; /* Has a frame transfer been completed? */
if (sts & 0x4) { if (sts & 0x4) {
u32 tmp; u32 dma_status = b3dfg_read32(fgdev, B3D_REG_EC220_DMA_STS);
/* Check for DMA errors reported by the hardware. */
if (unlikely(dma_status & 0x1)) {
dev_err(dev, "EC220 error: %08x\n", dma_status);
tmp = b3dfg_read32(fgdev, B3D_REG_EC220_DMA_STS);
/* last DMA completed */
if (unlikely(tmp & 0x1)) {
printk(KERN_ERR PFX "EC220 reports error (%08x)\n", tmp);
/* FIXME flesh out error handling */ /* FIXME flesh out error handling */
goto out_unlock; goto out_unlock;
} }
/* Sanity check, we should have a frame index at this point. */
if (unlikely(fgdev->cur_dma_frame_idx == -1)) { if (unlikely(fgdev->cur_dma_frame_idx == -1)) {
printk("ERROR completed but no last idx?\n"); dev_err(dev, "completed but no last idx?\n");
/* FIXME flesh out error handling */ /* FIXME flesh out error handling */
goto out_unlock; goto out_unlock;
} }
dma_unmap_single(dev, fgdev->cur_dma_frame_addr, frame_size,
DMA_FROM_DEVICE); transfer_complete(fgdev);
buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);
if (likely(buf)) {
dbg("handle frame completion\n");
if (fgdev->cur_dma_frame_idx == B3DFG_FRAMES_PER_BUFFER - 1) {
/* last frame of that triplet completed */
dbg("triplet completed\n");
buf->state = B3DFG_BUFFER_POPULATED;
list_del_init(&buf->list);
wake_up_interruptible(&fgdev->buffer_waitqueue);
}
} else {
printk("got frame but no buffer!\n");
}
} }
if (next_trf) { /* Is there another frame transfer pending? */
next_trf--; if (sts & 0x3)
need_ack = setup_next_frame_transfer(fgdev, sts & 0x3);
buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list); else
dbg("program DMA transfer for frame %d\n", next_trf + 1);
if (likely(buf)) {
if (next_trf != fgdev->cur_dma_frame_idx + 1) {
printk("ERROR mismatch, next_trf %d vs cur_dma_frame_idx %d\n",
next_trf, fgdev->cur_dma_frame_idx);
/* FIXME this is where we should handle dropped triplets */
goto out_unlock;
}
setup_frame_transfer(fgdev, buf, next_trf, 1);
need_ack = 0;
} else {
printk("cannot setup next DMA due to no buffer\n");
}
} else {
fgdev->cur_dma_frame_idx = -1; fgdev->cur_dma_frame_idx = -1;
}
out_unlock: out_unlock:
spin_unlock(&fgdev->buffer_lock); spin_unlock(&fgdev->buffer_lock);
out: out:
if (need_ack) { if (need_ack) {
dbg("acknowledging interrupt\n"); dev_dbg(dev, "acknowledging interrupt\n");
b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0x0b); b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0x0b);
} }
return IRQ_HANDLED; return res;
} }
static int b3dfg_open(struct inode *inode, struct file *filp) static int b3dfg_open(struct inode *inode, struct file *filp)
...@@ -714,7 +779,7 @@ static int b3dfg_open(struct inode *inode, struct file *filp) ...@@ -714,7 +779,7 @@ static int b3dfg_open(struct inode *inode, struct file *filp)
struct b3dfg_dev *fgdev = struct b3dfg_dev *fgdev =
container_of(inode->i_cdev, struct b3dfg_dev, chardev); container_of(inode->i_cdev, struct b3dfg_dev, chardev);
printk(KERN_INFO PFX "open\n"); dev_dbg(&fgdev->pdev->dev, "open\n");
filp->private_data = fgdev; filp->private_data = fgdev;
return 0; return 0;
} }
...@@ -722,18 +787,14 @@ static int b3dfg_open(struct inode *inode, struct file *filp) ...@@ -722,18 +787,14 @@ static int b3dfg_open(struct inode *inode, struct file *filp)
static int b3dfg_release(struct inode *inode, struct file *filp) static int b3dfg_release(struct inode *inode, struct file *filp)
{ {
struct b3dfg_dev *fgdev = filp->private_data; struct b3dfg_dev *fgdev = filp->private_data;
printk(KERN_INFO PFX "release\n"); dev_dbg(&fgdev->pdev->dev, "release\n");
set_transmission(fgdev, 0); disable_transmission(fgdev);
return 0;
/* no buffer locking needed, this is serialized */
dequeue_all_buffers(fgdev);
return set_num_buffers(fgdev, 0);
} }
static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{ {
struct b3dfg_dev *fgdev = filp->private_data; struct b3dfg_dev *fgdev = filp->private_data;
int r;
switch (cmd) { switch (cmd) {
case B3DFG_IOCGFRMSZ: case B3DFG_IOCGFRMSZ:
...@@ -741,15 +802,11 @@ static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ...@@ -741,15 +802,11 @@ static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case B3DFG_IOCGWANDSTAT: case B3DFG_IOCGWANDSTAT:
return get_wand_status(fgdev, (int __user *) arg); return get_wand_status(fgdev, (int __user *) arg);
case B3DFG_IOCTNUMBUFS: case B3DFG_IOCTNUMBUFS:
mutex_lock(&fgdev->ioctl_mutex);
r = set_num_buffers(fgdev, (int) arg); /* TODO: Remove once userspace stops using this. */
mutex_unlock(&fgdev->ioctl_mutex); return 0;
return r;
case B3DFG_IOCTTRANS: case B3DFG_IOCTTRANS:
mutex_lock(&fgdev->ioctl_mutex); return set_transmission(fgdev, (int) arg);
r = set_transmission(fgdev, (int) arg);
mutex_unlock(&fgdev->ioctl_mutex);
return r;
case B3DFG_IOCTQUEUEBUF: case B3DFG_IOCTQUEUEBUF:
return queue_buffer(fgdev, (int) arg); return queue_buffer(fgdev, (int) arg);
case B3DFG_IOCTPOLLBUF: case B3DFG_IOCTPOLLBUF:
...@@ -757,7 +814,7 @@ static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ...@@ -757,7 +814,7 @@ static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case B3DFG_IOCTWAITBUF: case B3DFG_IOCTWAITBUF:
return wait_buffer(fgdev, (void __user *) arg); return wait_buffer(fgdev, (void __user *) arg);
default: default:
printk(KERN_ERR PFX "unrecognised ioctl %x\n", cmd); dev_dbg(&fgdev->pdev->dev, "unrecognised ioctl %x\n", cmd);
return -EINVAL; return -EINVAL;
} }
} }
...@@ -765,32 +822,26 @@ static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ...@@ -765,32 +822,26 @@ static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static unsigned int b3dfg_poll(struct file *filp, poll_table *poll_table) static unsigned int b3dfg_poll(struct file *filp, poll_table *poll_table)
{ {
struct b3dfg_dev *fgdev = filp->private_data; struct b3dfg_dev *fgdev = filp->private_data;
unsigned long flags; unsigned long flags, when;
int i; int i;
int r = 0; int r = 0;
/* don't let the user mess with buffer allocations etc. while polling */ when = get_cstate_change(fgdev);
mutex_lock(&fgdev->ioctl_mutex);
if (unlikely(!fgdev->transmission_enabled)) {
printk(KERN_ERR PFX "cannot poll() when transmission is disabled\n");
r = POLLERR;
goto out;
}
poll_wait(filp, &fgdev->buffer_waitqueue, poll_table); poll_wait(filp, &fgdev->buffer_waitqueue, poll_table);
spin_lock_irqsave(&fgdev->buffer_lock, flags); spin_lock_irqsave(&fgdev->buffer_lock, flags);
for (i = 0; i < fgdev->num_buffers; i++) { for (i = 0; i < b3dfg_nbuf; i++) {
if (fgdev->buffers[i].state == B3DFG_BUFFER_POPULATED) { if (fgdev->buffers[i].state == B3DFG_BUFFER_POPULATED) {
r = POLLIN | POLLRDNORM; r = POLLIN | POLLRDNORM;
goto out_buffer_unlock; break;
} }
} }
out_buffer_unlock:
spin_unlock_irqrestore(&fgdev->buffer_lock, flags); spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
out:
mutex_unlock(&fgdev->ioctl_mutex); /* TODO: Confirm this is how we want to communicate the change. */
if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev))
r = POLLERR;
return r; return r;
} }
...@@ -799,35 +850,18 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma) ...@@ -799,35 +850,18 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
struct b3dfg_dev *fgdev = filp->private_data; struct b3dfg_dev *fgdev = filp->private_data;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long vsize = vma->vm_end - vma->vm_start; unsigned long vsize = vma->vm_end - vma->vm_start;
unsigned long bufdatalen; unsigned long bufdatalen = b3dfg_nbuf * fgdev->frame_size * 3;
unsigned long psize; unsigned long psize = bufdatalen - offset;
unsigned long flags;
int r = 0; int r = 0;
/* don't let user mess with buffer allocations during mmap */ if (vsize <= psize) {
mutex_lock(&fgdev->ioctl_mutex); vma->vm_flags |= VM_IO | VM_RESERVED | VM_CAN_NONLINEAR |
VM_PFNMAP;
spin_lock_irqsave(&fgdev->buffer_lock, flags); vma->vm_ops = &b3dfg_vm_ops;
bufdatalen = fgdev->num_buffers * fgdev->frame_size * 3; } else {
psize = bufdatalen - offset;
if (fgdev->num_buffers == 0) {
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
r = -ENOENT;
goto out;
}
spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
if (vsize > psize) {
r = -EINVAL; r = -EINVAL;
goto out;
} }
vma->vm_flags |= VM_IO | VM_RESERVED | VM_CAN_NONLINEAR | VM_PFNMAP;
vma->vm_ops = &b3dfg_vm_ops;
b3dfg_vma_open(vma);
out:
mutex_unlock(&fgdev->ioctl_mutex);
return r; return r;
} }
...@@ -842,43 +876,55 @@ static struct file_operations b3dfg_fops = { ...@@ -842,43 +876,55 @@ static struct file_operations b3dfg_fops = {
static void free_all_frame_buffers(struct b3dfg_dev *fgdev) static void free_all_frame_buffers(struct b3dfg_dev *fgdev)
{ {
int i; int i, j;
for (i = 0; i < B3DFG_NR_FRAME_BUFFERS; i++) for (i = 0; i < b3dfg_nbuf; i++)
kfree(fgdev->frame_buffer[i]); for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++)
kfree(fgdev->buffers[i].frame[j]);
kfree(fgdev->buffers);
} }
/* initialize device and any data structures. called before any interrupts /* initialize device and any data structures. called before any interrupts
* are enabled. */ * are enabled. */
static int b3dfg_init_dev(struct b3dfg_dev *fgdev) static int b3dfg_init_dev(struct b3dfg_dev *fgdev)
{ {
int i; int i, j;
u32 frm_size = b3dfg_read32(fgdev, B3D_REG_FRM_SIZE); u32 frm_size = b3dfg_read32(fgdev, B3D_REG_FRM_SIZE);
/* disable interrupts. in abnormal circumstances (e.g. after a crash) the /* Disable interrupts. In abnormal circumstances (e.g. after a crash)
* board may still be transmitting from the previous session. if we ensure * the board may still be transmitting from the previous session. If we
* that interrupts are disabled before we later enable them, we are sure * ensure that interrupts are disabled before we later enable them, we
* to capture a triplet from the start, rather than starting from frame * are sure to capture a triplet from the start, rather than starting
* 2 or 3. disabling interrupts causes the FG to throw away all buffered * from frame 2 or 3. Disabling interrupts causes the FG to throw away
* data and stop buffering more until interrupts are enabled again. */ * all buffered data and stop buffering more until interrupts are
* enabled again.
*/
b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0); b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);
fgdev->frame_size = frm_size * 4096; fgdev->frame_size = frm_size * 4096;
for (i = 0; i < B3DFG_NR_FRAME_BUFFERS; i++) { fgdev->buffers = kzalloc(sizeof(struct b3dfg_buffer) * b3dfg_nbuf,
fgdev->frame_buffer[i] = kmalloc(fgdev->frame_size, GFP_KERNEL); GFP_KERNEL);
if (!fgdev->frame_buffer[i]) if (!fgdev->buffers)
goto err_no_mem; goto err_no_buf;
for (i = 0; i < b3dfg_nbuf; i++) {
struct b3dfg_buffer *buf = &fgdev->buffers[i];
for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++) {
buf->frame[j] = kmalloc(fgdev->frame_size, GFP_KERNEL);
if (!buf->frame[j])
goto err_no_mem;
}
INIT_LIST_HEAD(&buf->list);
} }
INIT_LIST_HEAD(&fgdev->buffer_queue); INIT_LIST_HEAD(&fgdev->buffer_queue);
init_waitqueue_head(&fgdev->buffer_waitqueue); init_waitqueue_head(&fgdev->buffer_waitqueue);
spin_lock_init(&fgdev->buffer_lock); spin_lock_init(&fgdev->buffer_lock);
spin_lock_init(&fgdev->cstate_lock);
spin_lock_init(&fgdev->triplets_dropped_lock); spin_lock_init(&fgdev->triplets_dropped_lock);
atomic_set(&fgdev->mapping_count, 0);
mutex_init(&fgdev->ioctl_mutex);
return 0; return 0;
err_no_mem: err_no_mem:
free_all_frame_buffers(fgdev); free_all_frame_buffers(fgdev);
err_no_buf:
return -ENOMEM; return -ENOMEM;
} }
...@@ -900,84 +946,112 @@ static int __devinit b3dfg_probe(struct pci_dev *pdev, ...@@ -900,84 +946,112 @@ static int __devinit b3dfg_probe(struct pci_dev *pdev,
int r = 0; int r = 0;
int minor = get_free_minor(); int minor = get_free_minor();
dev_t devno = MKDEV(MAJOR(b3dfg_devt), minor); dev_t devno = MKDEV(MAJOR(b3dfg_devt), minor);
unsigned long res_len;
resource_size_t res_base;
if (fgdev == NULL) if (fgdev == NULL)
return -ENOMEM; return -ENOMEM;
if (minor < 0) { if (minor < 0) {
printk(KERN_ERR PFX "too many devices found!\n"); dev_err(&pdev->dev, "too many devices found!\n");
return -EIO; r = -EIO;
goto err_free;
} }
b3dfg_devices[minor] = 1; b3dfg_devices[minor] = 1;
printk(KERN_INFO PFX "probe device at %s with IRQ %d\n", dev_info(&pdev->dev, "probe device with IRQ %d\n", pdev->irq);
pci_name(pdev), pdev->irq);
cdev_init(&fgdev->chardev, &b3dfg_fops); cdev_init(&fgdev->chardev, &b3dfg_fops);
fgdev->chardev.owner = THIS_MODULE; fgdev->chardev.owner = THIS_MODULE;
r = cdev_add(&fgdev->chardev, devno, 1); r = cdev_add(&fgdev->chardev, devno, 1);
if (r) if (r) {
goto err1; dev_err(&pdev->dev, "cannot add char device\n");
goto err_release_minor;
}
fgdev->classdev = class_device_create(b3dfg_class, NULL, devno, &pdev->dev, fgdev->classdev = class_device_create(b3dfg_class, NULL, devno,
DRIVER_NAME "%d", minor); &pdev->dev, DRIVER_NAME "%d",
minor);
if (IS_ERR(fgdev->classdev)) { if (IS_ERR(fgdev->classdev)) {
dev_err(&pdev->dev, "cannot create class device\n");
r = PTR_ERR(fgdev->classdev); r = PTR_ERR(fgdev->classdev);
goto err2; goto err_del_cdev;
} }
r = pci_enable_device(pdev); r = pci_enable_device(pdev);
if (r) if (r) {
goto err3; dev_err(&pdev->dev, "cannot enable PCI device\n");
goto err_dev_unreg;
}
if (pci_resource_len(pdev, B3DFG_BAR_REGS) != B3DFG_REGS_LENGTH) { res_len = pci_resource_len(pdev, B3DFG_BAR_REGS);
printk(KERN_ERR PFX "invalid register resource size\n"); if (res_len != B3DFG_REGS_LENGTH) {
goto err4; dev_err(&pdev->dev, "invalid register resource size\n");
r = -EIO;
goto err_disable;
} }
if (pci_resource_flags(pdev, B3DFG_BAR_REGS) != IORESOURCE_MEM) { if (pci_resource_flags(pdev, B3DFG_BAR_REGS) != IORESOURCE_MEM) {
printk(KERN_ERR PFX "invalid resource flags"); dev_err(&pdev->dev, "invalid resource flags\n");
goto err4; r = -EIO;
goto err_disable;
} }
fgdev->regs = ioremap_nocache(pci_resource_start(pdev, B3DFG_BAR_REGS), r = pci_request_regions(pdev, DRIVER_NAME);
B3DFG_REGS_LENGTH); if (r) {
dev_err(&pdev->dev, "cannot obtain PCI resources\n");
goto err_disable;
}
pci_set_master(pdev);
r = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (r) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto err_free_res;
}
res_base = pci_resource_start(pdev, B3DFG_BAR_REGS);
fgdev->regs = ioremap_nocache(res_base, res_len);
if (!fgdev->regs) { if (!fgdev->regs) {
printk(KERN_ERR PFX "regs ioremap failed\n"); dev_err(&pdev->dev, "regs ioremap failed\n");
goto err4; r = -EIO;
goto err_free_res;
} }
fgdev->pdev = pdev; fgdev->pdev = pdev;
pci_set_drvdata(pdev, fgdev); pci_set_drvdata(pdev, fgdev);
r = b3dfg_init_dev(fgdev); r = b3dfg_init_dev(fgdev);
if (r < 0) { if (r < 0) {
printk(KERN_ERR PFX "failed to initalize device\n"); dev_err(&pdev->dev, "failed to initalize device\n");
goto err5; goto err_unmap;
} }
r = request_irq(pdev->irq, b3dfg_intr, IRQF_SHARED, DRIVER_NAME, fgdev); r = request_irq(pdev->irq, b3dfg_intr, IRQF_SHARED, DRIVER_NAME, fgdev);
if (r) { if (r) {
printk(KERN_ERR PFX "couldn't request irq %d\n", pdev->irq); dev_err(&pdev->dev, "couldn't request irq %d\n", pdev->irq);
goto err6; goto err_free_bufs;
} }
return 0; return 0;
err6: err_free_bufs:
free_all_frame_buffers(fgdev); free_all_frame_buffers(fgdev);
err5: err_unmap:
iounmap(fgdev->regs); iounmap(fgdev->regs);
err4: err_free_res:
pci_release_regions(pdev);
err_disable:
pci_disable_device(pdev); pci_disable_device(pdev);
err3: err_dev_unreg:
class_device_unregister(fgdev->classdev); class_device_unregister(fgdev->classdev);
err2: err_del_cdev:
cdev_del(&fgdev->chardev); cdev_del(&fgdev->chardev);
err1: err_release_minor:
b3dfg_devices[minor] = 0;
err_free:
kfree(fgdev); kfree(fgdev);
if (minor >= 0)
b3dfg_devices[minor] = 0;
return r; return r;
} }
...@@ -986,10 +1060,11 @@ static void __devexit b3dfg_remove(struct pci_dev *pdev) ...@@ -986,10 +1060,11 @@ static void __devexit b3dfg_remove(struct pci_dev *pdev)
struct b3dfg_dev *fgdev = pci_get_drvdata(pdev); struct b3dfg_dev *fgdev = pci_get_drvdata(pdev);
unsigned int minor = MINOR(fgdev->chardev.dev); unsigned int minor = MINOR(fgdev->chardev.dev);
printk(KERN_INFO PFX "remove\n"); dev_dbg(&pdev->dev, "remove\n");
free_irq(pdev->irq, fgdev); free_irq(pdev->irq, fgdev);
iounmap(fgdev->regs); iounmap(fgdev->regs);
pci_release_regions(pdev);
pci_disable_device(pdev); pci_disable_device(pdev);
class_device_unregister(fgdev->classdev); class_device_unregister(fgdev->classdev);
cdev_del(&fgdev->chardev); cdev_del(&fgdev->chardev);
...@@ -1002,14 +1077,20 @@ static struct pci_driver b3dfg_driver = { ...@@ -1002,14 +1077,20 @@ static struct pci_driver b3dfg_driver = {
.name = DRIVER_NAME, .name = DRIVER_NAME,
.id_table = b3dfg_ids, .id_table = b3dfg_ids,
.probe = b3dfg_probe, .probe = b3dfg_probe,
.remove = b3dfg_remove, .remove = __devexit_p(b3dfg_remove),
}; };
static int __init b3dfg_module_init(void) static int __init b3dfg_module_init(void)
{ {
int r; int r;
printk(KERN_INFO PFX "loaded\n"); if (b3dfg_nbuf < 2) {
printk(KERN_ERR DRIVER_NAME
": buffer_count is out of range (must be >= 2)");
return -EINVAL;
}
printk(KERN_INFO DRIVER_NAME ": loaded\n");
b3dfg_class = class_create(THIS_MODULE, DRIVER_NAME); b3dfg_class = class_create(THIS_MODULE, DRIVER_NAME);
if (IS_ERR(b3dfg_class)) if (IS_ERR(b3dfg_class))
...@@ -1034,7 +1115,7 @@ static int __init b3dfg_module_init(void) ...@@ -1034,7 +1115,7 @@ static int __init b3dfg_module_init(void)
static void __exit b3dfg_module_exit(void) static void __exit b3dfg_module_exit(void)
{ {
printk(KERN_INFO PFX "unloaded\n"); printk(KERN_INFO DRIVER_NAME ": unloaded\n");
pci_unregister_driver(&b3dfg_driver); pci_unregister_driver(&b3dfg_driver);
unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS); unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS);
class_destroy(b3dfg_class); class_destroy(b3dfg_class);
...@@ -1042,8 +1123,3 @@ static void __exit b3dfg_module_exit(void) ...@@ -1042,8 +1123,3 @@ static void __exit b3dfg_module_exit(void)
module_init(b3dfg_module_init); module_init(b3dfg_module_init);
module_exit(b3dfg_module_exit); module_exit(b3dfg_module_exit);
MODULE_AUTHOR("Daniel Drake <ddrake@brontes3d.com>");
MODULE_DESCRIPTION("Brontes frame grabber driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, b3dfg_ids);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment