Commit 37d803ed authored by Hans de Goede, committed by Mauro Carvalho Chehab

[media] staging-usbvideo: remove

With the new gspca v4l2 vicam driver, there is no longer any reason to keep
the old v4l1 usbvideo vicam driver around, and since that was the last
driver using the usbvideo framework, the old usbvideo framework itself can
go too.
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
parent 76fafe78
@@ -57,8 +57,6 @@ source "drivers/staging/dabusb/Kconfig"
 source "drivers/staging/se401/Kconfig"
-source "drivers/staging/usbvideo/Kconfig"
 source "drivers/staging/usbip/Kconfig"
 source "drivers/staging/winbond/Kconfig"
@@ -10,7 +10,6 @@ obj-$(CONFIG_VIDEO_CX25821)	+= cx25821/
 obj-$(CONFIG_VIDEO_TM6000)	+= tm6000/
 obj-$(CONFIG_DVB_CXD2099)	+= cxd2099/
 obj-$(CONFIG_USB_DABUSB)	+= dabusb/
-obj-$(CONFIG_USB_VICAM)		+= usbvideo/
 obj-$(CONFIG_USB_SE401)		+= se401/
 obj-$(CONFIG_LIRC_STAGING)	+= lirc/
 obj-$(CONFIG_USB_IP_COMMON)	+= usbip/
config VIDEO_USBVIDEO
	tristate

config USB_VICAM
	tristate "USB 3com HomeConnect (aka vicam) support (DEPRECATED)"
	depends on VIDEO_DEV && VIDEO_V4L2_COMMON && USB
	select VIDEO_USBVIDEO
	---help---
	  Say Y here if you have a 3com HomeConnect camera (vicam).

	  This driver uses the deprecated V4L1 API and will be removed in
	  2.6.39, unless someone converts it to the V4L2 API.

	  To compile this driver as a module, choose M here: the
	  module will be called vicam.

obj-$(CONFIG_VIDEO_USBVIDEO) += usbvideo.o
obj-$(CONFIG_USB_VICAM) += vicam.o

This is an obsolete driver for some old webcams that still use the V4L1 API.
As V4L1 support is being removed from the kernel, the driver will be removed
in 2.6.39 if nobody takes care of it.
Please send patches to linux-media@vger.kernel.org
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include "usbvideo.h"
#if defined(MAP_NR)
#define virt_to_page(v) MAP_NR(v) /* Kernels 2.2.x */
#endif
static int video_nr = -1;
module_param(video_nr, int, 0);
/*
* Local prototypes.
*/
static void usbvideo_Disconnect(struct usb_interface *intf);
static void usbvideo_CameraRelease(struct uvd *uvd);
static long usbvideo_v4l_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
static int usbvideo_v4l_mmap(struct file *file, struct vm_area_struct *vma);
static int usbvideo_v4l_open(struct file *file);
static ssize_t usbvideo_v4l_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static int usbvideo_v4l_close(struct file *file);
static int usbvideo_StartDataPump(struct uvd *uvd);
static void usbvideo_StopDataPump(struct uvd *uvd);
static int usbvideo_GetFrame(struct uvd *uvd, int frameNum);
static int usbvideo_NewFrame(struct uvd *uvd, int framenum);
static void usbvideo_SoftwareContrastAdjustment(struct uvd *uvd,
struct usbvideo_frame *frame);
/*******************************/
/* Memory management functions */
/*******************************/
static void *usbvideo_rvmalloc(unsigned long size)
{
void *mem;
unsigned long adr;
size = PAGE_ALIGN(size);
mem = vmalloc_32(size);
if (!mem)
return NULL;
memset(mem, 0, size); /* Clear the ram out, no junk to the user */
adr = (unsigned long) mem;
while (size > 0) {
SetPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
return mem;
}
static void usbvideo_rvfree(void *mem, unsigned long size)
{
unsigned long adr;
if (!mem)
return;
adr = (unsigned long) mem;
while ((long) size > 0) {
ClearPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
vfree(mem);
}
static void RingQueue_Initialize(struct RingQueue *rq)
{
assert(rq != NULL);
init_waitqueue_head(&rq->wqh);
}
static void RingQueue_Allocate(struct RingQueue *rq, int rqLen)
{
/* Make sure the requested size is a power of 2 and
round up if necessary. This allows index wrapping
using masks rather than modulo */
int i = 1;
assert(rq != NULL);
assert(rqLen > 0);
while(rqLen >> i)
i++;
if(rqLen != 1 << (i-1))
rqLen = 1 << i;
rq->length = rqLen;
rq->ri = rq->wi = 0;
rq->queue = usbvideo_rvmalloc(rq->length);
assert(rq->queue != NULL);
}
static int RingQueue_IsAllocated(const struct RingQueue *rq)
{
if (rq == NULL)
return 0;
return (rq->queue != NULL) && (rq->length > 0);
}
static void RingQueue_Free(struct RingQueue *rq)
{
assert(rq != NULL);
if (RingQueue_IsAllocated(rq)) {
usbvideo_rvfree(rq->queue, rq->length);
rq->queue = NULL;
rq->length = 0;
}
}
int RingQueue_Dequeue(struct RingQueue *rq, unsigned char *dst, int len)
{
int rql, toread;
assert(rq != NULL);
assert(dst != NULL);
rql = RingQueue_GetLength(rq);
if(!rql)
return 0;
/* Clip requested length to available data */
if(len > rql)
len = rql;
toread = len;
if(rq->ri > rq->wi) {
/* Read data from tail */
int read = (toread < (rq->length - rq->ri)) ? toread : rq->length - rq->ri;
memcpy(dst, rq->queue + rq->ri, read);
toread -= read;
dst += read;
rq->ri = (rq->ri + read) & (rq->length-1);
}
if(toread) {
/* Read data from head */
memcpy(dst, rq->queue + rq->ri, toread);
rq->ri = (rq->ri + toread) & (rq->length-1);
}
return len;
}
EXPORT_SYMBOL(RingQueue_Dequeue);
int RingQueue_Enqueue(struct RingQueue *rq, const unsigned char *cdata, int n)
{
int enqueued = 0;
assert(rq != NULL);
assert(cdata != NULL);
assert(rq->length > 0);
while (n > 0) {
int m, q_avail;
/* Calculate the largest chunk that fits the tail of the ring */
q_avail = rq->length - rq->wi;
if (q_avail <= 0) {
rq->wi = 0;
q_avail = rq->length;
}
m = n;
assert(q_avail > 0);
if (m > q_avail)
m = q_avail;
memcpy(rq->queue + rq->wi, cdata, m);
RING_QUEUE_ADVANCE_INDEX(rq, wi, m);
cdata += m;
enqueued += m;
n -= m;
}
return enqueued;
}
EXPORT_SYMBOL(RingQueue_Enqueue);
static void RingQueue_InterruptibleSleepOn(struct RingQueue *rq)
{
assert(rq != NULL);
interruptible_sleep_on(&rq->wqh);
}
void RingQueue_WakeUpInterruptible(struct RingQueue *rq)
{
assert(rq != NULL);
if (waitqueue_active(&rq->wqh))
wake_up_interruptible(&rq->wqh);
}
EXPORT_SYMBOL(RingQueue_WakeUpInterruptible);
void RingQueue_Flush(struct RingQueue *rq)
{
assert(rq != NULL);
rq->ri = 0;
rq->wi = 0;
}
EXPORT_SYMBOL(RingQueue_Flush);
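/*
 * Illustrative sketch only (not part of the original file): a minimal
 * consumer in the style of a minidriver's processData callback, draining
 * the ring queue that the isochronous completion handler fills.  The
 * mycam_ name is hypothetical; usbvideo_CollectRawData() further below
 * does much the same job.
 */
static void mycam_drain_queue(struct uvd *uvd, struct usbvideo_frame *frame)
{
	int avail = RingQueue_GetLength(&uvd->dp);
	int room = uvd->max_frame_size - frame->seqRead_Length;
	int n = (avail < room) ? avail : room;

	if (n > 0) {
		RingQueue_Dequeue(&uvd->dp,
				  frame->data + frame->seqRead_Length, n);
		frame->seqRead_Length += n;
	}
}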
/*
* usbvideo_VideosizeToString()
*
* This procedure converts given videosize value to readable string.
*
* History:
* 07-Aug-2000 Created.
* 19-Oct-2000 Reworked for usbvideo module.
*/
static void usbvideo_VideosizeToString(char *buf, int bufLen, videosize_t vs)
{
char tmp[40];
int n;
n = 1 + sprintf(tmp, "%ldx%ld", VIDEOSIZE_X(vs), VIDEOSIZE_Y(vs));
assert(n < sizeof(tmp));
if ((buf == NULL) || (bufLen < n))
err("usbvideo_VideosizeToString: buffer is too small.");
else
memmove(buf, tmp, n);
}
/*
* usbvideo_OverlayChar()
*
* History:
* 01-Feb-2000 Created.
*/
static void usbvideo_OverlayChar(struct uvd *uvd, struct usbvideo_frame *frame,
int x, int y, int ch)
{
static const unsigned short digits[16] = {
0xF6DE, /* 0 */
0x2492, /* 1 */
0xE7CE, /* 2 */
0xE79E, /* 3 */
0xB792, /* 4 */
0xF39E, /* 5 */
0xF3DE, /* 6 */
0xF492, /* 7 */
0xF7DE, /* 8 */
0xF79E, /* 9 */
0x77DA, /* a */
0xD75C, /* b */
0xF24E, /* c */
0xD6DC, /* d */
0xF34E, /* e */
0xF348 /* f */
};
unsigned short digit;
int ix, iy;
int value;
if ((uvd == NULL) || (frame == NULL))
return;
value = hex_to_bin(ch);
if (value < 0)
return;
digit = digits[value];
for (iy=0; iy < 5; iy++) {
for (ix=0; ix < 3; ix++) {
if (digit & 0x8000) {
if (uvd->paletteBits & (1L << VIDEO_PALETTE_RGB24)) {
/* TODO */ RGB24_PUTPIXEL(frame, x+ix, y+iy, 0xFF, 0xFF, 0xFF);
}
}
digit = digit << 1;
}
}
}
/*
* usbvideo_OverlayString()
*
* History:
* 01-Feb-2000 Created.
*/
static void usbvideo_OverlayString(struct uvd *uvd, struct usbvideo_frame *frame,
int x, int y, const char *str)
{
while (*str) {
usbvideo_OverlayChar(uvd, frame, x, y, *str);
str++;
x += 4; /* 3 pixels character + 1 space */
}
}
/*
* usbvideo_OverlayStats()
*
* Overlays important debugging information.
*
* History:
* 01-Feb-2000 Created.
*/
static void usbvideo_OverlayStats(struct uvd *uvd, struct usbvideo_frame *frame)
{
const int y_diff = 8;
char tmp[16];
int x = 10, y=10;
long i, j, barLength;
const int qi_x1 = 60, qi_y1 = 10;
const int qi_x2 = VIDEOSIZE_X(frame->request) - 10, qi_h = 10;
/* Call the user callback, see if we may proceed after that */
if (VALID_CALLBACK(uvd, overlayHook)) {
if (GET_CALLBACK(uvd, overlayHook)(uvd, frame) < 0)
return;
}
/*
* We draw a (mostly) hollow rectangle with qi_xxx coordinates.
* Left edge symbolizes the queue index 0; right edge symbolizes
* the full capacity of the queue.
*/
barLength = qi_x2 - qi_x1 - 2;
if ((barLength > 10) && (uvd->paletteBits & (1L << VIDEO_PALETTE_RGB24))) {
/* TODO */ long u_lo, u_hi, q_used;
long m_ri, m_wi, m_lo, m_hi;
/*
* Determine fill zones (used areas of the queue):
* 0 xxxxxxx u_lo ...... uvd->dp.ri xxxxxxxx u_hi ..... uvd->dp.length
*
* if u_lo < 0 then there is no first filler.
*/
q_used = RingQueue_GetLength(&uvd->dp);
if ((uvd->dp.ri + q_used) >= uvd->dp.length) {
u_hi = uvd->dp.length;
u_lo = (q_used + uvd->dp.ri) & (uvd->dp.length-1);
} else {
u_hi = (q_used + uvd->dp.ri);
u_lo = -1;
}
/* Convert byte indices into screen units */
m_ri = qi_x1 + ((barLength * uvd->dp.ri) / uvd->dp.length);
m_wi = qi_x1 + ((barLength * uvd->dp.wi) / uvd->dp.length);
m_lo = (u_lo > 0) ? (qi_x1 + ((barLength * u_lo) / uvd->dp.length)) : -1;
m_hi = qi_x1 + ((barLength * u_hi) / uvd->dp.length);
for (j=qi_y1; j < (qi_y1 + qi_h); j++) {
for (i=qi_x1; i < qi_x2; i++) {
/* Draw border lines */
if ((j == qi_y1) || (j == (qi_y1 + qi_h - 1)) ||
(i == qi_x1) || (i == (qi_x2 - 1))) {
RGB24_PUTPIXEL(frame, i, j, 0xFF, 0xFF, 0xFF);
continue;
}
/* For all other points the Y coordinate does not matter */
if ((i >= m_ri) && (i <= (m_ri + 3))) {
RGB24_PUTPIXEL(frame, i, j, 0x00, 0xFF, 0x00);
} else if ((i >= m_wi) && (i <= (m_wi + 3))) {
RGB24_PUTPIXEL(frame, i, j, 0xFF, 0x00, 0x00);
} else if ((i < m_lo) || ((i > m_ri) && (i < m_hi)))
RGB24_PUTPIXEL(frame, i, j, 0x00, 0x00, 0xFF);
}
}
}
sprintf(tmp, "%8lx", uvd->stats.frame_num);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8lx", uvd->stats.urb_count);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8lx", uvd->stats.urb_length);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8lx", uvd->stats.data_count);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8lx", uvd->stats.header_count);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8lx", uvd->stats.iso_skip_count);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8lx", uvd->stats.iso_err_count);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8x", uvd->vpic.colour);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8x", uvd->vpic.hue);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8x", uvd->vpic.brightness >> 8);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8x", uvd->vpic.contrast >> 12);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
sprintf(tmp, "%8d", uvd->vpic.whiteness >> 8);
usbvideo_OverlayString(uvd, frame, x, y, tmp);
y += y_diff;
}
/*
* usbvideo_ReportStatistics()
*
* This procedure prints packet and transfer statistics.
*
* History:
* 14-Jan-2000 Corrected default multiplier.
*/
static void usbvideo_ReportStatistics(const struct uvd *uvd)
{
if ((uvd != NULL) && (uvd->stats.urb_count > 0)) {
unsigned long allPackets, badPackets, goodPackets, percent;
allPackets = uvd->stats.urb_count * CAMERA_URB_FRAMES;
badPackets = uvd->stats.iso_skip_count + uvd->stats.iso_err_count;
goodPackets = allPackets - badPackets;
/* Calculate percentage wisely, remember integer limits */
assert(allPackets != 0);
if (goodPackets < (((unsigned long)-1)/100))
percent = (100 * goodPackets) / allPackets;
else
percent = goodPackets / (allPackets / 100);
dev_info(&uvd->dev->dev,
"Packet Statistics: Total=%lu. Empty=%lu. Usage=%lu%%\n",
allPackets, badPackets, percent);
if (uvd->iso_packet_len > 0) {
unsigned long allBytes, xferBytes;
char multiplier = ' ';
allBytes = allPackets * uvd->iso_packet_len;
xferBytes = uvd->stats.data_count;
assert(allBytes != 0);
if (xferBytes < (((unsigned long)-1)/100))
percent = (100 * xferBytes) / allBytes;
else
percent = xferBytes / (allBytes / 100);
/* Scale xferBytes for easy reading */
if (xferBytes > 10*1024) {
xferBytes /= 1024;
multiplier = 'K';
if (xferBytes > 10*1024) {
xferBytes /= 1024;
multiplier = 'M';
if (xferBytes > 10*1024) {
xferBytes /= 1024;
multiplier = 'G';
if (xferBytes > 10*1024) {
xferBytes /= 1024;
multiplier = 'T';
}
}
}
}
dev_info(&uvd->dev->dev,
"Transfer Statistics: Transferred=%lu%cB Usage=%lu%%\n",
xferBytes, multiplier, percent);
}
}
}
/*
* usbvideo_TestPattern()
*
* Procedure forms a test pattern (yellow grid on blue background).
*
* Parameters:
* fullframe: if TRUE then entire frame is filled, otherwise the procedure
* continues from the current scanline.
* pmode 0: fill the frame with solid blue color (like on VCR or TV)
* 1: Draw a colored grid
*
* History:
* 01-Feb-2000 Created.
*/
void usbvideo_TestPattern(struct uvd *uvd, int fullframe, int pmode)
{
struct usbvideo_frame *frame;
int num_cell = 0;
int scan_length = 0;
static int num_pass;
if (uvd == NULL) {
err("%s: uvd == NULL", __func__);
return;
}
if ((uvd->curframe < 0) || (uvd->curframe >= USBVIDEO_NUMFRAMES)) {
err("%s: uvd->curframe=%d.", __func__, uvd->curframe);
return;
}
/* Grab the current frame */
frame = &uvd->frame[uvd->curframe];
/* Optionally start at the beginning */
if (fullframe) {
frame->curline = 0;
frame->seqRead_Length = 0;
}
#if 0
{ /* For debugging purposes only */
char tmp[20];
usbvideo_VideosizeToString(tmp, sizeof(tmp), frame->request);
dev_info(&uvd->dev->dev, "testpattern: frame=%s\n", tmp);
}
#endif
/* Form every scan line */
for (; frame->curline < VIDEOSIZE_Y(frame->request); frame->curline++) {
int i;
unsigned char *f = frame->data +
(VIDEOSIZE_X(frame->request) * V4L_BYTES_PER_PIXEL * frame->curline);
for (i=0; i < VIDEOSIZE_X(frame->request); i++) {
unsigned char cb=0x80;
unsigned char cg = 0;
unsigned char cr = 0;
if (pmode == 1) {
if (frame->curline % 32 == 0)
cb = 0, cg = cr = 0xFF;
else if (i % 32 == 0) {
if (frame->curline % 32 == 1)
num_cell++;
cb = 0, cg = cr = 0xFF;
} else {
cb = ((num_cell*7) + num_pass) & 0xFF;
cg = ((num_cell*5) + num_pass*2) & 0xFF;
cr = ((num_cell*3) + num_pass*3) & 0xFF;
}
} else {
/* Just the blue screen */
}
*f++ = cb;
*f++ = cg;
*f++ = cr;
scan_length += 3;
}
}
frame->frameState = FrameState_Done;
frame->seqRead_Length += scan_length;
++num_pass;
/* We do this unconditionally, regardless of FLAGS_OVERLAY_STATS */
usbvideo_OverlayStats(uvd, frame);
}
EXPORT_SYMBOL(usbvideo_TestPattern);
#ifdef DEBUG
/*
* usbvideo_HexDump()
*
* A debugging tool. Prints hex dumps.
*
* History:
* 29-Jul-2000 Added printing of offsets.
*/
void usbvideo_HexDump(const unsigned char *data, int len)
{
const int bytes_per_line = 32;
char tmp[128]; /* 32*3 + 5 */
int i, k;
for (i=k=0; len > 0; i++, len--) {
if (i > 0 && ((i % bytes_per_line) == 0)) {
printk("%s\n", tmp);
k=0;
}
if ((i % bytes_per_line) == 0)
k += sprintf(&tmp[k], "%04x: ", i);
k += sprintf(&tmp[k], "%02x ", data[i]);
}
if (k > 0)
printk("%s\n", tmp);
}
EXPORT_SYMBOL(usbvideo_HexDump);
#endif
/* ******************************************************************** */
/* XXX: this piece of crap really wants some error handling.. */
static int usbvideo_ClientIncModCount(struct uvd *uvd)
{
if (uvd == NULL) {
err("%s: uvd == NULL", __func__);
return -EINVAL;
}
if (uvd->handle == NULL) {
err("%s: uvd->handle == NULL", __func__);
return -EINVAL;
}
if (!try_module_get(uvd->handle->md_module)) {
err("%s: try_module_get() == 0", __func__);
return -ENODEV;
}
return 0;
}
static void usbvideo_ClientDecModCount(struct uvd *uvd)
{
if (uvd == NULL) {
err("%s: uvd == NULL", __func__);
return;
}
if (uvd->handle == NULL) {
err("%s: uvd->handle == NULL", __func__);
return;
}
if (uvd->handle->md_module == NULL) {
err("%s: uvd->handle->md_module == NULL", __func__);
return;
}
module_put(uvd->handle->md_module);
}
int usbvideo_register(
struct usbvideo **pCams,
const int num_cams,
const int num_extra,
const char *driverName,
const struct usbvideo_cb *cbTbl,
struct module *md,
const struct usb_device_id *id_table)
{
struct usbvideo *cams;
int i, base_size, result;
/* Check parameters for sanity */
if ((num_cams <= 0) || (pCams == NULL) || (cbTbl == NULL)) {
err("%s: Illegal call", __func__);
return -EINVAL;
}
/* Check registration callback - must be set! */
if (cbTbl->probe == NULL) {
err("%s: probe() is required!", __func__);
return -EINVAL;
}
base_size = num_cams * sizeof(struct uvd) + sizeof(struct usbvideo);
cams = kzalloc(base_size, GFP_KERNEL);
if (cams == NULL) {
err("Failed to allocate %d. bytes for usbvideo struct", base_size);
return -ENOMEM;
}
dbg("%s: Allocated $%p (%d. bytes) for %d. cameras",
__func__, cams, base_size, num_cams);
/* Copy callbacks, apply defaults for those that are not set */
memmove(&cams->cb, cbTbl, sizeof(cams->cb));
if (cams->cb.getFrame == NULL)
cams->cb.getFrame = usbvideo_GetFrame;
if (cams->cb.disconnect == NULL)
cams->cb.disconnect = usbvideo_Disconnect;
if (cams->cb.startDataPump == NULL)
cams->cb.startDataPump = usbvideo_StartDataPump;
if (cams->cb.stopDataPump == NULL)
cams->cb.stopDataPump = usbvideo_StopDataPump;
cams->num_cameras = num_cams;
cams->cam = (struct uvd *) &cams[1];
cams->md_module = md;
mutex_init(&cams->lock); /* to 1 == available */
for (i = 0; i < num_cams; i++) {
struct uvd *up = &cams->cam[i];
up->handle = cams;
/* Allocate user_data separately because of kmalloc's limits */
if (num_extra > 0) {
up->user_size = num_cams * num_extra;
up->user_data = kmalloc(up->user_size, GFP_KERNEL);
if (up->user_data == NULL) {
err("%s: Failed to allocate user_data (%d. bytes)",
__func__, up->user_size);
while (i) {
up = &cams->cam[--i];
kfree(up->user_data);
}
kfree(cams);
return -ENOMEM;
}
dbg("%s: Allocated cams[%d].user_data=$%p (%d. bytes)",
__func__, i, up->user_data, up->user_size);
}
}
/*
* Register ourselves with USB stack.
*/
strcpy(cams->drvName, (driverName != NULL) ? driverName : "Unknown");
cams->usbdrv.name = cams->drvName;
cams->usbdrv.probe = cams->cb.probe;
cams->usbdrv.disconnect = cams->cb.disconnect;
cams->usbdrv.id_table = id_table;
/*
* Update global handle to usbvideo. This is very important
* because probe() can be called before usb_register() returns.
* If the handle is not yet updated then the probe() will fail.
*/
*pCams = cams;
result = usb_register(&cams->usbdrv);
if (result) {
for (i = 0; i < num_cams; i++) {
struct uvd *up = &cams->cam[i];
kfree(up->user_data);
}
kfree(cams);
}
return result;
}
EXPORT_SYMBOL(usbvideo_register);
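/*
 * Illustrative sketch only (not from the original tree): a minidriver
 * built on this framework would register roughly like this from its
 * module init.  The mycam_* names are hypothetical; only .probe is
 * mandatory, the framework fills in defaults for the other callbacks.
 * Note that *pCams is set before usb_register() is called, so probe()
 * can already use the handle (see the comment inside usbvideo_register()
 * above).
 *
 *	static struct usbvideo *mycam_cams;
 *
 *	static const struct usbvideo_cb mycam_cb = {
 *		.probe = mycam_probe,
 *	};
 *
 *	static int __init mycam_init(void)
 *	{
 *		return usbvideo_register(&mycam_cams, 1, 0, "mycam",
 *					 &mycam_cb, THIS_MODULE,
 *					 mycam_id_table);
 *	}
 */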
/*
* usbvideo_Deregister()
*
* Procedure frees all usbvideo and user data structures. Be warned that
* if you had some dynamically allocated components in ->user field then
* you should free them before calling here.
*/
void usbvideo_Deregister(struct usbvideo **pCams)
{
struct usbvideo *cams;
int i;
if (pCams == NULL) {
err("%s: pCams == NULL", __func__);
return;
}
cams = *pCams;
if (cams == NULL) {
err("%s: cams == NULL", __func__);
return;
}
dbg("%s: Deregistering %s driver.", __func__, cams->drvName);
usb_deregister(&cams->usbdrv);
dbg("%s: Deallocating cams=$%p (%d. cameras)", __func__, cams, cams->num_cameras);
for (i=0; i < cams->num_cameras; i++) {
struct uvd *up = &cams->cam[i];
int warning = 0;
if (up->user_data != NULL) {
if (up->user_size <= 0)
++warning;
} else {
if (up->user_size > 0)
++warning;
}
if (warning) {
err("%s: Warning: user_data=$%p user_size=%d.",
__func__, up->user_data, up->user_size);
} else {
dbg("%s: Freeing %d. $%p->user_data=$%p",
__func__, i, up, up->user_data);
kfree(up->user_data);
}
}
/* Whole array was allocated in one chunk */
dbg("%s: Freed %d uvd structures",
__func__, cams->num_cameras);
kfree(cams);
*pCams = NULL;
}
EXPORT_SYMBOL(usbvideo_Deregister);
/*
* usbvideo_Disconnect()
*
* This procedure stops all driver activity. Deallocation of
* the interface-private structure (pointed by 'ptr') is done now
* (if we don't have any open files) or later, when those files
* are closed. After that driver should be removable.
*
* This code handles surprise removal. The uvd->user is a counter which
* increments on open() and decrements on close(). If we see here that
* this counter is not 0 then we have a client who still has us opened.
* We set uvd->remove_pending flag as early as possible, and after that
* all access to the camera will gracefully fail. These failures should
* prompt client to (eventually) close the video device, and then - in
* usbvideo_v4l_close() - we decrement uvd->uvd_used and usage counter.
*
* History:
* 22-Jan-2000 Added polling of MOD_IN_USE to delay removal until all users gone.
* 27-Jan-2000 Reworked to allow pending disconnects; see xxx_close()
* 24-May-2000 Corrected to prevent race condition (MOD_xxx_USE_COUNT).
* 19-Oct-2000 Moved to usbvideo module.
*/
static void usbvideo_Disconnect(struct usb_interface *intf)
{
struct uvd *uvd = usb_get_intfdata (intf);
int i;
if (uvd == NULL) {
err("%s($%p): Illegal call.", __func__, intf);
return;
}
usb_set_intfdata (intf, NULL);
usbvideo_ClientIncModCount(uvd);
if (uvd->debug > 0)
dev_info(&intf->dev, "%s(%p.)\n", __func__, intf);
mutex_lock(&uvd->lock);
uvd->remove_pending = 1; /* Now all ISO data will be ignored */
/* At this time we ask to cancel outstanding URBs */
GET_CALLBACK(uvd, stopDataPump)(uvd);
for (i=0; i < USBVIDEO_NUMSBUF; i++)
usb_free_urb(uvd->sbuf[i].urb);
usb_put_dev(uvd->dev);
uvd->dev = NULL; /* USB device is no more */
video_unregister_device(&uvd->vdev);
if (uvd->debug > 0)
dev_info(&intf->dev, "%s: Video unregistered.\n", __func__);
if (uvd->user)
dev_info(&intf->dev, "%s: In use, disconnect pending.\n",
__func__);
else
usbvideo_CameraRelease(uvd);
mutex_unlock(&uvd->lock);
dev_info(&intf->dev, "USB camera disconnected.\n");
usbvideo_ClientDecModCount(uvd);
}
/*
* usbvideo_CameraRelease()
*
* This code does final release of uvd. This happens
* after the device is disconnected -and- all clients
* closed their files.
*
* History:
* 27-Jan-2000 Created.
*/
static void usbvideo_CameraRelease(struct uvd *uvd)
{
if (uvd == NULL) {
err("%s: Illegal call", __func__);
return;
}
RingQueue_Free(&uvd->dp);
if (VALID_CALLBACK(uvd, userFree))
GET_CALLBACK(uvd, userFree)(uvd);
uvd->uvd_used = 0; /* This is atomic, no need to take mutex */
}
/*
* usbvideo_find_struct()
*
* This code searches the array of preallocated (static) structures
* and returns index of the first one that isn't in use. Returns -1
* if there are no free structures.
*
* History:
* 27-Jan-2000 Created.
*/
static int usbvideo_find_struct(struct usbvideo *cams)
{
int u, rv = -1;
if (cams == NULL) {
err("No usbvideo handle?");
return -1;
}
mutex_lock(&cams->lock);
for (u = 0; u < cams->num_cameras; u++) {
struct uvd *uvd = &cams->cam[u];
if (!uvd->uvd_used) /* This one is free */
{
uvd->uvd_used = 1; /* In use now */
mutex_init(&uvd->lock); /* to 1 == available */
uvd->dev = NULL;
rv = u;
break;
}
}
mutex_unlock(&cams->lock);
return rv;
}
static const struct v4l2_file_operations usbvideo_fops = {
.owner = THIS_MODULE,
.open = usbvideo_v4l_open,
.release = usbvideo_v4l_close,
.read = usbvideo_v4l_read,
.mmap = usbvideo_v4l_mmap,
.ioctl = usbvideo_v4l_ioctl,
};
static const struct video_device usbvideo_template = {
.fops = &usbvideo_fops,
};
struct uvd *usbvideo_AllocateDevice(struct usbvideo *cams)
{
int i, devnum;
struct uvd *uvd = NULL;
if (cams == NULL) {
err("No usbvideo handle?");
return NULL;
}
devnum = usbvideo_find_struct(cams);
if (devnum == -1) {
err("IBM USB camera driver: Too many devices!");
return NULL;
}
uvd = &cams->cam[devnum];
dbg("Device entry #%d. at $%p", devnum, uvd);
/* Not relying upon caller we increase module counter ourselves */
usbvideo_ClientIncModCount(uvd);
mutex_lock(&uvd->lock);
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
uvd->sbuf[i].urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);
if (uvd->sbuf[i].urb == NULL) {
err("usb_alloc_urb(%d.) failed.", FRAMES_PER_DESC);
uvd->uvd_used = 0;
uvd = NULL;
goto allocate_done;
}
}
uvd->user=0;
uvd->remove_pending = 0;
uvd->last_error = 0;
RingQueue_Initialize(&uvd->dp);
/* Initialize video device structure */
uvd->vdev = usbvideo_template;
sprintf(uvd->vdev.name, "%.20s USB Camera", cams->drvName);
/*
* The client is free to overwrite those because we
* return control to the client's probe function right now.
*/
allocate_done:
mutex_unlock(&uvd->lock);
usbvideo_ClientDecModCount(uvd);
return uvd;
}
EXPORT_SYMBOL(usbvideo_AllocateDevice);
int usbvideo_RegisterVideoDevice(struct uvd *uvd)
{
char tmp1[20], tmp2[20]; /* Buffers for printing */
if (uvd == NULL) {
err("%s: Illegal call.", __func__);
return -EINVAL;
}
if (uvd->video_endp == 0) {
dev_info(&uvd->dev->dev,
"%s: No video endpoint specified; data pump disabled.\n",
__func__);
}
if (uvd->paletteBits == 0) {
err("%s: No palettes specified!", __func__);
return -EINVAL;
}
if (uvd->defaultPalette == 0) {
dev_info(&uvd->dev->dev, "%s: No default palette!\n",
__func__);
}
uvd->max_frame_size = VIDEOSIZE_X(uvd->canvas) *
VIDEOSIZE_Y(uvd->canvas) * V4L_BYTES_PER_PIXEL;
usbvideo_VideosizeToString(tmp1, sizeof(tmp1), uvd->videosize);
usbvideo_VideosizeToString(tmp2, sizeof(tmp2), uvd->canvas);
if (uvd->debug > 0) {
dev_info(&uvd->dev->dev,
"%s: iface=%d. endpoint=$%02x paletteBits=$%08lx\n",
__func__, uvd->iface, uvd->video_endp,
uvd->paletteBits);
}
if (uvd->dev == NULL) {
err("%s: uvd->dev == NULL", __func__);
return -EINVAL;
}
uvd->vdev.parent = &uvd->dev->dev;
uvd->vdev.release = video_device_release_empty;
if (video_register_device(&uvd->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
err("%s: video_register_device failed", __func__);
return -EPIPE;
}
if (uvd->debug > 1) {
dev_info(&uvd->dev->dev,
"%s: video_register_device() successful\n", __func__);
}
dev_info(&uvd->dev->dev, "%s on %s: canvas=%s videosize=%s\n",
(uvd->handle != NULL) ? uvd->handle->drvName : "???",
video_device_node_name(&uvd->vdev), tmp2, tmp1);
usb_get_dev(uvd->dev);
return 0;
}
EXPORT_SYMBOL(usbvideo_RegisterVideoDevice);
/* ******************************************************************** */
static int usbvideo_v4l_mmap(struct file *file, struct vm_area_struct *vma)
{
struct uvd *uvd = file->private_data;
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end-vma->vm_start;
unsigned long page, pos;
if (!CAMERA_IS_OPERATIONAL(uvd))
return -EFAULT;
if (size > (((USBVIDEO_NUMFRAMES * uvd->max_frame_size) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)))
return -EINVAL;
pos = (unsigned long) uvd->fbuf;
while (size > 0) {
page = vmalloc_to_pfn((void *)pos);
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
return -EAGAIN;
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
else
size = 0;
}
return 0;
}
/*
* usbvideo_v4l_open()
*
* This is part of Video 4 Linux API. The driver can be opened by one
* client only (checks internal counter 'uvd->user'). The procedure
* then allocates buffers needed for video processing.
*
* History:
* 22-Jan-2000 Rewrote, moved scratch buffer allocation here. Now the
* camera is also initialized here (once per connect), at
* expense of V4L client (it waits on open() call).
* 27-Jan-2000 Used USBVIDEO_NUMSBUF as number of URB buffers.
* 24-May-2000 Corrected to prevent race condition (MOD_xxx_USE_COUNT).
*/
static int usbvideo_v4l_open(struct file *file)
{
struct video_device *dev = video_devdata(file);
struct uvd *uvd = (struct uvd *) dev;
const int sb_size = FRAMES_PER_DESC * uvd->iso_packet_len;
int i, errCode = 0;
if (uvd->debug > 1)
dev_info(&uvd->dev->dev, "%s($%p)\n", __func__, dev);
if (usbvideo_ClientIncModCount(uvd) < 0)
return -ENODEV;
mutex_lock(&uvd->lock);
if (uvd->user) {
err("%s: Someone tried to open an already opened device!", __func__);
errCode = -EBUSY;
} else {
/* Clear statistics */
memset(&uvd->stats, 0, sizeof(uvd->stats));
/* Clean pointers so we know if we allocated something */
for (i=0; i < USBVIDEO_NUMSBUF; i++)
uvd->sbuf[i].data = NULL;
/* Allocate memory for the frame buffers */
uvd->fbuf_size = USBVIDEO_NUMFRAMES * uvd->max_frame_size;
uvd->fbuf = usbvideo_rvmalloc(uvd->fbuf_size);
RingQueue_Allocate(&uvd->dp, RING_QUEUE_SIZE);
if ((uvd->fbuf == NULL) ||
(!RingQueue_IsAllocated(&uvd->dp))) {
err("%s: Failed to allocate fbuf or dp", __func__);
errCode = -ENOMEM;
} else {
/* Allocate all buffers */
for (i=0; i < USBVIDEO_NUMFRAMES; i++) {
uvd->frame[i].frameState = FrameState_Unused;
uvd->frame[i].data = uvd->fbuf + i*(uvd->max_frame_size);
/*
* Set default sizes in case IOCTL (VIDIOCMCAPTURE)
* is not used (using read() instead).
*/
uvd->frame[i].canvas = uvd->canvas;
uvd->frame[i].seqRead_Index = 0;
}
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
uvd->sbuf[i].data = kmalloc(sb_size, GFP_KERNEL);
if (uvd->sbuf[i].data == NULL) {
errCode = -ENOMEM;
break;
}
}
}
if (errCode != 0) {
/* Have to free all that memory */
if (uvd->fbuf != NULL) {
usbvideo_rvfree(uvd->fbuf, uvd->fbuf_size);
uvd->fbuf = NULL;
}
RingQueue_Free(&uvd->dp);
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
kfree(uvd->sbuf[i].data);
uvd->sbuf[i].data = NULL;
}
}
}
/* If so far no errors then we shall start the camera */
if (errCode == 0) {
/* Start data pump if we have valid endpoint */
if (uvd->video_endp != 0)
errCode = GET_CALLBACK(uvd, startDataPump)(uvd);
if (errCode == 0) {
if (VALID_CALLBACK(uvd, setupOnOpen)) {
if (uvd->debug > 1)
dev_info(&uvd->dev->dev,
"%s: setupOnOpen callback\n",
__func__);
errCode = GET_CALLBACK(uvd, setupOnOpen)(uvd);
if (errCode < 0) {
err("%s: setupOnOpen callback failed (%d.).",
__func__, errCode);
} else if (uvd->debug > 1) {
dev_info(&uvd->dev->dev,
"%s: setupOnOpen callback successful\n",
__func__);
}
}
if (errCode == 0) {
uvd->settingsAdjusted = 0;
if (uvd->debug > 1)
dev_info(&uvd->dev->dev,
"%s: Open succeeded.\n",
__func__);
uvd->user++;
file->private_data = uvd;
}
}
}
mutex_unlock(&uvd->lock);
if (errCode != 0)
usbvideo_ClientDecModCount(uvd);
if (uvd->debug > 0)
dev_info(&uvd->dev->dev, "%s: Returning %d.\n", __func__,
errCode);
return errCode;
}
/*
* usbvideo_v4l_close()
*
* This is part of Video 4 Linux API. The procedure
* stops streaming and deallocates all buffers that were earlier
* allocated in usbvideo_v4l_open().
*
* History:
* 22-Jan-2000 Moved scratch buffer deallocation here.
* 27-Jan-2000 Used USBVIDEO_NUMSBUF as number of URB buffers.
* 24-May-2000 Moved MOD_DEC_USE_COUNT outside of code that can sleep.
*/
static int usbvideo_v4l_close(struct file *file)
{
struct video_device *dev = file->private_data;
struct uvd *uvd = (struct uvd *) dev;
int i;
if (uvd->debug > 1)
dev_info(&uvd->dev->dev, "%s($%p)\n", __func__, dev);
mutex_lock(&uvd->lock);
GET_CALLBACK(uvd, stopDataPump)(uvd);
usbvideo_rvfree(uvd->fbuf, uvd->fbuf_size);
uvd->fbuf = NULL;
RingQueue_Free(&uvd->dp);
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
kfree(uvd->sbuf[i].data);
uvd->sbuf[i].data = NULL;
}
#if USBVIDEO_REPORT_STATS
usbvideo_ReportStatistics(uvd);
#endif
uvd->user--;
if (uvd->remove_pending) {
if (uvd->debug > 0)
dev_info(&uvd->dev->dev, "%s: Final disconnect.\n",
__func__);
usbvideo_CameraRelease(uvd);
}
mutex_unlock(&uvd->lock);
usbvideo_ClientDecModCount(uvd);
if (uvd->debug > 1)
dev_info(&uvd->dev->dev, "%s: Completed.\n", __func__);
file->private_data = NULL;
return 0;
}
/*
* usbvideo_v4l_ioctl()
*
* This is part of Video 4 Linux API. The procedure handles ioctl() calls.
*
* History:
* 22-Jan-2000 Corrected VIDIOCSPICT to reject unsupported settings.
*/
static long usbvideo_v4l_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct uvd *uvd = file->private_data;
if (!CAMERA_IS_OPERATIONAL(uvd))
return -EIO;
switch (cmd) {
case VIDIOCGCAP:
{
struct video_capability *b = arg;
*b = uvd->vcap;
return 0;
}
case VIDIOCGCHAN:
{
struct video_channel *v = arg;
*v = uvd->vchan;
return 0;
}
case VIDIOCSCHAN:
{
struct video_channel *v = arg;
if (v->channel != 0)
return -EINVAL;
return 0;
}
case VIDIOCGPICT:
{
struct video_picture *pic = arg;
*pic = uvd->vpic;
return 0;
}
case VIDIOCSPICT:
{
struct video_picture *pic = arg;
/*
* Use temporary 'video_picture' structure to preserve our
* own settings (such as color depth, palette) that we
* aren't allowing everyone (V4L client) to change.
*/
uvd->vpic.brightness = pic->brightness;
uvd->vpic.hue = pic->hue;
uvd->vpic.colour = pic->colour;
uvd->vpic.contrast = pic->contrast;
uvd->settingsAdjusted = 0; /* Will force new settings */
return 0;
}
case VIDIOCSWIN:
{
struct video_window *vw = arg;
if(VALID_CALLBACK(uvd, setVideoMode)) {
return GET_CALLBACK(uvd, setVideoMode)(uvd, vw);
}
if (vw->flags)
return -EINVAL;
if (vw->clipcount)
return -EINVAL;
if (vw->width != VIDEOSIZE_X(uvd->canvas))
return -EINVAL;
if (vw->height != VIDEOSIZE_Y(uvd->canvas))
return -EINVAL;
return 0;
}
case VIDIOCGWIN:
{
struct video_window *vw = arg;
vw->x = 0;
vw->y = 0;
vw->width = VIDEOSIZE_X(uvd->videosize);
vw->height = VIDEOSIZE_Y(uvd->videosize);
vw->chromakey = 0;
if (VALID_CALLBACK(uvd, getFPS))
vw->flags = GET_CALLBACK(uvd, getFPS)(uvd);
else
vw->flags = 10; /* FIXME: do better! */
return 0;
}
case VIDIOCGMBUF:
{
struct video_mbuf *vm = arg;
int i;
memset(vm, 0, sizeof(*vm));
vm->size = uvd->max_frame_size * USBVIDEO_NUMFRAMES;
vm->frames = USBVIDEO_NUMFRAMES;
for(i = 0; i < USBVIDEO_NUMFRAMES; i++)
vm->offsets[i] = i * uvd->max_frame_size;
return 0;
}
case VIDIOCMCAPTURE:
{
struct video_mmap *vm = arg;
if (uvd->debug >= 1) {
dev_info(&uvd->dev->dev,
"VIDIOCMCAPTURE: frame=%d. size=%dx%d, format=%d.\n",
vm->frame, vm->width, vm->height, vm->format);
}
/*
* Check if the requested size is supported. If the requestor
* requests too big a frame then we may be tricked into accessing
* outside of own preallocated frame buffer (in uvd->frame).
* This will cause oops or a security hole. Theoretically, we
* could only clamp the size down to acceptable bounds, but then
* we'd need to figure out how to insert our smaller buffer into
* larger caller's buffer... this is not an easy question. So we
* here just flatly reject too large requests, assuming that the
* caller will resubmit with smaller size. Callers should know
* what size we support (returned by VIDIOCGCAP). However vidcat,
* for one, does not care and allows to ask for any size.
*/
if ((vm->width > VIDEOSIZE_X(uvd->canvas)) ||
(vm->height > VIDEOSIZE_Y(uvd->canvas))) {
if (uvd->debug > 0) {
dev_info(&uvd->dev->dev,
"VIDIOCMCAPTURE: Size=%dx%d "
"too large; allowed only up "
"to %ldx%ld\n", vm->width,
vm->height,
VIDEOSIZE_X(uvd->canvas),
VIDEOSIZE_Y(uvd->canvas));
}
return -EINVAL;
}
/* Check if the palette is supported */
if (((1L << vm->format) & uvd->paletteBits) == 0) {
if (uvd->debug > 0) {
dev_info(&uvd->dev->dev,
"VIDIOCMCAPTURE: format=%d. "
"not supported "
"(paletteBits=$%08lx)\n",
vm->format, uvd->paletteBits);
}
return -EINVAL;
}
if ((vm->frame < 0) || (vm->frame >= USBVIDEO_NUMFRAMES)) {
err("VIDIOCMCAPTURE: vm.frame=%d. !E [0-%d]", vm->frame, USBVIDEO_NUMFRAMES-1);
return -EINVAL;
}
if (uvd->frame[vm->frame].frameState == FrameState_Grabbing) {
/* Not an error - can happen */
}
uvd->frame[vm->frame].request = VIDEOSIZE(vm->width, vm->height);
uvd->frame[vm->frame].palette = vm->format;
/* Mark it as ready */
uvd->frame[vm->frame].frameState = FrameState_Ready;
return usbvideo_NewFrame(uvd, vm->frame);
}
case VIDIOCSYNC:
{
int *frameNum = arg;
int ret;
if (*frameNum < 0 || *frameNum >= USBVIDEO_NUMFRAMES)
return -EINVAL;
if (uvd->debug >= 1)
dev_info(&uvd->dev->dev,
"VIDIOCSYNC: syncing to frame %d.\n",
*frameNum);
if (uvd->flags & FLAGS_NO_DECODING)
ret = usbvideo_GetFrame(uvd, *frameNum);
else if (VALID_CALLBACK(uvd, getFrame)) {
ret = GET_CALLBACK(uvd, getFrame)(uvd, *frameNum);
if ((ret < 0) && (uvd->debug >= 1)) {
err("VIDIOCSYNC: getFrame() returned %d.", ret);
}
} else {
err("VIDIOCSYNC: getFrame is not set");
ret = -EFAULT;
}
/*
* The frame is in FrameState_Done_Hold state. Release it
* right now because its data is already mapped into
* the user space and it's up to the application to
* make use of it until it asks for another frame.
*/
uvd->frame[*frameNum].frameState = FrameState_Unused;
return ret;
}
case VIDIOCGFBUF:
{
struct video_buffer *vb = arg;
memset(vb, 0, sizeof(*vb));
return 0;
}
case VIDIOCKEY:
return 0;
case VIDIOCCAPTURE:
return -EINVAL;
case VIDIOCSFBUF:
case VIDIOCGTUNER:
case VIDIOCSTUNER:
case VIDIOCGFREQ:
case VIDIOCSFREQ:
case VIDIOCGAUDIO:
case VIDIOCSAUDIO:
return -EINVAL;
default:
return -ENOIOCTLCMD;
}
return 0;
}
static long usbvideo_v4l_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
return video_usercopy(file, cmd, arg, usbvideo_v4l_do_ioctl);
}
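/*
 * Illustrative user-space sketch (not part of the driver): the classic
 * V4L1 mmap capture sequence that the ioctls above implement.  Device
 * node, picture size and error handling are assumptions for the example.
 *
 *	int fd = open("/dev/video0", O_RDWR);
 *	struct video_mbuf mbuf;
 *	ioctl(fd, VIDIOCGMBUF, &mbuf);
 *	char *base = mmap(NULL, mbuf.size, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	struct video_mmap vm = {
 *		.frame = 0, .width = 320, .height = 240,
 *		.format = VIDEO_PALETTE_RGB24,
 *	};
 *	int frame = 0;
 *	ioctl(fd, VIDIOCMCAPTURE, &vm);
 *	ioctl(fd, VIDIOCSYNC, &frame);	-- data is now at base + mbuf.offsets[0]
 */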
/*
* usbvideo_v4l_read()
*
* This is mostly boring stuff. We simply ask for a frame and when it
* arrives copy all the video data from it into user space. There is
* no obvious need to override this method.
*
* History:
* 20-Oct-2000 Created.
* 01-Nov-2000 Added mutex (uvd->lock).
*/
static ssize_t usbvideo_v4l_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct uvd *uvd = file->private_data;
int noblock = file->f_flags & O_NONBLOCK;
int frmx = -1, i;
struct usbvideo_frame *frame;
if (!CAMERA_IS_OPERATIONAL(uvd) || (buf == NULL))
return -EFAULT;
if (uvd->debug >= 1)
dev_info(&uvd->dev->dev,
"%s: %Zd. bytes, noblock=%d.\n",
__func__, count, noblock);
mutex_lock(&uvd->lock);
/* See if a frame is completed, then use it. */
for(i = 0; i < USBVIDEO_NUMFRAMES; i++) {
if ((uvd->frame[i].frameState == FrameState_Done) ||
(uvd->frame[i].frameState == FrameState_Done_Hold) ||
(uvd->frame[i].frameState == FrameState_Error)) {
frmx = i;
break;
}
}
/* FIXME: If we don't start a frame here then who ever does? */
if (noblock && (frmx == -1)) {
count = -EAGAIN;
goto read_done;
}
/*
* If no FrameState_Done, look for a FrameState_Grabbing state.
* See if a frame is in process (grabbing), then use it.
* We will need to wait until it becomes cooked, of course.
*/
if (frmx == -1) {
for(i = 0; i < USBVIDEO_NUMFRAMES; i++) {
if (uvd->frame[i].frameState == FrameState_Grabbing) {
frmx = i;
break;
}
}
}
/*
* If no frame is active, start one. We don't care which one
* it will be, so #0 is as good as any.
* In read access mode we don't have convenience of VIDIOCMCAPTURE
* to specify the requested palette (video format) on per-frame
* basis. This means that we have to return data in -some- format
* and just hope that the client knows what to do with it.
* The default format is configured in uvd->defaultPalette field
* as one of VIDEO_PALETTE_xxx values. We stuff it into the new
* frame and initiate the frame filling process.
*/
if (frmx == -1) {
if (uvd->defaultPalette == 0) {
err("%s: No default palette; don't know what to do!", __func__);
count = -EFAULT;
goto read_done;
}
frmx = 0;
/*
* We have no per-frame control over video size.
* Therefore we only can use whatever size was
* specified as default.
*/
uvd->frame[frmx].request = uvd->videosize;
uvd->frame[frmx].palette = uvd->defaultPalette;
uvd->frame[frmx].frameState = FrameState_Ready;
usbvideo_NewFrame(uvd, frmx);
/* Now frame 0 is supposed to start filling... */
}
/*
* Get a pointer to the active frame. It is either previously
* completed frame or frame in progress but not completed yet.
*/
frame = &uvd->frame[frmx];
/*
* Sit back & wait until the frame gets filled and postprocessed.
* If we fail to get the picture [in time] then return the error.
* In this call we specify that we want the frame to be waited for,
* postprocessed and switched into FrameState_Done_Hold state. This
* state is used to hold the frame as "fully completed" between
* subsequent partial reads of the same frame.
*/
if (frame->frameState != FrameState_Done_Hold) {
long rv = -EFAULT;
if (uvd->flags & FLAGS_NO_DECODING)
rv = usbvideo_GetFrame(uvd, frmx);
else if (VALID_CALLBACK(uvd, getFrame))
rv = GET_CALLBACK(uvd, getFrame)(uvd, frmx);
else
err("getFrame is not set");
if ((rv != 0) || (frame->frameState != FrameState_Done_Hold)) {
count = rv;
goto read_done;
}
}
/*
* Copy bytes to user space. We allow for partial reads, which
* means that the user application can request read less than
* the full frame size. It is up to the application to issue
* subsequent calls until entire frame is read.
*
* First things first, make sure we don't copy more than we
* have - even if the application wants more. That would be
* a big security embarrassment!
*/
if ((count + frame->seqRead_Index) > frame->seqRead_Length)
count = frame->seqRead_Length - frame->seqRead_Index;
/*
* Copy requested amount of data to user space. We start
* copying from the position where we last left it, which
* will be zero for a new frame (not read before).
*/
if (copy_to_user(buf, frame->data + frame->seqRead_Index, count)) {
count = -EFAULT;
goto read_done;
}
/* Update last read position */
frame->seqRead_Index += count;
if (uvd->debug >= 1) {
err("%s: {copy} count used=%Zd, new seqRead_Index=%ld",
__func__, count, frame->seqRead_Index);
}
/* Finally check if the frame is done with and "release" it */
if (frame->seqRead_Index >= frame->seqRead_Length) {
/* All data has been read */
frame->seqRead_Index = 0;
/* Mark it as available to be used again. */
uvd->frame[frmx].frameState = FrameState_Unused;
if (usbvideo_NewFrame(uvd, (frmx + 1) % USBVIDEO_NUMFRAMES)) {
err("%s: usbvideo_NewFrame failed.", __func__);
}
}
read_done:
mutex_unlock(&uvd->lock);
return count;
}
/*
* Make all of the blocks of data contiguous
*/
static int usbvideo_CompressIsochronous(struct uvd *uvd, struct urb *urb)
{
char *cdata;
int i, totlen = 0;
for (i = 0; i < urb->number_of_packets; i++) {
int n = urb->iso_frame_desc[i].actual_length;
int st = urb->iso_frame_desc[i].status;
cdata = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
/* Detect and ignore errored packets */
if (st < 0) {
if (uvd->debug >= 1)
err("Data error: packet=%d. len=%d. status=%d.", i, n, st);
uvd->stats.iso_err_count++;
continue;
}
/* Detect and ignore empty packets */
if (n <= 0) {
uvd->stats.iso_skip_count++;
continue;
}
totlen += n; /* Little local accounting */
RingQueue_Enqueue(&uvd->dp, cdata, n);
}
return totlen;
}
static void usbvideo_IsocIrq(struct urb *urb)
{
int i, ret, len;
struct uvd *uvd = urb->context;
/* We don't want to do anything if we are about to be removed! */
if (!CAMERA_IS_OPERATIONAL(uvd))
return;
#if 0
if (urb->actual_length > 0) {
dev_info(&uvd->dev->dev,
"urb=$%p status=%d. errcount=%d. length=%d.\n",
urb, urb->status, urb->error_count,
urb->actual_length);
} else {
static int c = 0;
if (c++ % 100 == 0)
dev_info(&uvd->dev->dev, "No Isoc data\n");
}
#endif
if (!uvd->streaming) {
if (uvd->debug >= 1)
dev_info(&uvd->dev->dev,
"Not streaming, but interrupt!\n");
return;
}
uvd->stats.urb_count++;
if (urb->actual_length <= 0)
goto urb_done_with;
/* Copy the data received into ring queue */
len = usbvideo_CompressIsochronous(uvd, urb);
uvd->stats.urb_length = len;
if (len <= 0)
goto urb_done_with;
/* Here we got some data */
uvd->stats.data_count += len;
RingQueue_WakeUpInterruptible(&uvd->dp);
urb_done_with:
for (i = 0; i < FRAMES_PER_DESC; i++) {
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length = 0;
}
urb->status = 0;
urb->dev = uvd->dev;
ret = usb_submit_urb (urb, GFP_KERNEL);
if(ret)
err("usb_submit_urb error (%d)", ret);
return;
}
/*
* usbvideo_StartDataPump()
*
* History:
* 27-Jan-2000 Used ibmcam->iface, ibmcam->ifaceAltActive instead
* of hardcoded values. Simplified by using for loop,
* allowed any number of URBs.
*/
static int usbvideo_StartDataPump(struct uvd *uvd)
{
struct usb_device *dev = uvd->dev;
int i, errFlag;
if (uvd->debug > 1)
dev_info(&uvd->dev->dev, "%s($%p)\n", __func__, uvd);
if (!CAMERA_IS_OPERATIONAL(uvd)) {
err("%s: Camera is not operational", __func__);
return -EFAULT;
}
uvd->curframe = -1;
/* Alternate interface 1 is the biggest frame size */
i = usb_set_interface(dev, uvd->iface, uvd->ifaceAltActive);
if (i < 0) {
err("%s: usb_set_interface error", __func__);
uvd->last_error = i;
return -EBUSY;
}
if (VALID_CALLBACK(uvd, videoStart))
GET_CALLBACK(uvd, videoStart)(uvd);
else
err("%s: videoStart not set", __func__);
/* We double buffer the Iso lists */
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
int j, k;
struct urb *urb = uvd->sbuf[i].urb;
urb->dev = dev;
urb->context = uvd;
urb->pipe = usb_rcvisocpipe(dev, uvd->video_endp);
urb->interval = 1;
urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = uvd->sbuf[i].data;
urb->complete = usbvideo_IsocIrq;
urb->number_of_packets = FRAMES_PER_DESC;
urb->transfer_buffer_length = uvd->iso_packet_len * FRAMES_PER_DESC;
for (j=k=0; j < FRAMES_PER_DESC; j++, k += uvd->iso_packet_len) {
urb->iso_frame_desc[j].offset = k;
urb->iso_frame_desc[j].length = uvd->iso_packet_len;
}
}
/* Submit all URBs */
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
errFlag = usb_submit_urb(uvd->sbuf[i].urb, GFP_KERNEL);
if (errFlag)
err("%s: usb_submit_isoc(%d) ret %d", __func__, i, errFlag);
}
uvd->streaming = 1;
if (uvd->debug > 1)
dev_info(&uvd->dev->dev,
"%s: streaming=1 video_endp=$%02x\n", __func__,
uvd->video_endp);
return 0;
}
/*
* usbvideo_StopDataPump()
*
* This procedure stops streaming and deallocates URBs. Then it
* activates zero-bandwidth alt. setting of the video interface.
*
* History:
* 22-Jan-2000 Corrected order of actions to work after surprise removal.
* 27-Jan-2000 Used uvd->iface, uvd->ifaceAltInactive instead of hardcoded values.
*/
static void usbvideo_StopDataPump(struct uvd *uvd)
{
int i, j;
if ((uvd == NULL) || (!uvd->streaming) || (uvd->dev == NULL))
return;
if (uvd->debug > 1)
dev_info(&uvd->dev->dev, "%s($%p)\n", __func__, uvd);
/* Unschedule all of the iso td's */
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
usb_kill_urb(uvd->sbuf[i].urb);
}
if (uvd->debug > 1)
dev_info(&uvd->dev->dev, "%s: streaming=0\n", __func__);
uvd->streaming = 0;
if (!uvd->remove_pending) {
/* Invoke minidriver's magic to stop the camera */
if (VALID_CALLBACK(uvd, videoStop))
GET_CALLBACK(uvd, videoStop)(uvd);
else
err("%s: videoStop not set", __func__);
/* Set packet size to 0 */
j = usb_set_interface(uvd->dev, uvd->iface, uvd->ifaceAltInactive);
if (j < 0) {
err("%s: usb_set_interface() error %d.", __func__, j);
uvd->last_error = j;
}
}
}
/*
* usbvideo_NewFrame()
*
* History:
* 29-Mar-00 Added copying of previous frame into the current one.
* 6-Aug-00 Added model 3 video sizes, removed redundant width, height.
*/
static int usbvideo_NewFrame(struct uvd *uvd, int framenum)
{
struct usbvideo_frame *frame;
int n;
if (uvd->debug > 1)
dev_info(&uvd->dev->dev, "usbvideo_NewFrame($%p,%d.)\n", uvd,
framenum);
/* If we're not grabbing a frame right now and the other frame is */
/* ready to be grabbed into, then use it instead */
if (uvd->curframe != -1)
return 0;
/* If necessary we adjust picture settings between frames */
if (!uvd->settingsAdjusted) {
if (VALID_CALLBACK(uvd, adjustPicture))
GET_CALLBACK(uvd, adjustPicture)(uvd);
uvd->settingsAdjusted = 1;
}
n = (framenum + 1) % USBVIDEO_NUMFRAMES;
if (uvd->frame[n].frameState == FrameState_Ready)
framenum = n;
frame = &uvd->frame[framenum];
frame->frameState = FrameState_Grabbing;
frame->scanstate = ScanState_Scanning;
frame->seqRead_Length = 0; /* Accumulated in xxx_parse_data() */
frame->deinterlace = Deinterlace_None;
frame->flags = 0; /* No flags yet, up to minidriver (or us) to set them */
uvd->curframe = framenum;
/*
* Normally we would want to copy previous frame into the current one
* before we even start filling it with data; this allows us to stop
* filling at any moment; top portion of the frame will be new and
* bottom portion will stay as it was in previous frame. If we don't
* do that then missing chunks of video stream will result in flickering
* portions of old data whatever it was before.
*
* If we choose not to copy previous frame (to, for example, save few
* bus cycles - the frame can be pretty large!) then we have an option
* to clear the frame before using. If we experience losses in this
* mode then missing picture will be black (no flickering).
*
* Finally, if user chooses not to clean the current frame before
* filling it with data then the old data will be visible if we fail
* to refill entire frame with new data.
*/
if (!(uvd->flags & FLAGS_SEPARATE_FRAMES)) {
/* This copies previous frame into this one to mask losses */
int prev = (framenum - 1 + USBVIDEO_NUMFRAMES) % USBVIDEO_NUMFRAMES;
memmove(frame->data, uvd->frame[prev].data, uvd->max_frame_size);
} else {
if (uvd->flags & FLAGS_CLEAN_FRAMES) {
/* This provides a "clean" frame but slows things down */
memset(frame->data, 0, uvd->max_frame_size);
}
}
return 0;
}
/*
* usbvideo_CollectRawData()
*
* This procedure can be used instead of 'processData' callback if you
* only want to dump the raw data from the camera into the output
* device (frame buffer). You can look at it with V4L client, but the
* image will be unwatchable. The main purpose of this code and of the
* mode FLAGS_NO_DECODING is debugging and capturing of datastreams from
* new, unknown cameras. This procedure will be automatically invoked
* instead of the specified callback handler when uvd->flags has bit
* FLAGS_NO_DECODING set. Therefore, any regular build of any driver
* based on usbvideo can use this feature at any time.
*/
static void usbvideo_CollectRawData(struct uvd *uvd, struct usbvideo_frame *frame)
{
int n;
assert(uvd != NULL);
assert(frame != NULL);
/* Try to move data from queue into frame buffer */
n = RingQueue_GetLength(&uvd->dp);
if (n > 0) {
int m;
/* See how much space we have left */
m = uvd->max_frame_size - frame->seqRead_Length;
if (n > m)
n = m;
/* Now move that much data into frame buffer */
RingQueue_Dequeue(
&uvd->dp,
frame->data + frame->seqRead_Length,
m);
frame->seqRead_Length += m;
}
/* See if we filled the frame */
if (frame->seqRead_Length >= uvd->max_frame_size) {
frame->frameState = FrameState_Done;
uvd->curframe = -1;
uvd->stats.frame_num++;
}
}
static int usbvideo_GetFrame(struct uvd *uvd, int frameNum)
{
struct usbvideo_frame *frame = &uvd->frame[frameNum];
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev, "%s($%p,%d.)\n", __func__, uvd,
frameNum);
switch (frame->frameState) {
case FrameState_Unused:
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev, "%s: FrameState_Unused\n",
__func__);
return -EINVAL;
case FrameState_Ready:
case FrameState_Grabbing:
case FrameState_Error:
{
int ntries, signalPending;
redo:
if (!CAMERA_IS_OPERATIONAL(uvd)) {
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev,
"%s: Camera is not operational (1)\n",
__func__);
return -EIO;
}
ntries = 0;
do {
RingQueue_InterruptibleSleepOn(&uvd->dp);
signalPending = signal_pending(current);
if (!CAMERA_IS_OPERATIONAL(uvd)) {
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev,
"%s: Camera is not "
"operational (2)\n", __func__);
return -EIO;
}
assert(uvd->fbuf != NULL);
if (signalPending) {
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev,
"%s: Signal=$%08x\n", __func__,
signalPending);
if (uvd->flags & FLAGS_RETRY_VIDIOCSYNC) {
usbvideo_TestPattern(uvd, 1, 0);
uvd->curframe = -1;
uvd->stats.frame_num++;
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev,
"%s: Forced test "
"pattern screen\n",
__func__);
return 0;
} else {
/* Standard answer: Interrupted! */
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev,
"%s: Interrupted!\n",
__func__);
return -EINTR;
}
} else {
/* No signals - we just got new data in dp queue */
if (uvd->flags & FLAGS_NO_DECODING)
usbvideo_CollectRawData(uvd, frame);
else if (VALID_CALLBACK(uvd, processData))
GET_CALLBACK(uvd, processData)(uvd, frame);
else
err("%s: processData not set", __func__);
}
} while (frame->frameState == FrameState_Grabbing);
if (uvd->debug >= 2) {
dev_info(&uvd->dev->dev,
"%s: Grabbing done; state=%d. (%lu. bytes)\n",
__func__, frame->frameState,
frame->seqRead_Length);
}
if (frame->frameState == FrameState_Error) {
int ret = usbvideo_NewFrame(uvd, frameNum);
if (ret < 0) {
err("%s: usbvideo_NewFrame() failed (%d.)", __func__, ret);
return ret;
}
goto redo;
}
/* Note that we fall through to meet our destiny below */
}
case FrameState_Done:
/*
* Do all necessary postprocessing of data prepared in
* "interrupt" code and the collecting code above. The
* frame gets marked as FrameState_Done by queue parsing code.
* This status means that we collected enough data and
* most likely processed it as we went through. However
* the data may need postprocessing, such as deinterlacing
* or picture adjustments implemented in software (horror!)
*
* As soon as the frame becomes "final" it gets promoted to
* FrameState_Done_Hold status where it will remain until the
* caller consumed all the video data from the frame. Then
* the empty shell of ex-frame is thrown out for dogs to eat.
* But we, worried about pets, will recycle the frame!
*/
uvd->stats.frame_num++;
if ((uvd->flags & FLAGS_NO_DECODING) == 0) {
if (VALID_CALLBACK(uvd, postProcess))
GET_CALLBACK(uvd, postProcess)(uvd, frame);
if (frame->flags & USBVIDEO_FRAME_FLAG_SOFTWARE_CONTRAST)
usbvideo_SoftwareContrastAdjustment(uvd, frame);
}
frame->frameState = FrameState_Done_Hold;
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev,
"%s: Entered FrameState_Done_Hold state.\n",
__func__);
return 0;
case FrameState_Done_Hold:
/*
* We stay in this state indefinitely until someone external,
* like ioctl() or read() call finishes digesting the frame
* data. Then it will mark the frame as FrameState_Unused and
* it will be released back into the wild to roam freely.
*/
if (uvd->debug >= 2)
dev_info(&uvd->dev->dev,
"%s: FrameState_Done_Hold state.\n",
__func__);
return 0;
}
/* Catch-all for other cases. We shall not be here. */
err("%s: Invalid state %d.", __func__, frame->frameState);
frame->frameState = FrameState_Unused;
return 0;
}
/*
* usbvideo_DeinterlaceFrame()
*
* This procedure deinterlaces the given frame. Some cameras produce
* only half of scanlines - sometimes only even lines, sometimes only
* odd lines. The deinterlacing method is stored in frame->deinterlace
* variable.
*
* Here we scan the frame vertically and replace missing scanlines with
* average between surrounding ones - before and after. If we have no
* line above then we just copy next line. Similarly, if we need to
* create a last line then preceding line is used.
*/
void usbvideo_DeinterlaceFrame(struct uvd *uvd, struct usbvideo_frame *frame)
{
if ((uvd == NULL) || (frame == NULL))
return;
if ((frame->deinterlace == Deinterlace_FillEvenLines) ||
(frame->deinterlace == Deinterlace_FillOddLines))
{
const int v4l_linesize = VIDEOSIZE_X(frame->request) * V4L_BYTES_PER_PIXEL;
int i = (frame->deinterlace == Deinterlace_FillEvenLines) ? 0 : 1;
for (; i < VIDEOSIZE_Y(frame->request); i += 2) {
const unsigned char *fs1, *fs2;
unsigned char *fd;
int ip, in, j; /* Previous and next lines */
/*
* Need to average lines before and after 'i'.
* If we go out of bounds seeking those lines then
* we point back to existing line.
*/
ip = i - 1; /* First, get rough numbers */
in = i + 1;
/* Now validate */
if (ip < 0)
ip = in;
if (in >= VIDEOSIZE_Y(frame->request))
in = ip;
/* Sanity check */
if ((ip < 0) || (in < 0) ||
(ip >= VIDEOSIZE_Y(frame->request)) ||
(in >= VIDEOSIZE_Y(frame->request)))
{
err("Error: ip=%d. in=%d. req.height=%ld.",
ip, in, VIDEOSIZE_Y(frame->request));
break;
}
/* Now we need to average lines 'ip' and 'in' to produce line 'i' */
fs1 = frame->data + (v4l_linesize * ip);
fs2 = frame->data + (v4l_linesize * in);
fd = frame->data + (v4l_linesize * i);
/* Average lines around destination */
for (j=0; j < v4l_linesize; j++) {
fd[j] = (unsigned char)((((unsigned) fs1[j]) +
((unsigned)fs2[j])) >> 1);
}
}
}
/* Optionally display statistics on the screen */
if (uvd->flags & FLAGS_OVERLAY_STATS)
usbvideo_OverlayStats(uvd, frame);
}
EXPORT_SYMBOL(usbvideo_DeinterlaceFrame);
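/*
 * Editor's note: a minimal standalone sketch, not part of the original
 * driver, of the averaging scheme usbvideo_DeinterlaceFrame() applies:
 * each missing scanline becomes the byte-wise average of the lines above
 * and below it (passing the same line twice, as happens at the frame
 * edges, degenerates into a plain copy).
 */
static inline void usbvideo_example_fill_line(unsigned char *dst,
					      const unsigned char *above,
					      const unsigned char *below,
					      int linesize)
{
	int j;

	for (j = 0; j < linesize; j++)
		dst[j] = (unsigned char)(((unsigned)above[j] +
					  (unsigned)below[j]) >> 1);
}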
/*
* usbvideo_SoftwareContrastAdjustment()
*
* This code adjusts the contrast of the frame, assuming RGB24 format.
* As most software image processing, this job is CPU-intensive.
* Get a camera that supports hardware adjustment!
*
* History:
* 09-Feb-2001 Created.
*/
static void usbvideo_SoftwareContrastAdjustment(struct uvd *uvd,
struct usbvideo_frame *frame)
{
int i, j, v4l_linesize;
signed long adj;
const int ccm = 128; /* Color correction median - see below */
if ((uvd == NULL) || (frame == NULL)) {
err("%s: Illegal call.", __func__);
return;
}
adj = (uvd->vpic.contrast - 0x8000) >> 8; /* -128..+127 = -ccm..+(ccm-1)*/
RESTRICT_TO_RANGE(adj, -ccm, ccm+1);
if (adj == 0) {
/* In rare case of no adjustment */
return;
}
v4l_linesize = VIDEOSIZE_X(frame->request) * V4L_BYTES_PER_PIXEL;
for (i=0; i < VIDEOSIZE_Y(frame->request); i++) {
unsigned char *fd = frame->data + (v4l_linesize * i);
for (j=0; j < v4l_linesize; j++) {
signed long v = (signed long) fd[j];
/* Magnify up to 2 times, reduce down to zero */
v = 128 + ((ccm + adj) * (v - 128)) / ccm;
RESTRICT_TO_RANGE(v, 0, 0xFF); /* Must flatten tails */
fd[j] = (unsigned char) v;
}
}
}
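/*
 * Editor's note: an illustrative helper, not part of the original driver,
 * restating the per-pixel contrast formula used above:
 * v' = 128 + ((ccm + adj) * (v - 128)) / ccm, with ccm = 128.
 * For example, adj = +64 stretches the spread around mid-grey by 1.5x
 * (v = 200 -> 236), while adj = -64 halves it (v = 200 -> 164).
 */
static inline unsigned char usbvideo_example_contrast_px(unsigned char v, int adj)
{
	const int ccm = 128;	/* color correction median, as above */
	long out = 128 + (((long)(ccm + adj)) * ((int)v - 128)) / ccm;

	RESTRICT_TO_RANGE(out, 0, 0xFF);	/* flatten the tails */
	return (unsigned char)out;
}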
MODULE_LICENSE("GPL");
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef usbvideo_h
#define usbvideo_h
#include "videodev.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <linux/usb.h>
#include <linux/mutex.h>
/* Most helpful debugging aid */
#define assert(expr) ((void) ((expr) ? 0 : (err("assert failed at line %d",__LINE__))))
#define USBVIDEO_REPORT_STATS 1 /* Set to 0 to block statistics on close */
/* Bit flags (options) */
#define FLAGS_RETRY_VIDIOCSYNC (1 << 0)
#define FLAGS_MONOCHROME (1 << 1)
#define FLAGS_DISPLAY_HINTS (1 << 2)
#define FLAGS_OVERLAY_STATS (1 << 3)
#define FLAGS_FORCE_TESTPATTERN (1 << 4)
#define FLAGS_SEPARATE_FRAMES (1 << 5)
#define FLAGS_CLEAN_FRAMES (1 << 6)
#define FLAGS_NO_DECODING (1 << 7)
/* Bit flags for frames (apply to the frame where they are specified) */
#define USBVIDEO_FRAME_FLAG_SOFTWARE_CONTRAST (1 << 0)
/* Camera capabilities (maximum) */
#define CAMERA_URB_FRAMES 32
#define CAMERA_MAX_ISO_PACKET 1023 /* 1022 actually sent by camera */
#define FRAMES_PER_DESC (CAMERA_URB_FRAMES)
#define FRAME_SIZE_PER_DESC (CAMERA_MAX_ISO_PACKET)
/* This macro restricts an int variable to an inclusive range */
#define RESTRICT_TO_RANGE(v,mi,ma) { if ((v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); }
#define V4L_BYTES_PER_PIXEL 3 /* Because we produce RGB24 */
/*
 * Use these macros to construct constants for different video sizes.
 * We have to deal with different video sizes that have to be
 * configured in the device or compared against when we receive
 * data. Normally one would define a bunch of VIDEOSIZE_x_by_y
 * #defines and that would be the end of the story. However, that
 * approach does not allow converting between real pixel sizes and
 * the constant (integer) value that may be used to tag a frame or
 * whatever. The set of macros below constructs videosize constants
 * from the pixel size and allows the pixel size to be reconstructed
 * from the combined value later.
*/
#define VIDEOSIZE(x,y) (((x) & 0xFFFFL) | (((y) & 0xFFFFL) << 16))
#define VIDEOSIZE_X(vs) ((vs) & 0xFFFFL)
#define VIDEOSIZE_Y(vs) (((vs) >> 16) & 0xFFFFL)
typedef unsigned long videosize_t;
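/*
 * Editor's note: usage sketch, not part of the original header. A
 * videosize_t simply packs the width into the low 16 bits and the height
 * into the next 16 bits, so 320x240 becomes 0x00F00140.
 */
static inline videosize_t videosize_example(void)
{
	videosize_t vs = VIDEOSIZE(320, 240);	/* == 0x00F00140 */

	/* VIDEOSIZE_X(vs) == 320, VIDEOSIZE_Y(vs) == 240 */
	return vs;
}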
/*
* This macro checks if the camera is still operational. The 'uvd'
* pointer must be valid, uvd->dev must be valid, we are not
* removing the device and the device has not erred on us.
*/
#define CAMERA_IS_OPERATIONAL(uvd) (\
(uvd != NULL) && \
((uvd)->dev != NULL) && \
((uvd)->last_error == 0) && \
(!(uvd)->remove_pending))
/*
* We use macros to do YUV -> RGB conversion because this is
* very important for speed and totally unimportant for size.
*
* YUV -> RGB Conversion
* ---------------------
*
* B = 1.164*(Y-16) + 2.018*(V-128)
* G = 1.164*(Y-16) - 0.813*(U-128) - 0.391*(V-128)
* R = 1.164*(Y-16) + 1.596*(U-128)
*
* If you fancy integer arithmetics (as you should), hear this:
*
* 65536*B = 76284*(Y-16) + 132252*(V-128)
* 65536*G = 76284*(Y-16) - 53281*(U-128) - 25625*(V-128)
* 65536*R = 76284*(Y-16) + 104595*(U-128)
*
* Make sure the output values are within [0..255] range.
*/
#define LIMIT_RGB(x) (((x) < 0) ? 0 : (((x) > 255) ? 255 : (x)))
#define YUV_TO_RGB_BY_THE_BOOK(my,mu,mv,mr,mg,mb) { \
int mm_y, mm_yc, mm_u, mm_v, mm_r, mm_g, mm_b; \
mm_y = (my) - 16; \
mm_u = (mu) - 128; \
mm_v = (mv) - 128; \
mm_yc= mm_y * 76284; \
mm_b = (mm_yc + 132252*mm_v ) >> 16; \
mm_g = (mm_yc - 53281*mm_u - 25625*mm_v ) >> 16; \
mm_r = (mm_yc + 104595*mm_u ) >> 16; \
mb = LIMIT_RGB(mm_b); \
mg = LIMIT_RGB(mm_g); \
mr = LIMIT_RGB(mm_r); \
}
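/*
 * Editor's note: illustrative wrapper, not part of the original header,
 * exercising the macro above on a single pixel. For Y = 128, U = V = 128
 * the chroma terms vanish and all three channels come out as
 * (112 * 76284) >> 16 = 130, i.e. the expected 1.164 * (Y - 16) luma
 * expansion of a neutral grey.
 */
static inline void yuv_to_rgb_example(int y, int u, int v,
				      int *r, int *g, int *b)
{
	YUV_TO_RGB_BY_THE_BOOK(y, u, v, *r, *g, *b);
}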
#define RING_QUEUE_SIZE (128*1024) /* Must be a power of 2 */
#define RING_QUEUE_ADVANCE_INDEX(rq,ind,n) (rq)->ind = ((rq)->ind + (n)) & ((rq)->length-1)
#define RING_QUEUE_DEQUEUE_BYTES(rq,n) RING_QUEUE_ADVANCE_INDEX(rq,ri,n)
#define RING_QUEUE_PEEK(rq,ofs) ((rq)->queue[((ofs) + (rq)->ri) & ((rq)->length-1)])
struct RingQueue {
unsigned char *queue; /* Data from the Isoc data pump */
int length; /* How many bytes allocated for the queue */
int wi; /* That's where we write */
int ri; /* Read from here until you hit write index */
wait_queue_head_t wqh; /* Processes waiting */
};
enum ScanState {
ScanState_Scanning, /* Scanning for header */
ScanState_Lines /* Parsing lines */
};
/* Completion states of the data parser */
enum ParseState {
scan_Continue, /* Just parse next item */
scan_NextFrame, /* Frame done, send it to V4L */
scan_Out, /* Not enough data for frame */
scan_EndParse /* End parsing */
};
enum FrameState {
FrameState_Unused, /* Unused (no MCAPTURE) */
FrameState_Ready, /* Ready to start grabbing */
FrameState_Grabbing, /* In the process of being grabbed into */
FrameState_Done, /* Finished grabbing, but not been synced yet */
FrameState_Done_Hold, /* Are syncing or reading */
FrameState_Error, /* Something bad happened while processing */
};
/*
* Some frames may contain only even or odd lines. This type
* specifies what type of deinterlacing is required.
*/
enum Deinterlace {
Deinterlace_None=0,
Deinterlace_FillOddLines,
Deinterlace_FillEvenLines
};
#define USBVIDEO_NUMFRAMES 2 /* How many frames we work with */
#define USBVIDEO_NUMSBUF 2 /* How many URBs linked in a ring */
/* This structure represents one Isoc request - URB and buffer */
struct usbvideo_sbuf {
char *data;
struct urb *urb;
};
struct usbvideo_frame {
char *data; /* Frame buffer */
unsigned long header; /* Significant bits from the header */
videosize_t canvas; /* The canvas (max. image) allocated */
videosize_t request; /* That's what the application asked for */
unsigned short palette; /* The desired format */
enum FrameState frameState;/* State of grabbing */
enum ScanState scanstate; /* State of scanning */
enum Deinterlace deinterlace;
int flags; /* USBVIDEO_FRAME_FLAG_xxx bit flags */
int curline; /* Line of frame we're working on */
long seqRead_Length; /* Raw data length of frame */
long seqRead_Index; /* Amount of data that has been already read */
void *user; /* Additional data that user may need */
};
/* Statistics that can be overlaid on screen */
struct usbvideo_statistics {
unsigned long frame_num; /* Sequential number of the frame */
unsigned long urb_count; /* How many URBs we received so far */
unsigned long urb_length; /* Length of last URB */
unsigned long data_count; /* How many bytes we received */
unsigned long header_count; /* How many frame headers we found */
unsigned long iso_skip_count; /* How many empty ISO packets received */
unsigned long iso_err_count; /* How many bad ISO packets received */
};
struct usbvideo;
struct uvd {
struct video_device vdev; /* Must be the first field! */
struct usb_device *dev;
struct usbvideo *handle; /* Points back to the struct usbvideo */
void *user_data; /* Camera-dependent data */
int user_size; /* Size of that camera-dependent data */
int debug; /* Debug level for usbvideo */
unsigned char iface; /* Video interface number */
unsigned char video_endp;
unsigned char ifaceAltActive;
unsigned char ifaceAltInactive; /* Alt settings */
unsigned long flags; /* FLAGS_USBVIDEO_xxx */
unsigned long paletteBits; /* Which palettes we accept? */
unsigned short defaultPalette; /* What palette to use for read() */
struct mutex lock;
int user; /* user count for exclusive use */
videosize_t videosize; /* Current setting */
videosize_t canvas; /* This is the width,height of the V4L canvas */
int max_frame_size; /* Bytes in one video frame */
int uvd_used; /* Is this structure in use? */
int streaming; /* Are we streaming Isochronous? */
int grabbing; /* Are we grabbing? */
int settingsAdjusted; /* Have we adjusted contrast etc.? */
int last_error; /* What calamity struck us? */
char *fbuf; /* Videodev buffer area */
int fbuf_size; /* Videodev buffer size */
int curframe;
int iso_packet_len; /* Videomode-dependent, saves bus bandwidth */
struct RingQueue dp; /* Isoc data pump */
struct usbvideo_frame frame[USBVIDEO_NUMFRAMES];
struct usbvideo_sbuf sbuf[USBVIDEO_NUMSBUF];
volatile int remove_pending; /* If set then about to exit */
struct video_picture vpic, vpic_old; /* Picture settings */
struct video_capability vcap; /* Video capabilities */
struct video_channel vchan; /* May be used for tuner support */
struct usbvideo_statistics stats;
char videoName[32]; /* Holds name like "video7" */
};
/*
* usbvideo callbacks (virtual methods). They are set when usbvideo
* services are registered. All of these default to NULL, except those
* that default to usbvideo-provided methods.
*/
struct usbvideo_cb {
int (*probe)(struct usb_interface *, const struct usb_device_id *);
void (*userFree)(struct uvd *);
void (*disconnect)(struct usb_interface *);
int (*setupOnOpen)(struct uvd *);
void (*videoStart)(struct uvd *);
void (*videoStop)(struct uvd *);
void (*processData)(struct uvd *, struct usbvideo_frame *);
void (*postProcess)(struct uvd *, struct usbvideo_frame *);
void (*adjustPicture)(struct uvd *);
int (*getFPS)(struct uvd *);
int (*overlayHook)(struct uvd *, struct usbvideo_frame *);
int (*getFrame)(struct uvd *, int);
int (*startDataPump)(struct uvd *uvd);
void (*stopDataPump)(struct uvd *uvd);
int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
};
struct usbvideo {
int num_cameras; /* As allocated */
struct usb_driver usbdrv; /* Interface to the USB stack */
char drvName[80]; /* Driver name */
struct mutex lock; /* Mutex protecting camera structures */
struct usbvideo_cb cb; /* Table of callbacks (virtual methods) */
struct video_device vdt; /* Video device template */
struct uvd *cam; /* Array of camera structures */
struct module *md_module; /* Minidriver module */
};
/*
* This macro retrieves callback address from the struct uvd object.
* No validity checks are done here, so be sure to check the
* callback beforehand with VALID_CALLBACK.
*/
#define GET_CALLBACK(uvd,cbName) ((uvd)->handle->cb.cbName)
/*
 * This macro returns either the callback pointer or NULL. It is a safe
 * macro, meaning that most components of the data structures involved
 * may be NULL - this only results in NULL being returned. You may
 * wish to use this macro to make sure that the callback is callable.
 * However, keep in mind that those checks take time.
*/
#define VALID_CALLBACK(uvd,cbName) ((((uvd) != NULL) && \
((uvd)->handle != NULL)) ? GET_CALLBACK(uvd,cbName) : NULL)
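/*
 * Editor's note: usage sketch, not part of the original header, showing the
 * guarded invocation pattern the two macros are meant for: minidriver
 * methods that were left NULL are simply skipped.
 */
static inline int usbvideo_example_get_fps(struct uvd *uvd)
{
	return VALID_CALLBACK(uvd, getFPS) ? GET_CALLBACK(uvd, getFPS)(uvd) : 0;
}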
int RingQueue_Dequeue(struct RingQueue *rq, unsigned char *dst, int len);
int RingQueue_Enqueue(struct RingQueue *rq, const unsigned char *cdata, int n);
void RingQueue_WakeUpInterruptible(struct RingQueue *rq);
void RingQueue_Flush(struct RingQueue *rq);
static inline int RingQueue_GetLength(const struct RingQueue *rq)
{
return (rq->wi - rq->ri + rq->length) & (rq->length-1);
}
static inline int RingQueue_GetFreeSpace(const struct RingQueue *rq)
{
return rq->length - RingQueue_GetLength(rq);
}
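/*
 * Editor's note: worked example, not part of the original header, of the
 * power-of-two index arithmetic above using a hypothetical length of 8:
 * with ri = 6 and wi = 2 the queue holds (2 - 6 + 8) & 7 = 4 bytes and has
 * 8 - 4 = 4 bytes free; RING_QUEUE_DEQUEUE_BYTES(rq, 4) then wraps ri to
 * (6 + 4) & 7 = 2, leaving the queue empty (ri == wi).
 */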
void usbvideo_DrawLine(
struct usbvideo_frame *frame,
int x1, int y1,
int x2, int y2,
unsigned char cr, unsigned char cg, unsigned char cb);
void usbvideo_HexDump(const unsigned char *data, int len);
void usbvideo_SayAndWait(const char *what);
void usbvideo_TestPattern(struct uvd *uvd, int fullframe, int pmode);
/* Memory allocation routines */
unsigned long usbvideo_kvirt_to_pa(unsigned long adr);
int usbvideo_register(
struct usbvideo **pCams,
const int num_cams,
const int num_extra,
const char *driverName,
const struct usbvideo_cb *cbTable,
struct module *md,
const struct usb_device_id *id_table);
struct uvd *usbvideo_AllocateDevice(struct usbvideo *cams);
int usbvideo_RegisterVideoDevice(struct uvd *uvd);
void usbvideo_Deregister(struct usbvideo **uvt);
int usbvideo_v4l_initialize(struct video_device *dev);
void usbvideo_DeinterlaceFrame(struct uvd *uvd, struct usbvideo_frame *frame);
/*
* This code performs bounds checking - use it when working with
* new formats, or else you may get oopses all over the place.
 * If a pixel falls out of bounds, it gets shoved back (as close to the
 * place of offence as possible) and is painted bright red.
 *
 * There are two important concepts: the frame width and height, and the
 * V4L canvas width and height. The former is the area requested by
 * the application for this very frame. The latter is the largest
 * possible frame that we can serve (we advertise that via V4L ioctl).
 * The frame data is expected to be formatted as lines of length
 * VIDEOSIZE_X(fr->request), VIDEOSIZE_Y(fr->request) lines in total.
*/
static inline void RGB24_PUTPIXEL(
struct usbvideo_frame *fr,
int ix, int iy,
unsigned char vr,
unsigned char vg,
unsigned char vb)
{
register unsigned char *pf;
int limiter = 0, mx, my;
mx = ix;
my = iy;
if (mx < 0) {
mx=0;
limiter++;
} else if (mx >= VIDEOSIZE_X((fr)->request)) {
mx= VIDEOSIZE_X((fr)->request) - 1;
limiter++;
}
if (my < 0) {
my = 0;
limiter++;
} else if (my >= VIDEOSIZE_Y((fr)->request)) {
my = VIDEOSIZE_Y((fr)->request) - 1;
limiter++;
}
	/* Use the clamped coordinates so out-of-bounds pixels really are shoved back */
	pf = (fr)->data + V4L_BYTES_PER_PIXEL*(my*VIDEOSIZE_X((fr)->request) + mx);
if (limiter) {
*pf++ = 0;
*pf++ = 0;
*pf++ = 0xFF;
} else {
*pf++ = (vb);
*pf++ = (vg);
*pf++ = (vr);
}
}
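/*
 * Editor's note: worked example, not part of the original header. For a
 * 320x240 request, RGB24_PUTPIXEL(fr, 10, 5, r, g, b) stores the B, G, R
 * bytes at offset 3 * (5 * 320 + 10) = 4830 into fr->data, while a call
 * with ix = -3 or iy = 400 trips the limiter and paints bright red
 * (B, G, R = 0, 0, 0xFF) at the clamped position instead.
 */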
#endif /* usbvideo_h */
/*
* USB ViCam WebCam driver
* Copyright (c) 2002 Joe Burks (jburks@wavicle.org),
* Christopher L Cheney (ccheney@cheney.cx),
* Pavel Machek (pavel@ucw.cz),
* John Tyner (jtyner@cs.ucr.edu),
* Monroe Williams (monroe@pobox.com)
*
* Supports 3COM HomeConnect PC Digital WebCam
* Supports Compro PS39U WebCam
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* This source code is based heavily on the CPiA webcam driver which was
* written by Peter Pregler, Scott J. Bertin and Johannes Erdfelt
*
* Portions of this code were also copied from usbvideo.c
*
* Special thanks to the whole team at Sourceforge for help making
* this driver become a reality. Notably:
* Andy Armstrong who reverse engineered the color encoding and
* Pavel Machek and Chris Cheney who worked on reverse engineering the
* camera controls and wrote the first generation driver.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include "videodev.h"
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#include "usbvideo.h"
// #define VICAM_DEBUG
#ifdef VICAM_DEBUG
#define ADBG(lineno,fmt,args...) printk(fmt, jiffies, __func__, lineno, ##args)
#define DBG(fmt,args...) ADBG((__LINE__),KERN_DEBUG __FILE__"(%ld):%s (%d):"fmt,##args)
#else
#define DBG(fmt,args...) do {} while(0)
#endif
#define DRIVER_AUTHOR "Joe Burks, jburks@wavicle.org"
#define DRIVER_DESC "ViCam WebCam Driver"
/* Define these values to match your device */
#define USB_VICAM_VENDOR_ID 0x04c1
#define USB_VICAM_PRODUCT_ID 0x009d
#define USB_COMPRO_VENDOR_ID 0x0602
#define USB_COMPRO_PRODUCT_ID 0x1001
#define VICAM_BYTES_PER_PIXEL 3
#define VICAM_MAX_READ_SIZE (512*242+128)
#define VICAM_MAX_FRAME_SIZE (VICAM_BYTES_PER_PIXEL*320*240)
#define VICAM_FRAMES 2
#define VICAM_HEADER_SIZE 64
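/*
 * Editor's note: sizes implied by the constants above (not original text).
 * One raw capture is read into a 512 * 242 + 128 = 124032 byte buffer; the
 * decoder skips the 64-byte header and resamples the 512x242 sensor data
 * down to a 320x240 RGB24 frame of 3 * 320 * 240 = 230400 bytes.
 */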
/* rvmalloc / rvfree copied from usbvideo.c
*
 * Not sure why these are not yet non-static functions that I can reference
 * through usbvideo.h, the way it is done in 2.4.20. I bet this will get
 * fixed sometime in the future.
*
*/
static void *rvmalloc(unsigned long size)
{
void *mem;
unsigned long adr;
size = PAGE_ALIGN(size);
mem = vmalloc_32(size);
if (!mem)
return NULL;
memset(mem, 0, size); /* Clear the ram out, no junk to the user */
adr = (unsigned long) mem;
while (size > 0) {
SetPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
return mem;
}
static void rvfree(void *mem, unsigned long size)
{
unsigned long adr;
if (!mem)
return;
adr = (unsigned long) mem;
while ((long) size > 0) {
ClearPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
vfree(mem);
}
struct vicam_camera {
u16 shutter_speed; // capture shutter speed
u16 gain; // capture gain
u8 *raw_image; // raw data captured from the camera
u8 *framebuf; // processed data in RGB24 format
u8 *cntrlbuf; // area used to send control msgs
struct video_device vdev; // v4l video device
struct usb_device *udev; // usb device
/* guard against simultaneous accesses to the camera */
struct mutex cam_lock;
int is_initialized;
u8 open_count;
u8 bulkEndpoint;
int needsDummyRead;
};
static int vicam_probe( struct usb_interface *intf, const struct usb_device_id *id);
static void vicam_disconnect(struct usb_interface *intf);
static void read_frame(struct vicam_camera *cam, int framenum);
static void vicam_decode_color(const u8 *, u8 *);
static int __send_control_msg(struct vicam_camera *cam,
u8 request,
u16 value,
u16 index,
unsigned char *cp,
u16 size)
{
int status;
/* cp must be memory that has been allocated by kmalloc */
status = usb_control_msg(cam->udev,
usb_sndctrlpipe(cam->udev, 0),
request,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index,
cp, size, 1000);
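	/*
	 * usb_control_msg() returns the number of bytes transferred on
	 * success or a negative errno; clamping positive values to 0 below
	 * lets callers treat any non-negative result as success.
	 */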
status = min(status, 0);
if (status < 0) {
printk(KERN_INFO "Failed sending control message, error %d.\n",
status);
}
return status;
}
static int send_control_msg(struct vicam_camera *cam,
u8 request,
u16 value,
u16 index,
unsigned char *cp,
u16 size)
{
int status = -ENODEV;
mutex_lock(&cam->cam_lock);
if (cam->udev) {
status = __send_control_msg(cam, request, value,
index, cp, size);
}
mutex_unlock(&cam->cam_lock);
return status;
}
static int
initialize_camera(struct vicam_camera *cam)
{
int err;
const struct ihex_binrec *rec;
const struct firmware *uninitialized_var(fw);
err = request_ihex_firmware(&fw, "vicam/firmware.fw", &cam->udev->dev);
if (err) {
printk(KERN_ERR "Failed to load \"vicam/firmware.fw\": %d\n",
err);
return err;
}
for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
memcpy(cam->cntrlbuf, rec->data, be16_to_cpu(rec->len));
err = send_control_msg(cam, 0xff, 0, 0,
cam->cntrlbuf, be16_to_cpu(rec->len));
if (err)
break;
}
release_firmware(fw);
return err;
}
static int
set_camera_power(struct vicam_camera *cam, int state)
{
int status;
if ((status = send_control_msg(cam, 0x50, state, 0, NULL, 0)) < 0)
return status;
if (state) {
send_control_msg(cam, 0x55, 1, 0, NULL, 0);
}
return 0;
}
static long
vicam_ioctl(struct file *file, unsigned int ioctlnr, unsigned long arg)
{
void __user *user_arg = (void __user *)arg;
struct vicam_camera *cam = file->private_data;
long retval = 0;
if (!cam)
return -ENODEV;
switch (ioctlnr) {
/* query capabilities */
case VIDIOCGCAP:
{
struct video_capability b;
DBG("VIDIOCGCAP\n");
memset(&b, 0, sizeof(b));
strcpy(b.name, "ViCam-based Camera");
b.type = VID_TYPE_CAPTURE;
b.channels = 1;
b.audios = 0;
b.maxwidth = 320; /* VIDEOSIZE_CIF */
b.maxheight = 240;
b.minwidth = 320; /* VIDEOSIZE_48_48 */
b.minheight = 240;
if (copy_to_user(user_arg, &b, sizeof(b)))
retval = -EFAULT;
break;
}
/* get/set video source - we are a camera and nothing else */
case VIDIOCGCHAN:
{
struct video_channel v;
DBG("VIDIOCGCHAN\n");
if (copy_from_user(&v, user_arg, sizeof(v))) {
retval = -EFAULT;
break;
}
if (v.channel != 0) {
retval = -EINVAL;
break;
}
v.channel = 0;
strcpy(v.name, "Camera");
v.tuners = 0;
v.flags = 0;
v.type = VIDEO_TYPE_CAMERA;
v.norm = 0;
if (copy_to_user(user_arg, &v, sizeof(v)))
retval = -EFAULT;
break;
}
case VIDIOCSCHAN:
{
int v;
if (copy_from_user(&v, user_arg, sizeof(v)))
retval = -EFAULT;
DBG("VIDIOCSCHAN %d\n", v);
if (retval == 0 && v != 0)
retval = -EINVAL;
break;
}
/* image properties */
case VIDIOCGPICT:
{
struct video_picture vp;
DBG("VIDIOCGPICT\n");
memset(&vp, 0, sizeof (struct video_picture));
vp.brightness = cam->gain << 8;
vp.depth = 24;
vp.palette = VIDEO_PALETTE_RGB24;
if (copy_to_user(user_arg, &vp, sizeof (struct video_picture)))
retval = -EFAULT;
break;
}
case VIDIOCSPICT:
{
struct video_picture vp;
if (copy_from_user(&vp, user_arg, sizeof(vp))) {
retval = -EFAULT;
break;
}
DBG("VIDIOCSPICT depth = %d, pal = %d\n", vp.depth,
vp.palette);
cam->gain = vp.brightness >> 8;
if (vp.depth != 24
|| vp.palette != VIDEO_PALETTE_RGB24)
retval = -EINVAL;
break;
}
/* get/set capture window */
case VIDIOCGWIN:
{
struct video_window vw;
vw.x = 0;
vw.y = 0;
vw.width = 320;
vw.height = 240;
vw.chromakey = 0;
vw.flags = 0;
vw.clips = NULL;
vw.clipcount = 0;
DBG("VIDIOCGWIN\n");
if (copy_to_user(user_arg, (void *)&vw, sizeof(vw)))
retval = -EFAULT;
			// I'm not sure what the deal with a capture window is; it is very
			// poorly described in the doc, so I won't support it for now.
break;
}
case VIDIOCSWIN:
{
struct video_window vw;
if (copy_from_user(&vw, user_arg, sizeof(vw))) {
retval = -EFAULT;
break;
}
DBG("VIDIOCSWIN %d x %d\n", vw.width, vw.height);
if ( vw.width != 320 || vw.height != 240 )
retval = -EFAULT;
break;
}
/* mmap interface */
case VIDIOCGMBUF:
{
struct video_mbuf vm;
int i;
DBG("VIDIOCGMBUF\n");
memset(&vm, 0, sizeof (vm));
vm.size =
VICAM_MAX_FRAME_SIZE * VICAM_FRAMES;
vm.frames = VICAM_FRAMES;
for (i = 0; i < VICAM_FRAMES; i++)
vm.offsets[i] = VICAM_MAX_FRAME_SIZE * i;
if (copy_to_user(user_arg, (void *)&vm, sizeof(vm)))
retval = -EFAULT;
break;
}
case VIDIOCMCAPTURE:
{
struct video_mmap vm;
// int video_size;
if (copy_from_user((void *)&vm, user_arg, sizeof(vm))) {
retval = -EFAULT;
break;
}
DBG("VIDIOCMCAPTURE frame=%d, height=%d, width=%d, format=%d.\n",vm.frame,vm.width,vm.height,vm.format);
if ( vm.frame >= VICAM_FRAMES || vm.format != VIDEO_PALETTE_RGB24 )
retval = -EINVAL;
// in theory right here we'd start the image capturing
// (fill in a bulk urb and submit it asynchronously)
//
// Instead we're going to do a total hack job for now and
// retrieve the frame in VIDIOCSYNC
break;
}
case VIDIOCSYNC:
{
int frame;
if (copy_from_user((void *)&frame, user_arg, sizeof(int))) {
retval = -EFAULT;
break;
}
DBG("VIDIOCSYNC: %d\n", frame);
read_frame(cam, frame);
vicam_decode_color(cam->raw_image,
cam->framebuf +
frame * VICAM_MAX_FRAME_SIZE );
break;
}
/* pointless to implement overlay with this camera */
case VIDIOCCAPTURE:
case VIDIOCGFBUF:
case VIDIOCSFBUF:
case VIDIOCKEY:
retval = -EINVAL;
break;
/* tuner interface - we have none */
case VIDIOCGTUNER:
case VIDIOCSTUNER:
case VIDIOCGFREQ:
case VIDIOCSFREQ:
retval = -EINVAL;
break;
/* audio interface - we have none */
case VIDIOCGAUDIO:
case VIDIOCSAUDIO:
retval = -EINVAL;
break;
default:
retval = -ENOIOCTLCMD;
break;
}
return retval;
}
static int
vicam_open(struct file *file)
{
struct vicam_camera *cam = video_drvdata(file);
DBG("open\n");
if (!cam) {
printk(KERN_ERR
"vicam video_device improperly initialized");
return -EINVAL;
}
/* cam_lock/open_count protects us from simultaneous opens
* ... for now. we probably shouldn't rely on this fact forever.
*/
mutex_lock(&cam->cam_lock);
if (cam->open_count > 0) {
printk(KERN_INFO
"vicam_open called on already opened camera");
mutex_unlock(&cam->cam_lock);
return -EBUSY;
}
cam->raw_image = kmalloc(VICAM_MAX_READ_SIZE, GFP_KERNEL);
if (!cam->raw_image) {
mutex_unlock(&cam->cam_lock);
return -ENOMEM;
}
cam->framebuf = rvmalloc(VICAM_MAX_FRAME_SIZE * VICAM_FRAMES);
if (!cam->framebuf) {
kfree(cam->raw_image);
mutex_unlock(&cam->cam_lock);
return -ENOMEM;
}
cam->cntrlbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!cam->cntrlbuf) {
kfree(cam->raw_image);
rvfree(cam->framebuf, VICAM_MAX_FRAME_SIZE * VICAM_FRAMES);
mutex_unlock(&cam->cam_lock);
return -ENOMEM;
}
cam->needsDummyRead = 1;
cam->open_count++;
file->private_data = cam;
mutex_unlock(&cam->cam_lock);
// First upload firmware, then turn the camera on
if (!cam->is_initialized) {
initialize_camera(cam);
cam->is_initialized = 1;
}
set_camera_power(cam, 1);
return 0;
}
static int
vicam_close(struct file *file)
{
struct vicam_camera *cam = file->private_data;
int open_count;
struct usb_device *udev;
DBG("close\n");
/* it's not the end of the world if
* we fail to turn the camera off.
*/
set_camera_power(cam, 0);
kfree(cam->raw_image);
rvfree(cam->framebuf, VICAM_MAX_FRAME_SIZE * VICAM_FRAMES);
kfree(cam->cntrlbuf);
mutex_lock(&cam->cam_lock);
cam->open_count--;
open_count = cam->open_count;
udev = cam->udev;
mutex_unlock(&cam->cam_lock);
if (!open_count && !udev) {
kfree(cam);
}
return 0;
}
static void vicam_decode_color(const u8 *data, u8 *rgb)
{
/* vicam_decode_color - Convert from Vicam Y-Cr-Cb to RGB
* Copyright (C) 2002 Monroe Williams (monroe@pobox.com)
*/
int i, prevY, nextY;
prevY = 512;
nextY = 512;
data += VICAM_HEADER_SIZE;
for( i = 0; i < 240; i++, data += 512 ) {
const int y = ( i * 242 ) / 240;
int j, prevX, nextX;
int Y, Cr, Cb;
if ( y == 242 - 1 ) {
nextY = -512;
}
prevX = 1;
nextX = 1;
for ( j = 0; j < 320; j++, rgb += 3 ) {
const int x = ( j * 512 ) / 320;
const u8 * const src = &data[x];
if ( x == 512 - 1 ) {
nextX = -1;
}
Cr = ( src[prevX] - src[0] ) +
( src[nextX] - src[0] );
Cr /= 2;
Cb = ( src[prevY] - src[prevX + prevY] ) +
( src[prevY] - src[nextX + prevY] ) +
( src[nextY] - src[prevX + nextY] ) +
( src[nextY] - src[nextX + nextY] );
Cb /= 4;
Y = 1160 * ( src[0] + ( Cr / 2 ) - 16 );
if ( i & 1 ) {
int Ct = Cr;
Cr = Cb;
Cb = Ct;
}
if ( ( x ^ i ) & 1 ) {
Cr = -Cr;
Cb = -Cb;
}
rgb[0] = clamp( ( ( Y + ( 2017 * Cb ) ) +
500 ) / 900, 0, 255 );
rgb[1] = clamp( ( ( Y - ( 392 * Cb ) -
( 813 * Cr ) ) +
500 ) / 1000, 0, 255 );
rgb[2] = clamp( ( ( Y + ( 1594 * Cr ) ) +
500 ) / 1300, 0, 255 );
prevX = -1;
}
prevY = -512;
}
}
static void
read_frame(struct vicam_camera *cam, int framenum)
{
unsigned char *request = cam->cntrlbuf;
int realShutter;
int n;
int actual_length;
if (cam->needsDummyRead) {
cam->needsDummyRead = 0;
read_frame(cam, framenum);
}
memset(request, 0, 16);
request[0] = cam->gain; // 0 = 0% gain, FF = 100% gain
request[1] = 0; // 512x242 capture
request[2] = 0x90; // the function of these two bytes
request[3] = 0x07; // is not yet understood
if (cam->shutter_speed > 60) {
// Short exposure
realShutter =
((-15631900 / cam->shutter_speed) + 260533) / 1000;
request[4] = realShutter & 0xFF;
request[5] = (realShutter >> 8) & 0xFF;
request[6] = 0x03;
request[7] = 0x01;
} else {
// Long exposure
realShutter = 15600 / cam->shutter_speed - 1;
request[4] = 0;
request[5] = 0;
request[6] = realShutter & 0xFF;
request[7] = realShutter >> 8;
}
	// Per John Markus Bjørndalen, the byte at index 8 causes problems if it isn't 0
request[8] = 0;
// bytes 9-15 do not seem to affect exposure or image quality
mutex_lock(&cam->cam_lock);
if (!cam->udev) {
goto done;
}
n = __send_control_msg(cam, 0x51, 0x80, 0, request, 16);
if (n < 0) {
printk(KERN_ERR
" Problem sending frame capture control message");
goto done;
}
n = usb_bulk_msg(cam->udev,
usb_rcvbulkpipe(cam->udev, cam->bulkEndpoint),
cam->raw_image,
512 * 242 + 128, &actual_length, 10000);
if (n < 0) {
printk(KERN_ERR "Problem during bulk read of frame data: %d\n",
n);
}
done:
mutex_unlock(&cam->cam_lock);
}
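/*
 * Editor's note: worked example, not part of the original driver, of the
 * exposure setup in read_frame() above. For shutter_speed = 100 (short
 * exposure) realShutter = ((-15631900 / 100) + 260533) / 1000 = 104, which
 * goes into request[4..5]; for shutter_speed = 30 (long exposure)
 * realShutter = 15600 / 30 - 1 = 519 = 0x0207, so request[6] = 0x07 and
 * request[7] = 0x02.
 */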
static ssize_t
vicam_read( struct file *file, char __user *buf, size_t count, loff_t *ppos )
{
struct vicam_camera *cam = file->private_data;
DBG("read %d bytes.\n", (int) count);
if (*ppos >= VICAM_MAX_FRAME_SIZE) {
*ppos = 0;
return 0;
}
if (*ppos == 0) {
read_frame(cam, 0);
vicam_decode_color(cam->raw_image,
cam->framebuf +
0 * VICAM_MAX_FRAME_SIZE);
}
count = min_t(size_t, count, VICAM_MAX_FRAME_SIZE - *ppos);
if (copy_to_user(buf, &cam->framebuf[*ppos], count)) {
count = -EFAULT;
} else {
*ppos += count;
}
if (count == VICAM_MAX_FRAME_SIZE) {
*ppos = 0;
}
return count;
}
static int
vicam_mmap(struct file *file, struct vm_area_struct *vma)
{
// TODO: allocate the raw frame buffer if necessary
unsigned long page, pos;
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end-vma->vm_start;
struct vicam_camera *cam = file->private_data;
if (!cam)
return -ENODEV;
DBG("vicam_mmap: %ld\n", size);
	/* We let mmap allocate as much as it wants because Linux was adding 2048
	 * bytes to the size the application requested for mmap, and that was
	 * screwing apps up.
if (size > VICAM_FRAMES*VICAM_MAX_FRAME_SIZE)
return -EINVAL;
*/
pos = (unsigned long)cam->framebuf;
while (size > 0) {
page = vmalloc_to_pfn((void *)pos);
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
return -EAGAIN;
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
else
size = 0;
}
return 0;
}
static const struct v4l2_file_operations vicam_fops = {
.owner = THIS_MODULE,
.open = vicam_open,
.release = vicam_close,
.read = vicam_read,
.mmap = vicam_mmap,
.ioctl = vicam_ioctl,
};
static struct video_device vicam_template = {
.name = "ViCam-based USB Camera",
.fops = &vicam_fops,
.release = video_device_release_empty,
};
/* table of devices that work with this driver */
static struct usb_device_id vicam_table[] = {
{USB_DEVICE(USB_VICAM_VENDOR_ID, USB_VICAM_PRODUCT_ID)},
{USB_DEVICE(USB_COMPRO_VENDOR_ID, USB_COMPRO_PRODUCT_ID)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, vicam_table);
static struct usb_driver vicam_driver = {
.name = "vicam",
.probe = vicam_probe,
.disconnect = vicam_disconnect,
.id_table = vicam_table
};
/**
* vicam_probe
* @intf: the interface
* @id: the device id
*
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
static int
vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
int bulkEndpoint = 0;
const struct usb_host_interface *interface;
const struct usb_endpoint_descriptor *endpoint;
struct vicam_camera *cam;
printk(KERN_INFO "ViCam based webcam connected\n");
interface = intf->cur_altsetting;
DBG(KERN_DEBUG "Interface %d. has %u. endpoints!\n",
interface->desc.bInterfaceNumber, (unsigned) (interface->desc.bNumEndpoints));
endpoint = &interface->endpoint[0].desc;
if (usb_endpoint_is_bulk_in(endpoint)) {
/* we found a bulk in endpoint */
bulkEndpoint = endpoint->bEndpointAddress;
} else {
printk(KERN_ERR
"No bulk in endpoint was found ?! (this is bad)\n");
}
if ((cam =
kzalloc(sizeof (struct vicam_camera), GFP_KERNEL)) == NULL) {
printk(KERN_WARNING
"could not allocate kernel memory for vicam_camera struct\n");
return -ENOMEM;
}
cam->shutter_speed = 15;
mutex_init(&cam->cam_lock);
memcpy(&cam->vdev, &vicam_template, sizeof(vicam_template));
video_set_drvdata(&cam->vdev, cam);
cam->udev = dev;
cam->bulkEndpoint = bulkEndpoint;
if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1) < 0) {
kfree(cam);
printk(KERN_WARNING "video_register_device failed\n");
return -EIO;
}
printk(KERN_INFO "ViCam webcam driver now controlling device %s\n",
video_device_node_name(&cam->vdev));
usb_set_intfdata (intf, cam);
return 0;
}
static void
vicam_disconnect(struct usb_interface *intf)
{
int open_count;
struct vicam_camera *cam = usb_get_intfdata (intf);
usb_set_intfdata (intf, NULL);
/* we must unregister the device before taking its
* cam_lock. This is because the video open call
* holds the same lock as video unregister. if we
* unregister inside of the cam_lock and open also
* uses the cam_lock, we get deadlock.
*/
video_unregister_device(&cam->vdev);
/* stop the camera from being used */
mutex_lock(&cam->cam_lock);
/* mark the camera as gone */
cam->udev = NULL;
/* the only thing left to do is synchronize with
* our close/release function on who should release
* the camera memory. if there are any users using the
* camera, it's their job. if there are no users,
* it's ours.
*/
open_count = cam->open_count;
mutex_unlock(&cam->cam_lock);
if (!open_count) {
kfree(cam);
}
printk(KERN_DEBUG "ViCam-based WebCam disconnected\n");
}
/*
*/
static int __init
usb_vicam_init(void)
{
int retval;
DBG(KERN_INFO "ViCam-based WebCam driver startup\n");
retval = usb_register(&vicam_driver);
if (retval)
printk(KERN_WARNING "usb_register failed!\n");
return retval;
}
static void __exit
usb_vicam_exit(void)
{
DBG(KERN_INFO
"ViCam-based WebCam driver shutdown\n");
usb_deregister(&vicam_driver);
}
module_init(usb_vicam_init);
module_exit(usb_vicam_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("vicam/firmware.fw");
/*
* Video for Linux version 1 - OBSOLETE
*
* Header file for v4l1 drivers and applications, for
* Linux kernels 2.2.x or 2.4.x.
*
* Provides header for legacy drivers and applications
*
* See http://linuxtv.org for more info
*
*/
#ifndef __LINUX_VIDEODEV_H
#define __LINUX_VIDEODEV_H
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/videodev2.h>
#define VID_TYPE_CAPTURE 1 /* Can capture */
#define VID_TYPE_TUNER 2 /* Can tune */
#define VID_TYPE_TELETEXT 4 /* Does teletext */
#define VID_TYPE_OVERLAY 8 /* Overlay onto frame buffer */
#define VID_TYPE_CHROMAKEY 16 /* Overlay by chromakey */
#define VID_TYPE_CLIPPING 32 /* Can clip */
#define VID_TYPE_FRAMERAM 64 /* Uses the frame buffer memory */
#define VID_TYPE_SCALES 128 /* Scalable */
#define VID_TYPE_MONOCHROME 256 /* Monochrome only */
#define VID_TYPE_SUBCAPTURE 512 /* Can capture subareas of the image */
#define VID_TYPE_MPEG_DECODER 1024 /* Can decode MPEG streams */
#define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */
#define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */
#define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */
struct video_capability
{
char name[32];
int type;
int channels; /* Num channels */
int audios; /* Num audio devices */
int maxwidth; /* Supported width */
int maxheight; /* And height */
int minwidth; /* Supported width */
int minheight; /* And height */
};
struct video_channel
{
int channel;
char name[32];
int tuners;
__u32 flags;
#define VIDEO_VC_TUNER 1 /* Channel has a tuner */
#define VIDEO_VC_AUDIO 2 /* Channel has audio */
__u16 type;
#define VIDEO_TYPE_TV 1
#define VIDEO_TYPE_CAMERA 2
__u16 norm; /* Norm set by channel */
};
struct video_tuner
{
int tuner;
char name[32];
unsigned long rangelow, rangehigh; /* Tuner range */
__u32 flags;
#define VIDEO_TUNER_PAL 1
#define VIDEO_TUNER_NTSC 2
#define VIDEO_TUNER_SECAM 4
#define VIDEO_TUNER_LOW 8 /* Uses KHz not MHz */
#define VIDEO_TUNER_NORM 16 /* Tuner can set norm */
#define VIDEO_TUNER_STEREO_ON 128 /* Tuner is seeing stereo */
#define VIDEO_TUNER_RDS_ON 256 /* Tuner is seeing an RDS datastream */
#define VIDEO_TUNER_MBS_ON 512 /* Tuner is seeing an MBS datastream */
__u16 mode; /* PAL/NTSC/SECAM/OTHER */
#define VIDEO_MODE_PAL 0
#define VIDEO_MODE_NTSC 1
#define VIDEO_MODE_SECAM 2
#define VIDEO_MODE_AUTO 3
__u16 signal; /* Signal strength 16bit scale */
};
struct video_picture
{
__u16 brightness;
__u16 hue;
__u16 colour;
__u16 contrast;
__u16 whiteness; /* Black and white only */
__u16 depth; /* Capture depth */
__u16 palette; /* Palette in use */
#define VIDEO_PALETTE_GREY 1 /* Linear greyscale */
#define VIDEO_PALETTE_HI240 2 /* High 240 cube (BT848) */
#define VIDEO_PALETTE_RGB565 3 /* 565 16 bit RGB */
#define VIDEO_PALETTE_RGB24 4 /* 24bit RGB */
#define VIDEO_PALETTE_RGB32 5 /* 32bit RGB */
#define VIDEO_PALETTE_RGB555 6 /* 555 15bit RGB */
#define VIDEO_PALETTE_YUV422 7 /* YUV422 capture */
#define VIDEO_PALETTE_YUYV 8
#define VIDEO_PALETTE_UYVY 9 /* The great thing about standards is ... */
#define VIDEO_PALETTE_YUV420 10
#define VIDEO_PALETTE_YUV411 11 /* YUV411 capture */
#define VIDEO_PALETTE_RAW 12 /* RAW capture (BT848) */
#define VIDEO_PALETTE_YUV422P 13 /* YUV 4:2:2 Planar */
#define VIDEO_PALETTE_YUV411P 14 /* YUV 4:1:1 Planar */
#define VIDEO_PALETTE_YUV420P 15 /* YUV 4:2:0 Planar */
#define VIDEO_PALETTE_YUV410P 16 /* YUV 4:1:0 Planar */
#define VIDEO_PALETTE_PLANAR 13 /* start of planar entries */
#define VIDEO_PALETTE_COMPONENT 7 /* start of component entries */
};
struct video_audio
{
int audio; /* Audio channel */
__u16 volume; /* If settable */
__u16 bass, treble;
__u32 flags;
#define VIDEO_AUDIO_MUTE 1
#define VIDEO_AUDIO_MUTABLE 2
#define VIDEO_AUDIO_VOLUME 4
#define VIDEO_AUDIO_BASS 8
#define VIDEO_AUDIO_TREBLE 16
#define VIDEO_AUDIO_BALANCE 32
char name[16];
#define VIDEO_SOUND_MONO 1
#define VIDEO_SOUND_STEREO 2
#define VIDEO_SOUND_LANG1 4
#define VIDEO_SOUND_LANG2 8
__u16 mode;
__u16 balance; /* Stereo balance */
__u16 step; /* Step actual volume uses */
};
struct video_clip
{
__s32 x,y;
__s32 width, height;
struct video_clip *next; /* For user use/driver use only */
};
struct video_window
{
__u32 x,y; /* Position of window */
__u32 width,height; /* Its size */
__u32 chromakey;
__u32 flags;
struct video_clip __user *clips; /* Set only */
int clipcount;
#define VIDEO_WINDOW_INTERLACE 1
#define VIDEO_WINDOW_CHROMAKEY 16 /* Overlay by chromakey */
#define VIDEO_CLIP_BITMAP -1
/* bitmap is 1024x625, a '1' bit represents a clipped pixel */
#define VIDEO_CLIPMAP_SIZE (128 * 625)
};
struct video_capture
{
__u32 x,y; /* Offsets into image */
__u32 width, height; /* Area to capture */
__u16 decimation; /* Decimation divider */
__u16 flags; /* Flags for capture */
#define VIDEO_CAPTURE_ODD 0 /* Temporal */
#define VIDEO_CAPTURE_EVEN 1
};
struct video_buffer
{
void *base;
int height,width;
int depth;
int bytesperline;
};
struct video_mmap
{
unsigned int frame; /* Frame (0 - n) for double buffer */
int height,width;
unsigned int format; /* should be VIDEO_PALETTE_* */
};
struct video_key
{
__u8 key[8];
__u32 flags;
};
struct video_mbuf
{
int size; /* Total memory to map */
int frames; /* Frames */
int offsets[VIDEO_MAX_FRAME];
};
#define VIDEO_NO_UNIT (-1)
struct video_unit
{
int video; /* Video minor */
int vbi; /* VBI minor */
int radio; /* Radio minor */
int audio; /* Audio minor */
int teletext; /* Teletext minor */
};
struct vbi_format {
__u32 sampling_rate; /* in Hz */
__u32 samples_per_line;
__u32 sample_format; /* VIDEO_PALETTE_RAW only (1 byte) */
__s32 start[2]; /* starting line for each frame */
__u32 count[2]; /* count of lines for each frame */
__u32 flags;
#define VBI_UNSYNC	1	/* can distinguish between top/bottom field */
#define VBI_INTERLACED 2 /* lines are interlaced */
};
/* video_info is biased towards hardware mpeg encode/decode */
/* but it could apply generically to any hardware compressor/decompressor */
struct video_info
{
__u32 frame_count; /* frames output since decode/encode began */
__u32 h_size; /* current unscaled horizontal size */
	__u32 v_size;			/* current unscaled vertical size */
__u32 smpte_timecode; /* current SMPTE timecode (for current GOP) */
__u32 picture_type; /* current picture type */
__u32 temporal_reference; /* current temporal reference */
__u8 user_data[256]; /* user data last found in compressed stream */
/* user_data[0] contains user data flags, user_data[1] has count */
};
/* generic structure for setting playback modes */
struct video_play_mode
{
int mode;
int p1;
int p2;
};
/* for loading microcode / fpga programming */
struct video_code
{
char loadwhat[16]; /* name or tag of file being passed */
int datasize;
__u8 *data;
};
#define VIDIOCGCAP _IOR('v',1,struct video_capability) /* Get capabilities */
#define VIDIOCGCHAN _IOWR('v',2,struct video_channel) /* Get channel info (sources) */
#define VIDIOCSCHAN _IOW('v',3,struct video_channel) /* Set channel */
#define VIDIOCGTUNER _IOWR('v',4,struct video_tuner) /* Get tuner abilities */
#define VIDIOCSTUNER _IOW('v',5,struct video_tuner) /* Tune the tuner for the current channel */
#define VIDIOCGPICT _IOR('v',6,struct video_picture) /* Get picture properties */
#define VIDIOCSPICT _IOW('v',7,struct video_picture) /* Set picture properties */
#define VIDIOCCAPTURE _IOW('v',8,int) /* Start, end capture */
#define VIDIOCGWIN _IOR('v',9, struct video_window) /* Get the video overlay window */
#define VIDIOCSWIN _IOW('v',10, struct video_window) /* Set the video overlay window - passes clip list for hardware smarts , chromakey etc */
#define VIDIOCGFBUF _IOR('v',11, struct video_buffer) /* Get frame buffer */
#define VIDIOCSFBUF _IOW('v',12, struct video_buffer) /* Set frame buffer - root only */
#define VIDIOCKEY _IOR('v',13, struct video_key) /* Video key event - to dev 255 is to all - cuts capture on all DMA windows with this key (0xFFFFFFFF == all) */
#define VIDIOCGFREQ _IOR('v',14, unsigned long) /* Set tuner */
#define VIDIOCSFREQ _IOW('v',15, unsigned long) /* Set tuner */
#define VIDIOCGAUDIO _IOR('v',16, struct video_audio) /* Get audio info */
#define VIDIOCSAUDIO _IOW('v',17, struct video_audio) /* Audio source, mute etc */
#define VIDIOCSYNC _IOW('v',18, int) /* Sync with mmap grabbing */
#define VIDIOCMCAPTURE _IOW('v',19, struct video_mmap) /* Grab frames */
#define VIDIOCGMBUF _IOR('v',20, struct video_mbuf) /* Memory map buffer info */
#define VIDIOCGUNIT _IOR('v',21, struct video_unit) /* Get attached units */
#define VIDIOCGCAPTURE _IOR('v',22, struct video_capture) /* Get subcapture */
#define VIDIOCSCAPTURE _IOW('v',23, struct video_capture) /* Set subcapture */
#define VIDIOCSPLAYMODE _IOW('v',24, struct video_play_mode) /* Set output video mode/feature */
#define VIDIOCSWRITEMODE _IOW('v',25, int) /* Set write mode */
#define VIDIOCGPLAYINFO _IOR('v',26, struct video_info) /* Get current playback info from hardware */
#define VIDIOCSMICROCODE _IOW('v',27, struct video_code) /* Load microcode into hardware */
#define VIDIOCGVBIFMT _IOR('v',28, struct vbi_format) /* Get VBI information */
#define VIDIOCSVBIFMT _IOW('v',29, struct vbi_format) /* Set VBI information */
#define BASE_VIDIOCPRIVATE 192 /* 192-255 are private */
/* VIDIOCSWRITEMODE */
#define VID_WRITE_MPEG_AUD 0
#define VID_WRITE_MPEG_VID 1
#define VID_WRITE_OSD 2
#define VID_WRITE_TTX 3
#define VID_WRITE_CC 4
#define VID_WRITE_MJPEG 5
/* VIDIOCSPLAYMODE */
#define VID_PLAY_VID_OUT_MODE 0
/* p1: = VIDEO_MODE_PAL, VIDEO_MODE_NTSC, etc ... */
#define VID_PLAY_GENLOCK 1
/* p1: 0 = OFF, 1 = ON */
/* p2: GENLOCK FINE DELAY value */
#define VID_PLAY_NORMAL 2
#define VID_PLAY_PAUSE 3
#define VID_PLAY_SINGLE_FRAME 4
#define VID_PLAY_FAST_FORWARD 5
#define VID_PLAY_SLOW_MOTION 6
#define VID_PLAY_IMMEDIATE_NORMAL 7
#define VID_PLAY_SWITCH_CHANNELS 8
#define VID_PLAY_FREEZE_FRAME 9
#define VID_PLAY_STILL_MODE 10
#define VID_PLAY_MASTER_MODE 11
/* p1: see below */
#define VID_PLAY_MASTER_NONE 1
#define VID_PLAY_MASTER_VIDEO 2
#define VID_PLAY_MASTER_AUDIO 3
#define VID_PLAY_ACTIVE_SCANLINES 12
/* p1 = first active; p2 = last active */
#define VID_PLAY_RESET 13
#define VID_PLAY_END_MARK 14
#endif /* __LINUX_VIDEODEV_H */
/*
* Local variables:
* c-basic-offset: 8
* End:
*/