Commit 82d939ec authored by Greg Kroah-Hartman

Merge branch 'opw-next' into staging-next

This pulls in all of the staging patches applied during the OPW
application process, and some other staging patches that were submitted
during that period.  All of these are for 3.14-rc1.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parents 6ce4eac1 4f317748
......@@ -68,11 +68,10 @@ static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
*/
static int is_wakeup(enum android_alarm_type type)
{
return (type == ANDROID_ALARM_RTC_WAKEUP ||
type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP);
return type == ANDROID_ALARM_RTC_WAKEUP ||
type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP;
}
static void devalarm_start(struct devalarm *alrm, ktime_t exp)
{
if (is_wakeup(alrm->type))
......@@ -111,7 +110,6 @@ static void alarm_clear(enum android_alarm_type alarm_type)
}
alarm_enabled &= ~alarm_type_mask;
spin_unlock_irqrestore(&alarm_slock, flags);
}
static void alarm_set(enum android_alarm_type alarm_type,
......@@ -280,6 +278,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
#ifdef CONFIG_COMPAT
static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
......@@ -371,7 +370,6 @@ static void devalarm_triggered(struct devalarm *alarm)
spin_unlock_irqrestore(&alarm_slock, flags);
}
static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)
{
struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt);
......
......@@ -28,7 +28,7 @@ struct sync_fence;
/**
* struct sync_timeline_ops - sync object implementation ops
* @driver_name: name of the implentation
* @driver_name: name of the implementation
* @dup: duplicate a sync_pt
* @has_signaled: returns:
* 1 if pt has signaled
......@@ -37,12 +37,12 @@ struct sync_fence;
* @compare: returns:
* 1 if b will signal before a
* 0 if a and b will signal at the same time
* -1 if a will signabl before b
* -1 if a will signal before b
* @free_pt: called before sync_pt is freed
* @release_obj: called before sync_timeline is freed
* @print_obj: deprecated
* @print_pt: deprecated
* @fill_driver_data: write implmentation specific driver data to data.
* @fill_driver_data: write implementation specific driver data to data.
* should return an error if there is not enough room
* as specified by size. This information is returned
* to userspace by SYNC_IOC_FENCE_INFO.
......@@ -88,9 +88,9 @@ struct sync_timeline_ops {
/**
* struct sync_timeline - sync object
* @kref: reference count on fence.
* @ops: ops that define the implementaiton of the sync_timeline
* @ops: ops that define the implementation of the sync_timeline
* @name: name of the sync_timeline. Useful for debugging
* @destoryed: set when sync_timeline is destroyed
* @destroyed: set when sync_timeline is destroyed
* @child_list_head: list of children sync_pts for this sync_timeline
* @child_list_lock: lock protecting @child_list_head, destroyed, and
* sync_pt.status
......@@ -119,12 +119,12 @@ struct sync_timeline {
* @parent: sync_timeline to which this sync_pt belongs
* @child_list: membership in sync_timeline.child_list_head
* @active_list: membership in sync_timeline.active_list_head
* @signaled_list: membership in temorary signaled_list on stack
* @signaled_list: membership in temporary signaled_list on stack
* @fence: sync_fence to which the sync_pt belongs
* @pt_list: membership in sync_fence.pt_list_head
* @status: 1: signaled, 0:active, <0: error
* @timestamp: time which sync_pt status transitioned from active to
* singaled or error.
* signaled or error.
*/
struct sync_pt {
struct sync_timeline *parent;
......@@ -145,9 +145,9 @@ struct sync_pt {
/**
* struct sync_fence - sync fence
* @file: file representing this fence
* @kref: referenace count on fence.
* @kref: reference count on fence.
* @name: name of sync_fence. Useful for debugging
* @pt_list_head: list of sync_pts in ths fence. immutable once fence
* @pt_list_head: list of sync_pts in the fence. immutable once fence
* is created
* @waiter_list_head: list of asynchronous waiters on this fence
* @waiter_list_lock: lock protecting @waiter_list_head and @status
......@@ -201,23 +201,23 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
/**
* sync_timeline_create() - creates a sync object
* @ops: specifies the implemention ops for the object
* @ops: specifies the implementation ops for the object
* @size: size to allocate for this obj
* @name: sync_timeline name
*
* Creates a new sync_timeline which will use the implemetation specified by
* @ops. @size bytes will be allocated allowing for implemntation specific
* data to be kept after the generic sync_timeline stuct.
* Creates a new sync_timeline which will use the implementation specified by
* @ops. @size bytes will be allocated allowing for implementation specific
* data to be kept after the generic sync_timeline struct.
*/
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name);
/**
* sync_timeline_destory() - destorys a sync object
* sync_timeline_destroy() - destroys a sync object
* @obj: sync_timeline to destroy
*
* A sync implemntation should call this when the @obj is going away
* (i.e. module unload.) @obj won't actually be freed until all its childern
* A sync implementation should call this when the @obj is going away
* (i.e. module unload.) @obj won't actually be freed until all its children
* sync_pts are freed.
*/
void sync_timeline_destroy(struct sync_timeline *obj);
......@@ -226,7 +226,7 @@ void sync_timeline_destroy(struct sync_timeline *obj);
* sync_timeline_signal() - signal a status change on a sync_timeline
* @obj: sync_timeline to signal
*
* A sync implemntation should call this any time one of it's sync_pts
* A sync implementation should call this any time one of it's sync_pts
* has signaled or has an error condition.
*/
void sync_timeline_signal(struct sync_timeline *obj);
......@@ -236,8 +236,8 @@ void sync_timeline_signal(struct sync_timeline *obj);
* @parent: sync_pt's parent sync_timeline
* @size: size to allocate for this pt
*
* Creates a new sync_pt as a chiled of @parent. @size bytes will be
* allocated allowing for implemntation specific data to be kept after
* Creates a new sync_pt as a child of @parent. @size bytes will be
* allocated allowing for implementation specific data to be kept after
* the generic sync_timeline struct.
*/
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
......@@ -287,7 +287,7 @@ struct sync_fence *sync_fence_merge(const char *name,
struct sync_fence *sync_fence_fdget(int fd);
/**
* sync_fence_put() - puts a refernnce of a sync fence
* sync_fence_put() - puts a reference of a sync fence
* @fence: fence to put
*
* Puts a reference on @fence. If this is the last reference, the fence and
......@@ -297,7 +297,7 @@ void sync_fence_put(struct sync_fence *fence);
/**
* sync_fence_install() - installs a fence into a file descriptor
* @fence: fence to instal
* @fence: fence to install
* @fd: file descriptor in which to install the fence
*
* Installs @fence into @fd. @fd's should be acquired through get_unused_fd().
......@@ -359,10 +359,10 @@ struct sync_merge_data {
* struct sync_pt_info - detailed sync_pt information
* @len: length of sync_pt_info including any driver_data
* @obj_name: name of parent sync_timeline
* @driver_name: name of driver implmenting the parent
* @driver_name: name of driver implementing the parent
* @status: status of the sync_pt 0:active 1:signaled <0:error
* @timestamp_ns: timestamp of status change in nanoseconds
* @driver_data: any driver dependant data
* @driver_data: any driver dependent data
*/
struct sync_pt_info {
__u32 len;
......@@ -377,7 +377,7 @@ struct sync_pt_info {
/**
* struct sync_fence_info_data - data returned from fence info ioctl
* @len: ioctl caller writes the size of the buffer its passing in.
* ioctl returns length of sync_fence_data reutnred to userspace
* ioctl returns length of sync_fence_data returned to userspace
* including pt_info.
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
......@@ -418,7 +418,7 @@ struct sync_fence_info_data {
* pt_info.
*
* pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
* To itterate over the sync_pt_infos, use the sync_pt_info.len field.
* To iterate over the sync_pt_infos, use the sync_pt_info.len field.
*/
#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
struct sync_fence_info_data)
......
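The SYNC_IOC_FENCE_INFO comment above says to walk the returned sync_pt_infos using sync_pt_info.len as the stride. A minimal userspace sketch of that loop, assuming the uapi structs documented in this header (the trailing pt_info member, the include path, and the buffer size are assumptions, not part of this diff):

#include <stdio.h>
#include <linux/types.h>
#include <sys/ioctl.h>
#include "sync.h"			/* assumed path for the uapi header documented above */

static void dump_fence_info(int fence_fd)
{
	char buf[4096] __attribute__((aligned(8)));
	struct sync_fence_info_data *data = (struct sync_fence_info_data *)buf;
	__u8 *ptr, *end;

	data->len = sizeof(buf);	/* caller writes the size of the buffer it is passing in */
	if (ioctl(fence_fd, SYNC_IOC_FENCE_INFO, data) < 0)
		return;

	ptr = data->pt_info;		/* assumed trailing member holding the sync_pt_infos */
	end = (__u8 *)data + data->len;	/* ioctl rewrites len to the bytes actually returned */
	while (ptr < end) {
		struct sync_pt_info *info = (struct sync_pt_info *)ptr;

		printf("%s/%s: status %d\n", info->obj_name,
		       info->driver_name, info->status);
		ptr += info->len;	/* len includes any driver_data */
	}
}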
......@@ -630,7 +630,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
}
spin_unlock_irq(&pdx->stagedLock);
if (pPages) { /* if we decided to release the memory */
if (pPages) { /* if we decided to release the memory */
/* Now we must undo the pinning down of the pages. We will assume the worst and mark */
/* all the pages as dirty. Don't be tempted to move this up above as you must not be */
/* holding a spin lock to do this stuff as it is not atomic. */
......
......@@ -16,6 +16,7 @@
*/
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "comedidev.h"
#include "comedi_internal.h"
......@@ -26,31 +27,21 @@
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
#endif
static void __comedi_buf_free(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned n_pages)
static void comedi_buf_map_kref_release(struct kref *kref)
{
struct comedi_async *async = s->async;
struct comedi_buf_map *bm =
container_of(kref, struct comedi_buf_map, refcount);
struct comedi_buf_page *buf;
unsigned i;
if (async->prealloc_buf) {
vunmap(async->prealloc_buf);
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
}
unsigned int i;
if (!async->buf_page_list)
return;
for (i = 0; i < n_pages; ++i) {
buf = &async->buf_page_list[i];
if (buf->virt_addr) {
if (bm->page_list) {
for (i = 0; i < bm->n_pages; i++) {
buf = &bm->page_list[i];
clear_bit(PG_reserved,
&(virt_to_page(buf->virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
dma_free_coherent(dev->hw_dev,
dma_free_coherent(bm->dma_hw_dev,
PAGE_SIZE,
buf->virt_addr,
buf->dma_addr);
......@@ -59,10 +50,26 @@ static void __comedi_buf_free(struct comedi_device *dev,
free_page((unsigned long)buf->virt_addr);
}
}
vfree(bm->page_list);
}
vfree(async->buf_page_list);
async->buf_page_list = NULL;
async->n_buf_pages = 0;
if (bm->dma_dir != DMA_NONE)
put_device(bm->dma_hw_dev);
kfree(bm);
}
static void __comedi_buf_free(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
if (async->prealloc_buf) {
vunmap(async->prealloc_buf);
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
}
comedi_buf_map_put(async->buf_map);
async->buf_map = NULL;
}
static void __comedi_buf_alloc(struct comedi_device *dev,
......@@ -71,6 +78,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned i;
......@@ -80,18 +88,29 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
return;
}
async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
if (async->buf_page_list)
bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
if (!bm)
return;
async->buf_map = bm;
kref_init(&bm->refcount);
bm->dma_dir = s->async_dma_dir;
if (bm->dma_dir != DMA_NONE)
/* Need ref to hardware device to free buffer later. */
bm->dma_hw_dev = get_device(dev->hw_dev);
bm->page_list = vzalloc(sizeof(*buf) * n_pages);
if (bm->page_list)
pages = vmalloc(sizeof(struct page *) * n_pages);
if (!pages)
return;
for (i = 0; i < n_pages; i++) {
buf = &async->buf_page_list[i];
if (s->async_dma_dir != DMA_NONE)
buf = &bm->page_list[i];
if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
PAGE_SIZE,
&buf->dma_addr,
GFP_KERNEL |
......@@ -108,6 +127,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
pages[i] = virt_to_page(buf->virt_addr);
}
bm->n_pages = i;
/* vmap the prealloc_buf if all the pages were allocated */
if (i == n_pages)
......@@ -117,6 +137,26 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
vfree(pages);
}
void comedi_buf_map_get(struct comedi_buf_map *bm)
{
if (bm)
kref_get(&bm->refcount);
}
int comedi_buf_map_put(struct comedi_buf_map *bm)
{
if (bm)
return kref_put(&bm->refcount, comedi_buf_map_kref_release);
return 1;
}
bool comedi_buf_is_mmapped(struct comedi_async *async)
{
struct comedi_buf_map *bm = async->buf_map;
return bm && (atomic_read(&bm->refcount.refcount) > 1);
}
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size)
{
......@@ -130,7 +170,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
/* deallocate old buffer */
__comedi_buf_free(dev, s, async->n_buf_pages);
__comedi_buf_free(dev, s);
/* allocate new buffer */
if (new_size) {
......@@ -140,10 +180,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
if (!async->prealloc_buf) {
/* allocation failed */
__comedi_buf_free(dev, s, n_pages);
__comedi_buf_free(dev, s);
return -ENOMEM;
}
async->n_buf_pages = n_pages;
}
async->prealloc_bufsz = new_size;
......
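The comedi_buf_map introduced above is reference counted with a kref, so code that walks page_list can pin the map around the access. A hypothetical consumer, sketched only to show the get/put pairing (the function and the memset are made up for illustration; bm->page_list, bm->n_pages and the helpers come from the hunk above):

static void example_touch_pages(struct comedi_async *async)
{
	struct comedi_buf_map *bm = async->buf_map;
	unsigned int i;

	if (!bm)
		return;

	comedi_buf_map_get(bm);		/* hold a reference while using page_list */
	for (i = 0; i < bm->n_pages; i++)
		memset(bm->page_list[i].virt_addr, 0, PAGE_SIZE);
	comedi_buf_map_put(bm);		/* drops the ref; frees the map if it was the last */
}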
......@@ -16,7 +16,11 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s);
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size);
void comedi_buf_reset(struct comedi_async *async);
bool comedi_buf_is_mmapped(struct comedi_async *async);
void comedi_buf_map_get(struct comedi_buf_map *bm);
int comedi_buf_map_put(struct comedi_buf_map *bm);
unsigned int comedi_buf_write_n_allocated(struct comedi_async *async);
void comedi_device_cancel_all(struct comedi_device *dev);
extern unsigned int comedi_default_buf_size_kb;
extern unsigned int comedi_default_buf_maxsize_kb;
......
......@@ -20,6 +20,10 @@
#define _COMEDIDEV_H
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
#include <linux/rwsem.h>
#include <linux/kref.h>
#include "comedi.h"
......@@ -100,18 +104,22 @@ struct comedi_buf_page {
dma_addr_t dma_addr;
};
struct comedi_buf_map {
struct device *dma_hw_dev;
struct comedi_buf_page *page_list;
unsigned int n_pages;
enum dma_data_direction dma_dir;
struct kref refcount;
};
struct comedi_async {
struct comedi_subdevice *subdevice;
void *prealloc_buf; /* pre-allocated buffer */
unsigned int prealloc_bufsz; /* buffer size, in bytes */
/* virtual and dma address of each page */
struct comedi_buf_page *buf_page_list;
unsigned n_buf_pages; /* num elements in buf_page_list */
struct comedi_buf_map *buf_map; /* map of buffer pages */
unsigned int max_bufsize; /* maximum buffer size, bytes */
/* current number of mmaps of prealloc_buf */
unsigned int mmap_count;
/* byte count for writer (write completed) */
unsigned int buf_write_count;
......@@ -141,10 +149,7 @@ struct comedi_async {
wait_queue_head_t wait_head;
/* callback stuff */
unsigned int cb_mask;
int (*cb_func) (unsigned int flags, void *);
void *cb_arg;
int (*inttrig) (struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int x);
......@@ -173,6 +178,7 @@ struct comedi_device {
struct device *class_dev;
int minor;
unsigned int detach_count;
/* hw_dev is passed to dma_alloc_coherent when allocating async buffers
* for subdevices that have async_dma_dir set to something other than
* DMA_NONE */
......@@ -185,6 +191,8 @@ struct comedi_device {
bool ioenabled:1;
spinlock_t spinlock;
struct mutex mutex;
struct rw_semaphore attach_lock;
struct kref refcount;
int n_subdevices;
struct comedi_subdevice *subdevices;
......@@ -231,7 +239,8 @@ enum comedi_minor_bits {
static const unsigned COMEDI_SUBDEVICE_MINOR_SHIFT = 4;
static const unsigned COMEDI_SUBDEVICE_MINOR_OFFSET = 1;
struct comedi_device *comedi_dev_from_minor(unsigned minor);
struct comedi_device *comedi_dev_get_from_minor(unsigned minor);
int comedi_dev_put(struct comedi_device *dev);
void init_polling(void);
void cleanup_polling(void);
......@@ -240,7 +249,6 @@ void stop_polling(struct comedi_device *);
/* subdevice runflags */
enum subdevice_runflags {
SRF_USER = 0x00000001,
SRF_RT = 0x00000002,
/* indicates an COMEDI_CB_ERROR event has occurred since the last
* command was started */
......
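The attach_lock rw_semaphore added to struct comedi_device above is what the later hunks take for read before trusting dev->attached. The reader-side pattern, as a standalone sketch (the function itself is hypothetical, not from this commit):

static int example_count_subdevices(struct comedi_device *dev)
{
	int n = -ENODEV;

	down_read(&dev->attach_lock);	/* attached only changes under the write lock */
	if (dev->attached)
		n = dev->n_subdevices;
	up_read(&dev->attach_lock);

	return n;
}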
......@@ -95,7 +95,7 @@ int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
}
EXPORT_SYMBOL_GPL(comedi_alloc_subdevices);
static void cleanup_device(struct comedi_device *dev)
static void comedi_device_detach_cleanup(struct comedi_device *dev)
{
int i;
struct comedi_subdevice *s;
......@@ -133,10 +133,14 @@ static void cleanup_device(struct comedi_device *dev)
void comedi_device_detach(struct comedi_device *dev)
{
comedi_device_cancel_all(dev);
down_write(&dev->attach_lock);
dev->attached = false;
dev->detach_count++;
if (dev->driver)
dev->driver->detach(dev);
cleanup_device(dev);
comedi_device_detach_cleanup(dev);
up_write(&dev->attach_lock);
}
static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s)
......@@ -355,8 +359,9 @@ static int comedi_device_postconfig(struct comedi_device *dev)
ret = __comedi_device_postconfig(dev);
if (ret < 0)
return ret;
smp_wmb();
down_write(&dev->attach_lock);
dev->attached = true;
up_write(&dev->attach_lock);
return 0;
}
......@@ -657,7 +662,7 @@ void comedi_driver_unregister(struct comedi_driver *driver)
/* check for devices using this driver */
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device *dev = comedi_dev_from_minor(i);
struct comedi_device *dev = comedi_dev_get_from_minor(i);
if (!dev)
continue;
......@@ -671,6 +676,7 @@ void comedi_driver_unregister(struct comedi_driver *driver)
comedi_device_detach(dev);
}
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
}
}
EXPORT_SYMBOL_GPL(comedi_driver_unregister);
......@@ -513,7 +513,7 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
int event = 0;
spin_lock_irqsave(&subpriv->spinlock, flags);
subpriv->active = 1;
subpriv->active = true;
/* Set up end of acquisition. */
switch (cmd->stop_src) {
......
......@@ -345,7 +345,7 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
for (i = 0; i < n_links; i++) {
ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
ring->descriptors[i].addr =
cpu_to_le32(async->buf_page_list[i].dma_addr);
cpu_to_le32(async->buf_map->page_list[i].dma_addr);
ring->descriptors[i].next =
cpu_to_le32(ring->descriptors_dma_addr + (i +
1) *
......
......@@ -753,7 +753,7 @@ static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev,
} else {
devpriv->ai_mode = INT_TYPE_AI3_DMA;
outb(0x86 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+IRQ+DMA */
};
}
}
/*
......
......@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
struct comedi_device *comedi_open(const char *filename)
{
struct comedi_device *dev;
struct comedi_device *dev, *retval = NULL;
unsigned int minor;
if (strncmp(filename, "/dev/comedi", 11) != 0)
......@@ -46,24 +46,27 @@ struct comedi_device *comedi_open(const char *filename)
if (minor >= COMEDI_NUM_BOARD_MINORS)
return NULL;
dev = comedi_dev_from_minor(minor);
if (!dev || !dev->attached)
dev = comedi_dev_get_from_minor(minor);
if (!dev)
return NULL;
if (!try_module_get(dev->driver->module))
return NULL;
down_read(&dev->attach_lock);
if (dev->attached)
retval = dev;
else
retval = NULL;
up_read(&dev->attach_lock);
if (retval == NULL)
comedi_dev_put(dev);
return dev;
return retval;
}
EXPORT_SYMBOL_GPL(comedi_open);
int comedi_close(struct comedi_device *d)
int comedi_close(struct comedi_device *dev)
{
struct comedi_device *dev = (struct comedi_device *)d;
module_put(dev->driver->module);
comedi_dev_put(dev);
return 0;
}
EXPORT_SYMBOL_GPL(comedi_close);
......@@ -73,7 +76,14 @@ static int comedi_do_insn(struct comedi_device *dev,
unsigned int *data)
{
struct comedi_subdevice *s;
int ret = 0;
int ret;
mutex_lock(&dev->mutex);
if (!dev->attached) {
ret = -EINVAL;
goto error;
}
/* a subdevice instruction */
if (insn->subdev >= dev->n_subdevices) {
......@@ -120,6 +130,7 @@ static int comedi_do_insn(struct comedi_device *dev,
s->busy = NULL;
error:
mutex_unlock(&dev->mutex);
return ret;
}
......@@ -169,9 +180,6 @@ int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
unsigned int shift;
int ret;
if (subdev >= dev->n_subdevices)
return -EINVAL;
base_channel = CR_CHAN(base_channel);
n_chan = comedi_get_n_channels(dev, subdev);
if (base_channel >= n_chan)
......@@ -211,23 +219,33 @@ int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd)
{
struct comedi_subdevice *s;
if (subd > dev->n_subdevices)
return -ENODEV;
for (; subd < dev->n_subdevices; subd++) {
s = &dev->subdevices[subd];
if (s->type == type)
return subd;
}
return -1;
int ret = -ENODEV;
down_read(&dev->attach_lock);
if (dev->attached)
for (; subd < dev->n_subdevices; subd++) {
s = &dev->subdevices[subd];
if (s->type == type) {
ret = subd;
break;
}
}
up_read(&dev->attach_lock);
return ret;
}
EXPORT_SYMBOL_GPL(comedi_find_subdevice_by_type);
int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
{
struct comedi_subdevice *s = &dev->subdevices[subdevice];
int n;
down_read(&dev->attach_lock);
if (!dev->attached || subdevice >= dev->n_subdevices)
n = 0;
else
n = dev->subdevices[subdevice].n_chan;
up_read(&dev->attach_lock);
return s->n_chan;
return n;
}
EXPORT_SYMBOL_GPL(comedi_get_n_channels);
......@@ -41,16 +41,20 @@ static int comedi_read(struct seq_file *m, void *v)
"driver_name, board_name, n_subdevices");
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device *dev = comedi_dev_from_minor(i);
struct comedi_device *dev = comedi_dev_get_from_minor(i);
if (!dev)
continue;
down_read(&dev->attach_lock);
if (dev->attached) {
devices_q = 1;
seq_printf(m, "%2d: %-20s %-20s %4d\n",
i, dev->driver->driver_name,
dev->board_name, dev->n_subdevices);
}
up_read(&dev->attach_lock);
comedi_dev_put(dev);
}
if (!devices_q)
seq_puts(m, "no devices\n");
......
......@@ -1059,7 +1059,7 @@ bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx)
{
if (!ctx) {
BCMLOG_ERR("Invalid arg..\n");
return 0;
return false;
}
return crystalhd_hw_interrupt(ctx->adp, &ctx->hw_ctx);
......
......@@ -28,9 +28,9 @@ extern int cxt1e1_log_level;
#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
/* forward references */
static void SetPwrLevel(comet_t *comet);
static void WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table);
static void WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
static void SetPwrLevel(struct s_comet_reg *comet);
static void WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table);
static void WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
void *TWV_table[12] = {
......@@ -58,7 +58,7 @@ lbo_tbl_lkup(int t1, int lbo) {
return lbo - 1;
}
void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int clockmaster,
u_int8_t moreParams)
{
u_int8_t isT1mode;
......@@ -159,8 +159,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* 60: t1 ALMI cfg */
/* Configure Line Coding */
switch (port_mode)
{
switch (port_mode) {
/* 1 - T1 B8ZS */
case CFG_FRAME_SF:
pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
......@@ -286,8 +285,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* 0x30: "BRIF cfg"; 0x20 is 'CMODE', 0x03 is (bit) rate */
/* note "rate bits can only be set once after reset" */
if (clockmaster)
{
if (clockmaster) {
/* CMODE == clockMode, 0=clock master (so all 3 others should be slave) */
/* rate = 1.544 Mb/s */
if (isT1mode)
......@@ -302,16 +300,17 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* Master Mode i.e.FPMODE=0 (@0x20) */
pci_write_32((u_int32_t *) &comet->brif_fpcfg, 0x00);
if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL)
{
if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL) {
if (cxt1e1_log_level >= LOG_SBEBUG12)
pr_info(">> %s: clockmaster internal clock\n", __func__);
pr_info(">> %s: clockmaster internal clock\n",
__func__);
/* internal oscillator */
pci_write_32((u_int32_t *) &comet->tx_time, 0x0d);
} else {
/* external clock source */
if (cxt1e1_log_level >= LOG_SBEBUG12)
pr_info(">> %s: clockmaster external clock\n", __func__);
pr_info(">> %s: clockmaster external clock\n",
__func__);
/* loop timing(external) */
pci_write_32((u_int32_t *) &comet->tx_time, 0x09);
}
......@@ -399,7 +398,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
** Returns: Nothing
*/
static void
WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
WrtXmtWaveform(ci_t *ci, struct s_comet_reg *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
{
u_int8_t WaveformAddr;
......@@ -417,19 +416,20 @@ WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int
** Returns: Nothing
*/
static void
WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet,
u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS])
{
u_int32_t sample, unit;
for (sample = 0; sample < COMET_NUM_SAMPLES; sample++)
{
for (sample = 0; sample < COMET_NUM_SAMPLES; sample++) {
for (unit = 0; unit < COMET_NUM_UNITS; unit++)
WrtXmtWaveform(ci, comet, sample, unit, table[sample][unit]);
WrtXmtWaveform(ci, comet, sample, unit,
table[sample][unit]);
}
/* Enable transmitter and set output amplitude */
pci_write_32((u_int32_t *) &comet->xlpg_cfg, table[COMET_NUM_SAMPLES][0]);
pci_write_32((u_int32_t *) &comet->xlpg_cfg,
table[COMET_NUM_SAMPLES][0]);
}
......@@ -444,7 +444,7 @@ WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
*/
static void
WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table)
{
u_int32_t ramaddr;
volatile u_int32_t value;
......@@ -457,7 +457,8 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
/* write the addr, initiate a read */
pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr);
pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr,
(u_int8_t) ramaddr);
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
/*
......@@ -470,9 +471,12 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
}
value = *table++;
pci_write_32((u_int32_t *) &comet->rlps_idata3, (u_int8_t) (value >> 24));
pci_write_32((u_int32_t *) &comet->rlps_idata2, (u_int8_t) (value >> 16));
pci_write_32((u_int32_t *) &comet->rlps_idata1, (u_int8_t) (value >> 8));
pci_write_32((u_int32_t *) &comet->rlps_idata3,
(u_int8_t) (value >> 24));
pci_write_32((u_int32_t *) &comet->rlps_idata2,
(u_int8_t) (value >> 16));
pci_write_32((u_int32_t *) &comet->rlps_idata1,
(u_int8_t) (value >> 8));
pci_write_32((u_int32_t *) &comet->rlps_idata0, (u_int8_t) value);
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
......@@ -484,7 +488,8 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
/* for write order preservation when optimizing driver */
pci_flush_write(ci);
/* write the addr, initiate a read */
pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr);
pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr,
(u_int8_t) ramaddr);
/* for write order preservation when optimizing driver */
pci_flush_write(ci);
......@@ -508,7 +513,7 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
*/
static void
SetPwrLevel(comet_t *comet)
SetPwrLevel(struct s_comet_reg *comet)
{
volatile u_int32_t temp;
......@@ -550,12 +555,11 @@ SetPwrLevel(comet_t *comet)
*/
#if 0
static void
SetCometOps(comet_t *comet)
SetCometOps(struct s_comet_reg *comet)
{
volatile u_int8_t rd_value;
if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2))
{
if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2)) {
/* read the BRIF Configuration */
rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
rd_value &= ~0x20;
......
......@@ -274,7 +274,7 @@ VMETRO_TRACE (void *x)
void
VMETRO_TRIGGER (ci_t *ci, int x)
{
comet_t *comet;
struct s_comet_reg *comet;
volatile u_int32_t data;
comet = ci->port[0].cometbase; /* default to COMET # 0 */
......
unsigned int max_intcnt = 0;
unsigned int max_bh = 0;
static unsigned int max_intcnt = 0;
static unsigned int max_bh = 0;
/*-----------------------------------------------------------------------------
* musycc.c -
......
......@@ -194,7 +194,7 @@ checkPorts (ci_t *ci)
* alarms conflicts with NCOMM's interrupt servicing implementation.
*/
comet_t *comet;
struct s_comet_reg *comet;
volatile u_int32_t value;
u_int32_t copyVal, LEDval;
......@@ -507,7 +507,7 @@ c4_cleanup (void)
int
c4_get_portcfg (ci_t *ci)
{
comet_t *comet;
struct s_comet_reg *comet;
int portnum, mask;
u_int32_t wdata, rdata;
......@@ -561,7 +561,7 @@ c4_init (ci_t *ci, u_char *func0, u_char *func1)
for (portnum = 0; portnum < MUSYCC_NPORTS; portnum++)
{
pi = &ci->port[portnum];
pi->cometbase = (comet_t *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum)));
pi->cometbase = (struct s_comet_reg *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum)));
pi->reg = (struct musycc_globalr *) ((u_char *) ci->reg + (portnum * 0x800));
pi->portnum = portnum;
pi->p.portnum = portnum;
......@@ -693,7 +693,7 @@ c4_init2 (ci_t *ci)
int
c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd)
{
comet_t *comet;
struct s_comet_reg *comet;
volatile u_int32_t loopValue;
comet = ci->port[portnum].cometbase;
......@@ -752,7 +752,7 @@ c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd)
status_t
c4_frame_rw (ci_t *ci, struct sbecom_port_param *pp)
{
comet_t *comet;
struct s_comet_reg *comet;
volatile u_int32_t data;
if (pp->portnum >= ci->max_port)/* sanity check */
......
......@@ -133,7 +133,7 @@ struct c4_port_info
void *regram_saved; /* Original malloc value may have non-2KB
* boundary. Need to save for use when
* freeing. */
comet_t *cometbase;
struct s_comet_reg *cometbase;
struct sbe_card_info *up;
/*
......
......@@ -344,25 +344,17 @@ void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
unsigned short utime = qh->usecs;
int done = 0;
int i = 0;
int ret = -1;
int i;
while (!done) {
for (i = 0; i < 8; i++) {
/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
if (utime <= hsotg->frame_usecs[i]) {
hsotg->frame_usecs[i] -= utime;
qh->frame_usecs[i] += utime;
ret = i;
done = 1;
} else {
i++;
if (i == 8)
done = 1;
return i;
}
}
return ret;
return -1;
}
/*
......@@ -372,21 +364,14 @@ static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
unsigned short utime = qh->usecs;
unsigned short xtime;
int t_left = utime;
int done = 0;
int i = 0;
int t_left;
int i;
int j;
int ret = -1;
while (!done) {
if (hsotg->frame_usecs[i] <= 0) {
i++;
if (i == 8) {
ret = -1;
done = 1;
}
int k;
for (i = 0; i < 8; i++) {
if (hsotg->frame_usecs[i] <= 0)
continue;
}
/*
* we need n consecutive slots so use j as a start slot
......@@ -400,50 +385,35 @@ static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*/
if (xtime + hsotg->frame_usecs[j] < utime) {
if (hsotg->frame_usecs[j] <
max_uframe_usecs[j]) {
ret = -1;
break;
}
max_uframe_usecs[j])
continue;
}
if (xtime >= utime) {
ret = i;
break;
t_left = utime;
for (k = i; k < 8; k++) {
t_left -= hsotg->frame_usecs[k];
if (t_left <= 0) {
qh->frame_usecs[k] +=
hsotg->frame_usecs[k]
+ t_left;
hsotg->frame_usecs[k] = -t_left;
return i;
} else {
qh->frame_usecs[k] +=
hsotg->frame_usecs[k];
hsotg->frame_usecs[k] = 0;
}
}
}
/* add the frame time to x time */
xtime += hsotg->frame_usecs[j];
/* we must have a fully available next frame or break */
if (xtime < utime &&
hsotg->frame_usecs[j] == max_uframe_usecs[j]) {
ret = -1;
break;
}
}
if (ret >= 0) {
t_left = utime;
for (j = i; t_left > 0 && j < 8; j++) {
t_left -= hsotg->frame_usecs[j];
if (t_left <= 0) {
qh->frame_usecs[j] +=
hsotg->frame_usecs[j] + t_left;
hsotg->frame_usecs[j] = -t_left;
ret = i;
done = 1;
} else {
qh->frame_usecs[j] +=
hsotg->frame_usecs[j];
hsotg->frame_usecs[j] = 0;
}
}
} else {
i++;
if (i == 8) {
ret = -1;
done = 1;
}
hsotg->frame_usecs[j] == max_uframe_usecs[j])
continue;
}
}
return ret;
return -1;
}
static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
......
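The rewritten dwc2_find_single_uframe() above replaces the while/done bookkeeping with a plain scan of the eight microframe budgets. The same search, as a standalone sketch with stand-in arrays for the driver's hsotg->frame_usecs and qh->frame_usecs fields:

static int find_single_slot(unsigned short frame_usecs[8],
			    unsigned short claimed[8], unsigned short utime)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (utime <= frame_usecs[i]) {
			frame_usecs[i] -= utime;	/* budget left in microframe i */
			claimed[i] += utime;		/* time booked for this transfer */
			return i;			/* first microframe with enough room */
		}
	}
	return -1;					/* no single microframe can hold it */
}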
......@@ -135,7 +135,7 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
static struct platform_driver dwc2_platform_driver = {
.driver = {
.name = (char *)dwc2_driver_name,
.name = dwc2_driver_name,
.of_match_table = dwc2_of_match_table,
},
.probe = dwc2_driver_probe,
......
......@@ -45,13 +45,13 @@ static int ft1000_poll_thread(void *arg)
msleep(10);
if (!gPollingfailed) {
ret = ft1000_poll(arg);
if (ret != STATUS_SUCCESS) {
if (ret != 0) {
DEBUG("ft1000_poll_thread: polling failed\n");
gPollingfailed = true;
}
}
}
return STATUS_SUCCESS;
return 0;
}
static int ft1000_probe(struct usb_interface *interface,
......
......@@ -11,8 +11,6 @@
#define PSEUDOSZ 16
#define SUCCESS 0x00
struct app_info_block {
u32 nTxMsg; /* DPRAM msg sent to DSP with app_id */
u32 nRxMsg; /* DPRAM msg rcv from dsp with app_id */
......@@ -31,9 +29,6 @@ struct app_info_block {
#define FALSE 0
#define TRUE 1
#define STATUS_SUCCESS 0
#define STATUS_FAILURE 0x1001
#define FT1000_STATUS_CLOSING 0x01
#define DSPBCMSGID 0x10
......