Commit e1bc1132 authored by Linus Torvalds

Merge tag 'char-misc-6.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char / misc fixes from Greg KH:
 "Here are some small char/misc fixes for 6.11-rc4 to resolve reported
  problems. Included in here are:

   - fastrpc revert of a change that broke userspace

   - xillybus fixes for reported issues

  Half of these have been in linux-next this week with no reported
  problems, I don't know if the last bit of xillybus driver changes made
  it in, but they are 'obviously correct' so will be safe :)"

* tag 'char-misc-6.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  char: xillybus: Check USB endpoints when probing device
  char: xillybus: Refine workqueue handling
  Revert "misc: fastrpc: Restrict untrusted app to attach to privileged PD"
  char: xillybus: Don't destroy workqueue from work item running on it
parents 394f33f9 2374bf75
@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2");
 static const char xillyname[] = "xillyusb";
 static unsigned int fifo_buf_order;
+static struct workqueue_struct *wakeup_wq;
 #define USB_VENDOR_ID_XILINX 0x03fd
 #define USB_VENDOR_ID_ALTERA 0x09fb
@@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref)
  * errors if executed. The mechanism relies on that xdev->error is assigned
  * a non-zero value by report_io_error() prior to queueing wakeup_all(),
  * which prevents bulk_in_work() from calling process_bulk_in().
- *
- * The fact that wakeup_all() and bulk_in_work() are queued on the same
- * workqueue makes their concurrent execution very unlikely, however the
- * kernel's API doesn't seem to ensure this strictly.
  */
 static void wakeup_all(struct work_struct *work)
@@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev,
 	if (do_once) {
 		kref_get(&xdev->kref); /* xdev is used by work item */
-		queue_work(xdev->workq, &xdev->wakeup_workitem);
+		queue_work(wakeup_wq, &xdev->wakeup_workitem);
 	}
 }
@@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = {
 static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
 {
+	struct usb_device *udev = xdev->udev;
+
+	/* Verify that device has the two fundamental bulk in/out endpoints */
+	if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
+	    usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
+		return -ENODEV;
+
 	xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
 				      bulk_out_work, 1, 2);
 	if (!xdev->msg_ep)
@@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
 			  __le16 *chandesc,
 			  int num_channels)
 {
-	struct xillyusb_channel *chan;
+	struct usb_device *udev = xdev->udev;
+	struct xillyusb_channel *chan, *new_channels;
 	int i;
 	chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
 	if (!chan)
 		return -ENOMEM;
-	xdev->channels = chan;
+	new_channels = chan;
 	for (i = 0; i < num_channels; i++, chan++) {
 		unsigned int in_desc = le16_to_cpu(*chandesc++);
@@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
 		 */
 		if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
+			if (usb_pipe_type_check(udev,
+						usb_sndbulkpipe(udev, i + 2))) {
+				dev_err(xdev->dev,
+					"Missing BULK OUT endpoint %d\n",
+					i + 2);
+				kfree(new_channels);
+				return -ENODEV;
+			}
+
 			chan->writable = 1;
 			chan->out_synchronous = !!(out_desc & 0x40);
 			chan->out_seekable = !!(out_desc & 0x20);
@@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev,
 		}
 	}
+	xdev->channels = new_channels;
 	return 0;
 }
@@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface)
	 * just after responding with the IDT, there is no reason for any
	 * work item to be running now. To be sure that xdev->channels
	 * is updated on anything that might run in parallel, flush the
-	 * workqueue, which rarely does anything.
+	 * device's workqueue and the wakeup work item. This rarely
+	 * does anything.
	 */
 	flush_workqueue(xdev->workq);
+	flush_work(&xdev->wakeup_workitem);
 	xdev->num_channels = num_channels;
@@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void)
 {
 	int rc = 0;
+	wakeup_wq = alloc_workqueue(xillyname, 0, 0);
+	if (!wakeup_wq)
+		return -ENOMEM;
+
 	if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
 		fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
 	else
@@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void)
 	rc = usb_register(&xillyusb_driver);
+	if (rc)
+		destroy_workqueue(wakeup_wq);
+
 	return rc;
 }
 static void __exit xillyusb_exit(void)
 {
 	usb_deregister(&xillyusb_driver);
+	destroy_workqueue(wakeup_wq);
 }
 module_init(xillyusb_init);
......
@@ -2085,16 +2085,6 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
 	return err;
 }
-static int is_attach_rejected(struct fastrpc_user *fl)
-{
-	/* Check if the device node is non-secure */
-	if (!fl->is_secure_dev) {
-		dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n");
-		return -EACCES;
-	}
-	return 0;
-}
 static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
 				 unsigned long arg)
 {
@@ -2107,18 +2097,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
 		err = fastrpc_invoke(fl, argp);
 		break;
 	case FASTRPC_IOCTL_INIT_ATTACH:
-		err = is_attach_rejected(fl);
-		if (!err)
-			err = fastrpc_init_attach(fl, ROOT_PD);
+		err = fastrpc_init_attach(fl, ROOT_PD);
 		break;
 	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
-		err = is_attach_rejected(fl);
-		if (!err)
-			err = fastrpc_init_attach(fl, SENSORS_PD);
+		err = fastrpc_init_attach(fl, SENSORS_PD);
 		break;
 	case FASTRPC_IOCTL_INIT_CREATE_STATIC:
-		err = is_attach_rejected(fl);
-		if (!err)
-			err = fastrpc_init_create_static_process(fl, argp);
+		err = fastrpc_init_create_static_process(fl, argp);
 		break;
 	case FASTRPC_IOCTL_INIT_CREATE:
......
@@ -8,14 +8,11 @@
 #define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf)
 #define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32)
 #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
-/* This ioctl is only supported with secure device nodes */
 #define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
 #define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
 #define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
 #define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
-/* This ioctl is only supported with secure device nodes */
 #define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8)
-/* This ioctl is only supported with secure device nodes */
 #define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static)
 #define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map)
 #define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment