Commit bf9503f1 authored by Christian Gromm, committed by Greg Kroah-Hartman

staging: most: hdm-usb: fix race between enqueue and most_stop_enqueue

The "broken in pipe" handler of the USB-HDM calls most_stop_enqueue() to
stop the MBO traffic before returning all MBOs back to the Mostcore.  As
the enqueue() call from the Mostcore may run in parallel with the
most_stop_enqueue(), the HDM may run into the inconsistent state and
crash the kernel.

This patch synchronizes enqueue(), most_stop_enqueue() and
most_resume_enqueue() with a mutex, hence avoiding the race condition.
Signed-off-by: Andrey Shvetsov <andrey.shvetsov@k2l.de>
Signed-off-by: Christian Gromm <christian.gromm@microchip.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cf059183
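
For readers following along, here is a minimal userspace sketch of the locking pattern this patch introduces (a pthreads analogue; the names stop_enqueue/resume_enqueue/try_enqueue are illustrative, not the driver's API). The halt flag is only read and written under the same mutex that is held across the enqueue call, so a stop request can never interleave with an in-flight enqueue:

/*
 * Userspace analogue of the nq_mutex pattern (illustrative names,
 * pthreads instead of the kernel mutex API).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t nq_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool enqueue_halt;

/* analogue of most_stop_enqueue(): cannot return while try_enqueue() holds the mutex */
static void stop_enqueue(void)
{
	pthread_mutex_lock(&nq_mutex);
	enqueue_halt = true;
	pthread_mutex_unlock(&nq_mutex);
}

/* analogue of most_resume_enqueue() */
static void resume_enqueue(void)
{
	pthread_mutex_lock(&nq_mutex);
	enqueue_halt = false;
	pthread_mutex_unlock(&nq_mutex);
}

/* the enqueue thread's critical section: recheck the flag under the lock */
static int try_enqueue(int mbo)
{
	int ret = -1;

	pthread_mutex_lock(&nq_mutex);
	if (!enqueue_halt) {
		printf("enqueued mbo %d\n", mbo);	/* hand off to the HDM */
		ret = 0;
	}
	pthread_mutex_unlock(&nq_mutex);
	return ret;
}

int main(void)
{
	try_enqueue(1);		/* succeeds */
	stop_enqueue();		/* from here on, no new enqueue can start */
	try_enqueue(2);		/* refused: halt flag observed under the lock */
	resume_enqueue();
	try_enqueue(3);		/* succeeds again */
	return 0;
}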
@@ -415,7 +415,6 @@ static void hdm_write_completion(struct urb *urb)
 	switch (urb->status) {
 	case -EPIPE:
 		dev_warn(dev, "Broken OUT pipe detected\n");
-		most_stop_enqueue(&mdev->iface, channel);
 		mdev->is_channel_healthy[channel] = false;
 		mbo->status = MBO_E_INVAL;
 		mdev->clear_work[channel].pipe = urb->pipe;
@@ -578,7 +577,6 @@ static void hdm_read_completion(struct urb *urb)
 	switch (urb->status) {
 	case -EPIPE:
 		dev_warn(dev, "Broken IN pipe detected\n");
-		most_stop_enqueue(&mdev->iface, channel);
 		mdev->is_channel_healthy[channel] = false;
 		mbo->status = MBO_E_INVAL;
 		mdev->clear_work[channel].pipe = urb->pipe;
@@ -928,6 +926,7 @@ static void wq_clear_halt(struct work_struct *wq_obj)
 	int pipe = clear_work->pipe;
 
 	mutex_lock(&mdev->io_mutex);
+	most_stop_enqueue(&mdev->iface, channel);
 	free_anchored_buffers(mdev, channel, MBO_E_INVAL);
 	if (usb_clear_halt(mdev->usb_device, pipe))
 		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
@@ -51,6 +51,7 @@ struct most_c_obj {
 	u16 channel_id;
 	bool is_poisoned;
 	struct mutex start_mutex;
+	struct mutex nq_mutex; /* nq thread synchronization */
 	int is_starving;
 	struct most_interface *iface;
 	struct most_inst_obj *inst;
@@ -1131,18 +1132,18 @@ static inline void trash_mbo(struct mbo *mbo)
 	spin_unlock_irqrestore(&c->fifo_lock, flags);
 }
 
-static struct mbo *get_hdm_mbo(struct most_c_obj *c)
+static bool hdm_mbo_ready(struct most_c_obj *c)
 {
-	unsigned long flags;
-	struct mbo *mbo;
+	bool empty;
 
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	if (c->enqueue_halt || list_empty(&c->halt_fifo))
-		mbo = NULL;
-	else
-		mbo = list_pop_mbo(&c->halt_fifo);
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-	return mbo;
+	if (c->enqueue_halt)
+		return false;
+
+	spin_lock_irq(&c->fifo_lock);
+	empty = list_empty(&c->halt_fifo);
+	spin_unlock_irq(&c->fifo_lock);
+
+	return !empty;
 }
 
 static void nq_hdm_mbo(struct mbo *mbo)
@@ -1160,20 +1161,32 @@ static int hdm_enqueue_thread(void *data)
 {
 	struct most_c_obj *c = data;
 	struct mbo *mbo;
+	int ret;
 	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
 
 	while (likely(!kthread_should_stop())) {
 		wait_event_interruptible(c->hdm_fifo_wq,
-					 (mbo = get_hdm_mbo(c)) ||
+					 hdm_mbo_ready(c) ||
 					 kthread_should_stop());
 
-		if (unlikely(!mbo))
+		mutex_lock(&c->nq_mutex);
+		spin_lock_irq(&c->fifo_lock);
+		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
+			spin_unlock_irq(&c->fifo_lock);
+			mutex_unlock(&c->nq_mutex);
 			continue;
+		}
+
+		mbo = list_pop_mbo(&c->halt_fifo);
+		spin_unlock_irq(&c->fifo_lock);
 
 		if (c->cfg.direction == MOST_CH_RX)
 			mbo->buffer_length = c->cfg.buffer_size;
 
-		if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
+		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
+		mutex_unlock(&c->nq_mutex);
+
+		if (unlikely(ret)) {
 			pr_err("hdm enqueue failed\n");
 			nq_hdm_mbo(mbo);
 			c->hdm_enqueue_task = NULL;
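
Note that hdm_mbo_ready() deliberately reads enqueue_halt without taking nq_mutex: it only serves as the wakeup predicate. The enqueue thread then re-checks both enqueue_halt and the FIFO under nq_mutex (and fifo_lock) before popping an MBO, so a most_stop_enqueue() that slips in between the wakeup and the pop is still honored and the MBO stays in halt_fifo.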
@@ -1759,6 +1772,7 @@ struct kobject *most_register_interface(struct most_interface *iface)
 		init_completion(&c->cleanup);
 		atomic_set(&c->mbo_ref, 0);
 		mutex_init(&c->start_mutex);
+		mutex_init(&c->nq_mutex);
 		list_add_tail(&c->list, &inst->channel_list);
 	}
 	pr_info("registered new MOST device mdev%d (%s)\n",
@@ -1824,8 +1838,12 @@ void most_stop_enqueue(struct most_interface *iface, int id)
 {
 	struct most_c_obj *c = get_channel_by_iface(iface, id);
 
-	if (likely(c))
-		c->enqueue_halt = true;
+	if (!c)
+		return;
+
+	mutex_lock(&c->nq_mutex);
+	c->enqueue_halt = true;
+	mutex_unlock(&c->nq_mutex);
 }
 
 EXPORT_SYMBOL_GPL(most_stop_enqueue);
@@ -1841,9 +1859,12 @@ void most_resume_enqueue(struct most_interface *iface, int id)
 {
 	struct most_c_obj *c = get_channel_by_iface(iface, id);
 
-	if (unlikely(!c))
+	if (!c)
 		return;
+
+	mutex_lock(&c->nq_mutex);
 	c->enqueue_halt = false;
+	mutex_unlock(&c->nq_mutex);
 
 	wake_up_interruptible(&c->hdm_fifo_wq);
 }