Commit b2357839 authored by Yoshihiro Shimoda, committed by Felipe Balbi

usb: renesas_usbhs: add a workaround for a race condition of workqueue

The old commit 6e4b74e4 ("usb: renesas: fix scheduling in atomic
context bug") fixed an atomic issue by using workqueue for the shdmac
dmaengine driver. However, this has a potential race condition issue
between the work pending and usbhsg_ep_free_request() in gadget mode.
When usbhsg_ep_free_request() is called while the work is still pending,
the work_struct will be freed and then the work handler is called, so a
kernel panic happens in process_one_work().

To fix the issue, it would be easy if we could call cancel_work_sync()
somewhere before the request is freed. However,
usbhsg_ep_free_request() is called in atomic context (e.g. the f_ncm
driver calls free request via gether_disconnect()).

For now, almost all users have "USB-DMAC" and the DMAengine
driver can be used in atomic context. So, this patch adds a workaround
for the race condition by calling the DMAengine APIs without the workqueue.

This means we still have TODO on shdmac environment (SH7724), but
since it doesn't have SMP, the race condition might not happen.

Fixes: ab330cf3 ("usb: renesas_usbhs: add support for USB-DMAC")
Cc: <stable@vger.kernel.org> # v4.1+
Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
parent 4aef7966
...@@ -803,9 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) ...@@ -803,9 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
} }
static void usbhsf_dma_complete(void *arg); static void usbhsf_dma_complete(void *arg);
static void xfer_work(struct work_struct *work) static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
{ {
struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
struct usbhs_pipe *pipe = pkt->pipe; struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_fifo *fifo; struct usbhs_fifo *fifo;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
...@@ -813,12 +812,10 @@ static void xfer_work(struct work_struct *work) ...@@ -813,12 +812,10 @@ static void xfer_work(struct work_struct *work)
struct dma_chan *chan; struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv); struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir; enum dma_transfer_direction dir;
unsigned long flags;
usbhs_lock(priv, flags);
fifo = usbhs_pipe_to_fifo(pipe); fifo = usbhs_pipe_to_fifo(pipe);
if (!fifo) if (!fifo)
goto xfer_work_end; return;
chan = usbhsf_dma_chan_get(fifo, pkt); chan = usbhsf_dma_chan_get(fifo, pkt);
dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
...@@ -827,7 +824,7 @@ static void xfer_work(struct work_struct *work) ...@@ -827,7 +824,7 @@ static void xfer_work(struct work_struct *work)
pkt->trans, dir, pkt->trans, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK); DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) if (!desc)
goto xfer_work_end; return;
desc->callback = usbhsf_dma_complete; desc->callback = usbhsf_dma_complete;
desc->callback_param = pipe; desc->callback_param = pipe;
...@@ -835,7 +832,7 @@ static void xfer_work(struct work_struct *work) ...@@ -835,7 +832,7 @@ static void xfer_work(struct work_struct *work)
pkt->cookie = dmaengine_submit(desc); pkt->cookie = dmaengine_submit(desc);
if (pkt->cookie < 0) { if (pkt->cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n"); dev_err(dev, "Failed to submit dma descriptor\n");
goto xfer_work_end; return;
} }
dev_dbg(dev, " %s %d (%d/ %d)\n", dev_dbg(dev, " %s %d (%d/ %d)\n",
...@@ -846,8 +843,17 @@ static void xfer_work(struct work_struct *work) ...@@ -846,8 +843,17 @@ static void xfer_work(struct work_struct *work)
dma_async_issue_pending(chan); dma_async_issue_pending(chan);
usbhsf_dma_start(pipe, fifo); usbhsf_dma_start(pipe, fifo);
usbhs_pipe_enable(pipe); usbhs_pipe_enable(pipe);
}
static void xfer_work(struct work_struct *work)
{
struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
unsigned long flags;
xfer_work_end: usbhs_lock(priv, flags);
usbhsf_dma_xfer_preparing(pkt);
usbhs_unlock(priv, flags); usbhs_unlock(priv, flags);
} }
...@@ -900,8 +906,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) ...@@ -900,8 +906,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt->trans = len; pkt->trans = len;
usbhsf_tx_irq_ctrl(pipe, 0); usbhsf_tx_irq_ctrl(pipe, 0);
/* FIXME: Workaound for usb dmac that driver can be used in atomic */
if (usbhs_get_dparam(priv, has_usb_dmac)) {
usbhsf_dma_xfer_preparing(pkt);
} else {
INIT_WORK(&pkt->work, xfer_work); INIT_WORK(&pkt->work, xfer_work);
schedule_work(&pkt->work); schedule_work(&pkt->work);
}
return 0; return 0;
...@@ -1007,8 +1018,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, ...@@ -1007,8 +1018,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
pkt->trans = pkt->length; pkt->trans = pkt->length;
INIT_WORK(&pkt->work, xfer_work); usbhsf_dma_xfer_preparing(pkt);
schedule_work(&pkt->work);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment