Commit e1910fcd authored by Geert Uytterhoeven, committed by Greg Kroah-Hartman

serial: sh-sci: Shuffle functions around

This allows us to:
  - Remove forward declarations of static functions,
  - Coalesce two sections protected by #ifdef CONFIG_SERIAL_SH_SCI_DMA,
  - Avoid shuffling functions around in the near future,
  - Avoid adding forward declarations in the near future.

No functional changes.
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3575b858
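
The first and last bullets of the message come down to C's declare-before-use rule: a static function defined above its first caller needs no separate prototype. A minimal standalone sketch of the pattern (the work()/helper() names are hypothetical, not taken from the driver):

#include <stdio.h>

/*
 * Before the shuffle: helper() was defined below its first caller,
 * so a forward declaration was required (disabled copy kept for
 * comparison).
 */
#if 0
static void helper(void);		/* forward declaration */

static void work(void) { helper(); }

static void helper(void) { puts("helper"); }
#endif

/*
 * After the shuffle: the definition precedes the caller and the
 * declaration can simply be dropped.
 */
static void helper(void) { puts("helper"); }

static void work(void) { helper(); }

int main(void)
{
	work();
	return 0;
}
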
@@ -123,11 +123,6 @@ struct sci_port {
 	struct notifier_block		freq_transition;
 };
 
-/* Function prototypes */
-static void sci_start_tx(struct uart_port *port);
-static void sci_stop_tx(struct uart_port *port);
-static void sci_start_rx(struct uart_port *port);
-
 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
 
 static struct sci_port sci_ports[SCI_NPORTS];
@@ -489,6 +484,89 @@ static void sci_port_disable(struct sci_port *sci_port)
 	pm_runtime_put_sync(sci_port->port.dev);
 }
 
+static inline unsigned long port_rx_irq_mask(struct uart_port *port)
+{
+	/*
+	 * Not all ports (such as SCIFA) will support REIE. Rather than
+	 * special-casing the port type, we check the port initialization
+	 * IRQ enable mask to see whether the IRQ is desired at all. If
+	 * it's unset, it's logically inferred that there's no point in
+	 * testing for it.
+	 */
+	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
+}
+
+static void sci_start_tx(struct uart_port *port)
+{
+	struct sci_port *s = to_sci_port(port);
+	unsigned short ctrl;
+
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+		u16 new, scr = serial_port_in(port, SCSCR);
+		if (s->chan_tx)
+			new = scr | SCSCR_TDRQE;
+		else
+			new = scr & ~SCSCR_TDRQE;
+		if (new != scr)
+			serial_port_out(port, SCSCR, new);
+	}
+
+	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+	    dma_submit_error(s->cookie_tx)) {
+		s->cookie_tx = 0;
+		schedule_work(&s->work_tx);
+	}
+#endif
+
+	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+		ctrl = serial_port_in(port, SCSCR);
+		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
+	}
+}
+
+static void sci_stop_tx(struct uart_port *port)
+{
+	unsigned short ctrl;
+
+	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
+	ctrl = serial_port_in(port, SCSCR);
+
+	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+		ctrl &= ~SCSCR_TDRQE;
+
+	ctrl &= ~SCSCR_TIE;
+
+	serial_port_out(port, SCSCR, ctrl);
+}
+
+static void sci_start_rx(struct uart_port *port)
+{
+	unsigned short ctrl;
+
+	ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
+
+	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+		ctrl &= ~SCSCR_RDRQE;
+
+	serial_port_out(port, SCSCR, ctrl);
+}
+
+static void sci_stop_rx(struct uart_port *port)
+{
+	unsigned short ctrl;
+
+	ctrl = serial_port_in(port, SCSCR);
+
+	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+		ctrl &= ~SCSCR_RDRQE;
+
+	ctrl &= ~port_rx_irq_mask(port);
+
+	serial_port_out(port, SCSCR, ctrl);
+}
+
 static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
 {
 	if (port->type == PORT_SCI) {
@@ -940,694 +1018,743 @@ static int sci_handle_breaks(struct uart_port *port)
 	return copied;
 }
 
-[694 lines removed: the old copies of sci_rx_interrupt(), sci_tx_interrupt(),
-sci_er_interrupt(), sci_br_interrupt(), port_rx_irq_mask(),
-sci_mpxed_interrupt(), sci_notifier(), sci_irq_desc[], sci_request_irq(),
-sci_free_irq(), sci_tx_empty(), sci_set_mctrl(), sci_get_mctrl(), the first
-#ifdef CONFIG_SERIAL_SH_SCI_DMA section (sci_dma_tx_complete() through
-work_fn_tx()), and sci_start_tx(), sci_stop_tx(), sci_start_rx() and
-sci_stop_rx(); all re-added verbatim below or in the hunk at line 484 above]
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+static void sci_dma_tx_complete(void *arg)
+{
+	struct sci_port *s = arg;
+	struct uart_port *port = &s->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	unsigned long flags;
+
+	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	xmit->tail += s->tx_dma_len;
+	xmit->tail &= UART_XMIT_SIZE - 1;
+
+	port->icount.tx += s->tx_dma_len;
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+	if (!uart_circ_empty(xmit)) {
+		s->cookie_tx = 0;
+		schedule_work(&s->work_tx);
+	} else {
+		s->cookie_tx = -EINVAL;
+		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+			u16 ctrl = serial_port_in(port, SCSCR);
+			serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+		}
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Locking: called with port lock held */
+static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count)
+{
+	struct uart_port *port = &s->port;
+	struct tty_port *tport = &port->state->port;
+	int copied;
+
+	copied = tty_insert_flip_string(tport, buf, count);
+	if (copied < count) {
+		dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n",
+			 count - copied);
+		port->icount.buf_overrun++;
+	}
+
+	port->icount.rx += copied;
+
+	return copied;
+}
+
+static int sci_dma_rx_find_active(struct sci_port *s)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++)
+		if (s->active_rx == s->cookie_rx[i])
+			return i;
+
+	dev_err(s->port.dev, "%s: Rx cookie %d not found!\n", __func__,
+		s->active_rx);
+	return -1;
+}
+
+static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
+{
+	struct dma_chan *chan = s->chan_rx;
+	struct uart_port *port = &s->port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	s->chan_rx = NULL;
+	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
+	spin_unlock_irqrestore(&port->lock, flags);
+	dmaengine_terminate_all(chan);
+	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
+			  sg_dma_address(&s->sg_rx[0]));
+	dma_release_channel(chan);
+	if (enable_pio)
+		sci_start_rx(port);
+}
+
+static void sci_dma_rx_complete(void *arg)
+{
+	struct sci_port *s = arg;
+	struct uart_port *port = &s->port;
+	unsigned long flags;
+	int active, count = 0;
+
+	dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
+		s->active_rx);
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	active = sci_dma_rx_find_active(s);
+	if (active >= 0)
+		count = sci_dma_rx_push(s, s->rx_buf[active], s->buf_len_rx);
+
+	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	if (count)
+		tty_flip_buffer_push(&port->state->port);
+
+	schedule_work(&s->work_rx);
+}
+
+static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
+{
+	struct dma_chan *chan = s->chan_tx;
+	struct uart_port *port = &s->port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	s->chan_tx = NULL;
+	s->cookie_tx = -EINVAL;
+	spin_unlock_irqrestore(&port->lock, flags);
+	dmaengine_terminate_all(chan);
+	dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
+			 DMA_TO_DEVICE);
+	dma_release_channel(chan);
+	if (enable_pio)
+		sci_start_tx(port);
+}
+
+static void sci_submit_rx(struct sci_port *s)
+{
+	struct dma_chan *chan = s->chan_rx;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct scatterlist *sg = &s->sg_rx[i];
+		struct dma_async_tx_descriptor *desc;
+
+		desc = dmaengine_prep_slave_sg(chan,
+			sg, 1, DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc)
+			goto fail;
+
+		desc->callback = sci_dma_rx_complete;
+		desc->callback_param = s;
+		s->cookie_rx[i] = dmaengine_submit(desc);
+		if (dma_submit_error(s->cookie_rx[i]))
+			goto fail;
+
+		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+			s->cookie_rx[i], i);
+	}
+
+	s->active_rx = s->cookie_rx[0];
+
+	dma_async_issue_pending(chan);
+	return;
+
+fail:
+	if (i)
+		dmaengine_terminate_all(chan);
+	for (i = 0; i < 2; i++)
+		s->cookie_rx[i] = -EINVAL;
+	s->active_rx = -EINVAL;
+	dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n");
+	sci_rx_dma_release(s, true);
+}
+
+static void work_fn_rx(struct work_struct *work)
+{
+	struct sci_port *s = container_of(work, struct sci_port, work_rx);
+	struct uart_port *port = &s->port;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_tx_state state;
+	enum dma_status status;
+	unsigned long flags;
+	int new;
+
+	spin_lock_irqsave(&port->lock, flags);
+	new = sci_dma_rx_find_active(s);
+	if (new < 0) {
+		spin_unlock_irqrestore(&port->lock, flags);
+		return;
+	}
+
+	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
+	if (status != DMA_COMPLETE) {
+		/* Handle incomplete DMA receive */
+		struct dma_chan *chan = s->chan_rx;
+		unsigned int read;
+		int count;
+
+		dmaengine_terminate_all(chan);
+		read = sg_dma_len(&s->sg_rx[new]) - state.residue;
+		dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
+			s->active_rx);
+
+		if (read) {
+			count = sci_dma_rx_push(s, s->rx_buf[new], read);
+			if (count)
+				tty_flip_buffer_push(&port->state->port);
+		}
+
+		spin_unlock_irqrestore(&port->lock, flags);
+
+		sci_submit_rx(s);
+		return;
+	}
+
+	desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[new], 1,
+				       DMA_DEV_TO_MEM,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		goto fail;
+
+	desc->callback = sci_dma_rx_complete;
+	desc->callback_param = s;
+	s->cookie_rx[new] = dmaengine_submit(desc);
+	if (dma_submit_error(s->cookie_rx[new]))
+		goto fail;
+
+	s->active_rx = s->cookie_rx[!new];
+
+	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
+		__func__, s->cookie_rx[new], new, s->active_rx);
+	spin_unlock_irqrestore(&port->lock, flags);
+	return;
+
+fail:
+	spin_unlock_irqrestore(&port->lock, flags);
+	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
+	sci_rx_dma_release(s, true);
+}
+
+static void work_fn_tx(struct work_struct *work)
+{
+	struct sci_port *s = container_of(work, struct sci_port, work_tx);
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *chan = s->chan_tx;
+	struct uart_port *port = &s->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	dma_addr_t buf;
+
+	/*
+	 * DMA is idle now.
+	 * Port xmit buffer is already mapped, and it is one page... Just adjust
+	 * offsets and lengths. Since it is a circular buffer, we have to
+	 * transmit till the end, and then the rest. Take the port lock to get a
+	 * consistent xmit buffer state.
+	 */
+	spin_lock_irq(&port->lock);
+	buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+	s->tx_dma_len = min_t(unsigned int,
+		CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
+	spin_unlock_irq(&port->lock);
+
+	desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
+					   DMA_MEM_TO_DEV,
+					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
+		/* switch to PIO */
+		sci_tx_dma_release(s, true);
+		return;
+	}
+
+	dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
+				   DMA_TO_DEVICE);
+
+	spin_lock_irq(&port->lock);
+	desc->callback = sci_dma_tx_complete;
+	desc->callback_param = s;
+	spin_unlock_irq(&port->lock);
+	s->cookie_tx = dmaengine_submit(desc);
+	if (dma_submit_error(s->cookie_tx)) {
+		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
+		/* switch to PIO */
+		sci_tx_dma_release(s, true);
+		return;
+	}
+
+	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
+		__func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+
+	dma_async_issue_pending(chan);
+}
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+	struct sh_dmae_slave *param = slave;
+
+	dev_dbg(chan->device->dev, "%s: slave ID %d\n",
+		__func__, param->shdma_slave.slave_id);
+
+	chan->private = &param->shdma_slave;
+	return true;
+}
+
+static void rx_timer_fn(unsigned long arg)
+{
+	struct sci_port *s = (struct sci_port *)arg;
+	struct uart_port *port = &s->port;
+	u16 scr = serial_port_in(port, SCSCR);
+
+	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+		scr &= ~SCSCR_RDRQE;
+		enable_irq(s->irqs[SCIx_RXI_IRQ]);
+	}
+	serial_port_out(port, SCSCR, scr | SCSCR_RIE);
+	dev_dbg(port->dev, "DMA Rx timed out\n");
+	schedule_work(&s->work_rx);
+}
+
+static void sci_request_dma(struct uart_port *port)
+{
+	struct sci_port *s = to_sci_port(port);
+	struct sh_dmae_slave *param;
+	struct dma_chan *chan;
+	dma_cap_mask_t mask;
+
+	dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
+
+	if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
+		return;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	param = &s->param_tx;
+
+	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
+	param->shdma_slave.slave_id = s->cfg->dma_slave_tx;
+
+	s->cookie_tx = -EINVAL;
+	chan = dma_request_channel(mask, filter, param);
+	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
+	if (chan) {
+		s->chan_tx = chan;
+		/* UART circular tx buffer is an aligned page. */
+		s->tx_dma_addr = dma_map_single(chan->device->dev,
+						port->state->xmit.buf,
+						UART_XMIT_SIZE,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
+			dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
+			dma_release_channel(chan);
+			s->chan_tx = NULL;
+		} else {
+			dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
+				__func__, UART_XMIT_SIZE,
+				port->state->xmit.buf, &s->tx_dma_addr);
+		}
+
+		INIT_WORK(&s->work_tx, work_fn_tx);
+	}
+
+	param = &s->param_rx;
+
+	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
+	param->shdma_slave.slave_id = s->cfg->dma_slave_rx;
+
+	chan = dma_request_channel(mask, filter, param);
+	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
+	if (chan) {
+		unsigned int i;
+		dma_addr_t dma;
+		void *buf;
+
+		s->chan_rx = chan;
+
+		s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
+		buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2,
+					 &dma, GFP_KERNEL);
+		if (!buf) {
+			dev_warn(port->dev,
+				 "Failed to allocate Rx dma buffer, using PIO\n");
+			dma_release_channel(chan);
+			s->chan_rx = NULL;
+			sci_start_rx(port);
+			return;
+		}
+
+		for (i = 0; i < 2; i++) {
+			struct scatterlist *sg = &s->sg_rx[i];
+
+			sg_init_table(sg, 1);
+			s->rx_buf[i] = buf;
+			sg_dma_address(sg) = dma;
+			sg->length = s->buf_len_rx;
+
+			buf += s->buf_len_rx;
+			dma += s->buf_len_rx;
+		}
+
+		INIT_WORK(&s->work_rx, work_fn_rx);
+		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
+
+		sci_submit_rx(s);
+	}
+}
+
+static void sci_free_dma(struct uart_port *port)
+{
+	struct sci_port *s = to_sci_port(port);
+
+	if (s->chan_tx)
+		sci_tx_dma_release(s, false);
+	if (s->chan_rx)
+		sci_rx_dma_release(s, false);
+}
+#else
+static inline void sci_request_dma(struct uart_port *port)
+{
+}
+
+static inline void sci_free_dma(struct uart_port *port)
+{
+}
+#endif
+
+static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
+{
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	struct uart_port *port = ptr;
+	struct sci_port *s = to_sci_port(port);
+
+	if (s->chan_rx) {
+		u16 scr = serial_port_in(port, SCSCR);
+		u16 ssr = serial_port_in(port, SCxSR);
+
+		/* Disable future Rx interrupts */
+		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+			disable_irq_nosync(irq);
+			scr |= SCSCR_RDRQE;
+		} else {
+			scr &= ~SCSCR_RIE;
+		}
+		serial_port_out(port, SCSCR, scr);
+		/* Clear current interrupt */
+		serial_port_out(port, SCxSR,
+				ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
+		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+			jiffies, s->rx_timeout);
+		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
+
+		return IRQ_HANDLED;
+	}
+#endif
+
+	/* I think sci_receive_chars has to be called irrespective
+	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
+	 * to be disabled?
+	 */
+	sci_receive_chars(ptr);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
+{
+	struct uart_port *port = ptr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	sci_transmit_chars(port);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sci_er_interrupt(int irq, void *ptr)
+{
+	struct uart_port *port = ptr;
+	struct sci_port *s = to_sci_port(port);
+
+	/* Handle errors */
+	if (port->type == PORT_SCI) {
+		if (sci_handle_errors(port)) {
+			/* discard character in rx buffer */
+			serial_port_in(port, SCxSR);
+			sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
+		}
+	} else {
+		sci_handle_fifo_overrun(port);
+		if (!s->chan_rx)
+			sci_receive_chars(ptr);
+	}
+
+	sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
+
+	/* Kick the transmission */
+	if (!s->chan_tx)
+		sci_tx_interrupt(irq, ptr);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sci_br_interrupt(int irq, void *ptr)
+{
+	struct uart_port *port = ptr;
+
+	/* Handle BREAKs */
+	sci_handle_breaks(port);
+	sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
+{
+	unsigned short ssr_status, scr_status, err_enabled, orer_status = 0;
+	struct uart_port *port = ptr;
+	struct sci_port *s = to_sci_port(port);
+	irqreturn_t ret = IRQ_NONE;
+
+	ssr_status = serial_port_in(port, SCxSR);
+	scr_status = serial_port_in(port, SCSCR);
+	if (s->overrun_reg == SCxSR)
+		orer_status = ssr_status;
+	else {
+		if (sci_getreg(port, s->overrun_reg)->size)
+			orer_status = serial_port_in(port, s->overrun_reg);
+	}
+
+	err_enabled = scr_status & port_rx_irq_mask(port);
+
+	/* Tx Interrupt */
+	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
+	    !s->chan_tx)
+		ret = sci_tx_interrupt(irq, ptr);
+
+	/*
+	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
+	 * DR flags
+	 */
+	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
+	    (scr_status & SCSCR_RIE))
+		ret = sci_rx_interrupt(irq, ptr);
+
+	/* Error Interrupt */
+	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
+		ret = sci_er_interrupt(irq, ptr);
+
+	/* Break Interrupt */
+	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
+		ret = sci_br_interrupt(irq, ptr);
+
+	/* Overrun Interrupt */
+	if (orer_status & s->overrun_mask) {
+		sci_handle_fifo_overrun(port);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/*
+ * Here we define a transition notifier so that we can update all of our
+ * ports' baud rate when the peripheral clock changes.
+ */
+static int sci_notifier(struct notifier_block *self,
+			unsigned long phase, void *p)
+{
+	struct sci_port *sci_port;
+	unsigned long flags;
+
+	sci_port = container_of(self, struct sci_port, freq_transition);
+
+	if (phase == CPUFREQ_POSTCHANGE) {
+		struct uart_port *port = &sci_port->port;
+
+		spin_lock_irqsave(&port->lock, flags);
+		port->uartclk = clk_get_rate(sci_port->iclk);
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+
+	return NOTIFY_OK;
+}
+
+static const struct sci_irq_desc {
+	const char	*desc;
+	irq_handler_t	handler;
+} sci_irq_desc[] = {
+	/*
+	 * Split out handlers, the default case.
+	 */
+	[SCIx_ERI_IRQ] = {
+		.desc = "rx err",
+		.handler = sci_er_interrupt,
+	},
+
+	[SCIx_RXI_IRQ] = {
+		.desc = "rx full",
+		.handler = sci_rx_interrupt,
+	},
+
+	[SCIx_TXI_IRQ] = {
+		.desc = "tx empty",
+		.handler = sci_tx_interrupt,
+	},
+
+	[SCIx_BRI_IRQ] = {
+		.desc = "break",
+		.handler = sci_br_interrupt,
+	},
+
+	/*
+	 * Special muxed handler.
+	 */
+	[SCIx_MUX_IRQ] = {
+		.desc = "mux",
+		.handler = sci_mpxed_interrupt,
+	},
+};
+
+static int sci_request_irq(struct sci_port *port)
+{
+	struct uart_port *up = &port->port;
+	int i, j, ret = 0;
+
+	for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
+		const struct sci_irq_desc *desc;
+		int irq;
+
+		if (SCIx_IRQ_IS_MUXED(port)) {
+			i = SCIx_MUX_IRQ;
+			irq = up->irq;
+		} else {
+			irq = port->irqs[i];
+
+			/*
+			 * Certain port types won't support all of the
+			 * available interrupt sources.
+			 */
+			if (unlikely(irq < 0))
+				continue;
+		}
+
+		desc = sci_irq_desc + i;
+		port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
+					    dev_name(up->dev), desc->desc);
+		if (!port->irqstr[j])
+			goto out_nomem;
+
+		ret = request_irq(irq, desc->handler, up->irqflags,
+				  port->irqstr[j], port);
+		if (unlikely(ret)) {
+			dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
+			goto out_noirq;
+		}
+	}
+
+	return 0;
+
+out_noirq:
+	while (--i >= 0)
+		free_irq(port->irqs[i], port);
+
+out_nomem:
+	while (--j >= 0)
+		kfree(port->irqstr[j]);
+
+	return ret;
+}
+
+static void sci_free_irq(struct sci_port *port)
+{
+	int i;
+
+	/*
+	 * Intentionally in reverse order so we iterate over the muxed
+	 * IRQ first.
+	 */
+	for (i = 0; i < SCIx_NR_IRQS; i++) {
+		int irq = port->irqs[i];
+
+		/*
+		 * Certain port types won't support all of the available
+		 * interrupt sources.
+		 */
+		if (unlikely(irq < 0))
+			continue;
+
+		free_irq(port->irqs[i], port);
+		kfree(port->irqstr[i]);
+
+		if (SCIx_IRQ_IS_MUXED(port)) {
+			/* If there's only one IRQ, we're done. */
+			return;
+		}
+	}
+}
+
+static unsigned int sci_tx_empty(struct uart_port *port)
+{
+	unsigned short status = serial_port_in(port, SCxSR);
+	unsigned short in_tx_fifo = sci_txfill(port);
+
+	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
+}
+
+/*
+ * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
+ * CTS/RTS is supported in hardware by at least one port and controlled
+ * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
+ * handled via the ->init_pins() op, which is a bit of a one-way street,
+ * lacking any ability to defer pin control -- this will later be
+ * converted over to the GPIO framework).
+ *
+ * Other modes (such as loopback) are supported generically on certain
+ * port types, but not others. For these it's sufficient to test for the
+ * existence of the support register and simply ignore the port type.
+ */
+static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	if (mctrl & TIOCM_LOOP) {
+		const struct plat_sci_reg *reg;
+
+		/*
+		 * Standard loopback mode for SCFCR ports.
+		 */
+		reg = sci_getreg(port, SCFCR);
+		if (reg->size)
+			serial_port_out(port, SCFCR,
+					serial_port_in(port, SCFCR) |
+					SCFCR_LOOP);
+	}
+}
+
+static unsigned int sci_get_mctrl(struct uart_port *port)
+{
+	/*
+	 * CTS/RTS is handled in hardware when supported, while nothing
+	 * else is wired up. Keep it simple and simply assert DSR/CAR.
+	 */
+	return TIOCM_DSR | TIOCM_CAR;
+}
 
 static void sci_break_ctl(struct uart_port *port, int break_state)
@@ -1660,140 +1787,6 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
 	serial_port_out(port, SCSCR, scscr);
 }
 
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
-static bool filter(struct dma_chan *chan, void *slave)
-{
-	struct sh_dmae_slave *param = slave;
-
-	dev_dbg(chan->device->dev, "%s: slave ID %d\n",
-		__func__, param->shdma_slave.slave_id);
-
-	chan->private = &param->shdma_slave;
-	return true;
-}
-
-static void rx_timer_fn(unsigned long arg)
-{
-	struct sci_port *s = (struct sci_port *)arg;
-	struct uart_port *port = &s->port;
-	u16 scr = serial_port_in(port, SCSCR);
-
-	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
-		scr &= ~SCSCR_RDRQE;
-		enable_irq(s->irqs[SCIx_RXI_IRQ]);
-	}
-	serial_port_out(port, SCSCR, scr | SCSCR_RIE);
-	dev_dbg(port->dev, "DMA Rx timed out\n");
-	schedule_work(&s->work_rx);
-}
-
-static void sci_request_dma(struct uart_port *port)
-{
-	struct sci_port *s = to_sci_port(port);
-	struct sh_dmae_slave *param;
-	struct dma_chan *chan;
-	dma_cap_mask_t mask;
-
-	dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
-
-	if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
-		return;
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	param = &s->param_tx;
-
-	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
-	param->shdma_slave.slave_id = s->cfg->dma_slave_tx;
-
-	s->cookie_tx = -EINVAL;
-	chan = dma_request_channel(mask, filter, param);
-	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
-	if (chan) {
-		s->chan_tx = chan;
-		/* UART circular tx buffer is an aligned page. */
-		s->tx_dma_addr = dma_map_single(chan->device->dev,
-						port->state->xmit.buf,
-						UART_XMIT_SIZE,
-						DMA_TO_DEVICE);
-		if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
-			dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
-			dma_release_channel(chan);
-			s->chan_tx = NULL;
-		} else {
-			dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
-				__func__, UART_XMIT_SIZE,
-				port->state->xmit.buf, &s->tx_dma_addr);
-		}
-
-		INIT_WORK(&s->work_tx, work_fn_tx);
-	}
-
-	param = &s->param_rx;
-
-	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
-	param->shdma_slave.slave_id = s->cfg->dma_slave_rx;
-
-	chan = dma_request_channel(mask, filter, param);
-	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
-	if (chan) {
-		unsigned int i;
-		dma_addr_t dma;
-		void *buf;
-
-		s->chan_rx = chan;
-
-		s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
-		buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2,
-					 &dma, GFP_KERNEL);
-		if (!buf) {
-			dev_warn(port->dev,
-				 "Failed to allocate Rx dma buffer, using PIO\n");
-			dma_release_channel(chan);
-			s->chan_rx = NULL;
-			sci_start_rx(port);
-			return;
-		}
-
-		for (i = 0; i < 2; i++) {
-			struct scatterlist *sg = &s->sg_rx[i];
-
-			sg_init_table(sg, 1);
-			s->rx_buf[i] = buf;
-			sg_dma_address(sg) = dma;
-			sg->length = s->buf_len_rx;
-
-			buf += s->buf_len_rx;
-			dma += s->buf_len_rx;
-		}
-
-		INIT_WORK(&s->work_rx, work_fn_rx);
-		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
-
-		sci_submit_rx(s);
-	}
-}
-
-static void sci_free_dma(struct uart_port *port)
-{
-	struct sci_port *s = to_sci_port(port);
-
-	if (s->chan_tx)
-		sci_tx_dma_release(s, false);
-	if (s->chan_rx)
-		sci_rx_dma_release(s, false);
-}
-#else
-static inline void sci_request_dma(struct uart_port *port)
-{
-}
-
-static inline void sci_free_dma(struct uart_port *port)
-{
-}
-#endif
-
 static int sci_startup(struct uart_port *port)
 {
 	struct sci_port *s = to_sci_port(port);
...
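
The coalescing named in the second bullet of the commit message follows the usual kernel pattern of pairing one conditional section with inline no-op stubs, so callers stay free of #ifdefs. A small self-contained sketch of that pattern under assumed names (request_dma_demo()/free_dma_demo() are illustrative, not the driver's sci_request_dma()/sci_free_dma()):

#include <stdio.h>

#define CONFIG_SERIAL_SH_SCI_DMA 1	/* set to 0 to exercise the stubs */

#if CONFIG_SERIAL_SH_SCI_DMA
/* One conditional section holds every DMA helper... */
static void request_dma_demo(void) { puts("requesting DMA channels"); }
static void free_dma_demo(void)    { puts("releasing DMA channels"); }
#else
/* ...and the fallbacks live in a single #else, as in the driver. */
static inline void request_dma_demo(void) { }
static inline void free_dma_demo(void)    { }
#endif

int main(void)
{
	request_dma_demo();	/* callers compile unchanged either way */
	free_dma_demo();
	return 0;
}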