Commit 28e86e9a authored by Dario Binacchi, committed by Marc Kleine-Budde

can: c_can: support tx ring algorithm

The algorithm is already used successfully by other CAN drivers
(e.g. mcp251xfd). Its implementation was kindly suggested to me by
Marc Kleine-Budde following a patch I had previously submitted. You can
find every detail at https://lore.kernel.org/patchwork/patch/1422929/.

The idea is that after this patch, it will be easier to patch the driver
to use the message object memory as a true FIFO.

Link: https://lore.kernel.org/r/20210807130800.5246-4-dariobin@libero.it
Suggested-by: Marc Kleine-Budde <mkl@pengutronix.de>
Signed-off-by: Dario Binacchi <dariobin@libero.it>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
parent a54cdbba
@@ -176,6 +176,13 @@ struct c_can_raminit {
 	bool needs_pulse;
 };
 
+/* c_can tx ring structure */
+struct c_can_tx_ring {
+	unsigned int head;
+	unsigned int tail;
+	unsigned int obj_num;
+};
+
 /* c_can private data structure */
 struct c_can_priv {
 	struct can_priv can;	/* must be the first member */
@@ -190,10 +197,10 @@ struct c_can_priv {
 	unsigned int msg_obj_tx_first;
 	unsigned int msg_obj_tx_last;
 	u32 msg_obj_rx_mask;
-	atomic_t tx_active;
 	atomic_t sie_pending;
 	unsigned long tx_dir;
 	int last_status;
+	struct c_can_tx_ring tx;
 	u16 (*read_reg)(const struct c_can_priv *priv, enum reg index);
 	void (*write_reg)(const struct c_can_priv *priv, enum reg index, u16 val);
 	u32 (*read_reg32)(const struct c_can_priv *priv, enum reg index);
@@ -219,4 +226,28 @@ int c_can_power_down(struct net_device *dev);
 
 void c_can_set_ethtool_ops(struct net_device *dev);
 
+static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring)
+{
+	return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
+{
+	return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+{
+	u8 head = c_can_get_tx_head(ring);
+	u8 tail = c_can_get_tx_tail(ring);
+
+	/* This is not a FIFO. C/D_CAN sends out the buffers
+	 * prioritized. The lowest buffer number wins.
+	 */
+	if (head < tail)
+		return 0;
+
+	return ring->obj_num - head;
+}
+
 #endif /* C_CAN_H */
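The helpers above are the core of the new ring accounting: head and tail are free-running counters, the masked head selects the next message object, and c_can_get_tx_free() stops handing out slots once the masked head has wrapped behind the masked tail, because the hardware always transmits the lowest-numbered pending buffer first. The following is only a user-space sketch of that arithmetic, assuming a hypothetical 16-object ring; none of these names are driver code.

/* Illustration only: a user-space copy of the ring helpers above,
 * run against a hypothetical ring of 16 message objects.
 */
#include <stdio.h>
#include <stdint.h>

struct tx_ring {
	unsigned int head;	/* free-running producer counter */
	unsigned int tail;	/* free-running completion counter */
	unsigned int obj_num;	/* power of two, so the masking works */
};

static uint8_t get_tx_head(const struct tx_ring *ring)
{
	return ring->head & (ring->obj_num - 1);
}

static uint8_t get_tx_tail(const struct tx_ring *ring)
{
	return ring->tail & (ring->obj_num - 1);
}

static uint8_t get_tx_free(const struct tx_ring *ring)
{
	uint8_t head = get_tx_head(ring);
	uint8_t tail = get_tx_tail(ring);

	/* The controller sends the lowest-numbered pending buffer first,
	 * so slots are only handed out from the masked head up to the end
	 * of the object array; after the head wraps, nothing is reused
	 * until the tail has caught up.
	 */
	if (head < tail)
		return 0;

	return ring->obj_num - head;
}

int main(void)
{
	struct tx_ring ring = { .head = 0, .tail = 0, .obj_num = 16 };

	printf("empty:            free=%d\n", get_tx_free(&ring));	/* 16 */

	ring.head = 5;		/* five frames queued, none completed yet */
	printf("head=5  tail=0:   free=%d\n", get_tx_free(&ring));	/* 11 */

	ring.head = 16;		/* head wrapped back to object 0 ... */
	ring.tail = 5;		/* ... but only five completions so far */
	printf("head=16 tail=5:   free=%d\n", get_tx_free(&ring));	/* 0 */

	ring.tail = 16;		/* all in-flight frames completed */
	printf("head=16 tail=16:  free=%d\n", get_tx_free(&ring));	/* 16 */

	return 0;
}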
@@ -427,24 +427,50 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 	c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
 }
 
+static bool c_can_tx_busy(const struct c_can_priv *priv,
+			  const struct c_can_tx_ring *tx_ring)
+{
+	if (c_can_get_tx_free(tx_ring) > 0)
+		return false;
+
+	netif_stop_queue(priv->dev);
+
+	/* Memory barrier before checking tx_free (head and tail) */
+	smp_mb();
+
+	if (c_can_get_tx_free(tx_ring) == 0) {
+		netdev_dbg(priv->dev,
+			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+			   tx_ring->head, tx_ring->tail,
+			   tx_ring->head - tx_ring->tail);
+		return true;
+	}
+
+	netif_start_queue(priv->dev);
+	return false;
+}
+
 static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 				    struct net_device *dev)
 {
 	struct can_frame *frame = (struct can_frame *)skb->data;
 	struct c_can_priv *priv = netdev_priv(dev);
+	struct c_can_tx_ring *tx_ring = &priv->tx;
 	u32 idx, obj;
 
 	if (can_dropped_invalid_skb(dev, skb))
 		return NETDEV_TX_OK;
 
-	/* This is not a FIFO. C/D_CAN sends out the buffers
-	 * prioritized. The lowest buffer number wins.
-	 */
-	idx = fls(atomic_read(&priv->tx_active));
-	obj = idx + priv->msg_obj_tx_first;
-
-	/* If this is the last buffer, stop the xmit queue */
-	if (idx == priv->msg_obj_tx_num - 1)
+	if (c_can_tx_busy(priv, tx_ring))
+		return NETDEV_TX_BUSY;
+
+	idx = c_can_get_tx_head(tx_ring);
+	tx_ring->head++;
+	if (c_can_get_tx_free(tx_ring) == 0)
 		netif_stop_queue(dev);
 
+	obj = idx + priv->msg_obj_tx_first;
+
 	/* Store the message in the interface so we can call
 	 * can_put_echo_skb(). We must do this before we enable
 	 * transmit as we might race against do_tx().
@@ -453,8 +479,6 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 	priv->dlc[idx] = frame->len;
 	can_put_echo_skb(skb, dev, idx, 0);
 
-	/* Update the active bits */
-	atomic_add(BIT(idx), &priv->tx_active);
-
 	/* Start transmission */
 	c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
@@ -567,6 +591,7 @@ static int c_can_software_reset(struct net_device *dev)
 static int c_can_chip_config(struct net_device *dev)
 {
 	struct c_can_priv *priv = netdev_priv(dev);
+	struct c_can_tx_ring *tx_ring = &priv->tx;
 	int err;
 
 	err = c_can_software_reset(dev);
@@ -598,7 +623,8 @@ static int c_can_chip_config(struct net_device *dev)
 	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
 
 	/* Clear all internal status */
-	atomic_set(&priv->tx_active, 0);
+	tx_ring->head = 0;
+	tx_ring->tail = 0;
 	priv->tx_dir = 0;
 
 	/* set bittiming params */
@@ -696,14 +722,14 @@ static int c_can_get_berr_counter(const struct net_device *dev,
 static void c_can_do_tx(struct net_device *dev)
 {
 	struct c_can_priv *priv = netdev_priv(dev);
+	struct c_can_tx_ring *tx_ring = &priv->tx;
 	struct net_device_stats *stats = &dev->stats;
-	u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
+	u32 idx, obj, pkts = 0, bytes = 0, pend;
 
 	if (priv->msg_obj_tx_last > 32)
 		pend = priv->read_reg32(priv, C_CAN_INTPND3_REG);
 	else
 		pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
-	clr = pend;
 
 	while ((idx = ffs(pend))) {
 		idx--;
@@ -723,11 +749,14 @@ static void c_can_do_tx(struct net_device *dev)
 	if (!pkts)
 		return;
 
-	/* Clear the bits in the tx_active mask */
-	atomic_sub(clr, &priv->tx_active);
-
-	if (clr & BIT(priv->msg_obj_tx_num - 1))
-		netif_wake_queue(dev);
+	tx_ring->tail += pkts;
+	if (c_can_get_tx_free(tx_ring)) {
+		/* Make sure that anybody stopping the queue after
+		 * this sees the new tx_ring->tail.
+		 */
+		smp_mb();
+		netif_wake_queue(priv->dev);
+	}
 
 	stats->tx_bytes += bytes;
 	stats->tx_packets += pkts;
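The tail update above pairs with c_can_tx_busy() in the xmit path: the xmit side stops the queue and then re-reads the free count behind a barrier, while the completion side publishes the new tail behind a barrier before waking the queue, so a wake-up issued in the window between the check and the stop cannot be lost. Below is only a user-space sketch of that ordering; the ring accounting is collapsed into a plain depth counter, and queue_stopped / __sync_synchronize() are hypothetical stand-ins for the netif queue state and smp_mb(), not driver API.

/* Sketch of the stop/re-check/wake protocol from the patch, with the ring
 * arithmetic simplified to a plain FIFO depth. Illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define TX_OBJ_NUM 16U

static unsigned int head, tail;		/* free-running counters */
static bool queue_stopped;

static unsigned int tx_free(void)
{
	/* Simplified depth rule, not the driver's prioritized-mailbox rule */
	return TX_OBJ_NUM - (head - tail);
}

/* xmit side: mirrors c_can_tx_busy() plus the head update in c_can_start_xmit() */
static bool xmit_one(void)
{
	if (tx_free() == 0) {
		queue_stopped = true;		/* netif_stop_queue() */

		/* Barrier before re-reading head/tail; pairs with the
		 * barrier on the completion side below.
		 */
		__sync_synchronize();		/* smp_mb() */

		if (tx_free() == 0)
			return false;		/* NETDEV_TX_BUSY */

		queue_stopped = false;		/* netif_start_queue() */
	}

	head++;					/* hand one slot to the hardware */
	if (tx_free() == 0)
		queue_stopped = true;		/* last free slot just used */
	return true;
}

/* completion side: mirrors the tail update at the end of c_can_do_tx() */
static void complete(unsigned int pkts)
{
	tail += pkts;
	if (tx_free()) {
		/* Publish the new tail before waking the queue, so an xmit
		 * that just stopped the queue re-reads the updated value.
		 */
		__sync_synchronize();		/* smp_mb() */
		queue_stopped = false;		/* netif_wake_queue() */
	}
}

int main(void)
{
	unsigned int i, sent = 0;

	for (i = 0; i < TX_OBJ_NUM + 4; i++)	/* try to overfill the ring */
		sent += xmit_one();

	printf("sent=%u stopped=%d free=%u\n", sent, queue_stopped, tx_free());

	complete(sent);				/* echo interrupts arrive */
	printf("after completion: stopped=%d free=%u\n", queue_stopped, tx_free());
	return 0;
}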
@@ -1208,6 +1237,10 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
 	priv->msg_obj_tx_last =
 		priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1;
 
+	priv->tx.head = 0;
+	priv->tx.tail = 0;
+	priv->tx.obj_num = msg_obj_tx_num;
+
 	netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);
 	priv->dev = dev;