Commit 2f1a01a8 authored by Marc Kleine-Budde

can: at91_can: rename struct at91_priv::{tx_next,tx_echo} to {tx_head,tx_tail}

To increase code readability, use the same naming of the counters for
the TX FIFO as in the other drivers implementing the same algorithm.

Link: https://lore.kernel.org/all/20231005-at91_can-rx_offload-v2-12-9987d53600e0@pengutronix.de
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
parent 2b08e521
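
The head/tail naming refers to the free-running counter scheme used by the drivers this patch aligns with: the head counts frames handed to the hardware, the tail counts frames whose transmission has completed. Below is a minimal, generic sketch of that idiom, not part of the patch; the FIFO depth and helper names are made up for illustration.

/* Illustration only, not the at91 code: head/tail counters for a
 * mailbox-based TX FIFO. Both counters run freely; their difference
 * is the fill level, reducing them modulo the depth gives the slot.
 */
#include <stdio.h>

#define TX_FIFO_DEPTH	8U		/* assumed depth, for the example only */

static unsigned int tx_head;		/* next frame to queue */
static unsigned int tx_tail;		/* oldest frame still in flight */

static unsigned int tx_fill(void)
{
	return tx_head - tx_tail;	/* wrap-safe with unsigned arithmetic */
}

static unsigned int tx_slot(unsigned int counter)
{
	return counter % TX_FIFO_DEPTH;
}

int main(void)
{
	tx_head = 10;			/* 10 frames queued so far */
	tx_tail = 7;			/* 7 of them already completed */
	printf("in flight: %u, next slot: %u, oldest slot: %u\n",
	       tx_fill(), tx_slot(tx_head), tx_slot(tx_tail));
	return 0;
}
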
@@ -154,8 +154,8 @@ struct at91_priv {
 	void __iomem *reg_base;
 	u32 reg_sr;
-	unsigned int tx_next;
-	unsigned int tx_echo;
+	unsigned int tx_head;
+	unsigned int tx_tail;
 	unsigned int rx_next;
 	struct at91_devtype_data devtype_data;
@@ -253,24 +253,24 @@ static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
 	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
 }
-static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
+static inline unsigned int get_head_prio_shift(const struct at91_priv *priv)
 {
 	return get_mb_tx_shift(priv);
 }
-static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
+static inline unsigned int get_head_prio_mask(const struct at91_priv *priv)
 {
 	return 0xf << get_mb_tx_shift(priv);
 }
-static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
+static inline unsigned int get_head_mb_mask(const struct at91_priv *priv)
 {
 	return AT91_MB_MASK(get_mb_tx_shift(priv));
 }
-static inline unsigned int get_next_mask(const struct at91_priv *priv)
+static inline unsigned int get_head_mask(const struct at91_priv *priv)
 {
-	return get_next_mb_mask(priv) | get_next_prio_mask(priv);
+	return get_head_mb_mask(priv) | get_head_prio_mask(priv);
 }
 static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
@@ -285,19 +285,19 @@ static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
 		~AT91_MB_MASK(get_mb_tx_first(priv));
 }
-static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
+static inline unsigned int get_tx_head_mb(const struct at91_priv *priv)
 {
-	return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
+	return (priv->tx_head & get_head_mb_mask(priv)) + get_mb_tx_first(priv);
 }
-static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
+static inline unsigned int get_tx_head_prio(const struct at91_priv *priv)
 {
-	return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
+	return (priv->tx_head >> get_head_prio_shift(priv)) & 0xf;
 }
-static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
+static inline unsigned int get_tx_tail_mb(const struct at91_priv *priv)
 {
-	return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
+	return (priv->tx_tail & get_head_mb_mask(priv)) + get_mb_tx_first(priv);
 }
 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
@@ -374,7 +374,7 @@ static void at91_setup_mailboxes(struct net_device *dev)
 		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
 	/* Reset tx and rx helper pointers */
-	priv->tx_next = priv->tx_echo = 0;
+	priv->tx_head = priv->tx_tail = 0;
 	priv->rx_next = get_mb_rx_first(priv);
 }
@@ -470,11 +470,11 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
  * stop sending, waiting for all messages to be delivered, then start
  * again with mailbox AT91_MB_TX_FIRST prio 0.
  *
- * We use the priv->tx_next as counter for the next transmission
+ * We use the priv->tx_head as counter for the next transmission
  * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
  * encode the mailbox number, the upper 4 bits the mailbox priority:
  *
- * priv->tx_next = (prio << get_next_prio_shift(priv)) |
+ * priv->tx_head = (prio << get_next_prio_shift(priv)) |
  *                 (mb - get_mb_tx_first(priv));
  *
  */
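
As a worked example of the encoding described in the comment above (illustration only, not part of the patch): assuming a mailbox shift of 2, i.e. four TX mailboxes starting at index 0, a tx_head value splits into mailbox and priority as below; the decode mirrors what get_tx_head_mb() and get_tx_head_prio() compute from devtype_data.

/* Standalone sketch; the shift and first-mailbox values are assumptions. */
#include <stdio.h>

#define MB_TX_SHIFT	2U				/* assumed: 4 TX mailboxes */
#define MB_TX_FIRST	0U				/* assumed first TX mailbox */
#define MB_MASK		((1U << MB_TX_SHIFT) - 1)

int main(void)
{
	unsigned int prio = 3, mb = 2;
	/* encode as in the comment above */
	unsigned int tx_head = (prio << MB_TX_SHIFT) | (mb - MB_TX_FIRST);

	/* decode again: lower bits are the mailbox, upper 4 bits the prio */
	printf("tx_head=0x%x -> mb=%u prio=%u\n", tx_head,
	       (tx_head & MB_MASK) + MB_TX_FIRST,
	       (tx_head >> MB_TX_SHIFT) & 0xf);
	return 0;
}
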
@@ -488,8 +488,8 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (can_dev_dropped_skb(dev, skb))
 		return NETDEV_TX_OK;
-	mb = get_tx_next_mb(priv);
-	prio = get_tx_next_prio(priv);
+	mb = get_tx_head_mb(priv);
+	prio = get_tx_head_prio(priv);
 	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
 		netif_stop_queue(dev);
@@ -521,15 +521,15 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* we have to stop the queue and deliver all messages in case
 	 * of a prio+mb counter wrap around. This is the case if
-	 * tx_next buffer prio and mailbox equals 0.
+	 * tx_head buffer prio and mailbox equals 0.
 	 *
 	 * also stop the queue if next buffer is still in use
 	 * (== not ready)
 	 */
-	priv->tx_next++;
-	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
+	priv->tx_head++;
+	if (!(at91_read(priv, AT91_MSR(get_tx_head_mb(priv))) &
 	      AT91_MSR_MRDY) ||
-	    (priv->tx_next & get_next_mask(priv)) == 0)
+	    (priv->tx_head & get_head_mask(priv)) == 0)
 		netif_stop_queue(dev);
 	/* Enable interrupt for this mailbox */
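
To illustrate the wrap-around condition from the comment above (again assuming a shift of 2, so 4 mailboxes x 16 priorities = 64 head values per cycle), this standalone sketch, not part of the patch, shows when (tx_head & get_head_mask(priv)) == 0 would trigger the queue stop:

/* Illustration only: the queue is stopped once every slot/prio combination
 * has been handed out, i.e. when the masked head counter wraps to 0.
 */
#include <stdio.h>

#define MB_TX_SHIFT	2U					/* assumed */
#define HEAD_MASK	(((1U << MB_TX_SHIFT) - 1) | (0xfU << MB_TX_SHIFT))

int main(void)
{
	unsigned int tx_head;

	for (tx_head = 62; tx_head <= 65; tx_head++)
		printf("tx_head=%u wrap=%s\n", tx_head,
		       (tx_head & HEAD_MASK) == 0 ? "yes (stop queue)" : "no");
	return 0;
}
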
@@ -849,11 +849,11 @@ static int at91_poll(struct napi_struct *napi, int quota)
 /* theory of operation:
  *
- * priv->tx_echo holds the number of the oldest can_frame put for
+ * priv->tx_tail holds the number of the oldest can_frame put for
  * transmission into the hardware, but not yet ACKed by the CAN tx
  * complete IRQ.
  *
- * We iterate from priv->tx_echo to priv->tx_next and check if the
+ * We iterate from priv->tx_tail to priv->tx_head and check if the
  * packet has been transmitted, echo it back to the CAN framework. If
  * we discover a not yet transmitted package, stop looking for more.
  *
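
The tail-to-head walk described above works because the counters are free-running unsigned ints: the difference (tx_head - tx_tail) is the number of frames in flight even across an integer overflow. A standalone sketch of just that property, with values chosen purely for illustration:

/* Sketch, not the driver code: "(head - tail) > 0" stays correct even
 * after the free-running counters wrap past UINT_MAX.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tx_tail = 0xfffffffe;	/* close to overflow */
	unsigned int tx_head = 0x00000001;	/* already wrapped around */

	for (; (tx_head - tx_tail) > 0; tx_tail++)
		printf("completing frame with counter %u\n", tx_tail);
	return 0;
}
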
@@ -866,8 +866,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
 	/* masking of reg_sr not needed, already done by at91_irq */
-	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
-		mb = get_tx_echo_mb(priv);
+	for (/* nix */; (priv->tx_head - priv->tx_tail) > 0; priv->tx_tail++) {
+		mb = get_tx_tail_mb(priv);
 		/* no event in mailbox? */
 		if (!(reg_sr & (1 << mb)))
@@ -896,8 +896,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
 	 * we get a TX int for the last can frame directly before a
 	 * wrap around.
 	 */
-	if ((priv->tx_next & get_next_mask(priv)) != 0 ||
-	    (priv->tx_echo & get_next_mask(priv)) == 0)
+	if ((priv->tx_head & get_head_mask(priv)) != 0 ||
+	    (priv->tx_tail & get_head_mask(priv)) == 0)
 		netif_wake_queue(dev);
 }
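
For illustration only: the wake-up condition above keeps the queue stopped in exactly one case, a freshly wrapped tx_head while older frames are still pending. A small standalone check of the three interesting combinations (the mask again assumes a shift of 2; none of this is part of the patch):

#include <stdbool.h>
#include <stdio.h>

#define MB_TX_SHIFT	2U					/* assumed */
#define HEAD_MASK	(((1U << MB_TX_SHIFT) - 1) | (0xfU << MB_TX_SHIFT))

/* mirrors the condition above with plain parameters */
static bool may_wake_queue(unsigned int head, unsigned int tail)
{
	return (head & HEAD_MASK) != 0 || (tail & HEAD_MASK) == 0;
}

int main(void)
{
	printf("%d\n", may_wake_queue(64, 60));	/* 0: wrapped, still draining */
	printf("%d\n", may_wake_queue(64, 64));	/* 1: fully drained */
	printf("%d\n", may_wake_queue(65, 60));	/* 1: no wrap pending */
	return 0;
}
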