Commit b4640030 authored by Jacob Keller, committed by David S. Miller

ixgbe: remove marketing names from busy poll code

This patch renames the LL_EXTENDED_STATS define and some of the functions used
to implement busy polling in the ixgbe driver, in order to remove the marketing
"low latency" naming that hides what the code actually does.

This furthers work that Linus Torvalds requested when the initial busy poll
code was included in the kernel: the code in the ixgbe driver itself was never
properly renamed to reflect the change in terminology from "low latency" to
busy polling.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b0007484
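
For readers skimming the diff below: the renamed helpers implement a small per-queue-vector lock/yield state machine. Whichever context loses the race for a queue vector (the NAPI softirq path or a busy-polling socket) records a "yield", and those counters surface as the new bp_napi_yield / bp_poll_yield ethtool statistics. What follows is a minimal, self-contained sketch of that idea, not the driver's code: the QV_STATE_* values, struct qv_model, and qv_lock_napi() are hypothetical simplifications (notably, the real driver protects the state word with a per-vector spinlock).

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values; the driver defines its own IXGBE_QV_STATE_* bits. */
#define QV_STATE_NAPI		0x1	/* NAPI softirq owns the vector */
#define QV_STATE_POLL		0x2	/* a busy-polling socket owns the vector */
#define QV_STATE_NAPI_YIELD	0x4	/* NAPI found the vector busy and yielded */
#define QV_LOCKED		(QV_STATE_NAPI | QV_STATE_POLL)

/* Hypothetical stand-in for struct ixgbe_q_vector; the real state word is
 * protected by a per-vector spinlock, omitted here for brevity. */
struct qv_model {
	unsigned int state;
	unsigned long long yields;	/* feeds the bp_*_yield ethtool stats */
};

/* Mirrors the shape of ixgbe_qv_lock_napi(): take the vector for NAPI
 * processing, or count a yield if another context already holds it. */
static bool qv_lock_napi(struct qv_model *qv)
{
	if (qv->state & QV_LOCKED) {
		qv->state |= QV_STATE_NAPI_YIELD;
		qv->yields++;
		return false;
	}
	qv->state = QV_STATE_NAPI;
	return true;
}

int main(void)
{
	/* A socket is already busy polling, so the NAPI path must yield. */
	struct qv_model qv = { .state = QV_STATE_POLL };

	if (!qv_lock_napi(&qv))
		printf("NAPI yielded; yields=%llu\n", qv.yields);
	return 0;
}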
@@ -55,7 +55,7 @@
 #include <net/busy_poll.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
 #endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
@@ -187,11 +187,11 @@ struct ixgbe_rx_buffer {
 struct ixgbe_queue_stats {
 	u64 packets;
 	u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 	u64 yields;
 	u64 misses;
 	u64 cleaned;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 };
 
 struct ixgbe_tx_queue_stats {
@@ -399,7 +399,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
 		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
 		rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		q_vector->tx.ring->stats.yields++;
 #endif
 	} else
@@ -432,7 +432,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
 	if ((q_vector->state & IXGBE_QV_LOCKED)) {
 		q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
 		rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		q_vector->rx.ring->stats.yields++;
 #endif
 	} else
@@ -457,7 +457,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 }
 
 /* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
 	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
 	return q_vector->state & IXGBE_QV_USER_PEND;
@@ -487,7 +487,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 	return false;
 }
 
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
 	return false;
 }
@@ -1117,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i] = 0;
 			data[i+1] = 0;
 			i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 			data[i] = 0;
 			data[i+1] = 0;
 			data[i+2] = 0;
@@ -1132,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i+1] = ring->stats.bytes;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
 		data[i+1] = ring->stats.misses;
 		data[i+2] = ring->stats.cleaned;
@@ -1145,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i] = 0;
 			data[i+1] = 0;
 			i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 			data[i] = 0;
 			data[i+1] = 0;
 			data[i+2] = 0;
@@ -1160,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i+1] = ring->stats.bytes;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
 		data[i+1] = ring->stats.misses;
 		data[i+2] = ring->stats.cleaned;
@@ -1202,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-			sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_ll_misses", i);
+			sprintf(p, "tx_queue_%u_bp_misses", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_ll_cleaned", i);
+			sprintf(p, "tx_queue_%u_bp_cleaned", i);
 			p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 		}
 		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-			sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_ll_misses", i);
+			sprintf(p, "rx_queue_%u_bp_misses", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_ll_cleaned", i);
+			sprintf(p, "rx_queue_%u_bp_cleaned", i);
 			p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 		}
 		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
 			sprintf(p, "tx_pb_%u_pxon", i);
@@ -1585,7 +1585,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 
-	if (ixgbe_qv_ll_polling(q_vector))
+	if (ixgbe_qv_busy_polling(q_vector))
 		netif_receive_skb(skb);
 	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
 		napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2097,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
 
 	ixgbe_for_each_ring(ring, q_vector->rx) {
 		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		if (found)
 			ring->stats.cleaned += found;
 		else