Commit f2aaed55 authored by lipeng's avatar lipeng Committed by David S. Miller

net: hns: Replace netif_tx_lock with ring spin lock

netif_tx_lock is a global spin lock, so it takes effect
on all rings in the netdevice. In the tx_poll_one process, we
only need to lock the current ring; for this purpose, we define a spin
lock in the hnae_ring struct.
Signed-off-by: default avatarlipeng <lipeng321@huawei.com>
Reviewed-by: default avatarYisen Zhuang <yisen.zhuang@huawei.com>
Signed-off-by: default avatarSalil Mehta <salil.mehta@huawei.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent b29bd412
...@@ -196,6 +196,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) ...@@ -196,6 +196,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
ring->q = q; ring->q = q;
ring->flags = flags; ring->flags = flags;
spin_lock_init(&ring->lock);
assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
/* not matter for tx or rx ring, the ntc and ntc start from 0 */ /* not matter for tx or rx ring, the ntc and ntc start from 0 */
......
...@@ -275,6 +275,9 @@ struct hnae_ring { ...@@ -275,6 +275,9 @@ struct hnae_ring {
/* statistic */ /* statistic */
struct ring_stats stats; struct ring_stats stats;
/* ring lock for poll one */
spinlock_t lock;
dma_addr_t desc_dma_addr; dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */ u16 desc_num; /* total number of desc */
......
...@@ -922,12 +922,13 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h) ...@@ -922,12 +922,13 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
/* netif_tx_lock will turn down the performance, set only when necessary */ /* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev) #define NETIF_TX_LOCK(ring) spin_lock(&ring->lock)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev) #define NETIF_TX_UNLOCK(ring) spin_unlock(&ring->lock)
#else #else
#define NETIF_TX_LOCK(ndev) #define NETIF_TX_LOCK(ring)
#define NETIF_TX_UNLOCK(ndev) #define NETIF_TX_UNLOCK(ring)
#endif #endif
/* reclaim all desc in one budget /* reclaim all desc in one budget
* return error or number of desc left * return error or number of desc left
*/ */
...@@ -941,13 +942,13 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, ...@@ -941,13 +942,13 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
int head; int head;
int bytes, pkts; int bytes, pkts;
NETIF_TX_LOCK(ndev); NETIF_TX_LOCK(ring);
head = readl_relaxed(ring->io_base + RCB_REG_HEAD); head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
rmb(); /* make sure head is ready before touch any data */ rmb(); /* make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean) { if (is_ring_empty(ring) || head == ring->next_to_clean) {
NETIF_TX_UNLOCK(ndev); NETIF_TX_UNLOCK(ring);
return 0; /* no data to poll */ return 0; /* no data to poll */
} }
...@@ -955,7 +956,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, ...@@ -955,7 +956,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
netdev_err(ndev, "wrong head (%d, %d-%d)\n", head, netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean); ring->next_to_use, ring->next_to_clean);
ring->stats.io_err_cnt++; ring->stats.io_err_cnt++;
NETIF_TX_UNLOCK(ndev); NETIF_TX_UNLOCK(ring);
return -EIO; return -EIO;
} }
...@@ -967,7 +968,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, ...@@ -967,7 +968,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
prefetch(&ring->desc_cb[ring->next_to_clean]); prefetch(&ring->desc_cb[ring->next_to_clean]);
} }
NETIF_TX_UNLOCK(ndev); NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_completed_queue(dev_queue, pkts, bytes); netdev_tx_completed_queue(dev_queue, pkts, bytes);
...@@ -1028,7 +1029,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data) ...@@ -1028,7 +1029,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
int head; int head;
int bytes, pkts; int bytes, pkts;
NETIF_TX_LOCK(ndev); NETIF_TX_LOCK(ring);
head = ring->next_to_use; /* ntu :soft setted ring position*/ head = ring->next_to_use; /* ntu :soft setted ring position*/
bytes = 0; bytes = 0;
...@@ -1036,7 +1037,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data) ...@@ -1036,7 +1037,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
while (head != ring->next_to_clean) while (head != ring->next_to_clean)
hns_nic_reclaim_one_desc(ring, &bytes, &pkts); hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
NETIF_TX_UNLOCK(ndev); NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_reset_queue(dev_queue); netdev_tx_reset_queue(dev_queue);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment