Commit 323e9a95 authored by David S. Miller

Merge branch 'hns3-fixes'

Guangbin Huang says:

====================
net: hns3: add some fixes for -net

This series adds some fixes for the HNS3 ethernet driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 55161e67 0dd8a25f
...
@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
 static LIST_HEAD(hnae3_client_list);
 static LIST_HEAD(hnae3_ae_dev_list);
 
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+	const struct pci_device_id *pci_id;
+	struct hnae3_ae_dev *ae_dev;
+
+	if (!ae_algo)
+		return;
+
+	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+			continue;
+
+		pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+		if (!pci_id)
+			continue;
+		if (IS_ENABLED(CONFIG_PCI_IOV))
+			pci_disable_sriov(ae_dev->pdev);
+	}
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
 /* we are keeping things simple and using single lock for all the
  * list. This is a non-critical code so other updations, if happen
  * in parallel, can wait.
...
@@ -853,6 +853,7 @@ struct hnae3_handle {
 int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
 
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
 void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
 void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
...
@@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
 
 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 			      struct sk_buff *skb,
-			      u8 max_non_tso_bd_num,
 			      unsigned int bd_num)
 {
 	/* 'bd_num == UINT_MAX' means the skb' fraglist has a
@@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 	 * will not help.
 	 */
 	if (skb->len > HNS3_MAX_TSO_SIZE ||
-	    (!skb_is_gso(skb) && skb->len >
-	     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+	    (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.hw_limitation++;
 		u64_stats_update_end(&ring->syncp);
@@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 			goto out;
 		}
 
-		if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
-				       bd_num))
+		if (hns3_skb_linearize(ring, skb, bd_num))
 			return -ENOMEM;
 
 		bd_num = hns3_tx_bd_count(skb->len);
@@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
 {
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc[i].addr = 0;
+	ring->desc_cb[i].refill = 0;
 }
 
 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
 
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
 					 ring->desc_cb[i].page_offset);
+	ring->desc_cb[i].refill = 1;
 
 	return 0;
 }
@@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 {
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc_cb[i] = *res_cb;
+	ring->desc_cb[i].refill = 1;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
 					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;
@@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
 	ring->desc_cb[i].reuse_flag = 0;
+	ring->desc_cb[i].refill = 1;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
 					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;
@@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
 	int ntc = ring->next_to_clean;
 	int ntu = ring->next_to_use;
 
+	if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+		return ring->desc_num;
+
 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }
 
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 				      int cleand_count)
 {
 	struct hns3_desc_cb *desc_cb;
@@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 				hns3_rl_err(ring_to_netdev(ring),
 					    "alloc rx buffer failed: %d\n",
 					    ret);
-				break;
+
+				writel(i, ring->tqp->io_base +
+				       HNS3_RING_RX_RING_HEAD_REG);
+				return true;
 			}
 
 			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 	}
 
 	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+	return false;
 }
 
 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
@@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
 {
 	ring->desc[ring->next_to_clean].rx.bd_base_info &=
 		cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+	ring->desc_cb[ring->next_to_clean].refill = 0;
 	ring->next_to_clean += 1;
 
 	if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 	int unused_count = hns3_desc_unused(ring);
+	bool failure = false;
 	int recv_pkts = 0;
 	int err;
@@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 	while (recv_pkts < budget) {
 		/* Reuse or realloc buffers */
 		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
-			hns3_nic_alloc_rx_buffers(ring, unused_count);
-			unused_count = hns3_desc_unused(ring) -
-					ring->pending_buf;
+			failure = failure ||
+				  hns3_nic_alloc_rx_buffers(ring, unused_count);
+			unused_count = 0;
 		}
 
 		/* Poll one pkt */
@@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 	}
 
 out:
-	/* Make all data has been write before submit */
-	if (unused_count > 0)
-		hns3_nic_alloc_rx_buffers(ring, unused_count);
-
-	return recv_pkts;
+	return failure ? budget : recv_pkts;
 }
 
 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
...
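
An aside on the refill flag used throughout this patch: with only producer and consumer indices, next_to_clean == next_to_use is ambiguous, because a completely empty ring and a completely full one look identical. The per-descriptor refill flag records whether the consumer slot still holds a hardware-ready buffer, which is what hns3_desc_unused() now checks first. A minimal standalone sketch of the same bookkeeping, with simplified types and names for illustration rather than the driver's actual structures:

    #include <stdbool.h>

    #define RING_SIZE 16

    struct desc_cb {
            bool refill;            /* slot currently holds a hardware-ready buffer */
    };

    struct ring {
            struct desc_cb cb[RING_SIZE];
            int next_to_use;        /* producer: next slot handed to hardware */
            int next_to_clean;      /* consumer: next slot to reclaim */
    };

    /* Count slots needing a fresh buffer. When the indices coincide, the
     * ring is either empty or full; the refill flag on the consumer slot
     * disambiguates: no refilled buffer there means every slot is unused.
     */
    static int ring_unused(const struct ring *r)
    {
            int ntc = r->next_to_clean;
            int ntu = r->next_to_use;

            if (ntc == ntu && !r->cb[ntc].refill)
                    return RING_SIZE;

            return ((ntc >= ntu) ? 0 : RING_SIZE) + ntc - ntu;
    }

Reporting the ring as fully unused in the empty-and-unrefilled case is what lets the caller replenish every descriptor instead of mistaking an empty ring for a full one.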
@@ -186,11 +186,9 @@ enum hns3_nic_state {
 #define HNS3_MAX_BD_SIZE			65535
 #define HNS3_MAX_TSO_BD_NUM			63U
-#define HNS3_MAX_TSO_SIZE \
-	(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
-	(HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
+#define HNS3_MAX_TSO_SIZE			1048576U
+#define HNS3_MAX_NON_TSO_SIZE			9728U
 
 #define HNS3_VECTOR_GL0_OFFSET			0x100
 #define HNS3_VECTOR_GL1_OFFSET			0x200
@@ -332,6 +330,7 @@ struct hns3_desc_cb {
 	u32 length;     /* length of the buffer */
 
 	u16 reuse_flag;
+	u16 refill;
 
 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
...
@@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
 			*changed = true;
 		break;
 	case IEEE_8021QAZ_TSA_ETS:
+		/* The hardware will switch to sp mode if bandwidth is
+		 * 0, so limit ets bandwidth must be greater than 0.
+		 */
+		if (!ets->tc_tx_bw[i]) {
+			dev_err(&hdev->pdev->dev,
+				"tc%u ets bw cannot be 0\n", i);
+			return -EINVAL;
+		}
+
 		if (hdev->tm_info.tc_info[i].tc_sch_mode !=
 		    HCLGE_SCH_MODE_DWRR)
 			*changed = true;
...
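
For context on the check above: the comment records a hardware behavior where a traffic class assigned to ETS with a zero bandwidth share is silently switched to strict-priority (sp) scheduling, so a zero share is rejected before it reaches the hardware. A rough sketch of that rule in isolation, with hypothetical types and names; only the zero-share check itself comes from the patch:

    #include <stdio.h>

    #define MAX_TC 8

    /* Hypothetical ETS configuration: tsa[i] selects the scheduling
     * algorithm for traffic class i, tc_tx_bw[i] its share in percent.
     */
    enum tsa_alg { TSA_STRICT, TSA_ETS };

    struct ets_cfg {
            enum tsa_alg tsa[MAX_TC];
            unsigned char tc_tx_bw[MAX_TC];
    };

    /* Reject configurations the hardware would silently rewrite: a class
     * scheduled by ETS must have a bandwidth share greater than zero.
     */
    static int ets_cfg_validate(const struct ets_cfg *cfg)
    {
            for (int i = 0; i < MAX_TC; i++) {
                    if (cfg->tsa[i] == TSA_ETS && !cfg->tc_tx_bw[i]) {
                            fprintf(stderr, "tc%d ets bw cannot be 0\n", i);
                            return -1;
                    }
            }
            return 0;
    }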
@@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
 
 	/* configure TM QCN hw errors */
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
-	if (en)
+	desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+	if (en) {
+		desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
 		desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+	}
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
...
@@ -50,6 +50,8 @@
 #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN		0x003F
 #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK	0x003F
 #define HCLGE_TM_SCH_ECC_ERR_INT_EN		0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE		0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN		0xFFFF00
 #define HCLGE_TM_QCN_MEM_ERR_INT_EN		0xFFFFFF
 #define HCLGE_NCSI_ERR_INT_EN			0x3
 #define HCLGE_NCSI_ERR_INT_TYPE			0x9
...
@@ -13065,6 +13065,7 @@ static int hclge_init(void)
 
 static void hclge_exit(void)
 {
+	hnae3_unregister_ae_algo_prepare(&ae_algo);
 	hnae3_unregister_ae_algo(&ae_algo);
 	destroy_workqueue(hclge_wq);
 }
...
@@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
 		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
 		for (k = 0; k < hdev->tm_info.num_tc; k++)
 			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+		for (; k < HNAE3_MAX_TC; k++)
+			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
 	}
 }
...
@@ -2273,9 +2273,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 		hdev->reset_attempts = 0;
 
 		hdev->last_reset_time = jiffies;
-		while ((hdev->reset_type =
-			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
-		       != HNAE3_NONE_RESET)
+		hdev->reset_type =
+			hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+		if (hdev->reset_type != HNAE3_NONE_RESET)
 			hclgevf_reset(hdev);
 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
 				      &hdev->reset_state)) {
...