Commit c5795c53 authored by Yunsheng Lin's avatar Yunsheng Lin Committed by David S. Miller

net: hns3: Fix for pri to tc mapping in TM

The current mapping between priority and TC is one-to-one,
so users can't map multiple priorities to the same TC.
This patch changes the mapping to many-to-one.

Fixes: 84844054 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver")
Signed-off-by: default avatarYunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 68ece54e
...@@ -376,12 +376,12 @@ struct hnae3_ae_algo { ...@@ -376,12 +376,12 @@ struct hnae3_ae_algo {
struct hnae3_tc_info { struct hnae3_tc_info {
u16 tqp_offset; /* TQP offset from base TQP */ u16 tqp_offset; /* TQP offset from base TQP */
u16 tqp_count; /* Total TQPs */ u16 tqp_count; /* Total TQPs */
u8 up; /* user priority */
u8 tc; /* TC index */ u8 tc; /* TC index */
bool enable; /* If this TC is enable or not */ bool enable; /* If this TC is enable or not */
}; };
#define HNAE3_MAX_TC 8 #define HNAE3_MAX_TC 8
#define HNAE3_MAX_USER_PRIO 8
struct hnae3_knic_private_info { struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */ struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */ u16 rss_size; /* Allocated RSS queues */
...@@ -389,6 +389,7 @@ struct hnae3_knic_private_info { ...@@ -389,6 +389,7 @@ struct hnae3_knic_private_info {
u16 num_desc; u16 num_desc;
u8 num_tc; /* Total number of enabled TCs */ u8 num_tc; /* Total number of enabled TCs */
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
u16 num_tqps; /* total number of TQPs in this handle */ u16 num_tqps; /* total number of TQPs in this handle */
......
...@@ -176,7 +176,6 @@ struct hclge_pg_info { ...@@ -176,7 +176,6 @@ struct hclge_pg_info {
struct hclge_tc_info { struct hclge_tc_info {
u8 tc_id; u8 tc_id;
u8 tc_sch_mode; /* 0: sp; 1: dwrr */ u8 tc_sch_mode; /* 0: sp; 1: dwrr */
u8 up;
u8 pgid; u8 pgid;
u32 bw_limit; u32 bw_limit;
}; };
...@@ -197,6 +196,7 @@ struct hclge_tm_info { ...@@ -197,6 +196,7 @@ struct hclge_tm_info {
u8 num_tc; u8 num_tc;
u8 num_pg; /* It must be 1 if vNET-Base schd */ u8 num_pg; /* It must be 1 if vNET-Base schd */
u8 pg_dwrr[HCLGE_PG_NUM]; u8 pg_dwrr[HCLGE_PG_NUM];
u8 prio_tc[HNAE3_MAX_USER_PRIO];
struct hclge_pg_info pg_info[HCLGE_PG_NUM]; struct hclge_pg_info pg_info[HCLGE_PG_NUM];
struct hclge_tc_info tc_info[HNAE3_MAX_TC]; struct hclge_tc_info tc_info[HNAE3_MAX_TC];
enum hclge_fc_mode fc_mode; enum hclge_fc_mode fc_mode;
......
...@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) ...@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{ {
u8 tc; u8 tc;
for (tc = 0; tc < hdev->tm_info.num_tc; tc++) tc = hdev->tm_info.prio_tc[pri_id];
if (hdev->tm_info.tc_info[tc].up == pri_id)
break;
if (tc >= hdev->tm_info.num_tc) if (tc >= hdev->tm_info.num_tc)
return -EINVAL; return -EINVAL;
...@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev) ...@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) { for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
ret = hclge_fill_pri_array(hdev, pri, pri_id); ret = hclge_fill_pri_array(hdev, pri, pri_id);
if (ret) if (ret)
return ret; return ret;
...@@ -405,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -405,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
kinfo->tc_info[i].tqp_count = kinfo->rss_size; kinfo->tc_info[i].tqp_count = kinfo->rss_size;
kinfo->tc_info[i].tc = i; kinfo->tc_info[i].tc = i;
kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
} else { } else {
/* Set to default queue if TC is disable */ /* Set to default queue if TC is disable */
kinfo->tc_info[i].enable = false; kinfo->tc_info[i].enable = false;
kinfo->tc_info[i].tqp_offset = 0; kinfo->tc_info[i].tqp_offset = 0;
kinfo->tc_info[i].tqp_count = 1; kinfo->tc_info[i].tqp_count = 1;
kinfo->tc_info[i].tc = 0; kinfo->tc_info[i].tc = 0;
kinfo->tc_info[i].up = 0;
} }
} }
memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
} }
static void hclge_tm_vport_info_update(struct hclge_dev *hdev) static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
...@@ -436,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) ...@@ -436,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
for (i = 0; i < hdev->tm_info.num_tc; i++) { for (i = 0; i < hdev->tm_info.num_tc; i++) {
hdev->tm_info.tc_info[i].tc_id = i; hdev->tm_info.tc_info[i].tc_id = i;
hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
hdev->tm_info.tc_info[i].up = i;
hdev->tm_info.tc_info[i].pgid = 0; hdev->tm_info.tc_info[i].pgid = 0;
hdev->tm_info.tc_info[i].bw_limit = hdev->tm_info.tc_info[i].bw_limit =
hdev->tm_info.pg_info[0].bw_limit; hdev->tm_info.pg_info[0].bw_limit;
} }
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
hdev->tm_info.prio_tc[i] =
(i >= hdev->tm_info.num_tc) ? 0 : i;
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment