Commit de67a690 authored by Yunsheng Lin, committed by David S. Miller

net: hns3: only support tc 0 for VF

When the VF shares the same TC configuration as the PF, the business
running on the PF and VF must follow a similar model.

For simplicity, we are not considering the use case where the VF
shares the same TC configuration as the PF, so this patch removes
TC configuration support from the VF and forces the VF to use a
single TC.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 74354140
@@ -93,14 +93,12 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
 		}
 	}
 
-	for (i = 0; i < hdev->num_alloc_vport; i++) {
-		if (num_tc > hdev->vport[i].alloc_tqps) {
-			dev_err(&hdev->pdev->dev,
-				"allocated tqp(%u) checking failed, %u > tqp(%u)\n",
-				i, num_tc, hdev->vport[i].alloc_tqps);
-			return -EINVAL;
-		}
+	if (num_tc > hdev->vport[0].alloc_tqps) {
+		dev_err(&hdev->pdev->dev,
+			"allocated tqp checking failed, %u > tqp(%u)\n",
+			num_tc, hdev->vport[0].alloc_tqps);
+		return -EINVAL;
 	}
 
 	return 0;
 }
...
@@ -319,10 +319,14 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
 				struct hclge_mbx_vf_to_pf_cmd *mbx_req,
 				bool gen_resp)
 {
-	struct hclge_dev *hdev = vport->back;
-	int ret;
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	u8 vf_tc_map = 0;
+	int i, ret;
+
+	for (i = 0; i < kinfo->num_tc; i++)
+		vf_tc_map |= BIT(i);
 
-	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
+	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
 				   sizeof(u8));
 
 	return ret;
...
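Note: the following is a minimal user-space sketch, not part of the patch, of how the TC bitmap reported to the VF in hclge_get_vf_tcinfo() is derived from kinfo->num_tc. Since a VF's num_tc is now forced to 1, the map it receives is always 0x01 (TC 0 only). BIT() is redefined here only to keep the example self-contained.

/* Standalone illustration of the per-VF TC bitmap construction. */
#include <stdio.h>

#define BIT(n) (1U << (n))

static unsigned char build_vf_tc_map(unsigned int num_tc)
{
	unsigned char vf_tc_map = 0;
	unsigned int i;

	/* Set one bit per enabled TC, starting from TC 0. */
	for (i = 0; i < num_tc; i++)
		vf_tc_map |= BIT(i);

	return vf_tc_map;
}

int main(void)
{
	/* A PF-style configuration with 4 TCs would report 0x0f ... */
	printf("num_tc=4 -> tc_map=0x%02x\n", build_vf_tc_map(4));
	/* ... but a VF now always has num_tc = 1, so it reports 0x01. */
	printf("num_tc=1 -> tc_map=0x%02x\n", build_vf_tc_map(1));
	return 0;
}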
@@ -520,8 +520,14 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 	u16 max_rss_size;
 	u8 i;
 
-	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
-	kinfo->num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+	/* TC configuration is shared by PF/VF in one port, only allow
+	 * one tc for VF for simplicity. VF's vport_id is non zero.
+	 */
+	kinfo->num_tc = vport->vport_id ? 1 :
+			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
+				(vport->vport_id ? (vport->vport_id - 1) : 0);
 
 	max_rss_size = min_t(u16, hdev->rss_size_max,
 			     vport->alloc_tqps / kinfo->num_tc);
@@ -538,12 +544,12 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 	}
 
 	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
-	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
 	vport->dwrr = 100;  /* 100 percent as init */
 	vport->alloc_rss_size = kinfo->rss_size;
+	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
 
 	for (i = 0; i < HNAE3_MAX_TC; i++) {
-		if (hdev->hw_tc_map & BIT(i)) {
+		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
 			kinfo->tc_info[i].enable = true;
 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
@@ -766,13 +772,17 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
 
 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
 		/* Cfg qs -> pri mapping, one by one mapping */
-		for (k = 0; k < hdev->num_alloc_vport; k++)
-			for (i = 0; i < hdev->tm_info.num_tc; i++) {
+		for (k = 0; k < hdev->num_alloc_vport; k++) {
+			struct hnae3_knic_private_info *kinfo =
+				&vport[k].nic.kinfo;
+
+			for (i = 0; i < kinfo->num_tc; i++) {
 				ret = hclge_tm_qs_to_pri_map_cfg(
 					hdev, vport[k].qs_offset + i, i);
 				if (ret)
 					return ret;
 			}
+		}
 	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
 		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
 		for (k = 0; k < hdev->num_alloc_vport; k++)
...
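For illustration, here is a minimal user-space sketch, not part of the patch, of the queue-set layout implied by the new qs_offset formula in hclge_tm_vport_tc_info_update(): the PF (vport_id 0) keeps one queue set per TC, while each VF (vport_id >= 1) gets exactly one queue set placed after the PF's range. The pf_num_tc value of 4 below is a hypothetical example.

/* Standalone illustration of the new PF/VF queue-set offset layout. */
#include <stdio.h>

static unsigned int qs_offset(unsigned int vport_id, unsigned int pf_num_tc)
{
	/* Mirrors the new formula: PF uses qs 0..pf_num_tc-1,
	 * VF n (vport_id == n >= 1) uses the single qs pf_num_tc + (n - 1).
	 */
	return (vport_id ? pf_num_tc : 0) + (vport_id ? vport_id - 1 : 0);
}

int main(void)
{
	unsigned int pf_num_tc = 4;	/* hypothetical: PF configured with 4 TCs */
	unsigned int vport_id;

	/* Expected layout: PF -> qs 0 (4 TCs), VF1 -> qs 4, VF2 -> qs 5, VF3 -> qs 6 */
	for (vport_id = 0; vport_id < 4; vport_id++)
		printf("vport %u: qs_offset=%u num_tc=%u\n",
		       vport_id, qs_offset(vport_id, pf_num_tc),
		       vport_id ? 1U : pf_num_tc);
	return 0;
}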