Commit c59d6062 authored by Jian Shen, committed by David S. Miller

net: hns3: add return value for mailbox handling in PF

Currently, there are several query mailbox messages sent from the VF to
the PF, and the VF waits for the PF's handling result. For the mailbox
messages HCLGE_MBX_GET_QID_IN_PF and HCLGE_MBX_GET_RSS_KEY, handling may
fail when an input parameter is invalid, but the prototype of their
handler functions is void. In this case, the PF always returns success
to the VF, which may cause the VF to get an incorrect result.

Fix it by adding a return value to these handler functions (a sketch of
how the status reaches the VF follows the diff below).

Fixes: 63b1279d ("net: hns3: check queue id range before using")
Fixes: 532cfc0d ("net: hns3: add a check for index in hclge_get_rss_key()")
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7d413735
@@ -594,7 +594,7 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport,
 	return hclge_set_vport_mtu(vport, mtu);
 }
 
-static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
 				     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
 				     struct hclge_respond_to_vf_msg *resp_msg)
 {
@@ -606,15 +606,16 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
 	if (queue_id >= handle->kinfo.num_tqps) {
 		dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
 			queue_id, mbx_req->mbx_src_vfid);
-		return;
+		return -EINVAL;
 	}
 
 	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
 	memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
 	resp_msg->len = sizeof(qid_in_pf);
+	return 0;
 }
 
-static void hclge_get_rss_key(struct hclge_vport *vport,
+static int hclge_get_rss_key(struct hclge_vport *vport,
 			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
 			      struct hclge_respond_to_vf_msg *resp_msg)
 {
@@ -634,13 +635,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
 		dev_warn(&hdev->pdev->dev,
 			 "failed to get the rss hash key, the index(%u) invalid !\n",
 			 index);
-		return;
+		return -EINVAL;
 	}
 
 	memcpy(resp_msg->data,
 	       &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
 	       HCLGE_RSS_MBX_RESP_LEN);
 	resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+	return 0;
 }
 
 static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -816,10 +818,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 				 "VF fail(%d) to set mtu\n", ret);
 			break;
 		case HCLGE_MBX_GET_QID_IN_PF:
-			hclge_get_queue_id_in_pf(vport, req, &resp_msg);
+			ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
 			break;
 		case HCLGE_MBX_GET_RSS_KEY:
-			hclge_get_rss_key(vport, req, &resp_msg);
+			ret = hclge_get_rss_key(vport, req, &resp_msg);
 			break;
 		case HCLGE_MBX_GET_LINK_MODE:
 			hclge_get_link_mode(vport, req);
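
The diff only changes the handlers' return type and captures ret in the
dispatcher; the status travels back to the VF in the mailbox response the
PF generates after the switch (via hclge_gen_resp_to_vf() in this driver).
Below is a minimal, self-contained C sketch of that pattern, not the
driver's actual code: struct mbx_resp, the opcode value, and the helper
names are illustrative stand-ins.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MBX_GET_QID_IN_PF 1	/* illustrative opcode, not the driver's value */

struct mbx_resp {
	int status;		/* handler result carried back to the VF */
	unsigned char data[8];
	unsigned int len;
};

/*
 * Before the fix this returned void: an invalid queue_id was logged on
 * the PF side, but the VF still received status == 0 with unset data.
 */
static int get_queue_id_in_pf(unsigned int queue_id, unsigned int num_tqps,
			      struct mbx_resp *resp)
{
	unsigned short qid_in_pf;

	if (queue_id >= num_tqps)
		return -EINVAL;	/* now visible to the dispatcher */

	qid_in_pf = (unsigned short)queue_id;	/* driver maps to a global qid here */
	memcpy(resp->data, &qid_in_pf, sizeof(qid_in_pf));
	resp->len = sizeof(qid_in_pf);
	return 0;
}

/* Dispatcher: capture the handler's return value into the response. */
static void mbx_handler(int code, unsigned int queue_id, struct mbx_resp *resp)
{
	int ret = 0;

	switch (code) {
	case MBX_GET_QID_IN_PF:
		ret = get_queue_id_in_pf(queue_id, 16, resp);
		break;
	}
	resp->status = ret;	/* what the generated response carries back */
}

int main(void)
{
	struct mbx_resp resp = { 0 };

	mbx_handler(MBX_GET_QID_IN_PF, 99, &resp);	/* out-of-range queue id */
	printf("VF sees status %d\n", resp.status);	/* -22 (-EINVAL), not 0 */
	return 0;
}

Running the sketch with an out-of-range queue id prints status -22
(-EINVAL); with a void handler, as before this commit, the VF would have
seen status 0 and bogus response data.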