Commit 5be9963d authored by David S. Miller

Merge branch 'hns3-stats-refactor'

Jie Wang says:

====================
net: hns3: refactor rss/tqp stats functions

Currently, the hns3 PF and VF modules each have their own set of rss and
tqp stats APIs providing the get and set functions. Most of these APIs
are the same, so there is no need to keep two nearly identical sets of
functions and double the development and bugfix work.

This series refactors the rss and tqp stats APIs in the hns3 PF and VF
modules by implementing one set of common APIs for both to reuse and
deleting the old APIs (a simplified sketch of the shared-helper pattern
follows the commit metadata below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c5bcdd82 43710bfe
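As a rough, self-contained sketch of the pattern this series moves to (not driver code; the names demo_tqp, demo_tqp_stats and demo_tqps_get_stats are hypothetical stand-ins for the hclge_comm_* helpers added by the series), a single common stats helper can serve both the PF and VF paths instead of two near-identical copies:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Shared per-queue-pair stats, loosely mirroring the idea behind
     * struct hclge_comm_tqp_stats in this series (names are illustrative).
     */
    struct demo_tqp_stats {
            uint64_t tx_pkts;
            uint64_t rx_pkts;
    };

    struct demo_tqp {
            uint16_t index;
            struct demo_tqp_stats stats;
    };

    /* One common helper that both a "PF" and a "VF" caller would reuse:
     * copy all TX counters, then all RX counters, into the caller's buffer
     * and return the advanced buffer pointer.
     */
    static uint64_t *demo_tqps_get_stats(const struct demo_tqp *tqps, int num,
                                         uint64_t *buf)
    {
            int i;

            for (i = 0; i < num; i++)
                    *buf++ = tqps[i].stats.tx_pkts;
            for (i = 0; i < num; i++)
                    *buf++ = tqps[i].stats.rx_pkts;
            return buf;
    }

    int main(void)
    {
            struct demo_tqp tqps[2] = {
                    { .index = 0, .stats = { .tx_pkts = 10, .rx_pkts = 20 } },
                    { .index = 1, .stats = { .tx_pkts = 30, .rx_pkts = 40 } },
            };
            uint64_t data[4];

            /* Both the PF and VF ethtool paths would call the same helper
             * instead of maintaining two near-identical copies of it.
             */
            demo_tqps_get_stats(tqps, 2, data);
            printf("tx: %" PRIu64 " %" PRIu64 ", rx: %" PRIu64 " %" PRIu64 "\n",
                   data[0], data[1], data[2], data[3]);
            return 0;
    }

In the actual series, the equivalent role is played by the common helpers declared in hns3_common/hclge_comm_tqp_stats.h and hns3_common/hclge_comm_rss.h in the diff below.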
...@@ -18,11 +18,12 @@ hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o ...@@ -18,11 +18,12 @@ hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \ hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
obj-$(CONFIG_HNS3_HCLGE) += hclge.o obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \ hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
...@@ -61,7 +61,7 @@ static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev, ...@@ -61,7 +61,7 @@ static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
} }
void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_comm_opcode_type opcode, enum hclge_opcode_type opcode,
bool is_read) bool is_read)
{ {
memset((void *)desc, 0, sizeof(struct hclge_desc)); memset((void *)desc, 0, sizeof(struct hclge_desc));
...@@ -73,15 +73,14 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, ...@@ -73,15 +73,14 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR); desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
} }
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, bool is_pf, int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, bool en) struct hclge_comm_hw *hw, bool en)
{ {
struct hclge_comm_firmware_compat_cmd *req; struct hclge_comm_firmware_compat_cmd *req;
struct hclge_desc desc; struct hclge_desc desc;
u32 compat = 0; u32 compat = 0;
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_IMP_COMPAT_CFG, hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
false);
if (en) { if (en) {
req = (struct hclge_comm_firmware_compat_cmd *)desc.data; req = (struct hclge_comm_firmware_compat_cmd *)desc.data;
...@@ -96,7 +95,7 @@ int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, bool is_pf, ...@@ -96,7 +95,7 @@ int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, bool is_pf,
req->compat = cpu_to_le32(compat); req->compat = cpu_to_le32(compat);
} }
return hclge_comm_cmd_send(hw, &desc, 1, is_pf); return hclge_comm_cmd_send(hw, &desc, 1);
} }
void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring) void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
...@@ -205,11 +204,11 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev, ...@@ -205,11 +204,11 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
struct hclge_desc desc; struct hclge_desc desc;
int ret; int ret;
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_QUERY_FW_VER, 1); hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_comm_query_version_cmd *)desc.data; resp = (struct hclge_comm_query_version_cmd *)desc.data;
resp->api_caps = hclge_comm_build_api_caps(); resp->api_caps = hclge_comm_build_api_caps();
ret = hclge_comm_cmd_send(hw, &desc, 1, is_pf); ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) if (ret)
return ret; return ret;
...@@ -227,46 +226,32 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev, ...@@ -227,46 +226,32 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
return ret; return ret;
} }
static bool hclge_is_elem_in_array(const u16 *spec_opcode, u32 size, u16 opcode) static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
HCLGE_OPC_STATS_32_BIT,
HCLGE_OPC_STATS_MAC,
HCLGE_OPC_STATS_MAC_ALL,
HCLGE_OPC_QUERY_32_BIT_REG,
HCLGE_OPC_QUERY_64_BIT_REG,
HCLGE_QUERY_CLEAR_MPF_RAS_INT,
HCLGE_QUERY_CLEAR_PF_RAS_INT,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
HCLGE_QUERY_ALL_ERR_INFO };
static bool hclge_comm_is_special_opcode(u16 opcode)
{ {
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
u32 i; u32 i;
for (i = 0; i < size; i++) { for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
if (spec_opcode[i] == opcode) if (spec_opcode[i] == opcode)
return true; return true;
}
return false; return false;
} }
static const u16 pf_spec_opcode[] = { HCLGE_COMM_OPC_STATS_64_BIT,
HCLGE_COMM_OPC_STATS_32_BIT,
HCLGE_COMM_OPC_STATS_MAC,
HCLGE_COMM_OPC_STATS_MAC_ALL,
HCLGE_COMM_OPC_QUERY_32_BIT_REG,
HCLGE_COMM_OPC_QUERY_64_BIT_REG,
HCLGE_COMM_QUERY_CLEAR_MPF_RAS_INT,
HCLGE_COMM_QUERY_CLEAR_PF_RAS_INT,
HCLGE_COMM_QUERY_CLEAR_ALL_MPF_MSIX_INT,
HCLGE_COMM_QUERY_CLEAR_ALL_PF_MSIX_INT,
HCLGE_COMM_QUERY_ALL_ERR_INFO };
static const u16 vf_spec_opcode[] = { HCLGE_COMM_OPC_STATS_64_BIT,
HCLGE_COMM_OPC_STATS_32_BIT,
HCLGE_COMM_OPC_STATS_MAC };
static bool hclge_comm_is_special_opcode(u16 opcode, bool is_pf)
{
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
const u16 *spec_opcode = is_pf ? pf_spec_opcode : vf_spec_opcode;
u32 size = is_pf ? ARRAY_SIZE(pf_spec_opcode) :
ARRAY_SIZE(vf_spec_opcode);
return hclge_is_elem_in_array(spec_opcode, size, opcode);
}
static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring) static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{ {
int ntc = ring->next_to_clean; int ntc = ring->next_to_clean;
...@@ -378,7 +363,7 @@ static int hclge_comm_cmd_convert_err_code(u16 desc_ret) ...@@ -378,7 +363,7 @@ static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw, static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
struct hclge_desc *desc, int num, struct hclge_desc *desc, int num,
int ntc, bool is_pf) int ntc)
{ {
u16 opcode, desc_ret; u16 opcode, desc_ret;
int handle; int handle;
...@@ -390,7 +375,7 @@ static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw, ...@@ -390,7 +375,7 @@ static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
if (ntc >= hw->cmq.csq.desc_num) if (ntc >= hw->cmq.csq.desc_num)
ntc = 0; ntc = 0;
} }
if (likely(!hclge_comm_is_special_opcode(opcode, is_pf))) if (likely(!hclge_comm_is_special_opcode(opcode)))
desc_ret = le16_to_cpu(desc[num - 1].retval); desc_ret = le16_to_cpu(desc[num - 1].retval);
else else
desc_ret = le16_to_cpu(desc[0].retval); desc_ret = le16_to_cpu(desc[0].retval);
...@@ -402,7 +387,7 @@ static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw, ...@@ -402,7 +387,7 @@ static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw, static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
struct hclge_desc *desc, struct hclge_desc *desc,
int num, int ntc, bool is_pf) int num, int ntc)
{ {
bool is_completed = false; bool is_completed = false;
int handle, ret; int handle, ret;
...@@ -416,7 +401,7 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw, ...@@ -416,7 +401,7 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
if (!is_completed) if (!is_completed)
ret = -EBADE; ret = -EBADE;
else else
ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc, is_pf); ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);
/* Clean the command send queue */ /* Clean the command send queue */
handle = hclge_comm_cmd_csq_clean(hw); handle = hclge_comm_cmd_csq_clean(hw);
...@@ -433,13 +418,12 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw, ...@@ -433,13 +418,12 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
* @hw: pointer to the hw struct * @hw: pointer to the hw struct
* @desc: prefilled descriptor for describing the command * @desc: prefilled descriptor for describing the command
* @num : the number of descriptors to be sent * @num : the number of descriptors to be sent
* @is_pf: bool to judge pf/vf module
* *
* This is the main send command for command queue, it * This is the main send command for command queue, it
* sends the queue, cleans the queue, etc * sends the queue, cleans the queue, etc
**/ **/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num, bool is_pf) int num)
{ {
struct hclge_comm_cmq_ring *csq = &hw->cmq.csq; struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
int ret; int ret;
...@@ -474,7 +458,7 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, ...@@ -474,7 +458,7 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
hw->cmq.csq.next_to_use); hw->cmq.csq.next_to_use);
ret = hclge_comm_cmd_check_result(hw, desc, num, ntc, is_pf); ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock); spin_unlock_bh(&hw->cmq.csq.lock);
...@@ -495,12 +479,12 @@ static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw) ...@@ -495,12 +479,12 @@ static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0); hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
} }
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, bool is_pf, void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw) struct hclge_comm_hw *hw)
{ {
struct hclge_comm_cmq *cmdq = &hw->cmq; struct hclge_comm_cmq *cmdq = &hw->cmq;
hclge_comm_firmware_compat_config(ae_dev, is_pf, hw, false); hclge_comm_firmware_compat_config(ae_dev, hw, false);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state); set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
/* wait to ensure that the firmware completes the possible left /* wait to ensure that the firmware completes the possible left
...@@ -612,7 +596,7 @@ int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, ...@@ -612,7 +596,7 @@ int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
/* ask the firmware to enable some features, driver can work without /* ask the firmware to enable some features, driver can work without
* it. * it.
*/ */
ret = hclge_comm_firmware_compat_config(ae_dev, is_pf, hw, true); ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
if (ret) if (ret)
dev_warn(&ae_dev->pdev->dev, dev_warn(&ae_dev->pdev->dev,
"Firmware compatible features not enabled(%d).\n", "Firmware compatible features not enabled(%d).\n",
......
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2021-2021 Hisilicon Limited.
#ifndef __HCLGE_COMM_RSS_H
#define __HCLGE_COMM_RSS_H
#include <linux/types.h>
#include "hnae3.h"
#include "hclge_comm_cmd.h"
#define HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_COMM_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_COMM_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_COMM_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_COMM_D_PORT_BIT BIT(0)
#define HCLGE_COMM_S_PORT_BIT BIT(1)
#define HCLGE_COMM_D_IP_BIT BIT(2)
#define HCLGE_COMM_S_IP_BIT BIT(3)
#define HCLGE_COMM_V_TAG_BIT BIT(4)
#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_IP_BIT | HCLGE_COMM_V_TAG_BIT)
#define HCLGE_COMM_MAX_TC_NUM 8
#define HCLGE_COMM_RSS_TC_OFFSET_S 0
#define HCLGE_COMM_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGE_COMM_RSS_TC_SIZE_MSB_B 11
#define HCLGE_COMM_RSS_TC_SIZE_S 12
#define HCLGE_COMM_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_COMM_RSS_TC_VALID_B 15
#define HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET 3
struct hclge_comm_rss_tuple_cfg {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
};
#define HCLGE_COMM_RSS_KEY_SIZE 40
#define HCLGE_COMM_RSS_CFG_TBL_SIZE 16
#define HCLGE_COMM_RSS_CFG_TBL_BW_H 2U
#define HCLGE_COMM_RSS_CFG_TBL_BW_L 8U
#define HCLGE_COMM_RSS_CFG_TBL_SIZE_H 4
#define HCLGE_COMM_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_COMM_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_COMM_RSS_HASH_KEY_OFFSET_B 4
#define HCLGE_COMM_RSS_HASH_KEY_NUM 16
struct hclge_comm_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGE_COMM_RSS_HASH_KEY_NUM];
};
struct hclge_comm_rss_cfg {
u8 rss_hash_key[HCLGE_COMM_RSS_KEY_SIZE]; /* user configured hash keys */
/* shadow table */
u16 *rss_indirection_tbl;
u32 rss_algo;
struct hclge_comm_rss_tuple_cfg rss_tuple_sets;
u32 rss_size;
};
struct hclge_comm_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
struct hclge_comm_rss_ind_tbl_cmd {
__le16 start_table_index;
__le16 rss_set_bitmap;
u8 rss_qid_h[HCLGE_COMM_RSS_CFG_TBL_SIZE_H];
u8 rss_qid_l[HCLGE_COMM_RSS_CFG_TBL_SIZE];
};
struct hclge_comm_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGE_COMM_MAX_TC_NUM];
u8 rsv[8];
};
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
void hclge_comm_get_rss_type(struct hnae3_handle *nic,
struct hclge_comm_rss_tuple_cfg *rss_tuple_sets);
void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_cfg *rss_cfg);
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
u8 *tuple_sets);
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
const u8 hfunc, u8 *hash_algo);
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
u8 *hfunc);
void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
u32 *indir, __le16 rss_ind_tbl_size);
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
struct ethtool_rxnfc *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req);
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
struct hclge_comm_hw *hw, bool is_pf,
struct hclge_comm_rss_cfg *rss_cfg);
int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, const u16 *indir);
int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_cfg *rss_cfg);
void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size);
int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size);
int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
struct hclge_comm_hw *hw, const u8 *key,
const u8 hfunc);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
struct ethtool_rxnfc *nfc);
#endif
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.
#include <linux/err.h>
#include "hnae3.h"
#include "hclge_comm_cmd.h"
#include "hclge_comm_tqp_stats.h"
u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_comm_tqp *tqp;
u64 *buff = data;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
}
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
}
return buff;
}
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
}
u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
buff += ETH_GSTRING_LEN;
}
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
buff += ETH_GSTRING_LEN;
}
return buff;
}
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_comm_tqp *tqp;
struct hclge_desc desc;
int ret;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_RX_STATS,
true);
desc.data[0] = cpu_to_le32(tqp->index);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to get tqp stat, ret = %d, tx = %u.\n",
ret, i);
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
le32_to_cpu(desc.data[1]);
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
true);
desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to get tqp stat, ret = %d, rx = %u.\n",
ret, i);
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
le32_to_cpu(desc.data[1]);
}
return 0;
}
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_comm_tqp *tqp;
struct hnae3_queue *queue;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
queue = kinfo->tqp[i];
tqp = container_of(queue, struct hclge_comm_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
}
}
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2021-2021 Hisilicon Limited.
#ifndef __HCLGE_COMM_TQP_STATS_H
#define __HCLGE_COMM_TQP_STATS_H
#include <linux/types.h>
#include <linux/etherdevice.h>
#include "hnae3.h"
/* each tqp has TX & RX two queues */
#define HCLGE_COMM_QUEUE_PAIR_SIZE 2
/* TQP stats */
struct hclge_comm_tqp_stats {
/* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */
u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
/* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */
u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};
struct hclge_comm_tqp {
/* copy of device pointer from pci_dev,
* used when perform DMA mapping
*/
struct device *dev;
struct hnae3_queue q;
struct hclge_comm_tqp_stats tqp_stats;
u16 index; /* Global index in a NIC controller */
bool alloced;
};
u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw);
#endif
...@@ -20,252 +20,8 @@ struct hclge_misc_vector { ...@@ -20,252 +20,8 @@ struct hclge_misc_vector {
char name[HNAE3_INT_NAME_LEN]; char name[HNAE3_INT_NAME_LEN];
}; };
enum hclge_opcode_type {
/* Generic commands */
HCLGE_OPC_QUERY_FW_VER = 0x0001,
HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
HCLGE_OPC_GBL_RST_STATUS = 0x0021,
HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
HCLGE_OPC_GET_CFG_PARAM = 0x0025,
HCLGE_OPC_PF_RST_DONE = 0x0026,
HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
HCLGE_OPC_STATS_MAC_ALL = 0x0034,
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
HCLGE_OPC_DFX_BD_NUM = 0x0043,
HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
HCLGE_OPC_DFX_NCSI_REG = 0x004A,
HCLGE_OPC_DFX_RTC_REG = 0x004B,
HCLGE_OPC_DFX_PPP_REG = 0x004C,
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
/* PTP commands */
HCLGE_OPC_PTP_INT_EN = 0x0501,
HCLGE_OPC_PTP_MODE_CFG = 0x0507,
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
HCLGE_OPC_CFG_MAC_PARA = 0x0703,
HCLGE_OPC_CFG_PFC_PARA = 0x0704,
HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
HCLGE_OPC_QOS_MAP = 0x070A,
/* ETS/scheduler commands */
HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
HCLGE_OPC_TM_NODES = 0x0816,
HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
HCLGE_OPC_QSET_DFX_STS = 0x0844,
HCLGE_OPC_PRI_DFX_STS = 0x0845,
HCLGE_OPC_PG_DFX_STS = 0x0846,
HCLGE_OPC_PORT_DFX_STS = 0x0847,
HCLGE_OPC_SCH_NQ_CNT = 0x0848,
HCLGE_OPC_SCH_RQ_CNT = 0x0849,
HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
/* TQP management command */
HCLGE_OPC_SET_TQP_MAP = 0x0A01,
/* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
/* PPU commands */
HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGE_OPC_RSS_TC_MODE = 0x0D08,
HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
/* Promisuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
/* Vlan offload commands */
HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
/* Interrupts commands */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
/* MAC commands */
HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
/* MAC VLAN commands */
HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
/* Flow Director commands */
HCLGE_OPC_FD_MODE_CTRL = 0x1200,
HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
HCLGE_OPC_FD_TCAM_OP = 0x1203,
HCLGE_OPC_FD_AD_OP = 0x1204,
HCLGE_OPC_FD_CNT_OP = 0x1205,
HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
/* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
/* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
/* clear hardware resource command */
HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
/* IMP stats command */
HCLGE_OPC_IMP_STATS_BD = 0x7012,
HCLGE_OPC_IMP_STATS_INFO = 0x7013,
HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
HCLGE_OPC_GET_SFP_EXIST = 0x7101,
HCLGE_OPC_GET_SFP_INFO = 0x7104,
/* Error INT commands */
HCLGE_MAC_COMMON_INT_EN = 0x030E,
HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
HCLGE_SSU_ECC_INT_CMD = 0x0989,
HCLGE_SSU_COMMON_INT_CMD = 0x098C,
HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
HCLGE_COMMON_ECC_INT_CFG = 0x1505,
HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
HCLGE_PPP_CMD0_INT_CMD = 0x2100,
HCLGE_PPP_CMD1_INT_CMD = 0x2101,
HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
HCLGE_NCSI_INT_EN = 0x2401,
/* PHY command */
HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
HCLGE_OPC_PHY_REG = 0x7026,
/* Query link diagnosis info command */
HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
};
#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \ #define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \
hclge_comm_cmd_setup_basic_desc(desc, (enum hclge_comm_opcode_type)opcode, \ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
is_read)
#define HCLGE_TQP_REG_OFFSET 0x80000 #define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200 #define HCLGE_TQP_REG_SIZE 0x200
...@@ -481,38 +237,10 @@ struct hclge_vf_num_cmd { ...@@ -481,38 +237,10 @@ struct hclge_vf_num_cmd {
}; };
#define HCLGE_RSS_DEFAULT_OUTPORT_B 4 #define HCLGE_RSS_DEFAULT_OUTPORT_B 4
#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
#define HCLGE_RSS_HASH_KEY_NUM 16
struct hclge_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
};
struct hclge_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
#define HCLGE_RSS_CFG_TBL_SIZE 16
#define HCLGE_RSS_CFG_TBL_SIZE_H 4 #define HCLGE_RSS_CFG_TBL_SIZE_H 4
#define HCLGE_RSS_CFG_TBL_BW_H 2U
#define HCLGE_RSS_CFG_TBL_BW_L 8U #define HCLGE_RSS_CFG_TBL_BW_L 8U
struct hclge_rss_indirection_table_cmd {
__le16 start_table_index;
__le16 rss_set_bitmap;
u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H];
u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE];
};
#define HCLGE_RSS_TC_OFFSET_S 0 #define HCLGE_RSS_TC_OFFSET_S 0
#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0) #define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGE_RSS_TC_SIZE_MSB_B 11 #define HCLGE_RSS_TC_SIZE_MSB_B 11
...@@ -520,10 +248,6 @@ struct hclge_rss_indirection_table_cmd { ...@@ -520,10 +248,6 @@ struct hclge_rss_indirection_table_cmd {
#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) #define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3 #define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3
#define HCLGE_RSS_TC_VALID_B 15 #define HCLGE_RSS_TC_VALID_B 15
struct hclge_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
u8 rsv[8];
};
#define HCLGE_LINK_STATUS_UP_B 0 #define HCLGE_LINK_STATUS_UP_B 0
#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B) #define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
......
...@@ -203,7 +203,7 @@ static int hclge_map_update(struct hclge_dev *hdev) ...@@ -203,7 +203,7 @@ static int hclge_map_update(struct hclge_dev *hdev)
if (ret) if (ret)
return ret; return ret;
hclge_rss_indir_init_cfg(hdev); hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
return hclge_rss_init_hw(hdev); return hclge_rss_init_hw(hdev);
} }
......
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
#include "hclge_cmd.h" #include "hclge_cmd.h"
#include "hclge_ptp.h" #include "hclge_ptp.h"
#include "hnae3.h" #include "hnae3.h"
#include "hclge_comm_rss.h"
#include "hclge_comm_tqp_stats.h"
#define HCLGE_MOD_VERSION "1.0" #define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge" #define HCLGE_DRIVER_NAME "hclge"
...@@ -80,22 +82,6 @@ ...@@ -80,22 +82,6 @@
#define HCLGE_TQP_INTR_RL_REG 0x20900 #define HCLGE_TQP_INTR_RL_REG 0x20900
#define HCLGE_RSS_IND_TBL_SIZE 512 #define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_D_PORT_BIT BIT(0)
#define HCLGE_S_PORT_BIT BIT(1)
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1 #define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2 #define HCLGE_RSS_TC_SIZE_1 2
...@@ -285,26 +271,6 @@ struct hclge_hw { ...@@ -285,26 +271,6 @@ struct hclge_hw {
int num_vec; int num_vec;
}; };
/* TQP stats */
struct hlcge_tqp_stats {
/* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */
u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
/* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */
u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};
struct hclge_tqp {
/* copy of device pointer from pci_dev,
* used when perform DMA mapping
*/
struct device *dev;
struct hnae3_queue q;
struct hlcge_tqp_stats tqp_stats;
u16 index; /* Global index in a NIC controller */
bool alloced;
};
enum hclge_fc_mode { enum hclge_fc_mode {
HCLGE_FC_NONE, HCLGE_FC_NONE,
HCLGE_FC_RX_PAUSE, HCLGE_FC_RX_PAUSE,
...@@ -909,7 +875,7 @@ struct hclge_dev { ...@@ -909,7 +875,7 @@ struct hclge_dev {
bool cur_promisc; bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */ int num_alloc_vfs; /* Actual number of VFs allocated */
struct hclge_tqp *htqp; struct hclge_comm_tqp *htqp;
struct hclge_vport *vport; struct hclge_vport *vport;
struct dentry *hclge_dbgfs; struct dentry *hclge_dbgfs;
...@@ -968,6 +934,7 @@ struct hclge_dev { ...@@ -968,6 +934,7 @@ struct hclge_dev {
cpumask_t affinity_mask; cpumask_t affinity_mask;
struct hclge_ptp *ptp; struct hclge_ptp *ptp;
struct devlink *devlink; struct devlink *devlink;
struct hclge_comm_rss_cfg rss_cfg;
}; };
/* VPort level vlan tag configuration for TX direction */ /* VPort level vlan tag configuration for TX direction */
...@@ -994,17 +961,6 @@ struct hclge_rx_vtag_cfg { ...@@ -994,17 +961,6 @@ struct hclge_rx_vtag_cfg {
bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */ bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */
}; };
struct hclge_rss_tuple_cfg {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
};
enum HCLGE_VPORT_STATE { enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE, HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE, HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
...@@ -1038,15 +994,6 @@ struct hclge_vf_info { ...@@ -1038,15 +994,6 @@ struct hclge_vf_info {
struct hclge_vport { struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */ u16 alloc_tqps; /* Allocated Tx/Rx queues */
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
u16 *rss_indirection_tbl;
int rss_algo; /* User configured hash algorithm */
/* User configured rss tuple sets */
struct hclge_rss_tuple_cfg rss_tuple_sets;
u16 alloc_rss_size;
u16 qs_offset; u16 qs_offset;
u32 bw_limit; /* VSI BW Limit (0 = disabled) */ u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr; u8 dwrr;
...@@ -1107,7 +1054,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, ...@@ -1107,7 +1054,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
static inline int hclge_get_queue_id(struct hnae3_queue *queue) static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{ {
struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q); struct hclge_comm_tqp *tqp =
container_of(queue, struct hclge_comm_tqp, q);
return tqp->index; return tqp->index;
} }
...@@ -1125,7 +1073,6 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable); ...@@ -1125,7 +1073,6 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle); int hclge_reset_tqp(struct hnae3_handle *handle);
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include "hclge_main.h" #include "hclge_main.h"
#include "hclge_mbx.h" #include "hclge_mbx.h"
#include "hnae3.h" #include "hnae3.h"
#include "hclge_comm_rss.h"
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include "hclge_trace.h" #include "hclge_trace.h"
...@@ -612,15 +613,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport, ...@@ -612,15 +613,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
{ {
#define HCLGE_RSS_MBX_RESP_LEN 8 #define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
struct hclge_comm_rss_cfg *rss_cfg;
u8 index; u8 index;
index = mbx_req->msg.data[0]; index = mbx_req->msg.data[0];
rss_cfg = &hdev->rss_cfg;
/* Check the query index of rss_hash_key from VF, make sure no /* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key. * more than the size of rss_hash_key.
*/ */
if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
sizeof(vport[0].rss_hash_key)) { sizeof(rss_cfg->rss_hash_key)) {
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n", "failed to get the rss hash key, the index(%u) invalid !\n",
index); index);
...@@ -628,7 +631,7 @@ static void hclge_get_rss_key(struct hclge_vport *vport, ...@@ -628,7 +631,7 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
} }
memcpy(resp_msg->data, memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN); HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
} }
......
...@@ -678,8 +678,8 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -678,8 +678,8 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
hclge_tm_update_kinfo_rss_size(vport); hclge_tm_update_kinfo_rss_size(vport);
kinfo->num_tqps = hclge_vport_get_tqp_num(vport); kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */ vport->dwrr = 100; /* 100 percent as init */
vport->alloc_rss_size = kinfo->rss_size;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
hdev->rss_cfg.rss_size = kinfo->rss_size;
/* when enable mqprio, the tc_info has been updated. */ /* when enable mqprio, the tc_info has been updated. */
if (kinfo->tc_info.mqprio_active) if (kinfo->tc_info.mqprio_active)
......
...@@ -16,30 +16,6 @@ struct hclgevf_dev; ...@@ -16,30 +16,6 @@ struct hclgevf_dev;
#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4 #define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
/* GRO command */
HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
/* Mailbox cmd */
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
/* IMP stats command */
HCLGEVF_OPC_IMP_COMPAT_CFG = 0x701A,
};
#define HCLGEVF_TQP_REG_OFFSET 0x80000 #define HCLGEVF_TQP_REG_OFFSET 0x80000
#define HCLGEVF_TQP_REG_SIZE 0x200 #define HCLGEVF_TQP_REG_SIZE 0x200
...@@ -97,50 +73,6 @@ struct hclgevf_cfg_gro_status_cmd { ...@@ -97,50 +73,6 @@ struct hclgevf_cfg_gro_status_cmd {
u8 rsv[23]; u8 rsv[23];
}; };
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
};
struct hclgevf_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
#define HCLGEVF_RSS_CFG_TBL_SIZE 16
struct hclgevf_rss_indirection_table_cmd {
__le16 start_table_index;
__le16 rss_set_bitmap;
u8 rsv[4];
u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
};
#define HCLGEVF_RSS_TC_OFFSET_S 0
#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGEVF_RSS_TC_SIZE_MSB_B 11
#define HCLGEVF_RSS_TC_SIZE_S 12
#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3
struct hclgevf_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
u8 rsv[8];
};
#define HCLGEVF_LINK_STS_B 0 #define HCLGEVF_LINK_STS_B 0
#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B) #define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
struct hclgevf_link_status_cmd { struct hclgevf_link_status_cmd {
...@@ -177,8 +109,7 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { ...@@ -177,8 +109,7 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4 #define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \ #define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \
hclge_comm_cmd_setup_basic_desc(desc, (enum hclge_comm_opcode_type)opcode, \ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
is_read)
struct hclgevf_dev_specs_0_cmd { struct hclgevf_dev_specs_0_cmd {
__le32 rsv0; __le32 rsv0;
......
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
#include "hclge_mbx.h" #include "hclge_mbx.h"
#include "hclgevf_cmd.h" #include "hclgevf_cmd.h"
#include "hnae3.h" #include "hnae3.h"
#include "hclge_comm_rss.h"
#include "hclge_comm_tqp_stats.h"
#define HCLGEVF_MOD_VERSION "1.0" #define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf" #define HCLGEVF_DRIVER_NAME "hclgevf"
...@@ -93,22 +95,6 @@ ...@@ -93,22 +95,6 @@
#define HCLGEVF_WAIT_RESET_DONE 100 #define HCLGEVF_WAIT_RESET_DONE 100
#define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
#define HCLGEVF_RSS_KEY_SIZE 40
#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT BIT(0)
#define HCLGEVF_S_PORT_BIT BIT(1)
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_MAC_MAX_FRAME 9728 #define HCLGEVF_MAC_MAX_FRAME 9728
...@@ -163,23 +149,6 @@ struct hclgevf_hw { ...@@ -163,23 +149,6 @@ struct hclgevf_hw {
struct hclgevf_mac mac; struct hclgevf_mac mac;
}; };
/* TQP stats */
struct hlcgevf_tqp_stats {
/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};
struct hclgevf_tqp {
struct device *dev; /* device for DMA mapping */
struct hnae3_queue q;
struct hlcgevf_tqp_stats tqp_stats;
u16 index; /* global index in a NIC controller */
bool alloced;
};
struct hclgevf_cfg { struct hclgevf_cfg {
u8 tc_num; u8 tc_num;
u16 tqp_desc_num; u16 tqp_desc_num;
...@@ -190,27 +159,6 @@ struct hclgevf_cfg { ...@@ -190,27 +159,6 @@ struct hclgevf_cfg {
u32 numa_node_map; u32 numa_node_map;
}; };
struct hclgevf_rss_tuple_cfg {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
};
struct hclgevf_rss_cfg {
u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
/* shadow table */
u8 *rss_indirection_tbl;
struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};
struct hclgevf_misc_vector { struct hclgevf_misc_vector {
u8 __iomem *addr; u8 __iomem *addr;
int vector_irq; int vector_irq;
...@@ -255,7 +203,7 @@ struct hclgevf_dev { ...@@ -255,7 +203,7 @@ struct hclgevf_dev {
struct hnae3_ae_dev *ae_dev; struct hnae3_ae_dev *ae_dev;
struct hclgevf_hw hw; struct hclgevf_hw hw;
struct hclgevf_misc_vector misc_vector; struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg; struct hclge_comm_rss_cfg rss_cfg;
unsigned long state; unsigned long state;
unsigned long flr_state; unsigned long flr_state;
unsigned long default_reset_request; unsigned long default_reset_request;
...@@ -306,7 +254,7 @@ struct hclgevf_dev { ...@@ -306,7 +254,7 @@ struct hclgevf_dev {
struct delayed_work service_task; struct delayed_work service_task;
struct hclgevf_tqp *htqp; struct hclge_comm_tqp *htqp;
struct hnae3_handle nic; struct hnae3_handle nic;
struct hnae3_handle roce; struct hnae3_handle roce;
......