Commit 20a9013e authored by David S. Miller

Merge branch 'hns3-next'

Guangbin Huang says:

====================
net: hns3: refactor cmdq functions in PF/VF

Currently, the hns3 PF and VF modules each have their own set of cmdq APIs
to provide cmdq message interaction functions. Most of these APIs are the
same; the only differences are the function arguments and the pf/vf
suffixes in the names. These two sets of cmdq APIs are redundant and add
extra bug-fix work.

This series refactors the cmdq APIs of the hns3 PF and VF by implementing
one set of common cmdq APIs that both PF and VF reuse, and by deleting the
old APIs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents af30f8ea aab8d1c6
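[Not part of the patch: a minimal usage sketch of the shared cmdq API that this series introduces. The struct, enum and function names below come from the new hclge_comm_cmd.h shown further down; the wrapper name example_query_fw_ver() is made up for illustration and error handling is simplified. The series itself exposes hclge_comm_cmd_query_version_and_capability() for this particular job, so treat this only as a shape-of-the-API example.]

#include "hclge_comm_cmd.h"

/* Illustrative only: query the firmware version through the common
 * cmdq layer shared by PF and VF.
 */
static int example_query_fw_ver(struct hclge_comm_hw *hw, bool is_pf,
				u32 *fw_version)
{
	struct hclge_comm_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	/* one descriptor, read direction */
	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_QUERY_FW_VER,
					true);

	/* same call path for PF and VF; is_pf tells the common code
	 * which side it is serving
	 */
	ret = hclge_comm_cmd_send(hw, &desc, 1, is_pf);
	if (ret)
		return ret;

	resp = (struct hclge_comm_query_version_cmd *)desc.data;
	*fw_version = le32_to_cpu(resp->firmware);

	return 0;
}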
@@ -4,9 +4,9 @@
#
ccflags-y += -I$(srctree)/$(src)
obj-$(CONFIG_HNS3) += hns3pf/
obj-$(CONFIG_HNS3) += hns3vf/
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
obj-$(CONFIG_HNS3) += hnae3.o
@@ -14,3 +14,15 @@ obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \
hns3_common/hclge_comm_cmd.o
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
hns3_common/hclge_comm_cmd.o
hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2021-2021 Hisilicon Limited.
#ifndef __HCLGE_COMM_CMD_H
#define __HCLGE_COMM_CMD_H
#include <linux/types.h>
#include "hnae3.h"
#define HCLGE_COMM_CMD_FLAG_IN BIT(0)
#define HCLGE_COMM_CMD_FLAG_NEXT BIT(2)
#define HCLGE_COMM_CMD_FLAG_WR BIT(3)
#define HCLGE_COMM_CMD_FLAG_NO_INTR BIT(4)
#define HCLGE_COMM_SEND_SYNC(flag) \
((flag) & HCLGE_COMM_CMD_FLAG_NO_INTR)
#define HCLGE_COMM_LINK_EVENT_REPORT_EN_B 0
#define HCLGE_COMM_NCSI_ERROR_REPORT_EN_B 1
#define HCLGE_COMM_PHY_IMP_EN_B 2
#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
#define hclge_comm_dev_phy_imp_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
#define HCLGE_COMM_TYPE_CRQ 0
#define HCLGE_COMM_TYPE_CSQ 1
#define HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME 200
/* bar registers for cmdq */
#define HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG 0x27000
#define HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG 0x27004
#define HCLGE_COMM_NIC_CSQ_DEPTH_REG 0x27008
#define HCLGE_COMM_NIC_CSQ_TAIL_REG 0x27010
#define HCLGE_COMM_NIC_CSQ_HEAD_REG 0x27014
#define HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG 0x27018
#define HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG 0x2701C
#define HCLGE_COMM_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGE_COMM_NIC_CRQ_TAIL_REG 0x27024
#define HCLGE_COMM_NIC_CRQ_HEAD_REG 0x27028
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGE_COMM_VECTOR0_CMDQ_SRC_REG 0x27100
/* Vector0 interrupt CMDQ event status register(RO) */
#define HCLGE_COMM_VECTOR0_CMDQ_STATE_REG 0x27104
#define HCLGE_COMM_CMDQ_INTR_EN_REG 0x27108
#define HCLGE_COMM_CMDQ_INTR_GEN_REG 0x2710C
#define HCLGE_COMM_CMDQ_INTR_STS_REG 0x27104
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGE_COMM_NIC_SW_RST_RDY_B 16
#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000
enum hclge_comm_cmd_return_status {
HCLGE_COMM_CMD_EXEC_SUCCESS = 0,
HCLGE_COMM_CMD_NO_AUTH = 1,
HCLGE_COMM_CMD_NOT_SUPPORTED = 2,
HCLGE_COMM_CMD_QUEUE_FULL = 3,
HCLGE_COMM_CMD_NEXT_ERR = 4,
HCLGE_COMM_CMD_UNEXE_ERR = 5,
HCLGE_COMM_CMD_PARA_ERR = 6,
HCLGE_COMM_CMD_RESULT_ERR = 7,
HCLGE_COMM_CMD_TIMEOUT = 8,
HCLGE_COMM_CMD_HILINK_ERR = 9,
HCLGE_COMM_CMD_QUEUE_ILLEGAL = 10,
HCLGE_COMM_CMD_INVALID = 11,
};
enum hclge_comm_special_cmd {
HCLGE_COMM_OPC_STATS_64_BIT = 0x0030,
HCLGE_COMM_OPC_STATS_32_BIT = 0x0031,
HCLGE_COMM_OPC_STATS_MAC = 0x0032,
HCLGE_COMM_OPC_STATS_MAC_ALL = 0x0034,
HCLGE_COMM_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_COMM_OPC_QUERY_64_BIT_REG = 0x0042,
HCLGE_COMM_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
HCLGE_COMM_QUERY_CLEAR_PF_RAS_INT = 0x1512,
HCLGE_COMM_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
HCLGE_COMM_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
HCLGE_COMM_QUERY_ALL_ERR_INFO = 0x1517,
};
enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_UDP_GSO_B,
HCLGE_COMM_CAP_QB_B,
HCLGE_COMM_CAP_FD_FORWARD_TC_B,
HCLGE_COMM_CAP_PTP_B,
HCLGE_COMM_CAP_INT_QL_B,
HCLGE_COMM_CAP_HW_TX_CSUM_B,
HCLGE_COMM_CAP_TX_PUSH_B,
HCLGE_COMM_CAP_PHY_IMP_B,
HCLGE_COMM_CAP_TQP_TXRX_INDEP_B,
HCLGE_COMM_CAP_HW_PAD_B,
HCLGE_COMM_CAP_STASH_B,
HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B,
HCLGE_COMM_CAP_RAS_IMP_B = 12,
HCLGE_COMM_CAP_FEC_B = 13,
HCLGE_COMM_CAP_PAUSE_B = 14,
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
};
enum HCLGE_COMM_API_CAP_BITS {
HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B,
};
enum hclge_comm_opcode_type {
HCLGE_COMM_OPC_QUERY_FW_VER = 0x0001,
HCLGE_COMM_OPC_IMP_COMPAT_CFG = 0x701A,
};
/* capabilities bits map between imp firmware and local driver */
struct hclge_comm_caps_bit_map {
u16 imp_bit;
u16 local_bit;
};
struct hclge_comm_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
};
enum hclge_comm_cmd_state {
HCLGE_COMM_STATE_CMD_DISABLE,
};
struct hclge_comm_errcode {
u32 imp_errcode;
int common_errno;
};
#define HCLGE_COMM_QUERY_CAP_LENGTH 3
struct hclge_comm_query_version_cmd {
__le32 firmware;
__le32 hardware;
__le32 api_caps;
__le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
};
#define HCLGE_DESC_DATA_LEN 6
struct hclge_desc {
__le16 opcode;
__le16 flag;
__le16 retval;
__le16 rsv;
__le32 data[HCLGE_DESC_DATA_LEN];
};
struct hclge_comm_cmq_ring {
dma_addr_t desc_dma_addr;
struct hclge_desc *desc;
struct pci_dev *pdev;
u32 head;
u32 tail;
u16 buf_size;
u16 desc_num;
int next_to_use;
int next_to_clean;
u8 ring_type; /* cmq ring type */
spinlock_t lock; /* Command queue lock */
};
enum hclge_comm_cmd_status {
HCLGE_COMM_STATUS_SUCCESS = 0,
HCLGE_COMM_ERR_CSQ_FULL = -1,
HCLGE_COMM_ERR_CSQ_TIMEOUT = -2,
HCLGE_COMM_ERR_CSQ_ERROR = -3,
};
struct hclge_comm_cmq {
struct hclge_comm_cmq_ring csq;
struct hclge_comm_cmq_ring crq;
u16 tx_timeout;
enum hclge_comm_cmd_status last_status;
};
struct hclge_comm_hw {
void __iomem *io_base;
void __iomem *mem_base;
struct hclge_comm_cmq cmq;
unsigned long comm_state;
};
static inline void hclge_comm_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
static inline u32 hclge_comm_read_reg(u8 __iomem *base, u32 reg)
{
u8 __iomem *reg_addr = READ_ONCE(base);
return readl(reg_addr + reg);
}
#define hclge_comm_write_dev(a, reg, value) \
hclge_comm_write_reg((a)->io_base, reg, value)
#define hclge_comm_read_dev(a, reg) \
hclge_comm_read_reg((a)->io_base, reg)
void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw);
int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf);
int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type);
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num, bool is_pf);
void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, bool is_pf,
struct hclge_comm_hw *hw, bool en);
void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring);
void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_comm_opcode_type opcode,
bool is_read);
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, bool is_pf,
struct hclge_comm_hw *hw);
int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
unsigned long reset_pending);
#endif
# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
@@ -7,81 +7,19 @@
#include <linux/io.h>
#include <linux/etherdevice.h>
#include "hnae3.h"
#define HCLGE_CMDQ_TX_TIMEOUT 30000
#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200
#define HCLGE_DESC_DATA_LEN 6
#include "hclge_comm_cmd.h"
struct hclge_dev;
struct hclge_desc {
__le16 opcode;
#define HCLGE_CMDQ_RX_INVLD_B 0
#define HCLGE_CMDQ_RX_OUTVLD_B 1
__le16 flag;
__le16 retval;
__le16 rsv;
__le32 data[HCLGE_DESC_DATA_LEN];
};
struct hclge_cmq_ring {
dma_addr_t desc_dma_addr;
struct hclge_desc *desc;
struct hclge_dev *dev;
u32 head;
u32 tail;
u16 buf_size;
u16 desc_num;
int next_to_use;
int next_to_clean;
u8 ring_type; /* cmq ring type */
spinlock_t lock; /* Command queue lock */
};
enum hclge_cmd_return_status {
HCLGE_CMD_EXEC_SUCCESS = 0,
HCLGE_CMD_NO_AUTH = 1,
HCLGE_CMD_NOT_SUPPORTED = 2,
HCLGE_CMD_QUEUE_FULL = 3,
HCLGE_CMD_NEXT_ERR = 4,
HCLGE_CMD_UNEXE_ERR = 5,
HCLGE_CMD_PARA_ERR = 6,
HCLGE_CMD_RESULT_ERR = 7,
HCLGE_CMD_TIMEOUT = 8,
HCLGE_CMD_HILINK_ERR = 9,
HCLGE_CMD_QUEUE_ILLEGAL = 10,
HCLGE_CMD_INVALID = 11,
};
enum hclge_cmd_status {
HCLGE_STATUS_SUCCESS = 0,
HCLGE_ERR_CSQ_FULL = -1,
HCLGE_ERR_CSQ_TIMEOUT = -2,
HCLGE_ERR_CSQ_ERROR = -3,
};
struct hclge_misc_vector {
u8 __iomem *addr;
int vector_irq;
char name[HNAE3_INT_NAME_LEN];
};
struct hclge_cmq {
struct hclge_cmq_ring csq;
struct hclge_cmq_ring crq;
u16 tx_timeout;
enum hclge_cmd_status last_status;
};
#define HCLGE_CMD_FLAG_IN BIT(0)
#define HCLGE_CMD_FLAG_OUT BIT(1)
#define HCLGE_CMD_FLAG_NEXT BIT(2)
#define HCLGE_CMD_FLAG_WR BIT(3)
#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
enum hclge_opcode_type {
/* Generic commands */
HCLGE_OPC_QUERY_FW_VER = 0x0001,
@@ -325,6 +263,10 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
};
#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \
hclge_comm_cmd_setup_basic_desc(desc, (enum hclge_comm_opcode_type)opcode, \
is_read)
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
@@ -391,38 +333,6 @@ struct hclge_rx_priv_buff_cmd {
u8 rsv[6];
};
enum HCLGE_CAP_BITS {
HCLGE_CAP_UDP_GSO_B,
HCLGE_CAP_QB_B,
HCLGE_CAP_FD_FORWARD_TC_B,
HCLGE_CAP_PTP_B,
HCLGE_CAP_INT_QL_B,
HCLGE_CAP_HW_TX_CSUM_B,
HCLGE_CAP_TX_PUSH_B,
HCLGE_CAP_PHY_IMP_B,
HCLGE_CAP_TQP_TXRX_INDEP_B,
HCLGE_CAP_HW_PAD_B,
HCLGE_CAP_STASH_B,
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
HCLGE_CAP_RAS_IMP_B = 12,
HCLGE_CAP_FEC_B = 13,
HCLGE_CAP_PAUSE_B = 14,
HCLGE_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_CAP_PORT_VLAN_BYPASS_B = 17,
};
enum HCLGE_API_CAP_BITS {
HCLGE_API_CAP_FLEX_RSS_TBL_B,
};
#define HCLGE_QUERY_CAP_LENGTH 3
struct hclge_query_version_cmd {
__le32 firmware;
__le32 hardware;
__le32 api_caps;
__le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
};
#define HCLGE_RX_PRIV_EN_B 15
#define HCLGE_TC_NUM_ONE_DESC 4
struct hclge_priv_wl {
@@ -1015,16 +925,6 @@ struct hclge_common_lb_cmd {
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGE_NIC_SW_RST_RDY_B 16
#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_LED_LOCATE_STATE_S 0
#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
@@ -1147,16 +1047,6 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
#define HCLGE_LINK_EVENT_REPORT_EN_B 0
#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
#define HCLGE_PHY_IMP_EN_B 2
#define HCLGE_MAC_STATS_EXT_EN_B 3
#define HCLGE_SYNC_RX_RING_HEAD_EN_B 4
struct hclge_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
};
#define HCLGE_SFP_INFO_CMD_NUM 6
#define HCLGE_SFP_INFO_BD0_LEN 20
#define HCLGE_SFP_INFO_BDX_LEN 24
@@ -1239,44 +1129,10 @@ struct hclge_phy_reg_cmd {
u8 rsv1[18];
};
/* capabilities bits map between imp firmware and local driver */
struct hclge_caps_bit_map {
u16 imp_bit;
u16 local_bit;
};
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
#define hclge_write_dev(a, reg, value) \
hclge_write_reg((a)->io_base, reg, value)
#define hclge_read_dev(a, reg) \
hclge_read_reg((a)->io_base, reg)
static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
{
u8 __iomem *reg_addr = READ_ONCE(base);
return readl(reg_addr + reg);
}
#define HCLGE_SEND_SYNC(flag) \
((flag) & HCLGE_CMD_FLAG_NO_INTR)
struct hclge_hw;
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_opcode_type opcode, bool is_read);
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
struct hclge_desc *desc);
enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
struct hclge_desc *desc);
void hclge_cmd_uninit(struct hclge_dev *hdev);
int hclge_cmd_queue_init(struct hclge_dev *hdev);
enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
struct hclge_desc *desc);
enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
struct hclge_desc *desc);
#endif
@@ -150,7 +150,7 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
desc->data[0] = cpu_to_le32(index);
for (i = 1; i < bd_num; i++) {
desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -1266,7 +1266,7 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
@@ -1302,7 +1302,7 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
@@ -1447,9 +1447,9 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
u32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -1399,7 +1399,7 @@ static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
if (en) {
@@ -1498,7 +1498,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
/* configure PPP error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
@@ -1633,7 +1633,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
/* configure PPU error interrupts */
if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (en) {
desc[0].data[0] =
@@ -1718,7 +1718,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU ecc error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
if (en) {
desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
@@ -1740,7 +1740,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
if (en) {
@@ -1963,7 +1963,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
&ae_dev->hw_err_reset_req);
/* clear all main PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
@@ -2036,7 +2036,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
}
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
@@ -2087,8 +2087,8 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
true);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
if (ret) {
@@ -2119,7 +2119,7 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
HCLGE_CMD_FLAG_NEXT);
HCLGE_COMM_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
@@ -2235,7 +2235,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
/* clear error status */
hclge_cmd_reuse_desc(&desc[0], false);
hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
if (ret) {
dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
@@ -2405,7 +2405,8 @@ static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
else
desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
HCLGE_COMM_CMD_FLAG_IN);
return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
}
@@ -38,20 +38,7 @@
#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
#define HCLGE_VECTOR_VF_OFFSET 0x100000
#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C
#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
#define HCLGE_CMDQ_INTR_STS_REG 0x27104
#define HCLGE_CMDQ_INTR_EN_REG 0x27108
#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
#define HCLGE_GRO_EN_REG 0x28000
@@ -228,7 +215,6 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_ERR_SERVICE_SCHED,
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_FD_TBL_CHANGED,
@@ -294,11 +280,9 @@ struct hclge_mac {
};
struct hclge_hw {
void __iomem *io_base;
void __iomem *mem_base;
struct hclge_comm_hw hw;
struct hclge_mac mac;
int num_vec;
struct hclge_cmq cmq;
};
/* TQP stats */
@@ -641,6 +625,11 @@ struct key_info {
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
#define hclge_read_dev(a, reg) \
hclge_comm_read_reg((a)->hw.io_base, reg)
#define hclge_write_dev(a, reg, value) \
hclge_comm_write_reg((a)->hw.io_base, reg, value)
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
HCLGE_FD_ARFS_ACTIVE,
@@ -33,7 +33,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
enum hclge_cmd_status status;
enum hclge_comm_cmd_status status;
struct hclge_desc desc;
u16 resp;
@@ -90,7 +90,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
enum hclge_cmd_status status;
enum hclge_comm_cmd_status status;
struct hclge_desc desc;
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
@@ -661,9 +661,9 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
return tail == hw->cmq.crq.next_to_use;
return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
@@ -694,7 +694,7 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
@@ -705,7 +705,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
&hdev->hw.hw.comm_state)) {
dev_warn(&hdev->pdev->dev,
"command queue needs re-initializing\n");
return;
@@ -867,5 +868,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
crq->next_to_use);
}
@@ -47,7 +47,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
struct hclge_desc desc;
int ret;
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -85,7 +85,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
struct hclge_desc desc;
int ret;
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
@@ -464,7 +464,7 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
}
spin_lock_init(&ptp->lock);
ptp->io_base = hdev->hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
hdev->ptp = ptp;
# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o
@@ -6,9 +6,8 @@
#include <linux/io.h>
#include <linux/types.h>
#include "hnae3.h"
#include "hclge_comm_cmd.h"
#define HCLGEVF_CMDQ_TX_TIMEOUT 30000
#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200
#define HCLGEVF_CMDQ_RX_INVLD_B 0
#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
@@ -16,83 +15,6 @@ struct hclgevf_hw;
struct hclgevf_dev;
#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
struct hclgevf_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
};
struct hclgevf_desc {
__le16 opcode;
__le16 flag;
__le16 retval;
__le16 rsv;
__le32 data[6];
};
struct hclgevf_desc_cb {
dma_addr_t dma;
void *va;
u32 length;
};
struct hclgevf_cmq_ring {
dma_addr_t desc_dma_addr;
struct hclgevf_desc *desc;
struct hclgevf_desc_cb *desc_cb;
struct hclgevf_dev *dev;
u32 head;
u32 tail;
u16 buf_size;
u16 desc_num;
int next_to_use;
int next_to_clean;
u8 flag;
spinlock_t lock; /* Command queue lock */
};
enum hclgevf_cmd_return_status {
HCLGEVF_CMD_EXEC_SUCCESS = 0,
HCLGEVF_CMD_NO_AUTH = 1,
HCLGEVF_CMD_NOT_SUPPORTED = 2,
HCLGEVF_CMD_QUEUE_FULL = 3,
HCLGEVF_CMD_NEXT_ERR = 4,
HCLGEVF_CMD_UNEXE_ERR = 5,
HCLGEVF_CMD_PARA_ERR = 6,
HCLGEVF_CMD_RESULT_ERR = 7,
HCLGEVF_CMD_TIMEOUT = 8,
HCLGEVF_CMD_HILINK_ERR = 9,
HCLGEVF_CMD_QUEUE_ILLEGAL = 10,
HCLGEVF_CMD_INVALID = 11,
};
enum hclgevf_cmd_status {
HCLGEVF_STATUS_SUCCESS = 0,
HCLGEVF_ERR_CSQ_FULL = -1,
HCLGEVF_ERR_CSQ_TIMEOUT = -2,
HCLGEVF_ERR_CSQ_ERROR = -3
};
struct hclgevf_cmq {
struct hclgevf_cmq_ring csq;
struct hclgevf_cmq_ring crq;
u16 tx_timeout; /* Tx timeout */
enum hclgevf_cmd_status last_status;
};
#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
enum hclgevf_opcode_type {
/* Generic command */
@@ -156,34 +78,6 @@ struct hclgevf_ctrl_vector_chain {
u8 resv;
};
enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_UDP_GSO_B,
HCLGEVF_CAP_QB_B,
HCLGEVF_CAP_FD_FORWARD_TC_B,
HCLGEVF_CAP_PTP_B,
HCLGEVF_CAP_INT_QL_B,
HCLGEVF_CAP_HW_TX_CSUM_B,
HCLGEVF_CAP_TX_PUSH_B,
HCLGEVF_CAP_PHY_IMP_B,
HCLGEVF_CAP_TQP_TXRX_INDEP_B,
HCLGEVF_CAP_HW_PAD_B,
HCLGEVF_CAP_STASH_B,
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15,
};
enum HCLGEVF_API_CAP_BITS {
HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
};
#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd {
__le32 firmware;
__le32 hardware;
__le32 api_caps;
__le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};
#define HCLGEVF_MSIX_OFT_ROCEE_S 0
#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
#define HCLGEVF_VEC_NUM_S 0
@@ -273,9 +167,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
u8 rsv[14];
};
#define HCLGEVF_TYPE_CRQ 0
#define HCLGEVF_TYPE_CSQ 1
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGEVF_NIC_SW_RST_RDY_B 16
#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
@@ -285,6 +176,10 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \
hclge_comm_cmd_setup_basic_desc(desc, (enum hclge_comm_opcode_type)opcode, \
is_read)
struct hclgevf_dev_specs_0_cmd {
__le32 rsv0;
__le32 mac_entry_num;
@@ -305,38 +200,6 @@ struct hclgevf_dev_specs_1_cmd {
u8 rsv1[18];
};
/* capabilities bits map between imp firmware and local driver */
struct hclgevf_caps_bit_map {
u16 imp_bit;
u16 local_bit;
};
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
{
u8 __iomem *reg_addr = READ_ONCE(base);
return readl(reg_addr + reg);
}
#define hclgevf_write_dev(a, reg, value) \
hclgevf_write_reg((a)->io_base, reg, value)
#define hclgevf_read_dev(a, reg) \
hclgevf_read_reg((a)->io_base, reg)
#define HCLGEVF_SEND_SYNC(flag) \
((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
enum hclgevf_opcode_type opcode,
bool is_read);
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num);
void hclgevf_arq_init(struct hclgevf_dev *hdev);
#endif
@@ -32,21 +32,6 @@
#define HCLGEVF_VECTOR_REG_OFFSET 0x4
#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
/* bar registers for cmdq */
#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C
#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
#define HCLGEVF_GRO_EN_REG 0x28000
#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008
@@ -86,10 +71,6 @@
#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
#define HCLGEVF_TQP_INTR_RL_REG 0x20900
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* Vector0 interrupt CMDQ event status register(RO) */
#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -133,6 +114,11 @@
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
#define hclgevf_read_dev(a, reg) \
hclge_comm_read_reg((a)->hw.io_base, reg)
#define hclgevf_write_dev(a, reg, value) \
hclge_comm_write_reg((a)->hw.io_base, reg, value)
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
@@ -154,7 +140,6 @@ enum hclgevf_states {
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE,
HCLGEVF_STATE_LINK_UPDATING,
HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL,
@@ -173,12 +158,9 @@ struct hclgevf_mac {
};
struct hclgevf_hw {
void __iomem *io_base;
void __iomem *mem_base;
struct hclge_comm_hw hw;
int num_vec;
struct hclgevf_cmq cmq;
struct hclgevf_mac mac;
void *hdev; /* hchgevf device it is part of */
};
/* TQP stats */
@@ -53,7 +53,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
&hdev->hw.hw.comm_state))
return -EIO;
usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
@@ -97,7 +98,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
u8 *resp_data, u16 resp_len)
{
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
struct hclge_desc desc;
int status;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
@@ -151,9 +152,9 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);
u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
return tail == hw->cmq.crq.next_to_use;
return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
@@ -212,14 +213,15 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
struct hclge_mbx_pf_to_vf_cmd *req;
struct hclgevf_cmq_ring *crq;
struct hclgevf_desc *desc;
struct hclge_comm_cmq_ring *crq;
struct hclge_desc *desc;
u16 flag;
crq = &hdev->hw.cmq.crq;
crq = &hdev->hw.hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
&hdev->hw.hw.comm_state)) {
dev_info(&hdev->pdev->dev, "vf crq need init\n");
return;
}
@@ -269,7 +271,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
crq->next_to_use);
}
@@ -296,7 +298,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
&hdev->hw.hw.comm_state)) {
dev_info(&hdev->pdev->dev,
"vf crq need init in async\n");
return;