Commit 0fa1484e authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: misc updates for -next

This patchset includes some misc updates for the HNS3 ethernet driver.

[patch 1&2] separate two bloated functions.
[patch 3-5] remove some redundant code.
[patch 6-7] clean up some coding style issues.
[patch 8-10] add some debugging information; a usage sketch follows below.
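
For reference, a rough sketch of how the new debug hooks could be exercised
(the device directory name below is only a placeholder; the commands assume
the existing hns3 debugfs "cmd" file and the standard ftrace event controls):

	# dump the new MAC state/config info; output goes to the kernel log
	echo "dump reg mac" > /sys/kernel/debug/hns3/<pci-dev-name>/cmd
	dmesg | tail

	# enable the new hns3 mailbox tracepoints
	echo 1 > /sys/kernel/debug/tracing/events/hns3/enable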

Change log:
V1->V2:	remove an unnecessary initialization in [patch 1], as
	suggested by David Miller.
	fix some print format issues and the commit log in [patch 8].
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b785b06c d8355240
@@ -270,7 +270,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
		" [igu egu <port_id>] [rpu <tc_queue_num>]",
		HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
	strncat(printf_buf + strlen(printf_buf),
-		" [rtc] [ppp] [rcb] [tqp <queue_num>]]\n",
+		" [rtc] [ppp] [rcb] [tqp <queue_num>] [mac]]\n",
		HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
	dev_info(&h->pdev->dev, "%s", printf_buf);
......
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)

obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
......
@@ -733,31 +733,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
	u8 rsv3[2];
};
struct hclge_mac_vlan_add_cmd {
__le16 flags;
__le16 mac_addr_hi16;
__le32 mac_addr_lo32;
__le32 mac_addr_msk_hi32;
__le16 mac_addr_msk_lo16;
__le16 vlan_tag;
__le16 ingress_port;
__le16 egress_port;
u8 rsv[4];
};
#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0
struct hclge_mac_vlan_remove_cmd {
__le16 flags;
__le16 mac_addr_hi16;
__le32 mac_addr_lo32;
__le32 mac_addr_msk_hi32;
__le16 mac_addr_msk_lo16;
__le16 vlan_tag;
__le16 ingress_port;
__le16 egress_port;
u8 rsv[4];
};
struct hclge_vlan_filter_ctrl_cmd {
	u8 vlan_type;
	u8 vlan_fe;
......
@@ -143,7 +143,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
		return;
	}

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return;
@@ -173,6 +173,114 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
	kfree(desc_src);
}
static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
u32 loop_en;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to dump mac enable status, ret = %d\n", ret);
return;
}
req = (struct hclge_config_mac_mode_cmd *)desc.data;
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
}
static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to dump mac frame size, ret = %d\n", ret);
return;
}
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
le16_to_cpu(req->max_frm_size));
dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
}
static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
{
#define HCLGE_MAC_SPEED_SHIFT 0
#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
#define HCLGE_MAC_DUPLEX_SHIFT 7
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to dump mac speed duplex, ret = %d\n", ret);
return;
}
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "speed: %#lx\n",
hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
HCLGE_MAC_SPEED_SHIFT));
dev_info(&hdev->pdev->dev, "duplex: %#x\n",
hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
}
static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
{
hclge_dbg_dump_mac_enable_status(hdev);
hclge_dbg_dump_mac_frame_size(hdev);
hclge_dbg_dump_mac_speed_duplex(hdev);
}
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
	struct device *dev = &hdev->pdev->dev;
@@ -304,6 +412,11 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
		}
	}
if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
hclge_dbg_dump_mac(hdev);
has_dump = true;
}
	if (strncmp(cmd_buf, "dcb", 3) == 0) {
		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
		has_dump = true;
......
@@ -4822,7 +4822,8 @@ static int hclge_get_fd_allocation(struct hclge_dev *hdev,
	return ret;
}

-static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
+				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
@@ -4876,9 +4877,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
		return -EOPNOTSUPP;
	}

-	hdev->fd_cfg.proto_support =
-		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
-		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
@@ -4892,11 +4890,9 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If use max 400bit key, we can support tuples for ether type */
-	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
-		hdev->fd_cfg.proto_support |= ETHER_FLOW;
+	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
		key_cfg->tuple_active |=
			BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
-	}
	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
@@ -5006,8 +5002,6 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
		return true;

	switch (tuple_bit) {
-	case 0:
-		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
@@ -5165,9 +5159,10 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
-	unsigned int i;
-	int ret, tuple_size;
	u8 meta_data_region;
+	u8 tuple_size;
+	int ret;
+	u32 i;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
@@ -5244,172 +5239,255 @@ static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
static int hclge_fd_check_spec(struct hclge_dev *hdev, static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
struct ethtool_rx_flow_spec *fs, u32 *unused) u32 *unused_tuple)
{ {
struct ethtool_tcpip4_spec *tcp_ip4_spec; if (!spec || !unused_tuple)
struct ethtool_usrip4_spec *usr_ip4_spec;
struct ethtool_tcpip6_spec *tcp_ip6_spec;
struct ethtool_usrip6_spec *usr_ip6_spec;
struct ethhdr *ether_spec;
if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
return -EINVAL; return -EINVAL;
if (!(fs->flow_type & hdev->fd_cfg.proto_support)) *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
return -EOPNOTSUPP;
if ((fs->flow_type & FLOW_EXT) && if (!spec->ip4src)
(fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { *unused_tuple |= BIT(INNER_SRC_IP);
dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
return -EOPNOTSUPP;
}
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { if (!spec->ip4dst)
case SCTP_V4_FLOW: *unused_tuple |= BIT(INNER_DST_IP);
case TCP_V4_FLOW:
case UDP_V4_FLOW:
tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
if (!tcp_ip4_spec->ip4src)
*unused |= BIT(INNER_SRC_IP);
if (!tcp_ip4_spec->ip4dst) if (!spec->psrc)
*unused |= BIT(INNER_DST_IP); *unused_tuple |= BIT(INNER_SRC_PORT);
if (!tcp_ip4_spec->psrc) if (!spec->pdst)
*unused |= BIT(INNER_SRC_PORT); *unused_tuple |= BIT(INNER_DST_PORT);
if (!tcp_ip4_spec->pdst) if (!spec->tos)
*unused |= BIT(INNER_DST_PORT); *unused_tuple |= BIT(INNER_IP_TOS);
if (!tcp_ip4_spec->tos) return 0;
*unused |= BIT(INNER_IP_TOS); }
break; static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
case IP_USER_FLOW: u32 *unused_tuple)
usr_ip4_spec = &fs->h_u.usr_ip4_spec; {
*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | if (!spec || !unused_tuple)
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); return -EINVAL;
if (!usr_ip4_spec->ip4src) *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
*unused |= BIT(INNER_SRC_IP); BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
if (!usr_ip4_spec->ip4dst) if (!spec->ip4src)
*unused |= BIT(INNER_DST_IP); *unused_tuple |= BIT(INNER_SRC_IP);
if (!usr_ip4_spec->tos) if (!spec->ip4dst)
*unused |= BIT(INNER_IP_TOS); *unused_tuple |= BIT(INNER_DST_IP);
if (!usr_ip4_spec->proto) if (!spec->tos)
*unused |= BIT(INNER_IP_PROTO); *unused_tuple |= BIT(INNER_IP_TOS);
if (usr_ip4_spec->l4_4_bytes) if (!spec->proto)
return -EOPNOTSUPP; *unused_tuple |= BIT(INNER_IP_PROTO);
if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) if (spec->l4_4_bytes)
return -EOPNOTSUPP; return -EOPNOTSUPP;
break; if (spec->ip_ver != ETH_RX_NFC_IP4)
case SCTP_V6_FLOW: return -EOPNOTSUPP;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
BIT(INNER_IP_TOS);
/* check whether src/dst ip address used */ return 0;
if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && }
!tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP);
if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
!tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) u32 *unused_tuple)
*unused |= BIT(INNER_DST_IP); {
if (!spec || !unused_tuple)
return -EINVAL;
if (!tcp_ip6_spec->psrc) *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
*unused |= BIT(INNER_SRC_PORT); BIT(INNER_IP_TOS);
if (!tcp_ip6_spec->pdst) /* check whether src/dst ip address used */
*unused |= BIT(INNER_DST_PORT); if (!spec->ip6src[0] && !spec->ip6src[1] &&
!spec->ip6src[2] && !spec->ip6src[3])
*unused_tuple |= BIT(INNER_SRC_IP);
if (tcp_ip6_spec->tclass) if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
return -EOPNOTSUPP; !spec->ip6dst[2] && !spec->ip6dst[3])
*unused_tuple |= BIT(INNER_DST_IP);
break; if (!spec->psrc)
case IPV6_USER_FLOW: *unused_tuple |= BIT(INNER_SRC_PORT);
usr_ip6_spec = &fs->h_u.usr_ip6_spec;
*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
BIT(INNER_DST_PORT);
/* check whether src/dst ip address used */ if (!spec->pdst)
if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && *unused_tuple |= BIT(INNER_DST_PORT);
!usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP);
if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && if (spec->tclass)
!usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) return -EOPNOTSUPP;
*unused |= BIT(INNER_DST_IP);
if (!usr_ip6_spec->l4_proto) return 0;
*unused |= BIT(INNER_IP_PROTO); }
if (usr_ip6_spec->tclass) static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
return -EOPNOTSUPP; u32 *unused_tuple)
{
if (!spec || !unused_tuple)
return -EINVAL;
if (usr_ip6_spec->l4_4_bytes) *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
return -EOPNOTSUPP; BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
break; /* check whether src/dst ip address used */
case ETHER_FLOW: if (!spec->ip6src[0] && !spec->ip6src[1] &&
ether_spec = &fs->h_u.ether_spec; !spec->ip6src[2] && !spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | *unused_tuple |= BIT(INNER_SRC_IP);
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
if (is_zero_ether_addr(ether_spec->h_source)) if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
*unused |= BIT(INNER_SRC_MAC); !spec->ip6dst[2] && !spec->ip6dst[3])
*unused_tuple |= BIT(INNER_DST_IP);
if (is_zero_ether_addr(ether_spec->h_dest)) if (!spec->l4_proto)
*unused |= BIT(INNER_DST_MAC); *unused_tuple |= BIT(INNER_IP_PROTO);
if (!ether_spec->h_proto) if (spec->tclass)
*unused |= BIT(INNER_ETH_TYPE); return -EOPNOTSUPP;
break; if (spec->l4_4_bytes)
default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) { return 0;
if (fs->h_ext.vlan_etype) }
static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
{
if (!spec || !unused_tuple)
return -EINVAL;
*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
if (is_zero_ether_addr(spec->h_source))
*unused_tuple |= BIT(INNER_SRC_MAC);
if (is_zero_ether_addr(spec->h_dest))
*unused_tuple |= BIT(INNER_DST_MAC);
if (!spec->h_proto)
*unused_tuple |= BIT(INNER_ETH_TYPE);
return 0;
}
static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
struct ethtool_rx_flow_spec *fs,
u32 *unused_tuple)
{
if (fs->flow_type & FLOW_EXT) {
if (fs->h_ext.vlan_etype) {
dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
}
if (!fs->h_ext.vlan_tci) if (!fs->h_ext.vlan_tci)
*unused |= BIT(INNER_VLAN_TAG_FST); *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
if (fs->m_ext.vlan_tci) { if (fs->m_ext.vlan_tci &&
if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
return -EINVAL; dev_err(&hdev->pdev->dev,
"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
return -EINVAL;
} }
} else { } else {
*unused |= BIT(INNER_VLAN_TAG_FST); *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
} }
if (fs->flow_type & FLOW_MAC_EXT) { if (fs->flow_type & FLOW_MAC_EXT) {
if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) if (hdev->fd_cfg.fd_mode !=
HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
dev_err(&hdev->pdev->dev,
"FLOW_MAC_EXT is not supported in current fd mode!\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
}
if (is_zero_ether_addr(fs->h_ext.h_dest)) if (is_zero_ether_addr(fs->h_ext.h_dest))
*unused |= BIT(INNER_DST_MAC); *unused_tuple |= BIT(INNER_DST_MAC);
else else
*unused &= ~(BIT(INNER_DST_MAC)); *unused_tuple &= ~BIT(INNER_DST_MAC);
} }
return 0; return 0;
} }
static int hclge_fd_check_spec(struct hclge_dev *hdev,
struct ethtool_rx_flow_spec *fs,
u32 *unused_tuple)
{
u32 flow_type;
int ret;
if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
dev_err(&hdev->pdev->dev,
	"failed to config fd rules, invalid rule location: %u, max is %u.\n",
fs->location,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
return -EINVAL;
}
if ((fs->flow_type & FLOW_EXT) &&
(fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
return -EOPNOTSUPP;
}
flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
switch (flow_type) {
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
unused_tuple);
break;
case IP_USER_FLOW:
ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
unused_tuple);
break;
case SCTP_V6_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
unused_tuple);
break;
case IPV6_USER_FLOW:
ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
unused_tuple);
break;
case ETHER_FLOW:
if (hdev->fd_cfg.fd_mode !=
HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
dev_err(&hdev->pdev->dev,
"ETHER_FLOW is not supported in current fd mode!\n");
return -EOPNOTSUPP;
}
ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
unused_tuple);
break;
default:
dev_err(&hdev->pdev->dev,
"unsupported protocol type, protocol type = %#x\n",
flow_type);
return -EOPNOTSUPP;
}
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to check flow union tuple, ret = %d\n",
ret);
return ret;
}
return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
}
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
@@ -5618,7 +5696,7 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
		break;
	}

-	if ((fs->flow_type & FLOW_EXT)) {
+	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}
@@ -5673,22 +5751,23 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
	u8 action;
	int ret;

-	if (!hnae3_dev_fd_supported(hdev))
+	if (!hnae3_dev_fd_supported(hdev)) {
+		dev_err(&hdev->pdev->dev,
+			"flow table director is not supported\n");
		return -EOPNOTSUPP;
+	}

	if (!hdev->fd_en) {
-		dev_warn(&hdev->pdev->dev,
-			 "Please enable flow director first\n");
+		dev_err(&hdev->pdev->dev,
+			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
-	if (ret) {
-		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
+	if (ret)
		return ret;
-	}
	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
@@ -5729,7 +5808,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
	}

	rule->flow_type = fs->flow_type;
rule->location = fs->location; rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
@@ -5877,6 +5955,149 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
	return 0;
}
static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
struct ethtool_tcpip4_spec *spec,
struct ethtool_tcpip4_spec *spec_mask)
{
spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
spec->psrc = cpu_to_be16(rule->tuples.src_port);
spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.src_port);
spec->pdst = cpu_to_be16(rule->tuples.dst_port);
spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.dst_port);
spec->tos = rule->tuples.ip_tos;
spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
0 : rule->tuples_mask.ip_tos;
}
static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
struct ethtool_usrip4_spec *spec,
struct ethtool_usrip4_spec *spec_mask)
{
spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
spec->tos = rule->tuples.ip_tos;
spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
0 : rule->tuples_mask.ip_tos;
spec->proto = rule->tuples.ip_proto;
spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
0 : rule->tuples_mask.ip_proto;
spec->ip_ver = ETH_RX_NFC_IP4;
}
static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
struct ethtool_tcpip6_spec *spec,
struct ethtool_tcpip6_spec *spec_mask)
{
cpu_to_be32_array(spec->ip6src,
rule->tuples.src_ip, IPV6_SIZE);
cpu_to_be32_array(spec->ip6dst,
rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
IPV6_SIZE);
spec->psrc = cpu_to_be16(rule->tuples.src_port);
spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.src_port);
spec->pdst = cpu_to_be16(rule->tuples.dst_port);
spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.dst_port);
}
static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
struct ethtool_usrip6_spec *spec,
struct ethtool_usrip6_spec *spec_mask)
{
cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
cpu_to_be32_array(spec_mask->ip6src,
rule->tuples_mask.src_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
cpu_to_be32_array(spec_mask->ip6dst,
rule->tuples_mask.dst_ip, IPV6_SIZE);
spec->l4_proto = rule->tuples.ip_proto;
spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
0 : rule->tuples_mask.ip_proto;
}
static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
struct ethhdr *spec,
struct ethhdr *spec_mask)
{
ether_addr_copy(spec->h_source, rule->tuples.src_mac);
ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
if (rule->unused_tuple & BIT(INNER_SRC_MAC))
eth_zero_addr(spec_mask->h_source);
else
ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
if (rule->unused_tuple & BIT(INNER_DST_MAC))
eth_zero_addr(spec_mask->h_dest);
else
ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
0 : cpu_to_be16(rule->tuples_mask.ether_proto);
}
static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
if (fs->flow_type & FLOW_EXT) {
fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
fs->m_ext.vlan_tci =
rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
cpu_to_be16(VLAN_VID_MASK) :
cpu_to_be16(rule->tuples_mask.vlan_tag1);
}
if (fs->flow_type & FLOW_MAC_EXT) {
ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
if (rule->unused_tuple & BIT(INNER_DST_MAC))
eth_zero_addr(fs->m_u.ether_spec.h_dest);
else
ether_addr_copy(fs->m_u.ether_spec.h_dest,
rule->tuples_mask.dst_mac);
}
}
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
@@ -5909,162 +6130,34 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case SCTP_V4_FLOW: case SCTP_V4_FLOW:
case TCP_V4_FLOW: case TCP_V4_FLOW:
case UDP_V4_FLOW: case UDP_V4_FLOW:
fs->h_u.tcp_ip4_spec.ip4src = hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); &fs->m_u.tcp_ip4_spec);
fs->m_u.tcp_ip4_spec.ip4src =
rule->unused_tuple & BIT(INNER_SRC_IP) ?
0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
fs->h_u.tcp_ip4_spec.ip4dst =
cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4dst =
rule->unused_tuple & BIT(INNER_DST_IP) ?
0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
fs->m_u.tcp_ip4_spec.psrc =
rule->unused_tuple & BIT(INNER_SRC_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.src_port);
fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
fs->m_u.tcp_ip4_spec.pdst =
rule->unused_tuple & BIT(INNER_DST_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.dst_port);
fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
fs->m_u.tcp_ip4_spec.tos =
rule->unused_tuple & BIT(INNER_IP_TOS) ?
0 : rule->tuples_mask.ip_tos;
break; break;
case IP_USER_FLOW: case IP_USER_FLOW:
fs->h_u.usr_ip4_spec.ip4src = hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); &fs->m_u.usr_ip4_spec);
fs->m_u.tcp_ip4_spec.ip4src =
rule->unused_tuple & BIT(INNER_SRC_IP) ?
0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
fs->h_u.usr_ip4_spec.ip4dst =
cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
fs->m_u.usr_ip4_spec.ip4dst =
rule->unused_tuple & BIT(INNER_DST_IP) ?
0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
fs->m_u.usr_ip4_spec.tos =
rule->unused_tuple & BIT(INNER_IP_TOS) ?
0 : rule->tuples_mask.ip_tos;
fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
fs->m_u.usr_ip4_spec.proto =
rule->unused_tuple & BIT(INNER_IP_PROTO) ?
0 : rule->tuples_mask.ip_proto;
fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
break; break;
case SCTP_V6_FLOW: case SCTP_V6_FLOW:
case TCP_V6_FLOW: case TCP_V6_FLOW:
case UDP_V6_FLOW: case UDP_V6_FLOW:
cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
rule->tuples.src_ip, IPV6_SIZE); &fs->m_u.tcp_ip6_spec);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
rule->tuples_mask.src_ip, IPV6_SIZE);
cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
rule->tuples_mask.dst_ip, IPV6_SIZE);
fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
fs->m_u.tcp_ip6_spec.psrc =
rule->unused_tuple & BIT(INNER_SRC_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.src_port);
fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
fs->m_u.tcp_ip6_spec.pdst =
rule->unused_tuple & BIT(INNER_DST_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.dst_port);
break; break;
case IPV6_USER_FLOW: case IPV6_USER_FLOW:
cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
rule->tuples.src_ip, IPV6_SIZE); &fs->m_u.usr_ip6_spec);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(fs->m_u.usr_ip6_spec.ip6src, 0,
sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
rule->tuples_mask.src_ip, IPV6_SIZE);
cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
rule->tuples_mask.dst_ip, IPV6_SIZE);
fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
fs->m_u.usr_ip6_spec.l4_proto =
rule->unused_tuple & BIT(INNER_IP_PROTO) ?
0 : rule->tuples_mask.ip_proto;
break;
case ETHER_FLOW:
ether_addr_copy(fs->h_u.ether_spec.h_source,
rule->tuples.src_mac);
if (rule->unused_tuple & BIT(INNER_SRC_MAC))
eth_zero_addr(fs->m_u.ether_spec.h_source);
else
ether_addr_copy(fs->m_u.ether_spec.h_source,
rule->tuples_mask.src_mac);
ether_addr_copy(fs->h_u.ether_spec.h_dest,
rule->tuples.dst_mac);
if (rule->unused_tuple & BIT(INNER_DST_MAC))
eth_zero_addr(fs->m_u.ether_spec.h_dest);
else
ether_addr_copy(fs->m_u.ether_spec.h_dest,
rule->tuples_mask.dst_mac);
fs->h_u.ether_spec.h_proto =
cpu_to_be16(rule->tuples.ether_proto);
fs->m_u.ether_spec.h_proto =
rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
0 : cpu_to_be16(rule->tuples_mask.ether_proto);
break; break;
/* The flow type of fd rule has been checked before adding it to the rule
* list. As other flow types have been handled, it must be ETHER_FLOW
* for the default case
*/
default: default:
spin_unlock_bh(&hdev->fd_rule_lock); hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
return -EOPNOTSUPP; &fs->m_u.ether_spec);
} break;
if (fs->flow_type & FLOW_EXT) {
fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
fs->m_ext.vlan_tci =
rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
cpu_to_be16(VLAN_VID_MASK) :
cpu_to_be16(rule->tuples_mask.vlan_tag1);
} }
if (fs->flow_type & FLOW_MAC_EXT) { hclge_fd_get_ext_info(fs, rule);
ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
if (rule->unused_tuple & BIT(INNER_DST_MAC))
eth_zero_addr(fs->m_u.ether_spec.h_dest);
else
ether_addr_copy(fs->m_u.ether_spec.h_dest,
rule->tuples_mask.dst_mac);
}
	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
@@ -6202,7 +6295,6 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
-
		return -EOPNOTSUPP;
	}
@@ -6216,14 +6308,12 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
	bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
	if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		spin_unlock_bh(&hdev->fd_rule_lock);
-
		return -ENOSPC;
	}

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
-
		return -ENOMEM;
	}
......
@@ -580,7 +580,6 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
	u8 fd_mode;
	u16 max_key_length; /* use bit as unit */
-	u32 proto_support;
	u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
	u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
	struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
......
@@ -5,6 +5,9 @@
#include "hclge_mbx.h"
#include "hnae3.h"

+#define CREATE_TRACE_POINTS
+#include "hclge_trace.h"
+
static u16 hclge_errno_to_resp(int errno)
{
	return abs(errno);
@@ -90,6 +93,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,

	memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);

+	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
+
	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
@@ -674,6 +679,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)

		vport = &hdev->vport[req->mbx_src_vfid];

+		trace_hclge_pf_mbx_get(hdev, req);
+
		switch (req->msg.code) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
......
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2020 Hisilicon Limited. */
/* This must be outside ifdef _HCLGE_TRACE_H */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hns3
#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _HCLGE_TRACE_H_
#include <linux/tracepoint.h>
#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
TRACE_EVENT(hclge_pf_mbx_get,
TP_PROTO(
struct hclge_dev *hdev,
struct hclge_mbx_vf_to_pf_cmd *req),
TP_ARGS(hdev, req),
TP_STRUCT__entry(
__field(u8, vfid)
__field(u8, code)
__field(u8, subcode)
__string(pciname, pci_name(hdev->pdev))
__string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
__array(u32, mbx_data, PF_GET_MBX_LEN)
),
TP_fast_assign(
__entry->vfid = req->mbx_src_vfid;
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
__assign_str(pciname, pci_name(hdev->pdev));
__assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
TP_printk(
"%s %s vfid:%u code:%u subcode:%u data:%s",
__get_str(pciname), __get_str(devname), __entry->vfid,
__entry->code, __entry->subcode,
__print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32))
)
);
TRACE_EVENT(hclge_pf_mbx_send,
TP_PROTO(
struct hclge_dev *hdev,
struct hclge_mbx_pf_to_vf_cmd *req),
TP_ARGS(hdev, req),
TP_STRUCT__entry(
__field(u8, vfid)
__field(u16, code)
__string(pciname, pci_name(hdev->pdev))
__string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
__array(u32, mbx_data, PF_SEND_MBX_LEN)
),
TP_fast_assign(
__entry->vfid = req->dest_vfid;
__entry->code = req->msg.code;
__assign_str(pciname, pci_name(hdev->pdev));
__assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
TP_printk(
"%s %s vfid:%u code:%u data:%s",
__get_str(pciname), __get_str(devname), __entry->vfid,
__entry->code,
__print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32))
)
);
#endif /* _HCLGE_TRACE_H_ */
/* This must be outside ifdef _HCLGE_TRACE_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE hclge_trace
#include <trace/define_trace.h>
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)

obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
@@ -5,6 +5,9 @@
#include "hclgevf_main.h"
#include "hnae3.h"

+#define CREATE_TRACE_POINTS
+#include "hclgevf_trace.h"
+
static int hclgevf_resp_to_errno(u16 resp_code)
{
	return resp_code ? -resp_code : 0;
@@ -106,6 +109,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,

	memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));

+	trace_hclge_vf_mbx_send(hdev, req);
+
	/* synchronous send */
	if (need_resp) {
		mutex_lock(&hdev->mbx_resp.mbx_mutex);
@@ -179,6 +184,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
			continue;
		}

+		trace_hclge_vf_mbx_get(hdev, req);
+
		/* synchronous messages are time critical and need preferential
		 * treatment. Therefore, we need to acknowledge all the sync
		 * responses as quickly as possible so that waiting tasks do not
......
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 Hisilicon Limited. */
/* This must be outside ifdef _HCLGEVF_TRACE_H */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hns3
#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _HCLGEVF_TRACE_H_
#include <linux/tracepoint.h>
#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
TRACE_EVENT(hclge_vf_mbx_get,
TP_PROTO(
struct hclgevf_dev *hdev,
struct hclge_mbx_pf_to_vf_cmd *req),
TP_ARGS(hdev, req),
TP_STRUCT__entry(
__field(u8, vfid)
__field(u16, code)
__string(pciname, pci_name(hdev->pdev))
__string(devname, &hdev->nic.kinfo.netdev->name)
__array(u32, mbx_data, VF_GET_MBX_LEN)
),
TP_fast_assign(
__entry->vfid = req->dest_vfid;
__entry->code = req->msg.code;
__assign_str(pciname, pci_name(hdev->pdev));
__assign_str(devname, &hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
TP_printk(
"%s %s vfid:%u code:%u data:%s",
__get_str(pciname), __get_str(devname), __entry->vfid,
__entry->code,
__print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32))
)
);
TRACE_EVENT(hclge_vf_mbx_send,
TP_PROTO(
struct hclgevf_dev *hdev,
struct hclge_mbx_vf_to_pf_cmd *req),
TP_ARGS(hdev, req),
TP_STRUCT__entry(
__field(u8, vfid)
__field(u8, code)
__field(u8, subcode)
__string(pciname, pci_name(hdev->pdev))
__string(devname, &hdev->nic.kinfo.netdev->name)
__array(u32, mbx_data, VF_SEND_MBX_LEN)
),
TP_fast_assign(
__entry->vfid = req->mbx_src_vfid;
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
__assign_str(pciname, pci_name(hdev->pdev));
__assign_str(devname, &hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
TP_printk(
"%s %s vfid:%u code:%u subcode:%u data:%s",
__get_str(pciname), __get_str(devname), __entry->vfid,
__entry->code, __entry->subcode,
__print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32))
)
);
#endif /* _HCLGEVF_TRACE_H_ */
/* This must be outside ifdef _HCLGEVF_TRACE_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE hclgevf_trace
#include <trace/define_trace.h>