Commit 1574cf83 authored by David S. Miller

Merge tag 'mlx5-updates-2019-11-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-11-01

Misc updates for mlx5 netdev and core driver

1) Steering Core: Replace CRC32 internal implementation with standard
   kernel lib.
2) Steering Core: Support IPv4 and IPv6 mixed matcher.
3) Steering Core: Lockless FTE read lookups.
4) TC: Bit sized fields rewrite support.
5) FPGA: Standalone FPGA support.
6) SRIOV: Reset VF parameters configurations on SRIOV disable.
7) netdev: Dump WQs wqe descriptors on CQE with error events.
8) MISC Cleanups.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a37ac8ae 667f2646
@@ -70,7 +70,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/t
 mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
 					steering/dr_matcher.o steering/dr_rule.o \
-					steering/dr_icm_pool.o steering/dr_crc32.o \
+					steering/dr_icm_pool.o \
 					steering/dr_ste.o steering/dr_send.o \
 					steering/dr_cmd.o steering/dr_fw.o \
 					steering/dr_action.o steering/fs_dr.o
@@ -866,7 +866,7 @@ static void cmd_work_handler(struct work_struct *work)
 	if (!ent->page_queue) {
 		alloc_ret = alloc_ent(cmd);
 		if (alloc_ret < 0) {
-			mlx5_core_err(dev, "failed to allocate command entry\n");
+			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
 			if (ent->callback) {
 				ent->callback(-EAGAIN, ent->context);
 				mlx5_free_cmd_msg(dev, ent->out);
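The only functional change in this hunk is the switch to the rate-limited mlx5_core_err_rl(): when the command interface is saturated, alloc_ent() can fail for every queued command, and an unthrottled print would flood the log. A minimal sketch of the underlying pattern with the generic device-printk helper (an assumption for illustration: mlx5_core_err_rl is a thin wrapper over the ratelimited printk; the macro body is not shown in this diff):

	/* Sketch: rate-limited error print; bounded bursts per interval,
	 * excess messages within the interval are dropped. */
	dev_err_ratelimited(dev->device, "failed to allocate command entry\n");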
......
@@ -2241,13 +2241,14 @@ static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
 struct mlx5_fields {
 	u8  field;
-	u8  size;
+	u8  field_bsize;
+	u32 field_mask;
 	u32 offset;
 	u32 match_offset;
 };

-#define OFFLOAD(fw_field, size, field, off, match_field) \
-		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
+#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
+		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
 		offsetof(struct pedit_headers, field) + (off), \
 		MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
@@ -2265,18 +2266,18 @@ struct mlx5_fields {
 })

 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
-			 void *matchmaskp, int size)
+			 void *matchmaskp, u8 bsize)
 {
 	bool same = false;

-	switch (size) {
-	case sizeof(u8):
+	switch (bsize) {
+	case 8:
 		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
 		break;
-	case sizeof(u16):
+	case 16:
 		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
 		break;
-	case sizeof(u32):
+	case 32:
 		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
 		break;
 	}
@@ -2285,41 +2286,43 @@ static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
 }

 static struct mlx5_fields fields[] = {
-	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
-	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0, dmac_15_0),
-	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
-	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0, smac_15_0),
-	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0, ethertype),
-	OFFLOAD(FIRST_VID,  2, vlan.h_vlan_TCI, 0, first_vid),
-
-	OFFLOAD(IP_TTL, 1, ip4.ttl,   0, ttl_hoplimit),
-	OFFLOAD(SIPV4,  4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
-	OFFLOAD(DIPV4,  4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-
-	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
+	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
+	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
+	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
+	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
+	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
+	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
+
+	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
+	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
+	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+
+	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
 		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
-	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0,
+	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
 		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
-	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0,
+	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
 		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
-	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0,
+	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
 		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
-	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
+	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
-	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0,
+	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
-	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0,
+	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
-	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0,
+	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
-	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),
+	OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),

-	OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport),
-	OFFLOAD(TCP_DPORT, 2, tcp.dest,   0, tcp_dport),
-	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),
+	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
+	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,   0, tcp_dport),
+	/* in linux iphdr tcp_flags is 8 bits long */
+	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),

-	OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
-	OFFLOAD(UDP_DPORT, 2, udp.dest,   0, udp_dport),
+	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
+	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
 };

 /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
@@ -2332,19 +2335,17 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
 			       struct netlink_ext_ack *extack)
 {
 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
-	void *headers_c = get_match_headers_criteria(*action_flags,
-						     &parse_attr->spec);
-	void *headers_v = get_match_headers_value(*action_flags,
-						  &parse_attr->spec);
 	int i, action_size, nactions, max_actions, first, last, next_z;
-	void *s_masks_p, *a_masks_p, *vals_p;
+	void *headers_c, *headers_v, *action, *vals_p;
+	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
 	struct mlx5_fields *f;
-	u8 cmd, field_bsize;
-	u32 s_mask, a_mask;
 	unsigned long mask;
 	__be32 mask_be32;
 	__be16 mask_be16;
-	void *action;
+	u8 cmd;

+	headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
+	headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
 	set_masks = &hdrs[0].masks;
 	add_masks = &hdrs[1].masks;
@@ -2369,8 +2370,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
 		s_masks_p = (void *)set_masks + f->offset;
 		a_masks_p = (void *)add_masks + f->offset;

-		memcpy(&s_mask, s_masks_p, f->size);
-		memcpy(&a_mask, a_masks_p, f->size);
+		s_mask = *s_masks_p & f->field_mask;
+		a_mask = *a_masks_p & f->field_mask;

 		if (!s_mask && !a_mask) /* nothing to offload here */
 			continue;
@@ -2399,38 +2400,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
 			vals_p = (void *)set_vals + f->offset;
 			/* don't rewrite if we have a match on the same value */
 			if (cmp_val_mask(vals_p, s_masks_p, match_val,
-					 match_mask, f->size))
+					 match_mask, f->field_bsize))
 				skip = true;
 			/* clear to denote we consumed this field */
-			memset(s_masks_p, 0, f->size);
+			*s_masks_p &= ~f->field_mask;
 		} else {
-			u32 zero = 0;
-
 			cmd  = MLX5_ACTION_TYPE_ADD;
 			mask = a_mask;
 			vals_p = (void *)add_vals + f->offset;
 			/* add 0 is no change */
-			if (!memcmp(vals_p, &zero, f->size))
+			if ((*(u32 *)vals_p & f->field_mask) == 0)
 				skip = true;
 			/* clear to denote we consumed this field */
-			memset(a_masks_p, 0, f->size);
+			*a_masks_p &= ~f->field_mask;
 		}
 		if (skip)
 			continue;

-		field_bsize = f->size * BITS_PER_BYTE;
-
-		if (field_bsize == 32) {
+		if (f->field_bsize == 32) {
 			mask_be32 = *(__be32 *)&mask;
 			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
-		} else if (field_bsize == 16) {
+		} else if (f->field_bsize == 16) {
 			mask_be16 = *(__be16 *)&mask;
 			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
 		}

-		first = find_first_bit(&mask, field_bsize);
-		next_z = find_next_zero_bit(&mask, field_bsize, first);
-		last  = find_last_bit(&mask, field_bsize);
+		first = find_first_bit(&mask, f->field_bsize);
+		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+		last  = find_last_bit(&mask, f->field_bsize);
 		if (first < next_z && next_z < last) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "rewrite of few sub-fields isn't supported");
@@ -2443,16 +2440,22 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
 		MLX5_SET(set_action_in, action, field, f->field);

 		if (cmd == MLX5_ACTION_TYPE_SET) {
-			MLX5_SET(set_action_in, action, offset, first);
+			int start;
+
+			/* if field is bit sized it can start not from first bit */
+			start = find_first_bit((unsigned long *)&f->field_mask,
+					       f->field_bsize);
+
+			MLX5_SET(set_action_in, action, offset, first - start);
 			/* length is num of bits to be written, zero means length of 32 */
 			MLX5_SET(set_action_in, action, length, (last - first + 1));
 		}

-		if (field_bsize == 32)
+		if (f->field_bsize == 32)
 			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
-		else if (field_bsize == 16)
+		else if (f->field_bsize == 16)
 			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
-		else if (field_bsize == 8)
+		else if (f->field_bsize == 8)
 			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

 		action += action_size;
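This is the core of the bit-sized rewrite support: per-field masks (see the new IP_DSCP entry above, field_mask 0xfc) let a hardware field narrower than a byte be rewritten, and the set-action offset becomes relative to the field's first bit (first - start) instead of an absolute bit position. A small standalone sketch of that arithmetic for a full DSCP rewrite (userspace C with hypothetical helper names; the driver uses the kernel's find_first_bit()/find_last_bit()):

	#include <stdio.h>

	/* Illustrative helpers; both assume v != 0 */
	static int find_first_set(unsigned long v) { return __builtin_ctzl(v); }
	static int find_last_set(unsigned long v)  { return 63 - __builtin_clzl(v); }

	int main(void)
	{
		unsigned long field_mask = 0xfc; /* DSCP: bits 2..7 of the tos byte */
		unsigned long pedit_mask = 0xfc; /* user rewrites the whole DSCP */

		int start = find_first_set(field_mask); /* 2 */
		int first = find_first_set(pedit_mask); /* 2 */
		int last  = find_last_set(pedit_mask);  /* 7 */

		printf("offset=%d length=%d\n", first - start, last - first + 1);
		/* prints offset=0 length=6: write all 6 DSCP bits of the HW field */
		return 0;
	}

With mask 0xfc both start and first are 2, so the hardware is told to write 6 bits at offset 0 of its DSCP field, and the >> first shift above strips the two ECN bits off the value.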
@@ -3443,6 +3446,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	}

+	if (!(attr->action &
+	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+		NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
+		return -EOPNOTSUPP;
+	}
+
 	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "current firmware doesn't support split rule for port mirroring");
......
@@ -461,8 +461,14 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 			if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
 				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
 						      &sq->state)) {
+					struct mlx5e_tx_wqe_info *wi;
+					u16 ci;
+
+					ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+					wi = &sq->db.wqe_info[ci];
 					mlx5e_dump_error_cqe(sq,
 							     (struct mlx5_err_cqe *)cqe);
+					mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
 					queue_work(cq->channel->priv->wq,
 						   &sq->recover_work);
 				}
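The new dump resolves the failing WQE from the free-running consumer counter sqcc: mlx5_wq_cyc_ctr2ix() reduces the counter to a ring slot, which works because cyclic WQ sizes are powers of two. A sketch of that mapping (a hypothetical standalone helper mirroring the driver's ctr & sz_m1 idiom, not the driver function itself):

	/* Map a free-running 16-bit counter onto a cyclic ring index.
	 * sz_m1 is (ring_size - 1); ring_size must be a power of two,
	 * so the AND is a cheap modulo that also survives counter wrap. */
	static inline u16 wq_cyc_ctr2ix(u16 ctr, u16 sz_m1)
	{
		return ctr & sz_m1;
	}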
......
@@ -1831,6 +1831,15 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
 	flush_workqueue(esw->work_queue);
 }

+static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
+{
+	struct mlx5_vport *vport;
+	int i;
+
+	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+		memset(&vport->info, 0, sizeof(vport->info));
+}
+
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
@@ -1923,7 +1932,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 	return err;
 }

-void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
 {
 	int old_mode;
@@ -1952,6 +1961,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
 	}
+
+	if (clear_vf)
+		mlx5_eswitch_clear_vf_vports_info(esw);
 }

 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
int mlx5_eswitch_init(struct mlx5_core_dev *dev) int mlx5_eswitch_init(struct mlx5_core_dev *dev)
......
@@ -270,7 +270,7 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       u16 vport, u8 mac[ETH_ALEN]);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -603,7 +603,7 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
......
@@ -1369,7 +1369,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
 		return -EINVAL;
 	}

-	mlx5_eswitch_disable(esw);
+	mlx5_eswitch_disable(esw, false);
 	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
 	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
 	if (err) {
@@ -2195,7 +2195,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 {
 	int err, err1;

-	mlx5_eswitch_disable(esw);
+	mlx5_eswitch_disable(esw, false);
 	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
......
@@ -35,11 +35,11 @@
 #include <linux/mlx5/driver.h>

-enum mlx5_fpga_device_id {
-	MLX5_FPGA_DEVICE_UNKNOWN = 0,
-	MLX5_FPGA_DEVICE_KU040 = 1,
-	MLX5_FPGA_DEVICE_KU060 = 2,
-	MLX5_FPGA_DEVICE_KU060_2 = 3,
+enum mlx5_fpga_id {
+	MLX5_FPGA_NEWTON = 0,
+	MLX5_FPGA_EDISON = 1,
+	MLX5_FPGA_MORSE = 2,
+	MLX5_FPGA_MORSEQ = 3,
 };

 enum mlx5_fpga_image {
......
@@ -81,19 +81,28 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image)
 	}
 }

-static const char *mlx5_fpga_device_name(u32 device)
+static const char *mlx5_fpga_name(u32 fpga_id)
 {
-	switch (device) {
-	case MLX5_FPGA_DEVICE_KU040:
-		return "ku040";
-	case MLX5_FPGA_DEVICE_KU060:
-		return "ku060";
-	case MLX5_FPGA_DEVICE_KU060_2:
-		return "ku060_2";
-	case MLX5_FPGA_DEVICE_UNKNOWN:
-	default:
-		return "unknown";
+	static char ret[32];
+
+	switch (fpga_id) {
+	case MLX5_FPGA_NEWTON:
+		return "Newton";
+	case MLX5_FPGA_EDISON:
+		return "Edison";
+	case MLX5_FPGA_MORSE:
+		return "Morse";
+	case MLX5_FPGA_MORSEQ:
+		return "MorseQ";
 	}
+
+	snprintf(ret, sizeof(ret), "Unknown %d", fpga_id);
+	return ret;
+}
+
+static int mlx5_is_fpga_lookaside(u32 fpga_id)
+{
+	return fpga_id != MLX5_FPGA_NEWTON && fpga_id != MLX5_FPGA_EDISON;
 }

 static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
@@ -110,8 +119,12 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
 	fdev->last_admin_image = query.admin_image;
 	fdev->last_oper_image = query.oper_image;

-	mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n",
-		      query.status, query.admin_image, query.oper_image);
+	mlx5_fpga_info(fdev, "Status %u; Admin image %u; Oper image %u\n",
+		       query.status, query.admin_image, query.oper_image);
+
+	/* for FPGA lookaside projects FPGA load status is not important */
+	if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id)))
+		return 0;

 	if (query.status != MLX5_FPGA_STATUS_SUCCESS) {
 		mlx5_fpga_err(fdev, "%s image failed to load; status %u\n",
@@ -167,25 +180,30 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
 	struct mlx5_fpga_device *fdev = mdev->fpga;
 	unsigned int max_num_qps;
 	unsigned long flags;
-	u32 fpga_device_id;
+	u32 fpga_id;
 	int err;

 	if (!fdev)
 		return 0;

-	err = mlx5_fpga_device_load_check(fdev);
+	err = mlx5_fpga_caps(fdev->mdev);
 	if (err)
 		goto out;

-	err = mlx5_fpga_caps(fdev->mdev);
+	err = mlx5_fpga_device_load_check(fdev);
 	if (err)
 		goto out;

-	fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device);
-	mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x version %d\n",
-		       mlx5_fpga_device_name(fpga_device_id),
-		       fpga_device_id,
+	fpga_id = MLX5_CAP_FPGA(fdev->mdev, fpga_id);
+	mlx5_fpga_info(fdev, "FPGA card %s:%u\n", mlx5_fpga_name(fpga_id), fpga_id);
+
+	/* No QPs if FPGA does not participate in net processing */
+	if (mlx5_is_fpga_lookaside(fpga_id))
+		goto out;
+
+	mlx5_fpga_info(fdev, "%s(%d): image, version %u; SBU %06x:%04x version %d\n",
 		       mlx5_fpga_image_name(fdev->last_oper_image),
+		       fdev->last_oper_image,
 		       MLX5_CAP_FPGA(fdev->mdev, image_version),
 		       MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id),
 		       MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id),
@@ -264,6 +282,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
 	if (!fdev)
 		return;

+	if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id)))
+		return;
+
 	spin_lock_irqsave(&fdev->state_lock, flags);
 	if (fdev->state != MLX5_FPGA_STATUS_SUCCESS) {
 		spin_unlock_irqrestore(&fdev->state_lock, flags);
......
@@ -531,9 +531,16 @@ static void del_hw_fte(struct fs_node *node)
 	}
 }

+static void del_sw_fte_rcu(struct rcu_head *head)
+{
+	struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
+	struct mlx5_flow_steering *steering = get_steering(&fte->node);
+
+	kmem_cache_free(steering->ftes_cache, fte);
+}
+
 static void del_sw_fte(struct fs_node *node)
 {
-	struct mlx5_flow_steering *steering = get_steering(node);
 	struct mlx5_flow_group *fg;
 	struct fs_fte *fte;
 	int err;
@@ -546,7 +553,8 @@ static void del_sw_fte(struct fs_node *node)
 				     rhash_fte);
 	WARN_ON(err);
 	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
-	kmem_cache_free(steering->ftes_cache, fte);
+
+	call_rcu(&fte->rcu, del_sw_fte_rcu);
 }

 static void del_hw_flow_group(struct fs_node *node)
@@ -1623,22 +1631,47 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
 }

 static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g,
-		  const u32 *match_value,
-		  bool take_write)
+lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
 {
 	struct fs_fte *fte_tmp;

-	if (take_write)
-		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-	else
-		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
-	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
-					 rhash_fte);
+	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
+	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
+	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+		fte_tmp = NULL;
+		goto out;
+	}
+
+	if (!fte_tmp->node.active) {
+		tree_put_node(&fte_tmp->node, false);
+		fte_tmp = NULL;
+		goto out;
+	}
+
+	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
+out:
+	up_write_ref_node(&g->node, false);
+	return fte_tmp;
+}
+
+static struct fs_fte *
+lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
+{
+	struct fs_fte *fte_tmp;
+
+	if (!tree_get_node(&g->node))
+		return NULL;
+
+	rcu_read_lock();
+	fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
 	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+		rcu_read_unlock();
 		fte_tmp = NULL;
 		goto out;
 	}
+	rcu_read_unlock();
+
 	if (!fte_tmp->node.active) {
 		tree_put_node(&fte_tmp->node, false);
 		fte_tmp = NULL;
@@ -1646,14 +1679,21 @@ lookup_fte_locked(struct mlx5_flow_group *g,
 	}

 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
 out:
-	if (take_write)
-		up_write_ref_node(&g->node, false);
-	else
-		up_read_ref_node(&g->node);
+	tree_put_node(&g->node, false);
 	return fte_tmp;
 }

+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
+{
+	if (write)
+		return lookup_fte_for_write_locked(g, match_value);
+	else
+		return lookup_fte_for_read_locked(g, match_value);
+}
+
 static struct mlx5_flow_handle *
 try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		       struct list_head *match_head,
@@ -1814,6 +1854,13 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 		return rule;
 	}

+	fte = alloc_fte(ft, spec, flow_act);
+	if (IS_ERR(fte)) {
+		up_write_ref_node(&ft->node, false);
+		err = PTR_ERR(fte);
+		goto err_alloc_fte;
+	}
+
 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
 	up_write_ref_node(&ft->node, false);
@@ -1821,17 +1868,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	if (err)
 		goto err_release_fg;

-	fte = alloc_fte(ft, spec, flow_act);
-	if (IS_ERR(fte)) {
-		err = PTR_ERR(fte);
-		goto err_release_fg;
-	}
-
 	err = insert_fte(g, fte);
-	if (err) {
-		kmem_cache_free(steering->ftes_cache, fte);
+	if (err)
 		goto err_release_fg;
-	}

 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
 	up_write_ref_node(&g->node, false);
@@ -1843,6 +1882,8 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,

 err_release_fg:
 	up_write_ref_node(&g->node, false);
+	kmem_cache_free(steering->ftes_cache, fte);
+err_alloc_fte:
 	tree_put_node(&g->node, false);
 	return ERR_PTR(err);
 }
......
@@ -202,6 +202,7 @@ struct fs_fte {
 	enum fs_fte_status		status;
 	struct mlx5_fc			*counter;
 	struct rhash_head		hash;
+	struct rcu_head			rcu;
 	int				modify_mask;
 };
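Together these hunks implement the lockless FTE read lookups: the read path now walks the hash table under rcu_read_lock() instead of taking the group's read lock, and that is only safe because del_sw_fte() frees FTEs through call_rcu(), so a concurrent reader can never dereference freed memory; the reference taken via tree_get_node() then keeps the entry alive past the critical section. A minimal self-contained sketch of the same pattern with hypothetical types (entry, entry_get, entry_del are illustrative names, not driver symbols):

	#include <linux/rcupdate.h>
	#include <linux/refcount.h>
	#include <linux/rhashtable.h>
	#include <linux/slab.h>

	struct entry {
		struct rhash_head hash;
		refcount_t refcount;
		u32 key;
		struct rcu_head rcu;
	};

	static void entry_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct entry, rcu));
	}

	/* Writer side: unlink under the writer's lock, then defer the
	 * free until all pre-existing RCU readers have finished. */
	static void entry_del(struct rhashtable *ht, struct entry *e,
			      const struct rhashtable_params params)
	{
		rhashtable_remove_fast(ht, &e->hash, params);
		call_rcu(&e->rcu, entry_free_rcu);
	}

	/* Reader side: lock-free lookup; only a successful refcount pin
	 * (tree_get_node() in the driver) makes the entry usable after
	 * rcu_read_unlock(). */
	static struct entry *entry_get(struct rhashtable *ht, u32 *key,
				       const struct rhashtable_params params)
	{
		struct entry *e;

		rcu_read_lock();
		e = rhashtable_lookup(ht, key, params);
		if (e && !refcount_inc_not_zero(&e->refcount))
			e = NULL;
		rcu_read_unlock();
		return e;
	}

The trade-off is classic RCU: reads become lock-free, while writers pay a deferred-free cost and must still tolerate racing with a just-unlinked or not-yet-active entry, which is what the node.active check in the driver code handles.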
......
@@ -145,34 +145,35 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 {
 	*port1 = 1;
 	*port2 = 2;
-	if (!tracker->netdev_state[0].tx_enabled ||
-	    !tracker->netdev_state[0].link_up) {
+	if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
+	    !tracker->netdev_state[MLX5_LAG_P1].link_up) {
 		*port1 = 2;
 		return;
 	}

-	if (!tracker->netdev_state[1].tx_enabled ||
-	    !tracker->netdev_state[1].link_up)
+	if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
+	    !tracker->netdev_state[MLX5_LAG_P2].link_up)
 		*port2 = 1;
 }

 void mlx5_modify_lag(struct mlx5_lag *ldev,
 		     struct lag_tracker *tracker)
 {
-	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
 	u8 v2p_port1, v2p_port2;
 	int err;

 	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
 				       &v2p_port2);

-	if (v2p_port1 != ldev->v2p_map[0] ||
-	    v2p_port2 != ldev->v2p_map[1]) {
-		ldev->v2p_map[0] = v2p_port1;
-		ldev->v2p_map[1] = v2p_port2;
+	if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
+	    v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
+		ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
+		ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;

 		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
-			       ldev->v2p_map[0], ldev->v2p_map[1]);
+			       ldev->v2p_map[MLX5_LAG_P1],
+			       ldev->v2p_map[MLX5_LAG_P2]);

 		err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
 		if (err)
@@ -185,16 +186,17 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
 static int mlx5_create_lag(struct mlx5_lag *ldev,
 			   struct lag_tracker *tracker)
 {
-	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
 	int err;

-	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
-				       &ldev->v2p_map[1]);
+	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
+				       &ldev->v2p_map[MLX5_LAG_P2]);

 	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
-		       ldev->v2p_map[0], ldev->v2p_map[1]);
+		       ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]);

-	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
+	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
+				  ldev->v2p_map[MLX5_LAG_P2]);
 	if (err)
 		mlx5_core_err(dev0,
 			      "Failed to create LAG (%d)\n",
@@ -207,7 +209,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
 		      u8 flags)
 {
 	bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
-	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
 	int err;

 	err = mlx5_create_lag(ldev, tracker);
@@ -229,7 +231,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
 static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 {
-	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
 	bool roce_lag = __mlx5_lag_is_roce(ldev);
 	int err;
@@ -252,14 +254,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
 {
-	if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+	if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
 		return false;

 #ifdef CONFIG_MLX5_ESWITCH
-	return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+	return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
+				   ldev->pf[MLX5_LAG_P2].dev);
 #else
-	return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) &&
-		!mlx5_sriov_is_enabled(ldev->pf[1].dev));
+	return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
+		!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
 #endif
 }
@@ -285,8 +288,8 @@ static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
 static void mlx5_do_bond(struct mlx5_lag *ldev)
 {
-	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	struct lag_tracker tracker;
 	bool do_bond, roce_lag;
 	int err;
@@ -692,10 +695,11 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 		goto unlock;

 	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
-		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
-		       ldev->pf[0].netdev : ldev->pf[1].netdev;
+		ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
+		       ldev->pf[MLX5_LAG_P1].netdev :
+		       ldev->pf[MLX5_LAG_P2].netdev;
 	} else {
-		ndev = ldev->pf[0].netdev;
+		ndev = ldev->pf[MLX5_LAG_P1].netdev;
 	}
 	if (ndev)
 		dev_hold(ndev);
@@ -717,7 +721,8 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		return true;

 	ldev = mlx5_lag_dev_get(dev);
-	if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
+	if (!ldev || !__mlx5_lag_is_roce(ldev) ||
+	    ldev->pf[MLX5_LAG_P1].dev == dev)
 		return true;

 	/* If bonded, we do not add an IB device for PF1. */
@@ -746,11 +751,11 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 	ldev = mlx5_lag_dev_get(dev);
 	if (ldev && __mlx5_lag_is_roce(ldev)) {
 		num_ports = MLX5_MAX_PORTS;
-		mdev[0] = ldev->pf[0].dev;
-		mdev[1] = ldev->pf[1].dev;
+		mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
+		mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
 	} else {
 		num_ports = 1;
-		mdev[0] = dev;
+		mdev[MLX5_LAG_P1] = dev;
 	}

 	for (i = 0; i < num_ports; ++i) {
......
@@ -7,6 +7,11 @@
 #include "mlx5_core.h"
 #include "lag_mp.h"

+enum {
+	MLX5_LAG_P1,
+	MLX5_LAG_P2,
+};
+
 enum {
 	MLX5_LAG_FLAG_ROCE   = 1 << 0,
 	MLX5_LAG_FLAG_SRIOV  = 1 << 1,
......
@@ -11,10 +11,11 @@
 static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
 {
-	if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+	if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
 		return false;

-	return mlx5_esw_multipath_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+	return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
+					 ldev->pf[MLX5_LAG_P2].dev);
 }

 static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
@@ -43,7 +44,8 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
  *     2 - set affinity to port 2.
  *
  **/
-static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
+static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
+				       enum mlx5_lag_port_affinity port)
 {
 	struct lag_tracker tracker;
@@ -51,37 +53,37 @@ static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
 		return;

 	switch (port) {
-	case 0:
-		tracker.netdev_state[0].tx_enabled = true;
-		tracker.netdev_state[1].tx_enabled = true;
-		tracker.netdev_state[0].link_up = true;
-		tracker.netdev_state[1].link_up = true;
+	case MLX5_LAG_NORMAL_AFFINITY:
+		tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
+		tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
+		tracker.netdev_state[MLX5_LAG_P1].link_up = true;
+		tracker.netdev_state[MLX5_LAG_P2].link_up = true;
 		break;
-	case 1:
-		tracker.netdev_state[0].tx_enabled = true;
-		tracker.netdev_state[0].link_up = true;
-		tracker.netdev_state[1].tx_enabled = false;
-		tracker.netdev_state[1].link_up = false;
+	case MLX5_LAG_P1_AFFINITY:
+		tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
+		tracker.netdev_state[MLX5_LAG_P1].link_up = true;
+		tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
+		tracker.netdev_state[MLX5_LAG_P2].link_up = false;
 		break;
-	case 2:
-		tracker.netdev_state[0].tx_enabled = false;
-		tracker.netdev_state[0].link_up = false;
-		tracker.netdev_state[1].tx_enabled = true;
-		tracker.netdev_state[1].link_up = true;
+	case MLX5_LAG_P2_AFFINITY:
+		tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
+		tracker.netdev_state[MLX5_LAG_P1].link_up = false;
+		tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
+		tracker.netdev_state[MLX5_LAG_P2].link_up = true;
 		break;
 	default:
-		mlx5_core_warn(ldev->pf[0].dev, "Invalid affinity port %d",
-			       port);
+		mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+			       "Invalid affinity port %d", port);
 		return;
 	}

-	if (tracker.netdev_state[0].tx_enabled)
-		mlx5_notifier_call_chain(ldev->pf[0].dev->priv.events,
+	if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
+		mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
 					 MLX5_DEV_EVENT_PORT_AFFINITY,
 					 (void *)0);

-	if (tracker.netdev_state[1].tx_enabled)
-		mlx5_notifier_call_chain(ldev->pf[1].dev->priv.events,
+	if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
+		mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
 					 MLX5_DEV_EVENT_PORT_AFFINITY,
 					 (void *)0);
@@ -141,11 +143,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
 	/* Verify next hops are ports of the same hca */
 	fib_nh0 = fib_info_nh(fi, 0);
 	fib_nh1 = fib_info_nh(fi, 1);
-	if (!(fib_nh0->fib_nh_dev == ldev->pf[0].netdev &&
-	      fib_nh1->fib_nh_dev == ldev->pf[1].netdev) &&
-	    !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev &&
-	      fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) {
-		mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n");
+	if (!(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev &&
+	      fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev) &&
+	    !(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev &&
+	      fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev)) {
+		mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+			       "Multipath offload require two ports of the same HCA\n");
 		return;
 	}
@@ -157,7 +160,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
 		mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH);
 	}

-	mlx5_lag_set_port_affinity(ldev, 0);
+	mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
 	mp->mfi = fi;
 }
@@ -182,7 +185,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
 		}
 	} else if (event == FIB_EVENT_NH_ADD &&
 		   fib_info_num_path(fi) == 2) {
-		mlx5_lag_set_port_affinity(ldev, 0);
+		mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
 	}
 }
@@ -267,8 +270,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
 			return notifier_from_errno(-EINVAL);
 		}
 		fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
-		if (fib_dev != ldev->pf[0].netdev &&
-		    fib_dev != ldev->pf[1].netdev) {
+		if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
+		    fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
 			return NOTIFY_DONE;
 		}
 		fib_work = mlx5_lag_init_fib_work(ldev, event);
......
@@ -7,6 +7,12 @@
 #include "lag.h"
 #include "mlx5_core.h"

+enum mlx5_lag_port_affinity {
+	MLX5_LAG_NORMAL_AFFINITY,
+	MLX5_LAG_P1_AFFINITY,
+	MLX5_LAG_P2_AFFINITY,
+};
+
 struct lag_mp {
 	struct notifier_block     fib_nb;
 	struct fib_info           *mfi; /* used in tracking fib events */
......
@@ -1228,8 +1228,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 {
-	int err = 0;
-
 	if (cleanup) {
 		mlx5_unregister_device(dev);
 		mlx5_drain_health_wq(dev);
@@ -1257,7 +1255,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 	mlx5_function_teardown(dev, cleanup);
 out:
 	mutex_unlock(&dev->intf_state_mutex);
-	return err;
+	return 0;
 }

 static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
......
@@ -108,7 +108,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 	return 0;
 }

-static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
+static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev, bool clear_vf)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 	int num_vfs = pci_num_vf(dev->pdev);
@@ -127,7 +127,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 	}

 	if (MLX5_ESWITCH_MANAGER(dev))
-		mlx5_eswitch_disable(dev->priv.eswitch);
+		mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);

 	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -147,7 +147,7 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
 	err = pci_enable_sriov(pdev, num_vfs);
 	if (err) {
 		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-		mlx5_device_disable_sriov(dev);
+		mlx5_device_disable_sriov(dev, true);
 	}
 	return err;
 }
@@ -157,7 +157,7 @@ static void mlx5_sriov_disable(struct pci_dev *pdev)
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

 	pci_disable_sriov(pdev);
-	mlx5_device_disable_sriov(dev);
+	mlx5_device_disable_sriov(dev, true);
 }

 int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -192,7 +192,7 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
 	if (!mlx5_core_is_pf(dev))
 		return;

-	mlx5_device_disable_sriov(dev);
+	mlx5_device_disable_sriov(dev, false);
 }

 static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
/* Copyright (c) 2011-2015 Stephan Brumme. All rights reserved.
* Slicing-by-16 contributed by Bulat Ziganshin
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the author be held liable for any damages arising from the
* use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software.
* 2. If you use this software in a product, an acknowledgment in the product
* documentation would be appreciated but is not required.
* 3. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* Taken from http://create.stephan-brumme.com/crc32/ and adapted.
*/
#include "dr_types.h"
#define DR_STE_CRC_POLY 0xEDB88320L
static u32 dr_ste_crc_tab32[8][256];
static void dr_crc32_calc_lookup_entry(u32 (*tbl)[256], u8 i, u8 j)
{
tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff];
}
void mlx5dr_crc32_init_table(void)
{
u32 crc, i, j;
for (i = 0; i < 256; i++) {
crc = i;
for (j = 0; j < 8; j++) {
if (crc & 0x00000001L)
crc = (crc >> 1) ^ DR_STE_CRC_POLY;
else
crc = crc >> 1;
}
dr_ste_crc_tab32[0][i] = crc;
}
/* Init CRC lookup tables according to crc_slice_8 algorithm */
for (i = 0; i < 256; i++) {
dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 1, i);
dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 2, i);
dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 3, i);
dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 4, i);
dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 5, i);
dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 6, i);
dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 7, i);
}
}
/* Compute CRC32 (Slicing-by-8 algorithm) */
u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length)
{
const u32 *curr = (const u32 *)input_data;
const u8 *curr_char;
u32 crc = 0, one, two;
if (!input_data)
return 0;
/* Process eight bytes at once (Slicing-by-8) */
while (length >= 8) {
one = *curr++ ^ crc;
two = *curr++;
crc = dr_ste_crc_tab32[0][(two >> 24) & 0xff]
^ dr_ste_crc_tab32[1][(two >> 16) & 0xff]
^ dr_ste_crc_tab32[2][(two >> 8) & 0xff]
^ dr_ste_crc_tab32[3][two & 0xff]
^ dr_ste_crc_tab32[4][(one >> 24) & 0xff]
^ dr_ste_crc_tab32[5][(one >> 16) & 0xff]
^ dr_ste_crc_tab32[6][(one >> 8) & 0xff]
^ dr_ste_crc_tab32[7][one & 0xff];
length -= 8;
}
curr_char = (const u8 *)curr;
/* Remaining 1 to 7 bytes (standard algorithm) */
while (length-- != 0)
crc = (crc >> 8) ^ dr_ste_crc_tab32[0][(crc & 0xff)
^ *curr_char++];
return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
}
@@ -326,9 +326,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 		goto uninit_resourses;
 	}

-	/* Init CRC table for htbl CRC calculation */
-	mlx5dr_crc32_init_table();
-
 	return dmn;

 uninit_resourses:
......
@@ -146,17 +146,15 @@ dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn)
 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
 				   struct mlx5dr_matcher_rx_tx *nic_matcher,
-				   bool ipv6)
+				   enum mlx5dr_ipv outer_ipv,
+				   enum mlx5dr_ipv inner_ipv)
 {
-	if (ipv6) {
-		nic_matcher->ste_builder = nic_matcher->ste_builder6;
-		nic_matcher->num_of_builders = nic_matcher->num_of_builders6;
-	} else {
-		nic_matcher->ste_builder = nic_matcher->ste_builder4;
-		nic_matcher->num_of_builders = nic_matcher->num_of_builders4;
-	}
+	nic_matcher->ste_builder =
+		nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
+	nic_matcher->num_of_builders =
+		nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];

-	if (!nic_matcher->num_of_builders) {
+	if (!nic_matcher->ste_builder) {
 		mlx5dr_dbg(matcher->tbl->dmn,
 			   "Rule not supported on this matcher due to IP related fields\n");
 		return -EINVAL;
@@ -167,26 +165,19 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
 static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 				       struct mlx5dr_matcher_rx_tx *nic_matcher,
-				       bool ipv6)
+				       enum mlx5dr_ipv outer_ipv,
+				       enum mlx5dr_ipv inner_ipv)
 {
 	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 	struct mlx5dr_match_param mask = {};
 	struct mlx5dr_match_misc3 *misc3;
 	struct mlx5dr_ste_build *sb;
-	u8 *num_of_builders;
 	bool inner, rx;
 	int idx = 0;
 	int ret, i;

-	if (ipv6) {
-		sb = nic_matcher->ste_builder6;
-		num_of_builders = &nic_matcher->num_of_builders6;
-	} else {
-		sb = nic_matcher->ste_builder4;
-		num_of_builders = &nic_matcher->num_of_builders4;
-	}
+	sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];

 	rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;

 	/* Create a temporary mask to track and clear used mask fields */
@@ -249,7 +240,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 		if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
 			mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);

-		if (ipv6) {
+		if (outer_ipv == DR_RULE_IPV6) {
 			if (dr_mask_is_dst_addr_set(&mask.outer))
 				mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
 								 inner, rx);
@@ -325,7 +316,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 		if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
 			mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);

-		if (ipv6) {
+		if (inner_ipv == DR_RULE_IPV6) {
 			if (dr_mask_is_dst_addr_set(&mask.inner))
 				mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
 								 inner, rx);
@@ -373,7 +364,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 		}
 	}

-	*num_of_builders = idx;
+	nic_matcher->ste_builder = sb;
+	nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;

 	return 0;
 }
@@ -524,24 +516,33 @@ static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
 	}
 }

-static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
-			       struct mlx5dr_matcher_rx_tx *nic_matcher)
+static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
+					   struct mlx5dr_matcher_rx_tx *nic_matcher)
 {
 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-	int ret, ret_v4, ret_v6;

-	ret_v4 = dr_matcher_set_ste_builders(matcher, nic_matcher, false);
-	ret_v6 = dr_matcher_set_ste_builders(matcher, nic_matcher, true);
+	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
+	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
+	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
+	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);

-	if (ret_v4 && ret_v6) {
+	if (!nic_matcher->ste_builder) {
 		mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
 		return -EINVAL;
 	}

-	if (!ret_v4)
-		nic_matcher->ste_builder = nic_matcher->ste_builder4;
-	else
-		nic_matcher->ste_builder = nic_matcher->ste_builder6;
+	return 0;
+}
+
+static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
+			       struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+	int ret;
+
+	ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
+	if (ret)
+		return ret;

 	nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
 						      DR_CHUNK_SIZE_1,
......
@@ -954,12 +954,12 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
 	return 0;
 }

-static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
+static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
 {
-	return (param->outer.ip_version == 6 ||
-		param->inner.ip_version == 6 ||
-		param->outer.ethertype == ETH_P_IPV6 ||
-		param->inner.ethertype == ETH_P_IPV6);
+	if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
+		return DR_RULE_IPV6;
+
+	return DR_RULE_IPV4;
 }

 static bool dr_rule_skip(enum mlx5dr_domain_type domain,
@@ -1023,7 +1023,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,

 	ret = mlx5dr_matcher_select_builders(matcher,
 					     nic_matcher,
-					     dr_rule_is_ipv6(param));
+					     dr_rule_get_ipv(&param->outer),
+					     dr_rule_get_ipv(&param->inner));
 	if (ret)
 		goto out_err;
......
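With the mixed matcher, the outer and inner headers are classified independently instead of being folded into one IPv6 flag, and dr_rule_create_rule_nic passes both results down. The body of mlx5dr_matcher_select_builders is outside this diff; below is a minimal sketch, assuming it simply resolves the (outer, inner) pair against the per-combination arrays added to struct mlx5dr_matcher_rx_tx:

/* Sketch only -- assumes select_builders just indexes the new arrays.
 * A zero builder count would mean no STEs fit this IP-version mix
 * for the matcher's mask.
 */
static int select_builders_sketch(struct mlx5dr_matcher_rx_tx *nic_matcher,
				  enum mlx5dr_ipv outer_ipv,
				  enum mlx5dr_ipv inner_ipv)
{
	nic_matcher->ste_builder =
		nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
	nic_matcher->num_of_builders =
		nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];

	return nic_matcher->num_of_builders ? 0 : -EINVAL;
}

Precomputing all four combinations at matcher init keeps rule insertion cheap: selecting builders is then just two array loads per rule.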
@@ -2,6 +2,7 @@
 /* Copyright (c) 2019 Mellanox Technologies. */
 
 #include <linux/types.h>
+#include <linux/crc32.h>
 #include "dr_types.h"
 
 #define DR_STE_CRC_POLY 0xEDB88320L
@@ -107,6 +108,13 @@ struct dr_hw_ste_format {
 	u8 mask[DR_STE_SIZE_MASK];
 };
 
+static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
+{
+	u32 crc = crc32(0, input_data, length);
+
+	return htonl(crc);
+}
+
 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
 {
 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -128,7 +136,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
 		bit = bit >> 1;
 	}
 
-	crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
+	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
 	index = crc32 & (htbl->chunk->num_of_entries - 1);
 
 	return index;
...
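dr_ste_crc32_calc() above wraps the kernel's crc32() (crc32_le, the 0xEDB88320 polynomial named by DR_STE_CRC_POLY) and byte-swaps the result with htonl(), apparently so the hash indexes stay consistent with those of the removed slice-by-8 implementation. Below is a userspace sketch of the index computation using zlib's crc32(), which uses the same polynomial; the tag size and table size are illustrative stand-ins, not the driver's values:

/* Userspace illustration of the hash-index path. zlib's crc32() applies
 * the standard pre/post inversion, whereas the kernel helper is seeded
 * with a raw 0 -- so the exact values differ; only the shape of the
 * computation is mirrored here.
 */
#include <arpa/inet.h>		/* htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>		/* crc32(); link with -lz */

#define DEMO_TAG_SIZE 32	/* stand-in for DR_STE_SIZE_TAG */

static uint32_t demo_hash_index(const uint8_t *masked_tag, uint32_t num_of_entries)
{
	uint32_t crc = htonl(crc32(0L, masked_tag, DEMO_TAG_SIZE));

	/* num_of_entries is a power of two, so the mask is an exact modulo */
	return crc & (num_of_entries - 1);
}

int main(void)
{
	uint8_t tag[DEMO_TAG_SIZE];

	memset(tag, 0xab, sizeof(tag));
	printf("index: %u\n", demo_hash_index(tag, 1u << 16));
	return 0;
}

Because the table size is a power of two, every CRC value maps to a valid bucket without a division on the lookup path.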
@@ -106,6 +106,12 @@ enum mlx5dr_action_type {
 	DR_ACTION_TYP_MAX,
 };
 
+enum mlx5dr_ipv {
+	DR_RULE_IPV4,
+	DR_RULE_IPV6,
+	DR_RULE_IPV_MAX,
+};
+
 struct mlx5dr_icm_pool;
 struct mlx5dr_icm_chunk;
 struct mlx5dr_icm_bucket;
@@ -679,11 +685,11 @@ struct mlx5dr_matcher_rx_tx {
 	struct mlx5dr_ste_htbl *s_htbl;
 	struct mlx5dr_ste_htbl *e_anchor;
 	struct mlx5dr_ste_build *ste_builder;
-	struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES];
-	struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES];
+	struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
+					       [DR_RULE_IPV_MAX]
+					       [DR_RULE_MAX_STES];
 	u8 num_of_builders;
-	u8 num_of_builders4;
-	u8 num_of_builders6;
+	u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
 	u64 default_icm_addr;
 	struct mlx5dr_table_rx_tx *nic_tbl;
 };
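DR_RULE_IPV_MAX closes the enum so the builder tables stay sized in lock-step with it, and it makes walking every (outer, inner) combination natural: the four explicit calls in dr_matcher_set_all_ste_builders earlier in this diff could equally be written as a loop. A hypothetical equivalent, shown only to illustrate the indexing:

	enum mlx5dr_ipv outer, inner;

	/* Hypothetical rewrite of the four explicit calls as a loop
	 * over the enum terminator; illustration only.
	 */
	for (outer = DR_RULE_IPV4; outer < DR_RULE_IPV_MAX; outer++)
		for (inner = DR_RULE_IPV4; inner < DR_RULE_IPV_MAX; inner++)
			dr_matcher_set_ste_builders(matcher, nic_matcher,
						    outer, inner);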
@@ -812,7 +818,8 @@ mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
 
 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
 				   struct mlx5dr_matcher_rx_tx *nic_matcher,
-				   bool ipv6);
+				   enum mlx5dr_ipv outer_ipv,
+				   enum mlx5dr_ipv inner_ipv);
 
 static inline u32
 mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
@@ -962,9 +969,6 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
 			   struct mlx5dr_match_param *set_param,
 			   struct mlx5dr_match_parameters *mask);
 
-void mlx5dr_crc32_init_table(void);
-u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length);
-
 struct mlx5dr_qp {
 	struct mlx5_core_dev *mdev;
 	struct mlx5_wq_qp wq;
...
@@ -34,26 +34,6 @@
 #include "wq.h"
 #include "mlx5_core.h"
 
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
-{
-	return (u32)wq->fbc.sz_m1 + 1;
-}
-
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
-{
-	return wq->fbc.sz_m1 + 1;
-}
-
-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
-{
-	return wq->fbc.log_stride;
-}
-
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
-{
-	return (u32)wq->fbc.sz_m1 + 1;
-}
-
 static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
 {
 	return ((u32)1 << log_sz) << log_stride;
@@ -96,6 +76,24 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	return err;
 }
 
+void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
+{
+	size_t len;
+	void *wqe;
+
+	if (!net_ratelimit())
+		return;
+
+	nstrides = max_t(u8, nstrides, 1);
+
+	len = nstrides << wq->fbc.log_stride;
+	wqe = mlx5_wq_cyc_get_wqe(wq, ix);
+
+	pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
+		mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
+	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
+}
+
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
...
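mlx5_wq_cyc_wqe_dump() is gated on net_ratelimit() so a burst of error completions cannot flood the log, and it clamps nstrides to at least one stride. A hypothetical call site from an RX error path follows; the cqe plumbing is illustrative, while mlx5_wq_cyc_ctr2ix() and the dump helper itself are the real wq.h API:

/* Hypothetical error-CQE handler: locate the WQE the completion points
 * at and dump it. Only the two mlx5_wq_cyc_* calls are driver API.
 */
static void demo_dump_rx_err_wqe(struct mlx5_wq_cyc *wq, struct mlx5_cqe64 *cqe)
{
	u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));

	mlx5_wq_cyc_wqe_dump(wq, ci, 1);	/* one stride per cyclic RQ WQE */
}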
@@ -79,7 +79,7 @@ struct mlx5_wq_ll {
 int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
@@ -88,16 +88,18 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		     void *cqc, struct mlx5_cqwq *wq,
 		     struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq);
 
 int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *wqc, struct mlx5_wq_ll *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
 
 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
 
+static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+	return (u32)wq->fbc.sz_m1 + 1;
+}
+
 static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq)
 {
 	return wq->cur_sz == wq->sz;
@@ -168,6 +170,16 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
 	return !equal && !smaller;
 }
 
+static inline u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+	return wq->fbc.sz_m1 + 1;
+}
+
+static inline u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
+{
+	return wq->fbc.log_stride;
+}
+
 static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr)
 {
 	return ctr & wq->fbc.sz_m1;
@@ -224,6 +236,11 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
 	return cqe;
 }
 
+static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+	return (u32)wq->fbc.sz_m1 + 1;
+}
+
 static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
 {
 	return wq->cur_sz == wq->fbc.sz_m1;
...