Commit 815f7480 authored by Jason Gunthorpe

Merge branch 'mlx5-next' into rdma.git for-next

From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

To resolve conflicts with net-next and pick up the first patch.

* branch 'mlx5-next':
  net/mlx5: Factor out HCA capabilities functions
  IB/mlx5: Add support for 50Gbps per lane link modes
  net/mlx5: Add support to ext_* fields introduced in Port Type and Speed register
  net/mlx5: Add new fields to Port Type and Speed register
  net/mlx5: Refactor queries to speed fields in Port Type and Speed register
  net/mlx5: E-Switch, Avoid magic numbers when initializing offloads mode
  net/mlx5: Relocate vport macros to the vport header file
  net/mlx5: E-Switch, Normalize the name of uplink vport number
  net/mlx5: Provide an alternative VF upper bound for ECPF
  net/mlx5: Add host params change event
  net/mlx5: Add query host params command
  net/mlx5: Update enable HCA dependency
  net/mlx5: Introduce Mellanox SmartNIC and modify page management logic
  IB/mlx5: Use unified register/load function for uplink and VF vports
  net/mlx5: Use consistent vport num argument type
  net/mlx5: Use void pointer as the type in address_of macro
  net/mlx5: Align ODP capability function with netdev coding style
  mlx5: use RCU lock in mlx5_eq_cq_get()
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parents ec95e0fa 37b6bb77
@@ -3,10 +3,11 @@
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"
static const struct mlx5_ib_profile rep_profile = {
static const struct mlx5_ib_profile vf_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@@ -45,31 +46,17 @@ static const struct mlx5_ib_profile rep_profile = {
NULL),
};
static int
mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
struct mlx5_ib_dev *ibdev;
ibdev = mlx5_ib_rep_to_dev(rep);
if (!__mlx5_ib_add(ibdev, ibdev->profile))
return -EINVAL;
return 0;
}
static void
mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5_ib_dev *ibdev;
ibdev = mlx5_ib_rep_to_dev(rep);
__mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
}
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
const struct mlx5_ib_profile *profile;
struct mlx5_ib_dev *ibdev;
if (rep->vport == MLX5_VPORT_UPLINK)
profile = &uplink_rep_profile;
else
profile = &vf_rep_profile;
ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
if (!ibdev)
return -ENOMEM;
@@ -78,7 +65,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
ibdev->mdev = dev;
ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
MLX5_CAP_GEN(dev, num_vhca_ports));
if (!__mlx5_ib_add(ibdev, &rep_profile)) {
if (!__mlx5_ib_add(ibdev, profile)) {
ib_dealloc_device(&ibdev->ib_dev);
return -EINVAL;
}
@@ -107,15 +94,14 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
return mlx5_ib_rep_to_dev(rep);
}
static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vports = MLX5_TOTAL_VPORTS(mdev);
struct mlx5_eswitch_rep_if rep_if = {};
int vport;
for (vport = 1; vport < total_vfs; vport++) {
struct mlx5_eswitch_rep_if rep_if = {};
for (vport = 0; vport < total_vports; vport++) {
rep_if.load = mlx5_ib_vport_rep_load;
rep_if.unload = mlx5_ib_vport_rep_unload;
rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
@@ -123,39 +109,16 @@ static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
}
}
static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vports = MLX5_TOTAL_VPORTS(mdev);
int vport;
for (vport = 1; vport < total_vfs; vport++)
for (vport = total_vports - 1; vport >= 0; vport--)
mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
}
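/* Note the ordering: registration walks vport indices 0..total_vports-1,
 * while unregistration walks them in reverse, so VF reps are torn down
 * before the PF/uplink rep at index 0.
 */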
void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
struct mlx5_eswitch_rep_if rep_if = {};
rep_if.load = mlx5_ib_nic_rep_load;
rep_if.unload = mlx5_ib_nic_rep_unload;
rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
rep_if.priv = dev;
mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
mlx5_ib_rep_register_vf_vports(dev);
}
void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
}
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
return mlx5_eswitch_mode(esw);
......
@@ -10,14 +10,16 @@
#include "mlx5_ib.h"
#ifdef CONFIG_MLX5_ESWITCH
extern const struct mlx5_ib_profile uplink_rep_profile;
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
int vport_index);
struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw);
struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
int vport_index);
void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev);
void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev);
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq);
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
@@ -48,8 +50,8 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
return NULL;
}
static inline void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {}
static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {}
static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
......
@@ -331,8 +331,8 @@ void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
spin_unlock(&port->mp.mpi_lock);
}
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
switch (eth_proto_oper) {
case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
@@ -389,10 +389,66 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0;
}
static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
switch (eth_proto_oper) {
case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_SDR;
break;
case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_DDR;
break;
case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_QDR;
break;
case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_QDR;
break;
case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_EDR;
break;
case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_HDR;
break;
case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_HDR;
break;
case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_HDR;
break;
default:
return -EINVAL;
}
return 0;
}
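/* The extended modes above are reported to the IB core as the nearest
 * (speed, width) product, with IB_SPEED_HDR standing in for 50Gbps per
 * lane: 50G links map to HDR x1, 100GAUI_2 to HDR x2 and 200GAUI_4 to
 * HDR x4.
 */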
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width, bool ext)
{
return ext ?
translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
active_width) :
translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
active_width);
}
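/* For reference, the MLX5_GET_ETH_PROTO() helper used below comes from
 * the mlx5 port header added earlier in this series; it selects the
 * ext_* variant of a PTYS field when the extended layout is in use,
 * roughly:
 *
 *	#define MLX5_GET_ETH_PROTO(reg, out, ext, field)	\
 *		(ext ? MLX5_GET(reg, out, ext_##field) :	\
 *		MLX5_GET(reg, out, field))
 */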
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
struct mlx5_core_dev *mdev;
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
@@ -400,6 +456,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
u16 qkey_viol_cntr;
u32 eth_prot_oper;
u8 mdev_port_num;
bool ext;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
@@ -416,16 +473,18 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
*/
err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
mdev_port_num);
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
mdev_port_num);
if (err)
goto out;
ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_QDR;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
&props->active_width, ext);
props->port_cap_flags |= IB_PORT_CM_SUP;
props->ip_gids = true;
@@ -6435,7 +6494,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_delay_drop_cleanup),
};
static const struct mlx5_ib_profile nic_rep_profile = {
const struct mlx5_ib_profile uplink_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@@ -6528,6 +6587,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
printk_once(KERN_INFO "%s", mlx5_version);
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
mlx5_ib_register_vport_reps(mdev);
return mdev;
}
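/* When the e-switch is in switchdev offloads mode, mlx5_ib_add()
 * returns early above with mdev itself as the driver context: the
 * per-vport IB devices are created later by the rep load callbacks,
 * and mlx5_ib_remove() below matches on context == mdev to know it
 * only has to unregister the vport reps.
 */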
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -6542,14 +6607,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
dev->profile = &nic_rep_profile;
mlx5_ib_register_vport_reps(dev);
return dev;
}
return __mlx5_ib_add(dev, &pf_profile);
}
@@ -6558,6 +6615,11 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_multiport_info *mpi;
struct mlx5_ib_dev *dev;
if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
mlx5_ib_unregister_vport_reps(mdev);
return;
}
if (mlx5_core_is_mp_slave(mdev)) {
mpi = context;
mutex_lock(&mlx5_ib_multiport_mutex);
@@ -6569,10 +6631,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
}
dev = context;
if (dev->profile == &nic_rep_profile)
mlx5_ib_unregister_vport_reps(dev);
else
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
ib_dealloc_device((struct ib_device *)dev);
}
......
@@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
......
@@ -316,6 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_HOST_PARAMS:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -627,6 +628,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
default: return "unknown command opcode";
}
}
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#include "ecpf.h"
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
}
static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, 0);
MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, 0);
MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
{
int err;
err = mlx5_peer_pf_enable_hca(dev);
if (err)
mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
err);
return err;
}
static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
{
int err;
err = mlx5_peer_pf_disable_hca(dev);
if (err) {
mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
err);
return;
}
err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
err);
}
int mlx5_ec_init(struct mlx5_core_dev *dev)
{
int err = 0;
if (!mlx5_core_is_ecpf(dev))
return 0;
/* ECPF shall enable HCA for peer PF in the same way a PF
* does this for its VFs.
*/
err = mlx5_peer_pf_init(dev);
if (err)
return err;
return 0;
}
void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
{
if (!mlx5_core_is_ecpf(dev))
return;
mlx5_peer_pf_cleanup(dev);
}
static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
u32 *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
MLX5_SET(query_host_params_in, in, opcode,
MLX5_CMD_OP_QUERY_HOST_PARAMS);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
{
u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
int err;
err = mlx5_query_host_params_context(dev, out, sizeof(out));
if (err)
return err;
*num_vf = MLX5_GET(query_host_params_out, out,
host_params_context.host_num_of_vfs);
mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
return 0;
}
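/* A hypothetical caller sketch (the name ecpf_vf_upper_bound() is
 * illustrative, not part of this series): on the ECPF the VF upper
 * bound comes from the external host via QUERY_HOST_PARAMS instead of
 * the local PCI SR-IOV config space.
 *
 *	static int ecpf_vf_upper_bound(struct mlx5_core_dev *dev)
 *	{
 *		int num_vf = 0;
 *
 *		if (mlx5_query_host_params_num_vfs(dev, &num_vf))
 *			return 0;	// no host params available
 *		return num_vf;
 *	}
 */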
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#ifndef __MLX5_ECPF_H__
#define __MLX5_ECPF_H__
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_ESWITCH
enum {
MLX5_ECPU_BIT_NUM = 23,
};
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
int mlx5_ec_init(struct mlx5_core_dev *dev);
void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf);
#else /* CONFIG_MLX5_ESWITCH */
static inline bool
mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; }
static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {}
static inline int
mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ECPF_H__ */
@@ -63,66 +63,168 @@ static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
[MLX5E_50GBASE_KR2] = 50000,
};
u32 mlx5e_port_ptys2speed(u32 eth_proto_oper)
static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
[MLX5E_SGMII_100M] = 100,
[MLX5E_1000BASE_X_SGMII] = 1000,
[MLX5E_5GBASE_R] = 5000,
[MLX5E_10GBASE_XFI_XAUI_1] = 10000,
[MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
[MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
[MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
[MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
[MLX5E_400GAUI_8] = 400000,
};
static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
const u32 **arr, u32 *size)
{
bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
*size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
ARRAY_SIZE(mlx5e_link_speed);
*arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
}
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
struct mlx5e_port_eth_proto *eproto)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
if (!eproto)
return -EINVAL;
if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
return -EOPNOTSUPP;
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
if (err)
return err;
eproto->cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_capability);
eproto->admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin);
eproto->oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
return 0;
}
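/* Callers sample ptys_extended_ethernet from PCAM themselves and pass
 * it in as 'ext'; asking for a layout the device does not use fails
 * fast with -EOPNOTSUPP rather than silently misreading the register.
 */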
void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
*an_status = 0;
*an_disable_cap = 0;
*an_disable_admin = 0;
if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1))
return;
*an_status = MLX5_GET(ptys_reg, out, an_status);
*an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
*an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
}
int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, bool ext)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u8 an_disable_admin;
u8 an_disable_cap;
u8 an_status;
mlx5_port_query_eth_autoneg(dev, &an_status, &an_disable_cap,
&an_disable_admin);
if (!an_disable_cap && an_disable)
return -EPERM;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
MLX5_SET(ptys_reg, in, proto_mask, MLX5_PTYS_EN);
if (ext)
MLX5_SET(ptys_reg, in, ext_eth_proto_admin, proto_admin);
else
MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
{
unsigned long temp = eth_proto_oper;
const u32 *table;
u32 speed = 0;
u32 max_size;
int i;
i = find_first_bit(&temp, MLX5E_LINK_MODES_NUMBER);
if (i < MLX5E_LINK_MODES_NUMBER)
speed = mlx5e_link_speed[i];
mlx5e_port_get_speed_arr(mdev, &table, &max_size);
i = find_first_bit(&temp, max_size);
if (i < max_size)
speed = table[i];
return speed;
}
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
u32 eth_proto_oper;
struct mlx5e_port_eth_proto eproto;
bool ext;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
return err;
goto out;
eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
*speed = mlx5e_port_ptys2speed(eth_proto_oper);
*speed = mlx5e_port_ptys2speed(mdev, eproto.oper);
if (!(*speed))
err = -EINVAL;
out:
return err;
}
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5e_port_eth_proto eproto;
u32 max_speed = 0;
u32 proto_cap;
const u32 *table;
u32 max_size;
bool ext;
int err;
int i;
err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
return err;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
if (proto_cap & MLX5E_PROT_MASK(i))
max_speed = max(max_speed, mlx5e_link_speed[i]);
mlx5e_port_get_speed_arr(mdev, &table, &max_size);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
max_speed = max(max_speed, table[i]);
*speed = max_speed;
return 0;
}
u32 mlx5e_port_speed2linkmodes(u32 speed)
u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed)
{
u32 link_modes = 0;
const u32 *table;
u32 max_size;
int i;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
if (mlx5e_link_speed[i] == speed)
mlx5e_port_get_speed_arr(mdev, &table, &max_size);
for (i = 0; i < max_size; ++i) {
if (table[i] == speed)
link_modes |= MLX5E_PROT_MASK(i);
}
return link_modes;
}
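/* Example: with the extended PTYS layout active, a 50000 Mb/s request
 * sets both MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 and
 * MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR in the returned mask, so the
 * device may negotiate either the 2x25G or the 1x50G flavour.
 */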
......
@@ -36,10 +36,22 @@
#include <linux/mlx5/driver.h>
#include "en.h"
u32 mlx5e_port_ptys2speed(u32 eth_proto_oper);
struct mlx5e_port_eth_proto {
u32 cap;
u32 admin;
u32 oper;
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
struct mlx5e_port_eth_proto *eproto);
void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, bool ext);
u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper);
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
u32 mlx5e_port_speed2linkmodes(u32 speed);
u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
......
@@ -695,13 +695,14 @@ static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
speed = mlx5e_port_ptys2speed(eth_proto_oper);
speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper);
if (!speed) {
speed = SPEED_UNKNOWN;
goto out;
@@ -885,7 +886,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 eth_proto_cap, eth_proto_admin;
struct mlx5e_port_eth_proto eproto;
bool an_changes = false;
u8 an_disable_admin;
u8 an_disable_cap;
@@ -899,16 +900,16 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
mlx5e_port_speed2linkmodes(speed);
mlx5e_port_speed2linkmodes(mdev, speed);
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto cap failed: %d\n",
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
goto out;
}
link_modes = link_modes & eth_proto_cap;
link_modes = link_modes & eproto.cap;
if (!link_modes) {
netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
__func__);
@@ -916,24 +917,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto admin failed: %d\n",
__func__, err);
goto out;
}
mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
&an_disable_cap, &an_disable_admin);
mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
&an_disable_admin);
an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
an_changes = ((!an_disable && an_disable_admin) ||
(an_disable && !an_disable_admin));
if (!an_changes && link_modes == eth_proto_admin)
if (!an_changes && link_modes == eproto.admin)
goto out;
mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, false);
mlx5_toggle_port_link(mdev);
out:
......
@@ -153,7 +153,7 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_uplink_rep_update_hw_counters(priv);
else
mlx5e_vf_rep_update_hw_counters(priv);
@@ -1132,7 +1132,7 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
if (ret)
return ret;
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
ret = snprintf(buf, len, "p%d", pf_num);
else
ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
@@ -1219,7 +1219,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
return false;
rep = rpriv->rep;
return (rep->vport == FDB_UPLINK_VPORT);
return (rep->vport == MLX5_VPORT_UPLINK);
}
static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
@@ -1368,7 +1368,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->sw_mtu = netdev->mtu;
/* SQ */
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
else
params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
@@ -1395,7 +1395,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_core_dev *mdev = priv->mdev;
if (rep->vport == FDB_UPLINK_VPORT) {
if (rep->vport == MLX5_VPORT_UPLINK) {
SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
@@ -1427,7 +1427,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_TSO6;
netdev->hw_features |= NETIF_F_RXCSUM;
if (rep->vport != FDB_UPLINK_VPORT)
if (rep->vport != MLX5_VPORT_UPLINK)
netdev->features |= NETIF_F_VLAN_CHALLENGED;
netdev->features |= netdev->hw_features;
@@ -1580,7 +1580,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return err;
}
if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
uplink_priv = &rpriv->uplink_priv;
/* init shared tc flow table */
@@ -1616,7 +1616,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
/* clean indirect TC block notifications */
unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
mlx5e_rep_indr_clean_block_privs(rpriv);
@@ -1735,7 +1735,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rpriv->rep = rep;
nch = mlx5e_get_max_num_channels(dev);
profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
profile = (rep->vport == MLX5_VPORT_UPLINK) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
@@ -1748,7 +1748,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rep->rep_if[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
if (rep->vport == FDB_UPLINK_VPORT) {
if (rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_create_mdev_resources(dev);
if (err)
goto err_destroy_netdev;
@@ -1784,7 +1784,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
mlx5e_detach_netdev(netdev_priv(netdev));
err_destroy_mdev_resources:
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(dev);
err_destroy_netdev:
@@ -1804,7 +1804,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
unregister_netdev(netdev);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
kfree(ppriv); /* mlx5e_rep_priv */
......
@@ -1834,7 +1834,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
if (rep->vport != FDB_UPLINK_VPORT &&
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw->offloads.inline_mode < match_level)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -2724,7 +2724,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT &&
bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
flow->flags & MLX5E_TC_FLOW_INGRESS;
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
@@ -2849,7 +2849,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
* original flow and packets redirected from uplink use the
* peer mdev.
*/
if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT)
if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
......
@@ -34,6 +34,7 @@
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
@@ -114,11 +115,11 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *cq = NULL;
spin_lock(&table->lock);
rcu_read_lock();
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
mlx5_cq_hold(cq);
spin_unlock(&table->lock);
rcu_read_unlock();
return cq;
}
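/* With the lookup above done under rcu_read_lock(), the cq_table
 * spinlock only serializes writers (the add/del paths below) and is
 * never taken from the EQ interrupt path anymore, which is why the
 * _irq lock variants can be dropped.
 */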
@@ -371,9 +372,9 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
struct mlx5_cq_table *table = &eq->cq_table;
int err;
spin_lock_irq(&table->lock);
spin_lock(&table->lock);
err = radix_tree_insert(&table->tree, cq->cqn, cq);
spin_unlock_irq(&table->lock);
spin_unlock(&table->lock);
return err;
}
@@ -383,9 +384,9 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *tmp;
spin_lock_irq(&table->lock);
spin_lock(&table->lock);
tmp = radix_tree_delete(&table->tree, cq->cqn);
spin_unlock_irq(&table->lock);
spin_unlock(&table->lock);
if (!tmp) {
mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
@@ -530,6 +531,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
if (mlx5_core_is_ecpf_esw_manager(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);
return async_event_mask;
}
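/* The host-params-change event is only armed on an ECPF that manages
 * the e-switch; it signals that the external host changed a parameter
 * (e.g. its number of VFs), so any cached host params must be
 * re-queried.
 */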
......
@@ -40,8 +40,6 @@
#include "eswitch.h"
#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
enum {
MLX5_ACTION_NONE = 0,
MLX5_ACTION_ADD = 1,
@@ -52,7 +50,7 @@ enum {
struct vport_addr {
struct l2addr_node node;
u8 action;
u32 vport;
u16 vport;
struct mlx5_flow_handle *flow_rule;
bool mpfs; /* UC MAC was added to MPFs */
/* A flag indicating that mac was added due to mc promiscuous vport */
@@ -115,7 +113,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
@@ -152,7 +150,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
/* E-Switch FDB */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
int match_header = (is_zero_ether_addr(mac_c) ? 0 :
@@ -188,7 +186,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
misc_parameters);
mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
}
@@ -215,7 +213,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
u8 mac_c[ETH_ALEN];
@@ -224,7 +222,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
u8 mac_c[ETH_ALEN];
u8 mac_v[ETH_ALEN];
@@ -237,7 +235,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
u8 mac_c[ETH_ALEN];
u8 mac_v[ETH_ALEN];
@@ -377,7 +375,7 @@ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
u16 vport = vaddr->vport;
int err;
/* Skip mlx5_mpfs_add_mac for PFs,
@@ -409,7 +407,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
u16 vport = vaddr->vport;
int err = 0;
/* Skip mlx5_mpfs_del_mac for PFs,
@@ -438,7 +436,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
struct esw_mc_addr *esw_mc)
{
u8 *mac = vaddr->node.addr;
u32 vport_idx = 0;
u16 vport_idx = 0;
for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
struct mlx5_vport *vport = &esw->vports[vport_idx];
@@ -485,7 +483,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
struct hlist_head *hash = esw->mc_table;
struct esw_mc_addr *esw_mc;
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
u16 vport = vaddr->vport;
if (!esw->fdb_table.legacy.fdb)
return 0;
@@ -499,7 +497,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
return -ENOMEM;
esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
/* Add this multicast mac to all the mc promiscuous vports */
update_allmulti_vports(esw, vaddr, esw_mc);
@@ -525,7 +523,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
struct hlist_head *hash = esw->mc_table;
struct esw_mc_addr *esw_mc;
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
u16 vport = vaddr->vport;
if (!esw->fdb_table.legacy.fdb)
return 0;
@@ -564,7 +562,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
u32 vport_num, int list_type)
u16 vport_num, int list_type)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
@@ -599,7 +597,7 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
u32 vport_num, int list_type)
u16 vport_num, int list_type)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
@@ -686,7 +684,7 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
/* Sync vport UC/MC list from vport context
* Must be called after esw_update_vport_addr_list
*/
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
struct l2addr_node *node;
@@ -721,7 +719,7 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
}
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
bool promisc, bool mc_promisc)
{
struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
@@ -736,7 +734,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
if (!allmulti_addr->uplink_rule)
allmulti_addr->uplink_rule =
esw_fdb_set_vport_allmulti_rule(esw,
UPLINK_VPORT);
MLX5_VPORT_UPLINK);
allmulti_addr->refcnt++;
} else if (vport->allmulti_rule) {
mlx5_del_flow_rules(vport->allmulti_rule);
@@ -764,7 +762,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
}
/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
int promisc_all = 0;
@@ -1633,7 +1631,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
err = esw_offloads_init(esw, nvfs + 1);
err = esw_offloads_init(esw, nvfs + MLX5_SPECIAL_VPORTS);
}
if (err)
......
@@ -49,8 +49,6 @@
#define MLX5_MAX_MC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
#define FDB_UPLINK_VPORT 0xffff
#define MLX5_MIN_BW_SHARE 1
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
......
@@ -46,6 +46,11 @@ enum {
FDB_SLOW_PATH
};
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
#define MLX5_ESW_MISS_FLOWS (2)
#define fdb_prio_table(esw, chain, prio, level) \
(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
@@ -359,15 +364,15 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
in_rep = attr->in_rep;
out_rep = attr->dests[0].rep;
if (push && in_rep->vport == FDB_UPLINK_VPORT)
if (push && in_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
if (pop && out_rep->vport == FDB_UPLINK_VPORT)
if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
if (!push && !pop && fwd)
if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
/* protects against (1) setting rules with different vlans to push and
@@ -409,7 +414,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
attr->vlan_handled = true;
}
@@ -469,7 +474,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
return 0;
@@ -904,8 +909,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
esw->fdb_table.offloads.fdb_left[i] =
ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
esw->total_vports;
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
MLX5_ESW_MISS_FLOWS + esw->total_vports;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -999,7 +1004,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
dmac[0] = 0x01;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
ix + MLX5_ESW_MISS_FLOWS);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
@@ -1048,7 +1054,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_destroy_offloads_fast_fdb_tables(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -1062,7 +1068,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
@@ -1082,16 +1088,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
struct mlx5_priv *priv = &esw->dev->priv;
u32 *flow_group_in;
void *match_criteria, *misc;
int err = 0;
int nvports = priv->sriov.num_vfs + 2;
nvports = nvports + MLX5_ESW_MISS_FLOWS;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1227,7 +1232,7 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
ether_addr_copy(rep->hw_id, hw_id);
}
offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
offloads->vport_reps[0].vport = MLX5_VPORT_UPLINK;
return 0;
}
@@ -1407,11 +1412,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
return err;
err = esw_create_offloads_table(esw);
err = esw_create_offloads_table(esw, nvports);
if (err)
goto create_ft_err;
err = esw_create_vport_rx_group(esw);
err = esw_create_vport_rx_group(esw, nvports);
if (err)
goto create_fg_err;
@@ -1811,7 +1816,7 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
if (vport == FDB_UPLINK_VPORT)
if (vport == MLX5_VPORT_UPLINK)
vport = UPLINK_REP_INDEX;
rep = &offloads->vport_reps[vport];
......
@@ -103,6 +103,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_STALL_EVENT";
case MLX5_EVENT_TYPE_CMD:
return "MLX5_EVENT_TYPE_CMD";
case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE:
return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
......
@@ -32,6 +32,7 @@
#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
......
@@ -65,6 +65,7 @@
#include "lib/vxlan.h"
#include "lib/devcom.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -461,27 +462,25 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
{
void *set_ctx;
void *set_hca_cap;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
void *set_ctx;
int set_sz;
int err;
if (!MLX5_CAP_GEN(dev, pg))
if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
!MLX5_CAP_GEN(dev, pg))
return 0;
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
if (err)
return err;
/**
* If all bits are cleared we shouldn't try to set it
* or we might fail while trying to access a reserved bit.
*/
if (!(MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive) ||
MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive) ||
MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive)))
return 0;
set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
return -ENOMEM;
@@ -492,13 +491,13 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
/* set ODP SRQ support for RC/UD and XRC transports */
MLX5_SET(odp_cap, set_hca_cap, ud_odp_caps.srq_receive,
(MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive)));
MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive));
MLX5_SET(odp_cap, set_hca_cap, rc_odp_caps.srq_receive,
(MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive)));
MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive));
MLX5_SET(odp_cap, set_hca_cap, xrc_odp_caps.srq_receive,
(MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive)));
MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive));
err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ODP);
@@ -579,6 +578,33 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
return err;
}
static int set_hca_cap(struct mlx5_core_dev *dev)
{
struct pci_dev *pdev = dev->pdev;
int err;
err = handle_hca_cap(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap failed\n");
goto out;
}
err = handle_hca_cap_atomic(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
goto out;
}
err = handle_hca_cap_odp(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
goto out;
}
out:
return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
struct mlx5_reg_host_endianness he_in;
@@ -614,6 +640,8 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
@@ -624,6 +652,8 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -901,6 +931,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
struct pci_dev *pdev = dev->pdev;
int err;
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
@@ -966,21 +997,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
err = handle_hca_cap(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap failed\n");
goto reclaim_boot_pages;
}
err = handle_hca_cap_atomic(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
goto reclaim_boot_pages;
}
err = handle_hca_cap_odp(dev);
err = set_hca_cap(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
dev_err(&pdev->dev, "set_hca_cap failed\n");
goto reclaim_boot_pages;
}
@@ -1072,6 +1091,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_sriov;
}
err = mlx5_ec_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init embedded CPU\n");
goto err_ec;
}
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1089,6 +1114,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
return 0;
err_reg_dev:
mlx5_ec_cleanup(dev);
err_ec:
mlx5_sriov_detach(dev);
err_sriov:
@@ -1163,6 +1191,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
mlx5_ec_cleanup(dev);
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);
......
@@ -121,7 +121,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 modify_bitmask);
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
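/* The wait helper now takes a pointer to the counter it should drain,
 * so the same loop serves both VF pages (&priv->vfs_pages) and the
 * ECPF's peer PF pages (&priv->peer_pf_pages).
 */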
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts);
......
@@ -48,6 +48,7 @@ enum {
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u16 func_id;
u8 ec_function;
s32 npages;
struct work_struct work;
};
@@ -143,6 +144,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
MLX5_SET(query_pages_in, in, op_mod, boot ?
MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@@ -253,7 +255,8 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
return err;
}
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
@@ -262,6 +265,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@@ -270,7 +274,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
int notify_fail, bool ec_function)
{
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
@@ -305,6 +309,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err) {
@@ -316,8 +321,11 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
dev->priv.fw_pages += npages;
if (func_id)
dev->priv.vfs_pages += npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.peer_pf_pages += npages;
mlx5_core_dbg(dev, "err %d\n", err);
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
kvfree(in);
return 0;
@@ -328,7 +336,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
out_free:
kvfree(in);
if (notify_fail)
page_notify_fail(dev, func_id);
page_notify_fail(dev, func_id, ec_function);
return err;
}
@@ -364,7 +372,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
int *nclaimed, bool ec_function)
{
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
@@ -385,6 +393,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
@@ -410,6 +419,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.peer_pf_pages -= num_claimed;
out_free:
kvfree(out);
@@ -423,9 +434,10 @@ static void pages_work_handler(struct work_struct *work)
int err = 0;
if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
req->ec_function);
else if (req->npages > 0)
err = give_pages(dev, req->func_id, req->npages, 1);
err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
if (err)
mlx5_core_warn(dev, "%s fail %d\n",
@@ -434,6 +446,10 @@ static void pages_work_handler(struct work_struct *work)
kfree(req);
}
enum {
EC_FUNCTION_MASK = 0x8000,
};
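/* The page-request EQE packs the embedded-CPU-function flag into the
 * top bit of a big-endian 16-bit word; req_pages_handler() below masks
 * it out with EC_FUNCTION_MASK after the byte swap.
 */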
static int req_pages_handler(struct notifier_block *nb,
unsigned long type, void *data)
{
@@ -441,6 +457,7 @@ static int req_pages_handler(struct notifier_block *nb,
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
bool ec_function;
u16 func_id;
s32 npages;
@@ -450,6 +467,7 @@ static int req_pages_handler(struct notifier_block *nb,
func_id = be16_to_cpu(eqe->data.req_pages.func_id);
npages = be32_to_cpu(eqe->data.req_pages.num_pages);
ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
func_id, npages);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
@@ -461,6 +479,7 @@ static int req_pages_handler(struct notifier_block *nb,
req->dev = dev;
req->func_id = func_id;
req->npages = npages;
req->ec_function = ec_function;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
return NOTIFY_OK;
@@ -479,7 +498,7 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
return give_pages(dev, func_id, npages, 0);
return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
enum {
@@ -513,7 +532,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
fwp = rb_entry(p, struct fw_page, rb_node);
err = reclaim_pages(dev, fwp->func_id,
optimal_reclaimed_pages(),
&nclaimed);
&nclaimed, mlx5_core_is_ecpf(dev));
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
@@ -535,6 +554,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.vfs_pages,
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);
WARN(dev->priv.peer_pf_pages,
"Peer PF FW pages counter is %d after reclaiming all pages\n",
dev->priv.peer_pf_pages);
return 0;
}
@@ -567,10 +589,10 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
flush_workqueue(dev->priv.pg_wq);
}
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
int prev_vfs_pages = dev->priv.vfs_pages;
int prev_pages = *pages;
/* In case of internal error we will free the pages manually later */
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -578,16 +600,16 @@ int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
return 0;
}
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages,
dev->priv.name);
while (dev->priv.vfs_pages) {
while (*pages) {
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
return -ETIMEDOUT;
}
if (dev->priv.vfs_pages < prev_vfs_pages) {
if (*pages < prev_pages) {
end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
prev_vfs_pages = dev->priv.vfs_pages;
prev_pages = *pages;
}
msleep(50);
}
......
@@ -30,10 +30,7 @@
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
@@ -157,44 +154,6 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
sizeof(out), MLX5_REG_MLCR, 0, 1);
}
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
if (err)
return err;
if (proto_mask == MLX5_PTYS_EN)
*proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
else
*proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
if (err)
return err;
if (proto_mask == MLX5_PTYS_EN)
*proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
else
*proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port)
{
......@@ -211,23 +170,6 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
u32 *proto_oper, u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN,
local_port);
if (err)
return err;
*proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
return 0;
}
EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, u8 local_port)
{
......@@ -245,35 +187,6 @@ int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u8 an_disable_admin;
u8 an_disable_cap;
u8 an_status;
mlx5_query_port_autoneg(dev, proto_mask, &an_status,
&an_disable_cap, &an_disable_admin);
if (!an_disable_cap && an_disable)
return -EPERM;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
if (proto_mask == MLX5_PTYS_EN)
MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
else
MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
/* This function should be used after setting a port register only */
void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
{
......@@ -606,25 +519,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
*an_status = 0;
*an_disable_cap = 0;
*an_disable_admin = 0;
if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
return;
*an_status = MLX5_GET(ptys_reg, out, an_status);
*an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
*an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
......
......@@ -147,7 +147,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
if (MLX5_ESWITCH_MANAGER(dev))
mlx5_eswitch_disable_sriov(dev->priv.eswitch);
if (mlx5_wait_for_vf_pages(dev))
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
......
......@@ -255,7 +255,7 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u32 vport,
u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size)
......@@ -373,7 +373,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
u32 vport,
u16 vport,
u16 vlans[],
int *size)
{
......@@ -526,7 +526,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u32 vport, u64 node_guid)
u16 vport, u64 node_guid)
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *nic_vport_context;
......@@ -827,7 +827,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
u32 vport,
u16 vport,
int *promisc_uc,
int *promisc_mc,
int *promisc_all)
......
......@@ -67,7 +67,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
......@@ -342,6 +342,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe,
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
......@@ -591,7 +593,7 @@ struct mlx5_eqe_cmd {
};
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 ec_function;
__be16 func_id;
__be32 num_pages;
__be32 rsvd1[5];
......
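Two things change in device.h above: the page-request EQE gains an ec_function word where reserved bytes used to be, and MLX5_ADDR_OF now yields a void pointer. The latter lets C callers assign the returned field address to any object pointer without an explicit cast, while the offset arithmetic still happens on a byte-sized pointer. A simplified, self-contained sketch of the difference (the fixed offset stands in for the real bit-layout machinery):

#include <stdint.h>

#define BYTE_OFF_SKETCH 16	/* pretend the field starts 16 bytes in */

#define ADDR_OF_OLD(p) ((char *)(p) + BYTE_OFF_SKETCH)
#define ADDR_OF_NEW(p) ((void *)((uint8_t *)(p) + BYTE_OFF_SKETCH))

struct ctx { uint32_t words[8]; };

int main(void)
{
	uint8_t buf[64] = {0};
	/* old: assigning to a typed pointer needs an explicit cast */
	struct ctx *a = (struct ctx *)ADDR_OF_OLD(buf);
	/* new: plain assignment suffices, since void * converts freely in C */
	struct ctx *b = ADDR_OF_NEW(buf);

	return a == b ? 0 : 1;
}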
......@@ -523,6 +523,7 @@ struct mlx5_priv {
atomic_t reg_pages;
struct list_head free_list;
int vfs_pages;
int peer_pf_pages;
struct mlx5_core_health health;
......@@ -653,6 +654,7 @@ struct mlx5_core_dev {
u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
u64 sys_image_guid;
phys_addr_t iseg_base;
......@@ -923,7 +925,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
......@@ -1073,11 +1075,24 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
#define MLX5_VPORT_MANAGER(mdev) \
(MLX5_CAP_GEN(mdev, vport_group_manager) && \
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
mlx5_core_is_pf(mdev))
static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
}
static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}
#define MLX5_HOST_PF_MAX_VFS (127u)
static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev)
{
if (mlx5_core_is_ecpf_esw_manager(dev))
return MLX5_HOST_PF_MAX_VFS;
else
return pci_sriov_get_totalvfs(dev->pdev);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
......
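The new ECPF helpers read the embedded_cpu bit cached from the device's initialization segment, and mlx5_core_max_vfs() covers the fact that on the SmartNIC's embedded CPU the host PF's SR-IOV configuration is not visible through the local PCI function, so a fixed upper bound is used instead. A standalone sketch of the selection logic (struct fields stand in for the capability bits and for pci_sriov_get_totalvfs()):

#include <stdbool.h>
#include <stdint.h>

#define HOST_PF_MAX_VFS 127u	/* mirrors MLX5_HOST_PF_MAX_VFS above */

struct caps_sketch {
	bool embedded_cpu;	/* initial segment bit */
	bool eswitch_manager;	/* HCA capability */
	uint16_t pci_totalvfs;	/* pci_sriov_get_totalvfs() stand-in */
};

static uint16_t max_vfs_sketch(const struct caps_sketch *c)
{
	/* ECPF e-switch manager: the host PF's totalvfs is unknown here,
	 * so fall back to the fixed upper bound. */
	if (c->embedded_cpu && c->eswitch_manager)
		return HOST_PF_MAX_VFS;
	return c->pci_totalvfs;
}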
......@@ -142,6 +142,7 @@ enum {
MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
MLX5_CMD_OP_QUERY_HOST_PARAMS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
......@@ -4441,7 +4442,8 @@ struct mlx5_ifc_query_pages_out_bits {
u8 syndrome[0x20];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 num_pages[0x20];
......@@ -4460,7 +4462,8 @@ struct mlx5_ifc_query_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
......@@ -5880,7 +5883,8 @@ struct mlx5_ifc_manage_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 input_num_entries[0x20];
......@@ -6058,7 +6062,8 @@ struct mlx5_ifc_enable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
......@@ -6102,7 +6107,8 @@ struct mlx5_ifc_disable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
......@@ -7820,21 +7826,23 @@ struct mlx5_ifc_ptys_reg_bits {
u8 proto_mask[0x3];
u8 an_status[0x4];
u8 reserved_at_24[0x3c];
u8 reserved_at_24[0x1c];
u8 ext_eth_proto_capability[0x20];
u8 eth_proto_capability[0x20];
u8 ib_link_width_capability[0x10];
u8 ib_proto_capability[0x10];
u8 reserved_at_a0[0x20];
u8 ext_eth_proto_admin[0x20];
u8 eth_proto_admin[0x20];
u8 ib_link_width_admin[0x10];
u8 ib_proto_admin[0x10];
u8 reserved_at_100[0x20];
u8 ext_eth_proto_oper[0x20];
u8 eth_proto_oper[0x20];
......@@ -8283,7 +8291,9 @@ struct mlx5_ifc_mpegc_reg_bits {
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x6d];
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x8];
u8 reserved_at_6e[0x4];
u8 ptys_extended_ethernet[0x1];
u8 reserved_at_73[0x3];
u8 pfcc_mask[0x1];
u8 reserved_at_77[0x3];
u8 per_lane_error_counters[0x1];
......@@ -8749,7 +8759,8 @@ struct mlx5_ifc_initial_seg_bits {
u8 initializing[0x1];
u8 reserved_at_fe1[0x4];
u8 nic_interface_supported[0x3];
u8 reserved_at_fe8[0x18];
u8 embedded_cpu[0x1];
u8 reserved_at_fe9[0x17];
struct mlx5_ifc_health_buffer_bits health_buffer;
......@@ -9516,4 +9527,44 @@ struct mlx5_ifc_mtrc_ctrl_bits {
u8 reserved_at_80[0x180];
};
struct mlx5_ifc_host_params_context_bits {
u8 host_number[0x8];
u8 reserved_at_8[0x8];
u8 host_num_of_vfs[0x10];
u8 reserved_at_20[0x10];
u8 host_pci_bus[0x10];
u8 reserved_at_40[0x10];
u8 host_pci_device[0x10];
u8 reserved_at_60[0x10];
u8 host_pci_function[0x10];
u8 reserved_at_80[0x180];
};
struct mlx5_ifc_query_host_params_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_query_host_params_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
struct mlx5_ifc_host_params_context_bits host_params_context;
u8 reserved_at_280[0x180];
};
#endif /* MLX5_IFC_H */
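With the opcode and the in/out layouts above, querying the host parameters follows the standard mlx5 command pattern. A hedged sketch, assuming the usual mlx5 headers are available; the wrapper name is illustrative, not the driver's actual helper:

static int query_host_num_of_vfs(struct mlx5_core_dev *dev, u16 *num_vfs)
{
	u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
	int err;

	MLX5_SET(query_host_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HOST_PARAMS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* Nested field access works through the generated bit layouts. */
	*num_vfs = MLX5_GET(query_host_params_out, out,
			    host_params_context.host_num_of_vfs);
	return 0;
}

The MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE event added in device.h is what prompts the ECPF to re-issue this query when the host side reconfigures.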
......@@ -92,6 +92,22 @@ enum mlx5e_link_mode {
MLX5E_LINK_MODES_NUMBER,
};
enum mlx5e_ext_link_mode {
MLX5E_SGMII_100M = 0,
MLX5E_1000BASE_X_SGMII = 1,
MLX5E_5GBASE_R = 3,
MLX5E_10GBASE_XFI_XAUI_1 = 4,
MLX5E_40GBASE_XLAUI_4_XLPPI_4 = 5,
MLX5E_25GAUI_1_25GBASE_CR_KR = 6,
MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 = 7,
MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8,
MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9,
MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
MLX5E_400GAUI_8 = 15,
MLX5E_EXT_LINK_MODES_NUMBER,
};
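These extended link modes index the new ext_eth_proto_* bitmaps bit-for-bit, so the MLX5E_PROT_MASK macro defined below applies to them unchanged. For example, checking a (fabricated) capability word for the single-lane 50G mode introduced in this series:

#include <stdbool.h>
#include <stdint.h>

#define PROT_MASK(link_mode) (1 << (link_mode))	/* MLX5E_PROT_MASK shape */

enum { EXT_50GAUI_1_50GBASE_CR_KR = 8 };	/* from the enum above */

static bool supports_50g_per_lane(uint32_t ext_eth_proto_cap)
{
	return (ext_eth_proto_cap & PROT_MASK(EXT_50GAUI_1_50GBASE_CR_KR)) != 0;
}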
enum mlx5e_connector_type {
MLX5E_PORT_UNKNOWN = 0,
MLX5E_PORT_NONE = 1,
......@@ -106,31 +122,23 @@ enum mlx5e_connector_type {
};
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
(ext ? MLX5_GET(reg, out, ext_##field) : \
MLX5_GET(reg, out, field))
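MLX5_GET_ETH_PROTO lets one query path serve both register layouts: when the device reports extended ethernet protocol support, the ext_* fields are read, otherwise the legacy ones. A sketch of the intended call pattern, assuming the mlx5 headers and using the ptys_extended_ethernet PCAM bit added in this series (the helper name is illustrative):

static int query_eth_proto_cap_sketch(struct mlx5_core_dev *dev, u32 *cap)
{
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
	bool ext = MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet);
	int err;

	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (err)
		return err;

	/* Expands to ext_eth_proto_capability when ext is true. */
	*cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability);
	return 0;
}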
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, u8 local_port);
int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
u32 *proto_oper, u8 local_port);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
......
......@@ -36,12 +36,25 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#define MLX5_VPORT_PF_PLACEHOLDER (1u)
#define MLX5_SPECIAL_VPORTS (MLX5_VPORT_PF_PLACEHOLDER)
#define MLX5_TOTAL_VPORTS(mdev) (MLX5_SPECIAL_VPORTS + mlx5_core_max_vfs(mdev))
#define MLX5_VPORT_MANAGER(mdev) \
(MLX5_CAP_GEN(mdev, vport_group_manager) && \
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
mlx5_core_is_pf(mdev))
enum {
MLX5_CAP_INLINE_MODE_L2,
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
};
enum {
MLX5_VPORT_UPLINK = 0xffff
};
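MLX5_TOTAL_VPORTS, relocated here from driver.h, now counts one PF placeholder plus the VF upper bound, while the uplink keeps the out-of-band sentinel 0xffff and is never part of that range; the sentinel also fits exactly in the u16 vport arguments below. A worked example of the sizing on an ECPF e-switch manager:

#include <stdio.h>

#define VPORT_PF_PLACEHOLDER 1u		/* MLX5_VPORT_PF_PLACEHOLDER */
#define HOST_PF_MAX_VFS      127u	/* fixed ECPF upper bound */

int main(void)
{
	unsigned int total = VPORT_PF_PLACEHOLDER + HOST_PF_MAX_VFS;

	/* 128 rep-table entries: vport 0 is the PF, 1..127 are VFs;
	 * the uplink (0xffff) is addressed separately. */
	printf("MLX5_TOTAL_VPORTS on an ECPF e-switch manager: %u\n", total);
	return 0;
}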
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state);
......@@ -60,7 +73,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u32 vport, u64 node_guid);
u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
......@@ -78,7 +91,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u32 vport,
u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size);
......@@ -87,7 +100,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
u32 vport,
u16 vport,
int *promisc_uc,
int *promisc_mc,
int *promisc_all);
......@@ -96,7 +109,7 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int promisc_mc,
int promisc_all);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
u32 vport,
u16 vport,
u16 vlans[],
int *size);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
......