Commit c515e70d authored by Saeed Mahameed's avatar Saeed Mahameed

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

This merge commit includes some misc shared code updates from mlx5-next branch needed
for net-next.

1) From Aya: Enable general events on all physical link types and
   restrict general event handling of subtype DELAY_DROP_TIMEOUT in mlx5 rdma
   driver to ethernet links only as it was intended.

2) From Eli: Introduce low level bits for prio tag mode

3) From Maor: Low level steering updates to support RDMA RX flow
   steering and enables RoCE loopback traffic when switchdev is enabled.

4) From Vu and Parav: Two small mlx5 core cleanups

5) From Yevgeny: add HW definitions of geneve offloads
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parents 2a369ae0 91a40a48
...@@ -181,7 +181,7 @@ static int mlx5_netdev_event(struct notifier_block *this, ...@@ -181,7 +181,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
ibdev->rep->vport); ibdev->rep->vport);
if (rep_ndev == ndev) if (rep_ndev == ndev)
roce->netdev = ndev; roce->netdev = ndev;
} else if (ndev->dev.parent == &mdev->pdev->dev) { } else if (ndev->dev.parent == mdev->device) {
roce->netdev = ndev; roce->netdev = ndev;
} }
write_unlock(&roce->netdev_lock); write_unlock(&roce->netdev_lock);
...@@ -4354,9 +4354,13 @@ static void delay_drop_handler(struct work_struct *work) ...@@ -4354,9 +4354,13 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev) struct ib_event *ibev)
{ {
u8 port = (eqe->data.port.port >> 4) & 0xf;
switch (eqe->sub_type) { switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work); if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET)
schedule_work(&ibdev->delay_drop.delay_drop_work);
break; break;
default: /* do nothing */ default: /* do nothing */
return; return;
...@@ -5673,7 +5677,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev) ...@@ -5673,7 +5677,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
} }
if (bound) { if (bound) {
dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n"); dev_dbg(mpi->mdev->device,
"removing port from unaffiliated list.\n");
mlx5_ib_dbg(dev, "port %d bound\n", i + 1); mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
list_del(&mpi->list); list_del(&mpi->list);
break; break;
...@@ -5872,7 +5877,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) ...@@ -5872,7 +5877,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev); dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
dev->ib_dev.dev.parent = &mdev->pdev->dev; dev->ib_dev.dev.parent = mdev->device;
mutex_init(&dev->cap_mask_mutex); mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list); INIT_LIST_HEAD(&dev->qp_list);
...@@ -6561,7 +6566,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev) ...@@ -6561,7 +6566,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
if (!bound) { if (!bound) {
list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list); list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n"); dev_dbg(mdev->device,
"no suitable IB device found to bind to, added to unaffiliated list.\n");
} }
mutex_unlock(&mlx5_ib_multiport_mutex); mutex_unlock(&mlx5_ib_multiport_mutex);
......
...@@ -36,7 +36,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu ...@@ -36,7 +36,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu
# #
# Core extra # Core extra
# #
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
......
...@@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, ...@@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
int node) int node)
{ {
struct mlx5_priv *priv = &dev->priv; struct mlx5_priv *priv = &dev->priv;
struct device *device = dev->device;
int original_node; int original_node;
void *cpu_handle; void *cpu_handle;
mutex_lock(&priv->alloc_mutex); mutex_lock(&priv->alloc_mutex);
original_node = dev_to_node(&dev->pdev->dev); original_node = dev_to_node(device);
set_dev_node(&dev->pdev->dev, node); set_dev_node(device, node);
cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle, cpu_handle = dma_alloc_coherent(device, size, dma_handle,
GFP_KERNEL); GFP_KERNEL);
set_dev_node(&dev->pdev->dev, original_node); set_dev_node(device, original_node);
mutex_unlock(&priv->alloc_mutex); mutex_unlock(&priv->alloc_mutex);
return cpu_handle; return cpu_handle;
} }
...@@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc); ...@@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{ {
dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf, dma_free_coherent(dev->device, buf->size, buf->frags->buf,
buf->frags->map); buf->frags->map);
kfree(buf->frags); kfree(buf->frags);
...@@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, ...@@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
if (!frag->buf) if (!frag->buf)
goto err_free_buf; goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) { if (frag->map & ((1 << buf->page_shift) - 1)) {
dma_free_coherent(&dev->pdev->dev, frag_sz, dma_free_coherent(dev->device, frag_sz,
buf->frags[i].buf, buf->frags[i].map); buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n", mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift); &frag->map, buf->page_shift);
...@@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, ...@@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
err_free_buf: err_free_buf:
while (i--) while (i--)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf, dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map); buf->frags[i].map);
kfree(buf->frags); kfree(buf->frags);
err_out: err_out:
...@@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) ...@@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
for (i = 0; i < buf->npages; i++) { for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE); int frag_sz = min_t(int, size, PAGE_SIZE);
dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf, dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
buf->frags[i].map); buf->frags[i].map);
size -= frag_sz; size -= frag_sz;
} }
...@@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) ...@@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
__set_bit(db->index, db->u.pgdir->bitmap); __set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) { if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, dma_free_coherent(dev->device, PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma); db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list); list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap); bitmap_free(db->u.pgdir->bitmap);
......
...@@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev) ...@@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd *cmd = &dev->cmd;
snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
dev->priv.name); dev_name(dev->device));
} }
static void clean_debug_files(struct mlx5_core_dev *dev) static void clean_debug_files(struct mlx5_core_dev *dev)
...@@ -1852,7 +1852,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev) ...@@ -1852,7 +1852,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{ {
struct device *ddev = &dev->pdev->dev; struct device *ddev = dev->device;
cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
&cmd->alloc_dma, GFP_KERNEL); &cmd->alloc_dma, GFP_KERNEL);
...@@ -1883,7 +1883,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) ...@@ -1883,7 +1883,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{ {
struct device *ddev = &dev->pdev->dev; struct device *ddev = dev->device;
dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
cmd->alloc_dma); cmd->alloc_dma);
...@@ -1908,8 +1908,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) ...@@ -1908,8 +1908,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL; return -EINVAL;
} }
cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align, cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
0);
if (!cmd->pool) if (!cmd->pool)
return -ENOMEM; return -ENOMEM;
......
...@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw, ...@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
TP_ARGS(tracer, trace_timestamp, lost, event_id, msg), TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
TP_STRUCT__entry( TP_STRUCT__entry(
__string(dev_name, tracer->dev->priv.name) __string(dev_name, dev_name(tracer->dev->device))
__field(u64, trace_timestamp) __field(u64, trace_timestamp)
__field(bool, lost) __field(bool, lost)
__field(u8, event_id) __field(u8, event_id)
...@@ -55,7 +55,8 @@ TRACE_EVENT(mlx5_fw, ...@@ -55,7 +55,8 @@ TRACE_EVENT(mlx5_fw,
), ),
TP_fast_assign( TP_fast_assign(
__assign_str(dev_name, tracer->dev->priv.name); __assign_str(dev_name,
dev_name(tracer->dev->device));
__entry->trace_timestamp = trace_timestamp; __entry->trace_timestamp = trace_timestamp;
__entry->lost = lost; __entry->lost = lost;
__entry->event_id = event_id; __entry->event_id = event_id;
......
...@@ -1794,7 +1794,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, ...@@ -1794,7 +1794,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->tstamp = &priv->tstamp; c->tstamp = &priv->tstamp;
c->ix = ix; c->ix = ix;
c->cpu = cpu; c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev; c->pdev = priv->mdev->device;
c->netdev = priv->netdev; c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc; c->num_tc = params->num_tc;
...@@ -2047,7 +2047,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, ...@@ -2047,7 +2047,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); param->wq.buf_numa_node = dev_to_node(mdev->device);
} }
static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
...@@ -2062,7 +2062,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, ...@@ -2062,7 +2062,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter); MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); param->wq.buf_numa_node = dev_to_node(mdev->device);
} }
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
...@@ -2074,7 +2074,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, ...@@ -2074,7 +2074,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
} }
static void mlx5e_build_sq_param(struct mlx5e_priv *priv, static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
...@@ -3001,8 +3001,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, ...@@ -3001,8 +3001,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq, struct mlx5e_cq *cq,
struct mlx5e_cq_param *param) struct mlx5e_cq_param *param)
{ {
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); param->wq.buf_numa_node = dev_to_node(mdev->device);
param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev); param->wq.db_numa_node = dev_to_node(mdev->device);
return mlx5e_alloc_cq_common(mdev, param, cq); return mlx5e_alloc_cq_common(mdev, param, cq);
} }
...@@ -4600,7 +4600,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) ...@@ -4600,7 +4600,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
bool fcs_supported; bool fcs_supported;
bool fcs_enabled; bool fcs_enabled;
SET_NETDEV_DEV(netdev, &mdev->pdev->dev); SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops; netdev->netdev_ops = &mlx5e_netdev_ops;
......
...@@ -1390,7 +1390,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) ...@@ -1390,7 +1390,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
if (rep->vport == MLX5_VPORT_UPLINK) { if (rep->vport == MLX5_VPORT_UPLINK) {
SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev); SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */ /* we want a persistent mac for the uplink rep */
mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr); mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
......
...@@ -664,7 +664,8 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, ...@@ -664,7 +664,8 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
} }
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n", netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name, hp->tirn, hp->pair->rqn[0],
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets); hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
hpe->hp = hp; hpe->hp = hp;
...@@ -701,7 +702,7 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, ...@@ -701,7 +702,7 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
hpe = list_entry(next, struct mlx5e_hairpin_entry, flows); hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
netdev_dbg(priv->netdev, "del hairpin: peer %s\n", netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
hpe->hp->pair->peer_mdev->priv.name); dev_name(hpe->hp->pair->peer_mdev->device));
mlx5e_hairpin_destroy(hpe->hp); mlx5e_hairpin_destroy(hpe->hp);
hash_del(&hpe->hairpin_hlist); hash_del(&hpe->hairpin_hlist);
......
...@@ -504,8 +504,7 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev) ...@@ -504,8 +504,7 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_VPORT_MANAGER(dev)) if (MLX5_VPORT_MANAGER(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && if (MLX5_CAP_GEN(dev, general_notification_event))
MLX5_CAP_GEN(dev, general_notification_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT); async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
if (MLX5_CAP_GEN(dev, port_module_event)) if (MLX5_CAP_GEN(dev, port_module_event))
......
...@@ -376,11 +376,11 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, ...@@ -376,11 +376,11 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
#define MLX5_DEBUG_ESWITCH_MASK BIT(3) #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(dev, format, ...) \ #define esw_info(__dev, format, ...) \
pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)
#define esw_warn(dev, format, ...) \ #define esw_warn(__dev, format, ...) \
pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)
#define esw_debug(dev, format, ...) \ #define esw_debug(dev, format, ...) \
mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
......
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "eswitch.h" #include "eswitch.h"
#include "rdma.h"
#include "en.h" #include "en.h"
#include "fs_core.h" #include "fs_core.h"
#include "lib/devcom.h" #include "lib/devcom.h"
...@@ -1710,6 +1711,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, ...@@ -1710,6 +1711,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
esw->host_info.num_vfs = vf_nvports; esw->host_info.num_vfs = vf_nvports;
} }
mlx5_rdma_enable_roce(esw->dev);
return 0; return 0;
err_reps: err_reps:
...@@ -1748,6 +1751,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw) ...@@ -1748,6 +1751,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
num_vfs = esw->dev->priv.sriov.num_vfs; num_vfs = esw->dev->priv.sriov.num_vfs;
} }
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw); esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw, num_vfs); esw_offloads_unload_all_reps(esw, num_vfs);
esw_offloads_steering_cleanup(esw); esw_offloads_steering_cleanup(esw);
......
...@@ -989,32 +989,33 @@ static enum fs_flow_table_type egress_to_fs_ft(bool egress) ...@@ -989,32 +989,33 @@ static enum fs_flow_table_type egress_to_fs_ft(bool egress)
return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX; return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
} }
static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev, static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
u32 *in, u32 *in,
unsigned int *group_id, struct mlx5_flow_group *fg,
bool is_egress) bool is_egress)
{ {
int (*create_flow_group)(struct mlx5_core_dev *dev, int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 *in, struct mlx5_flow_table *ft, u32 *in,
unsigned int *group_id) = struct mlx5_flow_group *fg) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group; mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in, char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria.misc_parameters); match_criteria.misc_parameters);
struct mlx5_core_dev *dev = ns->dev;
u32 saved_outer_esp_spi_mask; u32 saved_outer_esp_spi_mask;
u8 match_criteria_enable; u8 match_criteria_enable;
int ret; int ret;
if (MLX5_CAP_FLOWTABLE(dev, if (MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
return create_flow_group(dev, ft, in, group_id); return create_flow_group(ns, ft, in, fg);
match_criteria_enable = match_criteria_enable =
MLX5_GET(create_flow_group_in, in, match_criteria_enable); MLX5_GET(create_flow_group_in, in, match_criteria_enable);
saved_outer_esp_spi_mask = saved_outer_esp_spi_mask =
MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
if (!match_criteria_enable || !saved_outer_esp_spi_mask) if (!match_criteria_enable || !saved_outer_esp_spi_mask)
return create_flow_group(dev, ft, in, group_id); return create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0); MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
...@@ -1023,7 +1024,7 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev, ...@@ -1023,7 +1024,7 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_SET(create_flow_group_in, in, match_criteria_enable,
match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS); match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
ret = create_flow_group(dev, ft, in, group_id); ret = create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask); MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable); MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
...@@ -1031,17 +1032,18 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev, ...@@ -1031,17 +1032,18 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
return ret; return ret;
} }
static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg, struct mlx5_flow_group *fg,
struct fs_fte *fte, struct fs_fte *fte,
bool is_egress) bool is_egress)
{ {
int (*create_fte)(struct mlx5_core_dev *dev, int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg, struct mlx5_flow_group *fg,
struct fs_fte *fte) = struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte; mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga; struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule; struct mlx5_fpga_ipsec_rule *rule;
...@@ -1053,7 +1055,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, ...@@ -1053,7 +1055,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
!(fte->action.action & !(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return create_fte(dev, ft, fg, fte); return create_fte(ns, ft, fg, fte);
rule = kzalloc(sizeof(*rule), GFP_KERNEL); rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule) if (!rule)
...@@ -1070,7 +1072,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, ...@@ -1070,7 +1072,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
WARN_ON(rule_insert(fipsec, rule)); WARN_ON(rule_insert(fipsec, rule));
modify_spec_mailbox(dev, fte, &mbox_mod); modify_spec_mailbox(dev, fte, &mbox_mod);
ret = create_fte(dev, ft, fg, fte); ret = create_fte(ns, ft, fg, fte);
restore_spec_mailbox(fte, &mbox_mod); restore_spec_mailbox(fte, &mbox_mod);
if (ret) { if (ret) {
_rule_delete(fipsec, rule); _rule_delete(fipsec, rule);
...@@ -1081,19 +1083,20 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, ...@@ -1081,19 +1083,20 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
return ret; return ret;
} }
static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev, static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
unsigned int group_id, struct mlx5_flow_group *fg,
int modify_mask, int modify_mask,
struct fs_fte *fte, struct fs_fte *fte,
bool is_egress) bool is_egress)
{ {
int (*update_fte)(struct mlx5_core_dev *dev, int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
unsigned int group_id, struct mlx5_flow_group *fg,
int modify_mask, int modify_mask,
struct fs_fte *fte) = struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte; mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
struct mlx5_core_dev *dev = ns->dev;
bool is_esp = fte->action.esp_id; bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod; struct mailbox_mod mbox_mod;
int ret; int ret;
...@@ -1102,24 +1105,25 @@ static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev, ...@@ -1102,24 +1105,25 @@ static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
!(fte->action.action & !(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return update_fte(dev, ft, group_id, modify_mask, fte); return update_fte(ns, ft, fg, modify_mask, fte);
modify_spec_mailbox(dev, fte, &mbox_mod); modify_spec_mailbox(dev, fte, &mbox_mod);
ret = update_fte(dev, ft, group_id, modify_mask, fte); ret = update_fte(ns, ft, fg, modify_mask, fte);
restore_spec_mailbox(fte, &mbox_mod); restore_spec_mailbox(fte, &mbox_mod);
return ret; return ret;
} }
static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev, static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct fs_fte *fte, struct fs_fte *fte,
bool is_egress) bool is_egress)
{ {
int (*delete_fte)(struct mlx5_core_dev *dev, int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct fs_fte *fte) = struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte; mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga; struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule; struct mlx5_fpga_ipsec_rule *rule;
...@@ -1131,7 +1135,7 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev, ...@@ -1131,7 +1135,7 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
!(fte->action.action & !(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return delete_fte(dev, ft, fte); return delete_fte(ns, ft, fte);
rule = rule_search(fipsec, fte); rule = rule_search(fipsec, fte);
if (!rule) if (!rule)
...@@ -1141,84 +1145,84 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev, ...@@ -1141,84 +1145,84 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
rule_delete(fipsec, rule); rule_delete(fipsec, rule);
modify_spec_mailbox(dev, fte, &mbox_mod); modify_spec_mailbox(dev, fte, &mbox_mod);
ret = delete_fte(dev, ft, fte); ret = delete_fte(ns, ft, fte);
restore_spec_mailbox(fte, &mbox_mod); restore_spec_mailbox(fte, &mbox_mod);
return ret; return ret;
} }
static int static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev, mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
u32 *in, u32 *in,
unsigned int *group_id) struct mlx5_flow_group *fg)
{ {
return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true); return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
} }
static int static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev, mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg, struct mlx5_flow_group *fg,
struct fs_fte *fte) struct fs_fte *fte)
{ {
return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true); return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
} }
static int static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev, mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
unsigned int group_id, struct mlx5_flow_group *fg,
int modify_mask, int modify_mask,
struct fs_fte *fte) struct fs_fte *fte)
{ {
return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte, return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
true); true);
} }
static int static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev, mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct fs_fte *fte) struct fs_fte *fte)
{ {
return fpga_ipsec_fs_delete_fte(dev, ft, fte, true); return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
} }
static int static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev, mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
u32 *in, u32 *in,
unsigned int *group_id) struct mlx5_flow_group *fg)
{ {
return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false); return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
} }
static int static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev, mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg, struct mlx5_flow_group *fg,
struct fs_fte *fte) struct fs_fte *fte)
{ {
return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false); return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
} }
static int static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev, mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
unsigned int group_id, struct mlx5_flow_group *fg,
int modify_mask, int modify_mask,
struct fs_fte *fte) struct fs_fte *fte)
{ {
return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte, return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
false); false);
} }
static int static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev, mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct fs_fte *fte) struct fs_fte *fte)
{ {
return fpga_ipsec_fs_delete_fte(dev, ft, fte, false); return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
} }
static struct mlx5_flow_cmds fpga_ipsec_ingress; static struct mlx5_flow_cmds fpga_ipsec_ingress;
......
...@@ -36,45 +36,42 @@ ...@@ -36,45 +36,42 @@
#include "fs_core.h" #include "fs_core.h"
struct mlx5_flow_cmds { struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_core_dev *dev, int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
u16 vport, struct mlx5_flow_table *ft,
enum fs_flow_table_op_mod op_mod, unsigned int log_size,
enum fs_flow_table_type type, struct mlx5_flow_table *next_ft);
unsigned int level, unsigned int log_size, int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *next_ft,
unsigned int *table_id, u32 flags);
int (*destroy_flow_table)(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft); struct mlx5_flow_table *ft);
int (*modify_flow_table)(struct mlx5_core_dev *dev, int (*modify_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft); struct mlx5_flow_table *next_ft);
int (*create_flow_group)(struct mlx5_core_dev *dev, int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
u32 *in, u32 *in,
unsigned int *group_id); struct mlx5_flow_group *fg);
int (*destroy_flow_group)(struct mlx5_core_dev *dev, int (*destroy_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
unsigned int group_id); struct mlx5_flow_group *fg);
int (*create_fte)(struct mlx5_core_dev *dev, int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg, struct mlx5_flow_group *fg,
struct fs_fte *fte); struct fs_fte *fte);
int (*update_fte)(struct mlx5_core_dev *dev, int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
unsigned int group_id, struct mlx5_flow_group *fg,
int modify_mask, int modify_mask,
struct fs_fte *fte); struct fs_fte *fte);
int (*delete_fte)(struct mlx5_core_dev *dev, int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
struct fs_fte *fte); struct fs_fte *fte);
int (*update_root_ft)(struct mlx5_core_dev *dev, int (*update_root_ft)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, struct mlx5_flow_table *ft,
u32 underlay_qpn, u32 underlay_qpn,
bool disconnect); bool disconnect);
......
...@@ -403,7 +403,7 @@ static void del_hw_flow_table(struct fs_node *node) ...@@ -403,7 +403,7 @@ static void del_hw_flow_table(struct fs_node *node)
trace_mlx5_fs_del_ft(ft); trace_mlx5_fs_del_ft(ft);
if (node->active) { if (node->active) {
err = root->cmds->destroy_flow_table(dev, ft); err = root->cmds->destroy_flow_table(root, ft);
if (err) if (err)
mlx5_core_warn(dev, "flow steering can't destroy ft\n"); mlx5_core_warn(dev, "flow steering can't destroy ft\n");
} }
...@@ -435,7 +435,7 @@ static void modify_fte(struct fs_fte *fte) ...@@ -435,7 +435,7 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node); dev = get_dev(&fte->node);
root = find_root(&ft->node); root = find_root(&ft->node);
err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte); err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
if (err) if (err)
mlx5_core_warn(dev, mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n", "%s can't del rule fg id=%d fte_index=%d\n",
...@@ -492,7 +492,7 @@ static void del_hw_fte(struct fs_node *node) ...@@ -492,7 +492,7 @@ static void del_hw_fte(struct fs_node *node)
dev = get_dev(&ft->node); dev = get_dev(&ft->node);
root = find_root(&ft->node); root = find_root(&ft->node);
if (node->active) { if (node->active) {
err = root->cmds->delete_fte(dev, ft, fte); err = root->cmds->delete_fte(root, ft, fte);
if (err) if (err)
mlx5_core_warn(dev, mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n", "flow steering can't delete fte in index %d of flow group id %d\n",
...@@ -532,7 +532,7 @@ static void del_hw_flow_group(struct fs_node *node) ...@@ -532,7 +532,7 @@ static void del_hw_flow_group(struct fs_node *node)
trace_mlx5_fs_del_fg(fg); trace_mlx5_fs_del_fg(fg);
root = find_root(&ft->node); root = find_root(&ft->node);
if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id)) if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id); fg->id, ft->id);
} }
...@@ -783,7 +783,7 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, ...@@ -783,7 +783,7 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
fs_for_each_ft(iter, prio) { fs_for_each_ft(iter, prio) {
i++; i++;
err = root->cmds->modify_flow_table(dev, iter, ft); err = root->cmds->modify_flow_table(root, iter, ft);
if (err) { if (err) {
mlx5_core_warn(dev, "Failed to modify flow table %d\n", mlx5_core_warn(dev, "Failed to modify flow table %d\n",
iter->id); iter->id);
...@@ -831,11 +831,11 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio ...@@ -831,11 +831,11 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
if (list_empty(&root->underlay_qpns)) { if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */ /* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0; qpn = 0;
err = root->cmds->update_root_ft(root->dev, ft, qpn, false); err = root->cmds->update_root_ft(root, ft, qpn, false);
} else { } else {
list_for_each_entry(uqp, &root->underlay_qpns, list) { list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn; qpn = uqp->qpn;
err = root->cmds->update_root_ft(root->dev, ft, err = root->cmds->update_root_ft(root, ft,
qpn, false); qpn, false);
if (err) if (err)
break; break;
...@@ -871,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, ...@@ -871,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest)); memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node); root = find_root(&ft->node);
err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, err = root->cmds->update_fte(root, ft, fg,
modify_mask, fte); modify_mask, fte);
up_write_ref_node(&fte->node, false); up_write_ref_node(&fte->node, false);
...@@ -1013,9 +1013,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa ...@@ -1013,9 +1013,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio); next_ft = find_next_chained_ft(fs_prio);
err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod, err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
ft->type, ft->level, log_table_sz,
next_ft, &ft->id, ft->flags);
if (err) if (err)
goto free_ft; goto free_ft;
...@@ -1032,7 +1030,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa ...@@ -1032,7 +1030,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
trace_mlx5_fs_add_ft(ft); trace_mlx5_fs_add_ft(ft);
return ft; return ft;
destroy_ft: destroy_ft:
root->cmds->destroy_flow_table(root->dev, ft); root->cmds->destroy_flow_table(root, ft);
free_ft: free_ft:
kfree(ft); kfree(ft);
unlock_root: unlock_root:
...@@ -1114,7 +1112,6 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, ...@@ -1114,7 +1112,6 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
start_flow_index); start_flow_index);
int end_index = MLX5_GET(create_flow_group_in, fg_in, int end_index = MLX5_GET(create_flow_group_in, fg_in,
end_flow_index); end_flow_index);
struct mlx5_core_dev *dev = get_dev(&ft->node);
struct mlx5_flow_group *fg; struct mlx5_flow_group *fg;
int err; int err;
...@@ -1129,7 +1126,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, ...@@ -1129,7 +1126,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (IS_ERR(fg)) if (IS_ERR(fg))
return fg; return fg;
err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id); err = root->cmds->create_flow_group(root, ft, fg_in, fg);
if (err) { if (err) {
tree_put_node(&fg->node, false); tree_put_node(&fg->node, false);
return ERR_PTR(err); return ERR_PTR(err);
...@@ -1269,11 +1266,9 @@ add_rule_fte(struct fs_fte *fte, ...@@ -1269,11 +1266,9 @@ add_rule_fte(struct fs_fte *fte,
fs_get_obj(ft, fg->node.parent); fs_get_obj(ft, fg->node.parent);
root = find_root(&fg->node); root = find_root(&fg->node);
if (!(fte->status & FS_FTE_STATUS_EXISTING)) if (!(fte->status & FS_FTE_STATUS_EXISTING))
err = root->cmds->create_fte(get_dev(&ft->node), err = root->cmds->create_fte(root, ft, fg, fte);
ft, fg, fte);
else else
err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
modify_mask, fte);
if (err) if (err)
goto free_handle; goto free_handle;
...@@ -1339,7 +1334,6 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, ...@@ -1339,7 +1334,6 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg) struct mlx5_flow_group *fg)
{ {
struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_core_dev *dev = get_dev(&ft->node);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *match_criteria_addr; void *match_criteria_addr;
u8 src_esw_owner_mask_on; u8 src_esw_owner_mask_on;
...@@ -1369,7 +1363,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, ...@@ -1369,7 +1363,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
memcpy(match_criteria_addr, fg->mask.match_criteria, memcpy(match_criteria_addr, fg->mask.match_criteria,
sizeof(fg->mask.match_criteria)); sizeof(fg->mask.match_criteria));
err = root->cmds->create_flow_group(dev, ft, in, &fg->id); err = root->cmds->create_flow_group(root, ft, in, fg);
if (!err) { if (!err) {
fg->node.active = true; fg->node.active = true;
trace_mlx5_fs_add_fg(fg); trace_mlx5_fs_add_fg(fg);
...@@ -1941,12 +1935,12 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) ...@@ -1941,12 +1935,12 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
if (list_empty(&root->underlay_qpns)) { if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */ /* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0; qpn = 0;
err = root->cmds->update_root_ft(root->dev, new_root_ft, err = root->cmds->update_root_ft(root, new_root_ft,
qpn, false); qpn, false);
} else { } else {
list_for_each_entry(uqp, &root->underlay_qpns, list) { list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn; qpn = uqp->qpn;
err = root->cmds->update_root_ft(root->dev, err = root->cmds->update_root_ft(root,
new_root_ft, qpn, new_root_ft, qpn,
false); false);
if (err) if (err)
...@@ -2060,6 +2054,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, ...@@ -2060,6 +2054,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns) if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns; return &steering->sniffer_tx_root_ns->ns;
return NULL; return NULL;
case MLX5_FLOW_NAMESPACE_RDMA_RX:
if (steering->rdma_rx_root_ns)
return &steering->rdma_rx_root_ns->ns;
return NULL;
default: default:
break; break;
} }
...@@ -2456,6 +2454,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) ...@@ -2456,6 +2454,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_sub_ns = NULL; steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->egress_root_ns); cleanup_root_ns(steering->egress_root_ns);
mlx5_cleanup_fc_stats(dev); mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache); kmem_cache_destroy(steering->ftes_cache);
...@@ -2497,6 +2496,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering) ...@@ -2497,6 +2496,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return 0; return 0;
} }
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
if (!steering->rdma_rx_root_ns)
return -ENOMEM;
steering->rdma_rx_root_ns->def_miss_action =
MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;
/* Create single prio */
prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
if (IS_ERR(prio)) {
cleanup_root_ns(steering->rdma_rx_root_ns);
return PTR_ERR(prio);
}
return 0;
}
static int init_fdb_root_ns(struct mlx5_flow_steering *steering) static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{ {
struct mlx5_flow_namespace *ns; struct mlx5_flow_namespace *ns;
...@@ -2733,6 +2751,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) ...@@ -2733,6 +2751,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err; goto err;
} }
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
}
if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering); err = init_egress_root_ns(steering);
if (err) if (err)
...@@ -2762,7 +2787,7 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) ...@@ -2762,7 +2787,7 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto update_ft_fail; goto update_ft_fail;
} }
err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn, err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
false); false);
if (err) { if (err) {
mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n", mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
...@@ -2806,7 +2831,7 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) ...@@ -2806,7 +2831,7 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto out; goto out;
} }
err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn, err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
true); true);
if (err) if (err)
mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n", mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
......
...@@ -67,6 +67,7 @@ enum fs_flow_table_type { ...@@ -67,6 +67,7 @@ enum fs_flow_table_type {
FS_FT_FDB = 0X4, FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0X5, FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6, FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX, FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
}; };
...@@ -90,6 +91,7 @@ struct mlx5_flow_steering { ...@@ -90,6 +91,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace **esw_ingress_root_ns; struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns; struct mlx5_flow_root_namespace *egress_root_ns;
}; };
...@@ -150,7 +152,7 @@ struct mlx5_ft_underlay_qp { ...@@ -150,7 +152,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn; u32 qpn;
}; };
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_800 #define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_a00
/* Calculate the fte_match_param length and without the reserved length. /* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last. * Make sure the reserved field is the last.
*/ */
...@@ -216,6 +218,7 @@ struct mlx5_flow_root_namespace { ...@@ -216,6 +218,7 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock; struct mutex chain_lock;
struct list_head underlay_qpns; struct list_head underlay_qpns;
const struct mlx5_flow_cmds *cmds; const struct mlx5_flow_cmds *cmds;
enum mlx5_flow_table_miss_action def_miss_action;
}; };
int mlx5_init_fc_stats(struct mlx5_core_dev *dev); int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
......
...@@ -380,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev) ...@@ -380,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
return -ENOMEM; return -ENOMEM;
strcpy(name, "mlx5_health"); strcpy(name, "mlx5_health");
strcat(name, dev->priv.name); strcat(name, dev_name(dev->device));
health->wq = create_singlethread_workqueue(name); health->wq = create_singlethread_workqueue(name);
kfree(name); kfree(name);
if (!health->wq) if (!health->wq)
......
...@@ -721,7 +721,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, ...@@ -721,7 +721,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv; struct mlx5_priv *priv = &dev->priv;
int err = 0; int err = 0;
dev->pdev = pdev;
priv->pci_dev_data = id->driver_data; priv->pci_dev_data = id->driver_data;
pci_set_drvdata(dev->pdev, dev); pci_set_drvdata(dev->pdev, dev);
...@@ -1222,14 +1221,11 @@ static const struct devlink_ops mlx5_devlink_ops = { ...@@ -1222,14 +1221,11 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif #endif
}; };
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char *name) static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{ {
struct mlx5_priv *priv = &dev->priv; struct mlx5_priv *priv = &dev->priv;
int err; int err;
strncpy(priv->name, name, MLX5_MAX_NAME_LEN);
priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
dev->profile = &profile[profile_idx]; dev->profile = &profile[profile_idx];
INIT_LIST_HEAD(&priv->ctx_list); INIT_LIST_HEAD(&priv->ctx_list);
...@@ -1247,9 +1243,10 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char ...@@ -1247,9 +1243,10 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char
INIT_LIST_HEAD(&priv->pgdir_list); INIT_LIST_HEAD(&priv->pgdir_list);
spin_lock_init(&priv->mkey_lock); spin_lock_init(&priv->mkey_lock);
priv->dbg_root = debugfs_create_dir(name, mlx5_debugfs_root); priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
if (!priv->dbg_root) { if (!priv->dbg_root) {
pr_err("mlx5_core: %s error, Cannot create debugfs dir, aborting\n", name); dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -1292,8 +1289,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -1292,8 +1289,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
} }
dev = devlink_priv(devlink); dev = devlink_priv(devlink);
dev->device = &pdev->dev;
dev->pdev = pdev;
err = mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev)); err = mlx5_mdev_init(dev, prof_sel);
if (err) if (err)
goto mdev_init_err; goto mdev_init_err;
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include <linux/ptp_clock_kernel.h> #include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/cq.h> #include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#define DRIVER_NAME "mlx5_core" #define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0" #define DRIVER_VERSION "5.0-0"
...@@ -48,53 +49,57 @@ ...@@ -48,53 +49,57 @@
extern uint mlx5_core_debug_mask; extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \ #define mlx5_core_dbg(__dev, format, ...) \
pr_debug("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \ __func__, __LINE__, current->pid, \
##__VA_ARGS__) ##__VA_ARGS__)
#define mlx5_core_dbg_once(__dev, format, ...) \ #define mlx5_core_dbg_once(__dev, format, ...) \
pr_debug_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ dev_dbg_once((__dev)->device, \
__func__, __LINE__, current->pid, \ "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__) ##__VA_ARGS__)
#define mlx5_core_dbg_mask(__dev, mask, format, ...) \ #define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \ do { \
if ((mask) & mlx5_core_debug_mask) \ if ((mask) & mlx5_core_debug_mask) \
mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \ mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
} while (0) } while (0)
#define mlx5_core_err(__dev, format, ...) \ #define mlx5_core_err(__dev, format, ...) \
pr_err("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \ __func__, __LINE__, current->pid, \
##__VA_ARGS__) ##__VA_ARGS__)
#define mlx5_core_err_rl(__dev, format, ...) \ #define mlx5_core_err_rl(__dev, format, ...) \
pr_err_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ dev_err_ratelimited((__dev)->device, \
__func__, __LINE__, current->pid, \ "%s:%d:(pid %d): " format, \
##__VA_ARGS__) __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \ #define mlx5_core_warn(__dev, format, ...) \
pr_warn("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \ __func__, __LINE__, current->pid, \
##__VA_ARGS__) ##__VA_ARGS__)
#define mlx5_core_warn_once(__dev, format, ...) \ #define mlx5_core_warn_once(__dev, format, ...) \
pr_warn_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \ __func__, __LINE__, current->pid, \
##__VA_ARGS__) ##__VA_ARGS__)
#define mlx5_core_warn_rl(__dev, format, ...) \ #define mlx5_core_warn_rl(__dev, format, ...) \
pr_warn_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ dev_warn_ratelimited((__dev)->device, \
__func__, __LINE__, current->pid, \ "%s:%d:(pid %d): " format, \
##__VA_ARGS__) __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_info(__dev, format, ...) \ #define mlx5_core_info(__dev, format, ...) \
pr_info("%s " format, (__dev)->priv.name, ##__VA_ARGS__) dev_info((__dev)->device, format, ##__VA_ARGS__)
#define mlx5_core_info_rl(__dev, format, ...) \ #define mlx5_core_info_rl(__dev, format, ...) \
pr_info_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ dev_info_ratelimited((__dev)->device, \
__func__, __LINE__, current->pid, \ "%s:%d:(pid %d): " format, \
##__VA_ARGS__) __func__, __LINE__, current->pid, \
##__VA_ARGS__)
enum { enum {
MLX5_CMD_DATA, /* print command payload only */ MLX5_CMD_DATA, /* print command payload only */
......
...@@ -200,7 +200,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr) ...@@ -200,7 +200,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
rb_erase(&fwp->rb_node, &dev->priv.page_root); rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1) if (fwp->free_count != 1)
list_del(&fwp->list); list_del(&fwp->list);
dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK, dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL); PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page); __free_page(fwp->page);
kfree(fwp); kfree(fwp);
...@@ -211,11 +211,12 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr) ...@@ -211,11 +211,12 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{ {
struct device *device = dev->device;
int nid = dev_to_node(device);
struct page *page; struct page *page;
u64 zero_addr = 1; u64 zero_addr = 1;
u64 addr; u64 addr;
int err; int err;
int nid = dev_to_node(&dev->pdev->dev);
page = alloc_pages_node(nid, GFP_HIGHUSER, 0); page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
if (!page) { if (!page) {
...@@ -223,9 +224,8 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) ...@@ -223,9 +224,8 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
return -ENOMEM; return -ENOMEM;
} }
map: map:
addr = dma_map_page(&dev->pdev->dev, page, 0, addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(device, addr)) {
if (dma_mapping_error(&dev->pdev->dev, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n"); mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM; err = -ENOMEM;
goto err_mapping; goto err_mapping;
...@@ -240,8 +240,7 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) ...@@ -240,8 +240,7 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
err = insert_page(dev, addr, page, func_id); err = insert_page(dev, addr, page, func_id);
if (err) { if (err) {
mlx5_core_err(dev, "failed to track allocated page\n"); mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
DMA_BIDIRECTIONAL);
} }
err_mapping: err_mapping:
...@@ -249,7 +248,7 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) ...@@ -249,7 +248,7 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
__free_page(page); __free_page(page);
if (zero_addr == 0) if (zero_addr == 0)
dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE, dma_unmap_page(device, zero_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
return err; return err;
...@@ -600,8 +599,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages) ...@@ -600,8 +599,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
return 0; return 0;
} }
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages, mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
dev->priv.name);
while (*pages) { while (*pages) {
if (time_after(jiffies, end)) { if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages); mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
...@@ -614,6 +612,6 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages) ...@@ -614,6 +612,6 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
msleep(50); msleep(50);
} }
mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name); mlx5_core_dbg(dev, "All pages received\n");
return 0; return 0;
} }
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */
#include <linux/mlx5/vport.h>
#include <rdma/ib_verbs.h>
#include <net/addrconf.h>
#include "lib/mlx5.h"
#include "eswitch.h"
#include "fs_core.h"
#include "rdma.h"
static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
{
struct mlx5_core_roce *roce = &dev->priv.roce;
if (!roce->ft)
return;
mlx5_del_flow_rules(roce->allow_rule);
mlx5_destroy_flow_group(roce->fg);
mlx5_destroy_flow_table(roce->ft);
}
static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_roce *roce = &dev->priv.roce;
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
void *match_criteria;
u32 *flow_group_in;
void *misc;
int err;
if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)))
return -EOPNOTSUPP;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
kvfree(flow_group_in);
return -ENOMEM;
}
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
if (!ns) {
mlx5_core_err(dev, "Failed to get RDMA RX namespace");
err = -EOPNOTSUPP;
goto free;
}
ft_attr.max_fte = 1;
ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
mlx5_core_err(dev, "Failed to create RDMA RX flow table");
err = PTR_ERR(ft);
goto free;
}
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port);
fg = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(fg)) {
err = PTR_ERR(fg);
mlx5_core_err(dev, "Failed to create RDMA RX flow group err(%d)\n", err);
goto destroy_flow_table;
}
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port,
dev->priv.eswitch->manager_vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
mlx5_core_err(dev, "Failed to add RoCE allow rule, err=%d\n",
err);
goto destroy_flow_group;
}
kvfree(spec);
kvfree(flow_group_in);
roce->ft = ft;
roce->fg = fg;
roce->allow_rule = flow_rule;
return 0;
destroy_flow_table:
mlx5_destroy_flow_table(ft);
destroy_flow_group:
mlx5_destroy_flow_group(fg);
free:
kvfree(spec);
kvfree(flow_group_in);
return err;
}
/* Remove the default RoCE address: an all-zero / NULL gid_set at index 0
 * clears the entry that mlx5_rdma_add_roce_addr() programmed.
 */
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
	mlx5_core_roce_gid_set(dev, 0, 0, 0,
			       NULL, NULL, false, 0, 0);
}
/* Build the default link-local GID for this device: fe80::/64 prefix with
 * the interface identifier derived (EUI-48 expansion) from the NIC vport
 * MAC address.
 */
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
{
	u8 hw_id[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	/* bytes 8..15 of the raw GID carry the MAC-derived EUI-64 */
	addrconf_addr_eui48(&gid->raw[8], hw_id);
}
/* Program the default RoCE v1 address at GID index 0.
 *
 * The GID is the link-local GID derived from the NIC vport MAC. The same
 * MAC is passed alongside the GID; the original code handed 'mac' to the
 * firmware uninitialized (stack garbage), so query it explicitly here,
 * mirroring what mlx5_rdma_make_default_gid() does internally.
 *
 * Returns 0 on success or a negative errno from the gid_set command.
 */
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
	union ib_gid gid;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, 0, mac);
	mlx5_rdma_make_default_gid(dev, &gid);
	return mlx5_core_roce_gid_set(dev, 0,
				      MLX5_ROCE_VERSION_1,
				      0, gid.raw, mac,
				      false, 0, 1);
}
/* Disable RoCE for the device: tear down the loopback steering objects,
 * clear the default GID entry, then disable RoCE on the NIC vport —
 * the exact reverse of mlx5_rdma_enable_roce().
 */
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
{
	mlx5_rdma_disable_roce_steering(dev);
	mlx5_rdma_del_roce_addr(dev);
	mlx5_nic_vport_disable_roce(dev);
}
/* Enable RoCE for the device: enable RoCE on the NIC vport, program the
 * default RoCE address, and install the loopback steering rule.
 *
 * Best-effort: on any failure the already-completed steps are unwound via
 * the goto chain, an error is logged, and the function returns silently
 * (void) — RoCE simply stays disabled.
 */
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_nic_vport_enable_roce(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
		return;
	}

	err = mlx5_rdma_add_roce_addr(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to add RoCE address: %d\n", err);
		goto disable_roce;
	}

	err = mlx5_rdma_enable_roce_steering(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to enable RoCE steering: %d\n", err);
		goto del_roce_addr;
	}

	return;

del_roce_addr:
	mlx5_rdma_del_roce_addr(dev);
disable_roce:
	mlx5_nic_vport_disable_roce(dev);
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_RDMA_H__
#define __MLX5_RDMA_H__

#include "mlx5_core.h"

#ifdef CONFIG_MLX5_ESWITCH

/* Enable/disable RoCE loopback support; implemented in rdma.c. Both are
 * void: enable logs and unwinds internally on failure, disable tears down
 * whatever enable set up.
 */
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);

#else /* CONFIG_MLX5_ESWITCH */

/* Without eswitch support there is nothing to set up; compile to no-ops. */
static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}

#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_RDMA_H__ */
...@@ -182,16 +182,24 @@ int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state) ...@@ -182,16 +182,24 @@ int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state)
} }
EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state); EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
/* Create a TIR and hand back the full firmware output mailbox.
 *
 * Sets the CREATE_TIR opcode in @in and executes the command; @out receives
 * the complete create_tir_out layout, so callers can read fields beyond the
 * TIR number (the layout visible in this change also carries ICM address
 * bits).  Returns 0 on success or a negative errno from mlx5_cmd_exec().
 */
int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
			     u32 *in, int inlen,
			     u32 *out, int outlen)
{
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	return mlx5_cmd_exec(dev, in, inlen, out, outlen);
}
EXPORT_SYMBOL(mlx5_core_create_tir_out);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn) u32 *tirn)
{ {
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0}; u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err; int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); err = mlx5_core_create_tir_out(dev, in, inlen,
out, sizeof(out));
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err) if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn); *tirn = MLX5_GET(create_tir_out, out, tirn);
......
...@@ -1002,7 +1002,8 @@ enum { ...@@ -1002,7 +1002,8 @@ enum {
MLX5_MATCH_OUTER_HEADERS = 1 << 0, MLX5_MATCH_OUTER_HEADERS = 1 << 0,
MLX5_MATCH_MISC_PARAMETERS = 1 << 1, MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
MLX5_MATCH_INNER_HEADERS = 1 << 2, MLX5_MATCH_INNER_HEADERS = 1 << 2,
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
}; };
enum { enum {
...@@ -1046,6 +1047,7 @@ enum mlx5_mpls_supported_fields { ...@@ -1046,6 +1047,7 @@ enum mlx5_mpls_supported_fields {
}; };
enum mlx5_flex_parser_protos { enum mlx5_flex_parser_protos {
MLX5_FLEX_PROTO_GENEVE = 1 << 3,
MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4, MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5, MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
}; };
...@@ -1167,6 +1169,12 @@ enum mlx5_qcam_feature_groups { ...@@ -1167,6 +1169,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \ #define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap) MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \ MLX5_GET(flow_table_eswitch_cap, \
mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
......
...@@ -56,7 +56,6 @@ ...@@ -56,7 +56,6 @@
enum { enum {
MLX5_BOARD_ID_LEN = 64, MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
}; };
enum { enum {
...@@ -513,8 +512,13 @@ struct mlx5_rl_table { ...@@ -513,8 +512,13 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry; struct mlx5_rl_entry *rl_entry;
}; };
struct mlx5_core_roce {
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_flow_handle *allow_rule;
};
struct mlx5_priv { struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table *eq_table; struct mlx5_eq_table *eq_table;
/* pages stuff */ /* pages stuff */
...@@ -567,6 +571,7 @@ struct mlx5_priv { ...@@ -567,6 +571,7 @@ struct mlx5_priv {
struct mlx5_lag *lag; struct mlx5_lag *lag;
struct mlx5_devcom *devcom; struct mlx5_devcom *devcom;
unsigned long pci_dev_data; unsigned long pci_dev_data;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats; struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table; struct mlx5_rl_table rl_table;
...@@ -643,6 +648,7 @@ struct mlx5_fw_tracer; ...@@ -643,6 +648,7 @@ struct mlx5_fw_tracer;
struct mlx5_vxlan; struct mlx5_vxlan;
struct mlx5_core_dev { struct mlx5_core_dev {
struct device *device;
struct pci_dev *pdev; struct pci_dev *pdev;
/* sync pci state */ /* sync pci state */
struct mutex pci_status_mutex; struct mutex pci_status_mutex;
......
...@@ -73,6 +73,7 @@ enum mlx5_flow_namespace_type { ...@@ -73,6 +73,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_SNIFFER_RX, MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX, MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS, MLX5_FLOW_NAMESPACE_EGRESS,
MLX5_FLOW_NAMESPACE_RDMA_RX,
}; };
enum { enum {
......
...@@ -80,6 +80,19 @@ enum { ...@@ -80,6 +80,19 @@ enum {
MLX5_SHARED_RESOURCE_UID = 0xffff, MLX5_SHARED_RESOURCE_UID = 0xffff,
}; };
enum {
MLX5_OBJ_TYPE_SW_ICM = 0x0008,
};
enum {
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
};
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
};
enum { enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101, MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
...@@ -299,7 +312,11 @@ struct mlx5_ifc_flow_table_fields_supported_bits { ...@@ -299,7 +312,11 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_gre_protocol[0x1]; u8 outer_gre_protocol[0x1];
u8 outer_gre_key[0x1]; u8 outer_gre_key[0x1];
u8 outer_vxlan_vni[0x1]; u8 outer_vxlan_vni[0x1];
u8 reserved_at_1a[0x5]; u8 outer_geneve_vni[0x1];
u8 outer_geneve_oam[0x1];
u8 outer_geneve_protocol_type[0x1];
u8 outer_geneve_opt_len[0x1];
u8 reserved_at_1e[0x1];
u8 source_eswitch_port[0x1]; u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1]; u8 inner_dmac[0x1];
...@@ -327,7 +344,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits { ...@@ -327,7 +344,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_flags[0x1]; u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9]; u8 reserved_at_37[0x9];
u8 reserved_at_40[0x5]; u8 geneve_tlv_option_0_data[0x1];
u8 reserved_at_41[0x4];
u8 outer_first_mpls_over_udp[0x4]; u8 outer_first_mpls_over_udp[0x4];
u8 outer_first_mpls_over_gre[0x4]; u8 outer_first_mpls_over_gre[0x4];
u8 inner_first_mpls[0x4]; u8 inner_first_mpls[0x4];
...@@ -357,11 +375,14 @@ struct mlx5_ifc_flow_table_prop_layout_bits { ...@@ -357,11 +375,14 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 pop_vlan_2[0x1]; u8 pop_vlan_2[0x1];
u8 push_vlan_2[0x1]; u8 push_vlan_2[0x1];
u8 reformat_and_vlan_action[0x1]; u8 reformat_and_vlan_action[0x1];
u8 reserved_at_10[0x2]; u8 reserved_at_10[0x1];
u8 sw_owner[0x1];
u8 reformat_l3_tunnel_to_l2[0x1]; u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1]; u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1]; u8 reformat_and_modify_action[0x1];
u8 reserved_at_15[0xb]; u8 reserved_at_15[0x2];
u8 table_miss_action_domain[0x1];
u8 reserved_at_18[0x8];
u8 reserved_at_20[0x2]; u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6]; u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8]; u8 log_max_modify_header_context[0x8];
...@@ -469,7 +490,9 @@ struct mlx5_ifc_fte_match_set_misc_bits { ...@@ -469,7 +490,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 vxlan_vni[0x18]; u8 vxlan_vni[0x18];
u8 reserved_at_b8[0x8]; u8 reserved_at_b8[0x8];
u8 reserved_at_c0[0x20]; u8 geneve_vni[0x18];
u8 reserved_at_d8[0x7];
u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc]; u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14]; u8 outer_ipv6_flow_label[0x14];
...@@ -477,7 +500,11 @@ struct mlx5_ifc_fte_match_set_misc_bits { ...@@ -477,7 +500,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_100[0xc]; u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14]; u8 inner_ipv6_flow_label[0x14];
u8 reserved_at_120[0x28]; u8 reserved_at_120[0xa];
u8 geneve_opt_len[0x6];
u8 geneve_protocol_type[0x10];
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18]; u8 bth_dst_qp[0x18];
u8 reserved_at_160[0x20]; u8 reserved_at_160[0x20];
u8 outer_esp_spi[0x20]; u8 outer_esp_spi[0x20];
...@@ -507,6 +534,12 @@ struct mlx5_ifc_fte_match_set_misc2_bits { ...@@ -507,6 +534,12 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 reserved_at_1a0[0x60]; u8 reserved_at_1a0[0x60];
}; };
struct mlx5_ifc_fte_match_set_misc3_bits {
u8 reserved_at_0[0x120];
u8 geneve_tlv_option_0_data[0x20];
u8 reserved_at_140[0xc0];
};
struct mlx5_ifc_cmd_pas_bits { struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20]; u8 pa_h[0x20];
...@@ -589,7 +622,7 @@ struct mlx5_ifc_flow_table_nic_cap_bits { ...@@ -589,7 +622,7 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
u8 reserved_at_400[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
...@@ -770,7 +803,19 @@ struct mlx5_ifc_device_mem_cap_bits { ...@@ -770,7 +803,19 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 max_memic_size[0x20]; u8 max_memic_size[0x20];
u8 reserved_at_c0[0x740]; u8 steering_sw_icm_start_address[0x40];
u8 reserved_at_100[0x8];
u8 log_header_modify_sw_icm_size[0x8];
u8 reserved_at_110[0x2];
u8 log_sw_icm_alloc_granularity[0x6];
u8 log_steering_sw_icm_size[0x8];
u8 reserved_at_120[0x20];
u8 header_modify_sw_icm_start_address[0x40];
u8 reserved_at_180[0x680];
}; };
enum { enum {
...@@ -919,6 +964,7 @@ enum { ...@@ -919,6 +964,7 @@ enum {
enum { enum {
MLX5_UCTX_CAP_RAW_TX = 1UL << 0, MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
}; };
struct mlx5_ifc_cmd_hca_cap_bits { struct mlx5_ifc_cmd_hca_cap_bits {
...@@ -929,7 +975,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -929,7 +975,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq_sz[0x8]; u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8]; u8 log_max_qp_sz[0x8];
u8 reserved_at_90[0xb]; u8 reserved_at_90[0x8];
u8 prio_tag_required[0x1];
u8 reserved_at_99[0x2];
u8 log_max_qp[0x5]; u8 log_max_qp[0x5];
u8 reserved_at_a0[0xb]; u8 reserved_at_a0[0xb];
...@@ -1211,7 +1259,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1211,7 +1259,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 num_of_uars_per_page[0x20]; u8 num_of_uars_per_page[0x20];
u8 flex_parser_protocols[0x20]; u8 flex_parser_protocols[0x20];
u8 reserved_at_560[0x20];
u8 max_geneve_tlv_options[0x8];
u8 reserved_at_568[0x3];
u8 max_geneve_tlv_option_data_len[0x5];
u8 reserved_at_570[0x10];
u8 reserved_at_580[0x3c]; u8 reserved_at_580[0x3c];
u8 mini_cqe_resp_stride_index[0x1]; u8 mini_cqe_resp_stride_index[0x1];
...@@ -1247,7 +1299,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1247,7 +1299,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uctx_cap[0x20]; u8 uctx_cap[0x20];
u8 reserved_at_6c0[0x140]; u8 reserved_at_6c0[0x4];
u8 flex_parser_id_geneve_tlv_option_0[0x4];
u8 reserved_at_6c8[0x138];
}; };
enum mlx5_flow_destination_type { enum mlx5_flow_destination_type {
...@@ -1260,6 +1314,12 @@ enum mlx5_flow_destination_type { ...@@ -1260,6 +1314,12 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
}; };
enum mlx5_flow_table_miss_action {
MLX5_FLOW_TABLE_MISS_ACTION_DEF,
MLX5_FLOW_TABLE_MISS_ACTION_FWD,
MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
};
struct mlx5_ifc_dest_format_struct_bits { struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8]; u8 destination_type[0x8];
u8 destination_id[0x18]; u8 destination_id[0x18];
...@@ -1299,7 +1359,9 @@ struct mlx5_ifc_fte_match_param_bits { ...@@ -1299,7 +1359,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2; struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
u8 reserved_at_800[0x800]; struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
u8 reserved_at_a00[0x600];
}; };
enum { enum {
...@@ -2920,6 +2982,7 @@ enum { ...@@ -2920,6 +2982,7 @@ enum {
MLX5_MKC_ACCESS_MODE_MTT = 0x1, MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2, MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
MLX5_MKC_ACCESS_MODE_KSM = 0x3, MLX5_MKC_ACCESS_MODE_KSM = 0x3,
MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
MLX5_MKC_ACCESS_MODE_MEMIC = 0x5, MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
}; };
...@@ -4807,6 +4870,7 @@ enum { ...@@ -4807,6 +4870,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
}; };
struct mlx5_ifc_query_flow_group_out_bits { struct mlx5_ifc_query_flow_group_out_bits {
...@@ -6875,14 +6939,14 @@ struct mlx5_ifc_create_tis_in_bits { ...@@ -6875,14 +6939,14 @@ struct mlx5_ifc_create_tis_in_bits {
struct mlx5_ifc_create_tir_out_bits { struct mlx5_ifc_create_tir_out_bits {
u8 status[0x8]; u8 status[0x8];
u8 reserved_at_8[0x18]; u8 icm_address_63_40[0x18];
u8 syndrome[0x20]; u8 syndrome[0x20];
u8 reserved_at_40[0x8]; u8 icm_address_39_32[0x8];
u8 tirn[0x18]; u8 tirn[0x18];
u8 reserved_at_60[0x20]; u8 icm_address_31_0[0x20];
}; };
struct mlx5_ifc_create_tir_in_bits { struct mlx5_ifc_create_tir_in_bits {
...@@ -9492,6 +9556,33 @@ struct mlx5_ifc_uctx_bits { ...@@ -9492,6 +9556,33 @@ struct mlx5_ifc_uctx_bits {
u8 reserved_at_20[0x160]; u8 reserved_at_20[0x160];
}; };
struct mlx5_ifc_sw_icm_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x18];
u8 log_sw_icm_size[0x8];
u8 reserved_at_60[0x20];
u8 sw_icm_start_addr[0x40];
u8 reserved_at_c0[0x140];
};
struct mlx5_ifc_geneve_tlv_option_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x18];
u8 geneve_option_fte_index[0x8];
u8 option_class[0x10];
u8 option_type[0x8];
u8 reserved_at_78[0x3];
u8 option_data_length[0x5];
u8 reserved_at_80[0x180];
};
struct mlx5_ifc_create_umem_in_bits { struct mlx5_ifc_create_umem_in_bits {
u8 opcode[0x10]; u8 opcode[0x10];
u8 uid[0x10]; u8 uid[0x10];
...@@ -9529,6 +9620,16 @@ struct mlx5_ifc_destroy_uctx_in_bits { ...@@ -9529,6 +9620,16 @@ struct mlx5_ifc_destroy_uctx_in_bits {
u8 reserved_at_60[0x20]; u8 reserved_at_60[0x20];
}; };
struct mlx5_ifc_create_sw_icm_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_sw_icm_bits sw_icm;
};
struct mlx5_ifc_create_geneve_tlv_option_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt;
};
struct mlx5_ifc_mtrc_string_db_param_bits { struct mlx5_ifc_mtrc_string_db_param_bits {
u8 string_db_base_address[0x20]; u8 string_db_base_address[0x20];
......
...@@ -50,6 +50,9 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); ...@@ -50,6 +50,9 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn); u32 *tirn);
int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
u32 *in, int inlen,
u32 *out, int outlen);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
int inlen); int inlen);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment