Commit a70ed9d8 authored by Saeed Mahameed

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

This series adds some HW bits and definitions for the mlx5 driver, to be
used by downstream features in both the rdma and netdev branches.

* 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: HW bit for goto chain offload support
  net/mlx5: Expose link speed directly
  net/mlx5: Introduce TLS and IPSec objects enums
  net/mlx5: Introduce egress acl forward-to-vport capability
  net/mlx5: Expose raw packet pacing APIs
  net/mlx5e: Replace zero-length array with flexible-array member
  net/mlx5: fix spelling mistake "reserverd" -> "reserved"
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parents 34a568a2 e0ebd8eb
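
Note: as context for the packet pacing changes below, here is a minimal, hypothetical sketch of how a downstream consumer might drive the new raw API. The prototypes (mlx5_rl_add_rate_raw / mlx5_rl_remove_rate_raw), the set_pp_rate_limit_context fields and the MLX5_SET / MLX5_ST_SZ_BYTES macros come from this diff; the helper name and the parameter values are illustrative only, not part of this series.

/* Hypothetical consumer of the new raw packet pacing API (sketch only). */
static int example_request_pacing(struct mlx5_core_dev *mdev, u16 uid,
                                  u32 rate, u16 *rl_index)
{
        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};

        /* rate is in the device's packet-pacing units; burst_upper_bound and
         * typical_packet_size could be set the same way when the matching
         * capabilities are reported.
         */
        MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rate);

        /* dedicated_entry = true requests an entry that is not shared with
         * other users of the same rate.
         */
        return mlx5_rl_add_rate_raw(mdev, rl_raw, uid, true, rl_index);
}

The entry would later be released with mlx5_rl_remove_rate_raw(mdev, *rl_index).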
@@ -204,7 +204,7 @@ struct mlx5e_tx_wqe {
 struct mlx5e_rx_wqe_ll {
         struct mlx5_wqe_srq_next_seg next;
-        struct mlx5_wqe_data_seg data[0];
+        struct mlx5_wqe_data_seg data[];
 };
 
 struct mlx5e_rx_wqe_cyc {
......
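
Note on the data[0] -> data[] hunks above and below (the "Replace zero-length array with flexible-array member" patch): a C99 flexible array member contributes nothing to sizeof() and tells the compiler that the trailing storage is allocated by the caller. A stand-alone illustration of the same pattern, with a made-up struct name and not driver code:

#include <stdio.h>
#include <stdlib.h>

struct example_wqe {                 /* hypothetical stand-in for the WQE structs above */
        unsigned int num_segs;
        unsigned long long segs[];   /* was: segs[0] (GNU zero-length array) */
};

int main(void)
{
        unsigned int n = 4;
        /* The flexible member adds nothing to sizeof(); trailing space is
         * allocated explicitly. In-kernel this would typically be
         * kzalloc(struct_size(wqe, segs, n), GFP_KERNEL).
         */
        struct example_wqe *wqe = calloc(1, sizeof(*wqe) + n * sizeof(wqe->segs[0]));

        if (!wqe)
                return 1;
        wqe->num_segs = n;
        printf("header is %zu bytes, %u trailing segments\n", sizeof(*wqe), wqe->num_segs);
        free(wqe);
        return 0;
}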
@@ -57,7 +57,7 @@ struct mlx5_fpga_ipsec_cmd_context {
         struct completion complete;
         struct mlx5_fpga_device *dev;
         struct list_head list; /* Item in pending_cmds */
-        u8 command[0];
+        u8 command[];
 };
 
 struct mlx5_fpga_esp_xfrm;
......
@@ -470,7 +470,7 @@ struct mlx5_fc_bulk {
         u32 base_id;
         int bulk_len;
         unsigned long *bitmask;
-        struct mlx5_fc fcs[0];
+        struct mlx5_fc fcs[];
 };
 
 static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
......
@@ -56,7 +56,7 @@ struct mlx5i_priv {
         u32 qkey;
         u16 pkey_index;
         struct mlx5i_pkey_qpn_ht *qpn_htbl;
-        char *mlx5e_priv[0];
+        char *mlx5e_priv[];
 };
 
 int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
@@ -107,7 +107,7 @@ struct mlx5i_tx_wqe {
         struct mlx5_wqe_datagram_seg datagram;
         struct mlx5_wqe_eth_pad pad;
         struct mlx5_wqe_eth_seg eth;
-        struct mlx5_wqe_data_seg data[0];
+        struct mlx5_wqe_data_seg data[];
 };
 
 static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
......
@@ -42,7 +42,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
         MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
         MLX5_SET(encryption_key_obj, obj, key_type,
-                 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
+                 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS);
         MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                  MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
         MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
......
@@ -101,22 +101,39 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
         return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
+static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
+                                  u16 uid)
+{
+        return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
+                entry->uid == uid);
+}
+
 /* Finds an entry where we can register the given rate
  * If the rate already exists, return the entry where it is registered,
  * otherwise return the first available entry.
  * If the table is full, return NULL
  */
 static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
-                                           struct mlx5_rate_limit *rl)
+                                           void *rl_in, u16 uid, bool dedicated)
 {
         struct mlx5_rl_entry *ret_entry = NULL;
         bool empty_found = false;
         int i;
 
         for (i = 0; i < table->max_size; i++) {
-                if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
+                if (dedicated) {
+                        if (!table->rl_entry[i].refcount)
                                 return &table->rl_entry[i];
-                if (!empty_found && !table->rl_entry[i].rl.rate) {
+                        continue;
+                }
+
+                if (table->rl_entry[i].refcount) {
+                        if (table->rl_entry[i].dedicated)
+                                continue;
+                        if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
+                                                  uid))
+                                return &table->rl_entry[i];
+                } else if (!empty_found) {
                         empty_found = true;
                         ret_entry = &table->rl_entry[i];
                 }
@@ -126,18 +143,19 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
 }
 
 static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
-                                      u16 index,
-                                      struct mlx5_rate_limit *rl)
+                                      struct mlx5_rl_entry *entry, bool set)
 {
-        u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
-        u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+        u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
+        u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+        void *pp_context;
 
+        pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
         MLX5_SET(set_pp_rate_limit_in, in, opcode,
                  MLX5_CMD_OP_SET_PP_RATE_LIMIT);
-        MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
-        MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
-        MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
-        MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
+        MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
+        MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
+        if (set)
+                memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
         return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
@@ -158,23 +176,25 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
 }
 EXPORT_SYMBOL(mlx5_rl_are_equal);
 
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
-                     struct mlx5_rate_limit *rl)
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+                         bool dedicated_entry, u16 *index)
 {
         struct mlx5_rl_table *table = &dev->priv.rl_table;
         struct mlx5_rl_entry *entry;
         int err = 0;
+        u32 rate;
 
+        rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
         mutex_lock(&table->rl_lock);
-        if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
+        if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
                 mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
-                              rl->rate, table->min_rate, table->max_rate);
+                              rate, table->min_rate, table->max_rate);
                 err = -EINVAL;
                 goto out;
         }
 
-        entry = find_rl_entry(table, rl);
+        entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
         if (!entry) {
                 mlx5_core_err(dev, "Max number of %u rates reached\n",
                               table->max_size);
@@ -185,16 +205,24 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
                 /* rate already configured */
                 entry->refcount++;
         } else {
+                memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
+                entry->uid = uid;
                 /* new rate limit */
-                err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
+                err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
                 if (err) {
-                        mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
-                                      err, rl->rate, rl->max_burst_sz,
-                                      rl->typical_pkt_sz);
+                        mlx5_core_err(
+                                dev,
+                                "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+                                err, rate,
+                                MLX5_GET(set_pp_rate_limit_context, rl_in,
+                                         burst_upper_bound),
+                                MLX5_GET(set_pp_rate_limit_context, rl_in,
+                                         typical_packet_size));
                         goto out;
                 }
 
-                entry->rl = *rl;
                 entry->refcount = 1;
+                entry->dedicated = dedicated_entry;
         }
         *index = entry->index;
@@ -202,20 +230,61 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
         mutex_unlock(&table->rl_lock);
         return err;
 }
+EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
+
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
+{
+        struct mlx5_rl_table *table = &dev->priv.rl_table;
+        struct mlx5_rl_entry *entry;
+
+        mutex_lock(&table->rl_lock);
+        entry = &table->rl_entry[index - 1];
+        entry->refcount--;
+        if (!entry->refcount)
+                /* need to remove rate */
+                mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+        mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+                     struct mlx5_rate_limit *rl)
+{
+        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+                 rl->max_burst_sz);
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+                 rl->typical_pkt_sz);
+
+        return mlx5_rl_add_rate_raw(dev, rl_raw,
+                                    MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+                                            MLX5_SHARED_RESOURCE_UID : 0,
+                                    false, index);
+}
 EXPORT_SYMBOL(mlx5_rl_add_rate);
 
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
 {
+        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
         struct mlx5_rl_table *table = &dev->priv.rl_table;
         struct mlx5_rl_entry *entry = NULL;
-        struct mlx5_rate_limit reset_rl = {0};
 
         /* 0 is a reserved value for unlimited rate */
         if (rl->rate == 0)
                 return;
 
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+                 rl->max_burst_sz);
+        MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+                 rl->typical_pkt_sz);
+
         mutex_lock(&table->rl_lock);
-        entry = find_rl_entry(table, rl);
+        entry = find_rl_entry(table, rl_raw,
+                              MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+                              MLX5_SHARED_RESOURCE_UID : 0, false);
         if (!entry || !entry->refcount) {
                 mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
                                rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
@@ -223,11 +292,9 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
         }
 
         entry->refcount--;
-        if (!entry->refcount) {
+        if (!entry->refcount)
                 /* need to remove rate */
-                mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
-                entry->rl = reset_rl;
-        }
+                mlx5_set_pp_rate_limit_cmd(dev, entry, false);
 
 out:
         mutex_unlock(&table->rl_lock);
@@ -273,14 +340,13 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
 {
         struct mlx5_rl_table *table = &dev->priv.rl_table;
-        struct mlx5_rate_limit rl = {0};
         int i;
 
         /* Clear all configured rates */
         for (i = 0; i < table->max_size; i++)
-                if (table->rl_entry[i].rl.rate)
-                        mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
-                                                   &rl);
+                if (table->rl_entry[i].refcount)
+                        mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
+                                                   false);
 
         kfree(dev->priv.rl_table.rl_entry);
 }
@@ -518,9 +518,11 @@ struct mlx5_rate_limit {
 };
 
 struct mlx5_rl_entry {
-        struct mlx5_rate_limit rl;
+        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
         u16 index;
-        u16 refcount;
+        u64 refcount;
+        u16 uid;
+        u8 dedicated : 1;
 };
 
 struct mlx5_rl_table {
@@ -1008,6 +1010,9 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
                      struct mlx5_rate_limit *rl);
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+                         bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
 bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
                        struct mlx5_rate_limit *rl_1);
 int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
......
@@ -414,7 +414,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
         u8         reserved_at_16[0x1];
         u8         table_miss_action_domain[0x1];
         u8         termination_table[0x1];
-        u8         reserved_at_19[0x7];
+        u8         reformat_and_fwd_to_table[0x1];
+        u8         reserved_at_1a[0x6];
         u8         reserved_at_20[0x2];
         u8         log_max_ft_size[0x6];
         u8         log_max_modify_header_context[0x8];
@@ -741,7 +742,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
         u8         flow_source[0x1];
         u8         reserved_at_18[0x2];
         u8         multi_fdb_encap[0x1];
-        u8         reserved_at_1b[0x1];
+        u8         egress_acl_forward_to_vport[0x1];
         u8         fdb_multi_path_to_table[0x1];
         u8         reserved_at_1d[0x3];
@@ -813,7 +814,9 @@ struct mlx5_ifc_qos_cap_bits {
         u8         reserved_at_4[0x1];
         u8         packet_pacing_burst_bound[0x1];
         u8         packet_pacing_typical_size[0x1];
-        u8         reserved_at_7[0x19];
+        u8         reserved_at_7[0x4];
+        u8         packet_pacing_uid[0x1];
+        u8         reserved_at_c[0x14];
 
         u8         reserved_at_20[0x20];
@@ -8265,9 +8268,20 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits {
         u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_set_pp_rate_limit_context_bits {
+        u8         rate_limit[0x20];
+
+        u8         burst_upper_bound[0x20];
+
+        u8         reserved_at_40[0x10];
+        u8         typical_packet_size[0x10];
+
+        u8         reserved_at_60[0x120];
+};
+
 struct mlx5_ifc_set_pp_rate_limit_in_bits {
         u8         opcode[0x10];
-        u8         reserved_at_10[0x10];
+        u8         uid[0x10];
 
         u8         reserved_at_20[0x10];
         u8         op_mod[0x10];
@@ -8277,14 +8291,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
         u8         reserved_at_60[0x20];
 
-        u8         rate_limit[0x20];
-
-        u8         burst_upper_bound[0x20];
-
-        u8         reserved_at_c0[0x10];
-        u8         typical_packet_size[0x10];
-
-        u8         reserved_at_e0[0x120];
+        struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
 };
 
 struct mlx5_ifc_access_register_out_bits {
@@ -8420,7 +8427,8 @@ struct mlx5_ifc_ptys_reg_bits {
         u8         proto_mask[0x3];
 
         u8         an_status[0x4];
-        u8         reserved_at_24[0x1c];
+        u8         reserved_at_24[0xc];
+        u8         data_rate_oper[0x10];
 
         u8         ext_eth_proto_capability[0x20];
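
The PTYS hunk above (from "net/mlx5: Expose link speed directly") carves a 16-bit data_rate_oper field out of a previously reserved area, so the operational speed can be read straight from the register instead of being derived from the protocol bitmap. A hedged sketch, assuming a PTYS query output buffer obtained through the driver's existing access-register helpers (not shown); the helper name is illustrative:

/* Sketch only: ptys_out is assumed to hold the output of a PTYS access
 * register query performed elsewhere in the driver.
 */
static u32 example_ptys_oper_rate(void *ptys_out)
{
        return MLX5_GET(ptys_reg, ptys_out, data_rate_oper);
}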
@@ -10486,7 +10494,8 @@ enum {
 };
 
 enum {
-        MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1,
+        MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
+        MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
 };
 
 struct mlx5_ifc_tls_static_params_bits {
......
@@ -608,7 +608,7 @@ struct mlx5_ifc_tls_cmd_bits {
 struct mlx5_ifc_tls_resp_bits {
         u8         syndrome[0x20];
         u8         stream_id[0x20];
-        u8         reserverd[0x40];
+        u8         reserved[0x40];
 };
 
 #define MLX5_TLS_COMMAND_SIZE (0x100)
......