Commit 5c312574 authored by Paolo Abeni

Merge tag 'mlx5-fixes-2023-01-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================

This series provides bug fixes for the mlx5 driver.

* tag 'mlx5-fixes-2023-01-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net: mlx5: eliminate anonymous module_init & module_exit
  net/mlx5: E-switch, Fix switchdev mode after devlink reload
  net/mlx5e: Protect global IPsec ASO
  net/mlx5e: Remove optimization which prevented update of ESN state
  net/mlx5e: Set decap action based on attr for sample
  net/mlx5e: QoS, Fix wrongfully setting parent_element_id on MODIFY_SCHEDULING_ELEMENT
  net/mlx5: E-switch, Fix setting of reserved fields on MODIFY_SCHEDULING_ELEMENT
  net/mlx5e: Remove redundant xsk pointer check in mlx5e_mpwrq_validate_xsk
  net/mlx5e: Avoid false lock dependency warning on tc_ht even more
  net/mlx5: fix missing mutex_unlock in mlx5_fw_fatal_reporter_err_work()
====================

Link: https://lore.kernel.org/r/
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 55ba18dc 2c1e1b94
@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
 		if (child->bw_share == old_bw_share)
 			continue;
 
-		err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
+		err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
					       child->max_average_bw, child->hw_id);
 		if (!err && err_one) {
 			err = err_one;
@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
 	mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
 	mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
 
-	err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+	err = mlx5_qos_update_node(htb->mdev, bw_share,
				   max_average_bw, node->hw_id);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
...
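Note on the two hunks above: mlx5_qos_update_node() loses its parent_id argument because, for the MODIFY_SCHEDULING_ELEMENT command, the parent_element_id field is reserved and must not be set; only the fields selected by the modify bitmask may be written. The matching helper and header changes appear in the qos.c and qos.h hunks at the end of this merge, followed by a reconstructed sketch of the fixed helper.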
@@ -578,7 +578,6 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
 {
 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-	bool unaligned = xsk ? xsk->unaligned : false;
 	u16 max_mtu_pkts;
 
 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
@@ -591,7 +590,7 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
 	 * needed number of WQEs exceeds the maximum.
 	 */
 	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
-			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned));
+			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned));
 	if (params->log_rq_mtu_frames > max_mtu_pkts) {
 		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
 			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
...
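The removed ternary was dead code: this path is only reached with a non-NULL xsk, and the function already dereferences xsk unconditionally a few lines later (xsk->chunk_size in the error message), so the NULL guard only gave a false sense of safety.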
@@ -477,7 +477,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 	struct mlx5e_sample_flow *sample_flow;
 	struct mlx5e_sample_attr *sample_attr;
 	struct mlx5_flow_attr *pre_attr;
-	u32 tunnel_id = attr->tunnel_id;
 	struct mlx5_eswitch *esw;
 	u32 default_tbl_id;
 	u32 obj_id;
@@ -522,7 +521,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 	restore_obj.sample.group_id = sample_attr->group_num;
 	restore_obj.sample.rate = sample_attr->rate;
 	restore_obj.sample.trunc_size = sample_attr->trunc_size;
-	restore_obj.sample.tunnel_id = tunnel_id;
+	restore_obj.sample.tunnel_id = attr->tunnel_id;
 	err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
 	if (err)
 		goto err_obj_id;
@@ -548,7 +547,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 	/* For decap action, do decap in the original flow table instead of the
 	 * default flow table.
 	 */
-	if (tunnel_id)
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
 		pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
 	pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
 	pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
...
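Rationale for the last hunk: a non-zero tunnel_id only means the flow matched on tunnel headers and the ID must be restored via reg_c0; it does not imply the flow decapsulates. Keying the pre-table DECAP action off attr->action keeps sample offload correct for tunnel flows that forward without decap.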
@@ -122,11 +122,8 @@ struct mlx5e_ipsec_aso {
 	u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
 	dma_addr_t dma_addr;
 	struct mlx5_aso *aso;
-	/* IPsec ASO caches data on every query call,
-	 * so in nested calls, we can use this boolean to save
-	 * recursive calls to mlx5e_ipsec_aso_query()
-	 */
-	u8 use_cache : 1;
+	/* Protect ASO WQ access, as it is global to whole IPsec */
+	spinlock_t lock;
 };
 
 struct mlx5e_ipsec {
...
@@ -320,7 +320,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
 	if (ret)
 		goto unlock;
 
-	aso->use_cache = true;
 	if (attrs->esn_trigger &&
 	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
 		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
@@ -333,7 +332,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
 	    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
 	    !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
 		xfrm_state_check_expire(sa_entry->x);
-	aso->use_cache = false;
 
 unlock:
 	spin_unlock(&sa_entry->x->lock);
@@ -398,6 +396,7 @@ int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
 		goto err_aso_create;
 	}
 
+	spin_lock_init(&aso->lock);
 	ipsec->nb.notifier_call = mlx5e_ipsec_event;
 	mlx5_notifier_register(mdev, &ipsec->nb);
 
@@ -456,13 +455,12 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
 	struct mlx5e_hw_objs *res;
 	struct mlx5_aso_wqe *wqe;
 	u8 ds_cnt;
+	int ret;
 
 	lockdep_assert_held(&sa_entry->x->lock);
-	if (aso->use_cache)
-		return 0;
-
 	res = &mdev->mlx5e_res.hw_objs;
 
+	spin_lock_bh(&aso->lock);
 	memset(aso->ctx, 0, sizeof(aso->ctx));
 	wqe = mlx5_aso_get_wqe(aso->aso);
 	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
@@ -477,7 +475,9 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
 	mlx5e_ipsec_aso_copy(ctrl, data);
 
 	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
-	return mlx5_aso_poll_cq(aso->aso, false);
+	ret = mlx5_aso_poll_cq(aso->aso, false);
+	spin_unlock_bh(&aso->lock);
+	return ret;
 }
 
 void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
...
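The pattern behind this fix, as a minimal sketch: the ASO work queue and its context buffer are one per IPsec instance, while the xfrm state lock held by callers is per SA, so concurrent queries for different SAs raced on the shared WQ. A dedicated lock must bracket the whole build/post/poll sequence. The struct and function below are hypothetical stand-ins, not the driver's real types:

#include <linux/spinlock.h>
#include <linux/string.h>

/* Hypothetical stand-in for a queue shared by many SAs. */
struct shared_hw_queue {
	spinlock_t lock;	/* serializes ctx and the WQE post/poll below */
	u8 ctx[64];
};

static int shared_hw_queue_query(struct shared_hw_queue *q)
{
	int ret;

	/* The _bh flavor mirrors the fix; the queue is reachable from
	 * softirq-adjacent paths as well as process context.
	 */
	spin_lock_bh(&q->lock);
	memset(q->ctx, 0, sizeof(q->ctx));
	/* ... build a WQE from q->ctx, post it, then poll the CQ ... */
	ret = 0;
	spin_unlock_bh(&q->lock);
	return ret;
}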
@@ -166,6 +166,7 @@ struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
  * it's different than the ht->mutex here.
  */
 static struct lock_class_key tc_ht_lock_key;
+static struct lock_class_key tc_ht_wq_key;
 
 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
 static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
@@ -5182,6 +5183,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 		return err;
 
 	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
+	lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
 
 	mapping_id = mlx5_query_nic_system_image_guid(dev);
 
@@ -5288,6 +5290,7 @@ int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
 		return err;
 
 	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+	lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
 
 	return 0;
 }
...
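Background for these hunks: mlx5 keeps two TC hashtables (one in the NIC profile, one in the uplink representor profile), and rhashtable gives every instance's deferred-resize work item the same workqueue lockdep class, so lockdep chained unrelated flush dependencies into a false deadlock report. Assigning a dedicated lock_class_key to the work's lockdep map splits the class. A generic sketch of the same technique, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/rhashtable.h>

static struct lock_class_key my_ht_wq_key;

static int my_ht_init(struct rhashtable *ht,
		      const struct rhashtable_params *params)
{
	int err = rhashtable_init(ht, params);

	if (err)
		return err;

	/* run_work is rhashtable's internal deferred-resize worker; give
	 * it its own lockdep class so flushing one table is not confused
	 * with flushing another.
	 */
	lockdep_init_map(&ht->run_work.lockdep_map, "my_ht_wq_key",
			 &my_ht_wq_key, 0);
	return 0;
}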
@@ -22,15 +22,13 @@ struct mlx5_esw_rate_group {
 };
 
 static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
-			       u32 parent_ix, u32 tsar_ix,
-			       u32 max_rate, u32 bw_share)
+			       u32 tsar_ix, u32 max_rate, u32 bw_share)
 {
 	u32 bitmask = 0;
 
 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
 		return -EOPNOTSUPP;
 
-	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix);
 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
 	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
@@ -51,7 +49,7 @@ static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_g
 	int err;
 
 	err = esw_qos_tsar_config(dev, sched_ctx,
-				  esw->qos.root_tsar_ix, group->tsar_ix,
+				  group->tsar_ix,
				  max_rate, bw_share);
 	if (err)
 		NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
@@ -67,23 +65,13 @@ static int esw_qos_vport_config(struct mlx5_eswitch *esw,
 				struct netlink_ext_ack *extack)
 {
 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
-	struct mlx5_esw_rate_group *group = vport->qos.group;
 	struct mlx5_core_dev *dev = esw->dev;
-	u32 parent_tsar_ix;
-	void *vport_elem;
 	int err;
 
 	if (!vport->qos.enabled)
 		return -EIO;
 
-	parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
-	MLX5_SET(scheduling_context, sched_ctx, element_type,
-		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
-				  element_attributes);
-	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
-	err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix,
+	err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
				  max_rate, bw_share);
 	if (err) {
 		esw_warn(esw->dev,
...
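Same reserved-field rule as the qos.c change: for MODIFY_SCHEDULING_ELEMENT, element_type, element_attributes and parent_element_id are all reserved, so esw_qos_vport_config() must not re-initialize the vport element or pass a parent index when modifying an existing scheduling element; only bw_share and max_average_bw, the fields flagged in the modify bitmask, are written.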
@@ -1464,6 +1464,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 	mlx5_lag_disable_change(esw->dev);
 	down_write(&esw->mode_lock);
 	mlx5_eswitch_disable_locked(esw);
+	esw->mode = MLX5_ESWITCH_LEGACY;
 	up_write(&esw->mode_lock);
 	mlx5_lag_enable_change(esw->dev);
 }
...
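The one-line eswitch fix above restores an invariant: disable must leave esw->mode at the documented default so the enable performed during devlink reload starts from a known state instead of a stale MLX5_ESWITCH_OFFLOADS value. A simplified, hypothetical sketch of the pattern:

#include <linux/rwsem.h>

enum sw_mode { MODE_LEGACY, MODE_SWITCHDEV };

struct sw_state {
	struct rw_semaphore mode_lock;
	enum sw_mode mode;
};

static void sw_disable(struct sw_state *sw)
{
	down_write(&sw->mode_lock);
	/* ... tear down offloads, FDB, representors ... */
	sw->mode = MODE_LEGACY;	/* reset to the default, as the fix does */
	up_write(&sw->mode_lock);
}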
@@ -677,6 +677,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
 	mutex_lock(&dev->intf_state_mutex);
 	if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
 		mlx5_core_err(dev, "health works are not permitted at this stage\n");
+		mutex_unlock(&dev->intf_state_mutex);
 		return;
 	}
 	mutex_unlock(&dev->intf_state_mutex);
...
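This is the classic lock-leak-on-early-return bug: the error path returned with intf_state_mutex still held, blocking every later acquisition. A minimal sketch of the rule, with hypothetical names (every return taken while the lock is held must drop it first):

#include <linux/mutex.h>
#include <linux/printk.h>

static DEFINE_MUTEX(state_mutex);

static void fatal_err_work(bool drop_new_work)
{
	mutex_lock(&state_mutex);
	if (drop_new_work) {
		pr_err("health works are not permitted at this stage\n");
		mutex_unlock(&state_mutex);	/* the line the fix adds */
		return;
	}
	mutex_unlock(&state_mutex);
	/* ... proceed with recovery ... */
}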
@@ -2098,7 +2098,7 @@ static void mlx5_core_verify_params(void)
 	}
 }
 
-static int __init init(void)
+static int __init mlx5_init(void)
 {
 	int err;
 
@@ -2133,7 +2133,7 @@ static int __init init(void)
 	return err;
 }
 
-static void __exit cleanup(void)
+static void __exit mlx5_cleanup(void)
 {
 	mlx5e_cleanup();
 	mlx5_sf_driver_unregister();
@@ -2141,5 +2141,5 @@ static void __exit cleanup(void)
 	mlx5_unregister_debugfs();
 }
 
-module_init(init);
-module_exit(cleanup);
+module_init(mlx5_init);
+module_exit(mlx5_cleanup);
@@ -62,13 +62,12 @@ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
 	return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
 }
 
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
			 u32 bw_share, u32 max_avg_bw, u32 id)
 {
 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
 	u32 bitmask = 0;
 
-	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
 	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
 
...
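For reference, the fixed helper pieced together from the hunk above; the trailing mlx5_modify_scheduling_element_cmd() call and the two bitmask lines are assumptions based on the surrounding code, since this merge only shows the MLX5_SET() region:

int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
			 u32 bw_share, u32 max_avg_bw, u32 id)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	u32 bitmask = 0;

	/* parent_element_id is reserved for MODIFY_SCHEDULING_ELEMENT and
	 * must stay zero, hence no MLX5_SET() for it.
	 */
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;	/* assumed */
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;	/* assumed */

	return mlx5_modify_scheduling_element_cmd(mdev,		/* assumed tail */
						  SCHEDULING_HIERARCHY_NIC,
						  sched_ctx, id, bitmask);
}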
@@ -23,7 +23,7 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
 int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
			       u32 bw_share, u32 max_avg_bw, u32 *id);
 int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share,
			 u32 max_avg_bw, u32 id);
 int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
...