Commit c491ded0 authored by Jianbo Liu, committed by Saeed Mahameed

net/mlx5: Implement interfaces to control ASO SQ and CQ

Add interfaces to use the ASO object control channel. The channel consists
of a control SQ and CQ to which the user can post ACCESS_ASO work requests
to modify ASO objects. Functions are provided to get a WQE from the SQ,
fill it in, post the request, and poll for completion of the work.
Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Reviewed-by: Ariel Levkovich <lariel@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent cdd04f4d
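
To illustrate the flow described in the commit message, here is a minimal, hypothetical caller sketch (not part of the patch). It assumes the declarations added in the header hunk below; the pdn, obj_id, opc_mode arguments and the 10 ms poll budget are placeholder values chosen by the caller, and the ASO control-segment contents are left out.

/* Hypothetical usage sketch, for illustration only: drives one ACCESS_ASO
 * request through the control channel. obj_id, opc_mode and the 10 ms poll
 * interval are placeholders.
 */
static int example_aso_update(struct mlx5_core_dev *mdev, u32 pdn,
                              u32 obj_id, u32 opc_mode)
{
        u8 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS);
        struct mlx5_aso_wqe *wqe;
        struct mlx5_aso *aso;
        int err;

        aso = mlx5_aso_create(mdev, pdn);       /* control SQ + CQ */
        if (IS_ERR(aso))
                return PTR_ERR(aso);

        wqe = mlx5_aso_get_wqe(aso);            /* next WQE slot in the SQ */
        mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id, opc_mode);

        /* ... fill wqe->aso_ctrl (data_mask_mode, conditions, data) here ... */

        mlx5_aso_post_wqe(aso, false, &wqe->ctrl);      /* ring the doorbell */
        err = mlx5_aso_poll_cq(aso, false, 10);         /* wait up to 10 ms */

        mlx5_aso_destroy(aso);
        return err;
}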
@@ -334,3 +334,100 @@ void mlx5_aso_destroy(struct mlx5_aso *aso)
        mlx5_aso_destroy_cq(&aso->cq);
        kfree(aso);
}

void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
                        struct mlx5_aso_wqe *aso_wqe,
                        u32 obj_id, u32 opc_mode)
{
        struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;

        cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
                                             (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                                             MLX5_OPCODE_ACCESS_ASO);
        cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
        cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        cseg->general_id = cpu_to_be32(obj_id);
}

void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
        u16 pi;

        pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
        return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
}

void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
                       struct mlx5_wqe_ctrl_seg *doorbell_cseg)
{
        doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();

        if (with_data)
                aso->pc += MLX5_ASO_WQEBBS_DATA;
        else
                aso->pc += MLX5_ASO_WQEBBS;
        *aso->wq.db = cpu_to_be32(aso->pc);

        /* ensure doorbell record is visible to device before ringing the
         * doorbell
         */
        wmb();
        mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);

        /* Ensure doorbell is written on uar_page before poll_cq */
        WRITE_ONCE(doorbell_cseg, NULL);
}

int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
{
        struct mlx5_aso_cq *cq = &aso->cq;
        struct mlx5_cqe64 *cqe;
        unsigned long expires;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);

        expires = jiffies + msecs_to_jiffies(interval_ms);
        while (!cqe && time_is_after_jiffies(expires)) {
                usleep_range(2, 10);
                cqe = mlx5_cqwq_get_cqe(&cq->wq);
        }

        if (!cqe)
                return -ETIMEDOUT;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        mlx5_cqwq_pop(&cq->wq);

        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                struct mlx5_err_cqe *err_cqe;

                mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
                              get_cqe_opcode(cqe));

                err_cqe = (struct mlx5_err_cqe *)cqe;
                mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
                              err_cqe->vendor_err_synd);
                mlx5_core_err(cq->mdev, "syndrome=%x\n",
                              err_cqe->syndrome);
                print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
                               16, 1, err_cqe,
                               sizeof(*err_cqe), false);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        if (with_data)
                aso->cc += MLX5_ASO_WQEBBS_DATA;
        else
                aso->cc += MLX5_ASO_WQEBBS;

        return 0;
}
@@ -4,10 +4,75 @@
#ifndef __MLX5_LIB_ASO_H__
#define __MLX5_LIB_ASO_H__

#include <linux/mlx5/qp.h>
#include "mlx5_core.h"

#define MLX5_ASO_WQEBBS \
        (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
#define MLX5_ASO_WQEBBS_DATA \
        (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24

struct mlx5_wqe_aso_ctrl_seg {
        __be32  va_h;
        __be32  va_l; /* include read_enable */
        __be32  l_key;
        u8      data_mask_mode;
        u8      condition_1_0_operand;
        u8      condition_1_0_offset;
        u8      data_offset_condition_operand;
        __be32  condition_0_data;
        __be32  condition_0_mask;
        __be32  condition_1_data;
        __be32  condition_1_mask;
        __be64  bitwise_data;
        __be64  data_mask;
};

struct mlx5_wqe_aso_data_seg {
        __be32  bytewise_data[16];
};

struct mlx5_aso_wqe {
        struct mlx5_wqe_ctrl_seg      ctrl;
        struct mlx5_wqe_aso_ctrl_seg  aso_ctrl;
};

struct mlx5_aso_wqe_data {
        struct mlx5_wqe_ctrl_seg      ctrl;
        struct mlx5_wqe_aso_ctrl_seg  aso_ctrl;
        struct mlx5_wqe_aso_data_seg  aso_data;
};

enum {
        MLX5_ASO_ALWAYS_FALSE,
        MLX5_ASO_ALWAYS_TRUE,
        MLX5_ASO_EQUAL,
        MLX5_ASO_NOT_EQUAL,
        MLX5_ASO_GREATER_OR_EQUAL,
        MLX5_ASO_LESSER_OR_EQUAL,
        MLX5_ASO_LESSER,
        MLX5_ASO_GREATER,
        MLX5_ASO_CYCLIC_GREATER,
        MLX5_ASO_CYCLIC_LESSER,
};

enum {
        MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
        MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
        MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
};

struct mlx5_aso;

void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
                        struct mlx5_aso_wqe *aso_wqe,
                        u32 obj_id, u32 opc_mode);
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
                       struct mlx5_wqe_ctrl_seg *doorbell_cseg);
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);

struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
void mlx5_aso_destroy(struct mlx5_aso *aso);

#endif /* __MLX5_LIB_ASO_H__ */