Commit 9152e0b7 authored by Eddie Wai, committed by Doug Ledford

RDMA/bnxt_re: HW workarounds for handling specific conditions

This patch implements the following HW workarounds:

1. The SQ depth needs to be augmented by 128 + 1 to avoid running
   into an out-of-order CQE issue.
2. Workaround to handle the problem where the HW fast path engine continues
   to access DMA memory in retransmission mode even after the WQE has
   already been completed. If the HW reports this condition, the driver
   detects it and posts a Fence WQE. The driver stops reporting completions
   to the stack until it receives the completion for the Fence WQE.
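Editor's note: the two checks behind these workarounds are small enough to model outside the driver. The sketch below is a simplified, self-contained illustration and not the driver code itself; the struct sketch_q layout, the sketch_* helpers, and the main() harness are hypothetical, while q_full_delta, BNXT_QPLIB_RESERVED_QP_WRS, BNXT_QPLIB_FENCE_WRID, and the "128 + 1" delta mirror the patch and commit message below.

/*
 * Illustrative sketch -- NOT driver code.  Models workaround 1 (reserved
 * queue slots) and workaround 2 (suppressing completions until the Fence
 * WQE completes) on a plain ring buffer.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BNXT_QPLIB_RESERVED_QP_WRS	128		/* WQEs kept back for the HW */
#define BNXT_QPLIB_FENCE_WRID		0x46454E43	/* "FENC" */

struct sketch_q {
	uint32_t prod;		/* producer index */
	uint32_t cons;		/* consumer index */
	uint32_t max_elements;	/* ring size, power of two */
	uint32_t q_full_delta;	/* slots deliberately left unused */
};

/* Workaround 1: treat the ring as full q_full_delta entries early, the same
 * shape as the bnxt_qplib_queue_full() helper added by this patch. */
static bool sketch_queue_full(const struct sketch_q *q)
{
	return ((q->prod + q->q_full_delta) & (q->max_elements - 1)) ==
	       (q->cons & (q->max_elements - 1));
}

/* Workaround 2: once a Fence WQE has been posted, suppress completions to
 * the stack until the fence's own completion (wr_id == "FENC") comes back. */
static bool sketch_report_completion(uint64_t wr_id, bool *waiting_for_fence)
{
	if (*waiting_for_fence) {
		if (wr_id == BNXT_QPLIB_FENCE_WRID)
			*waiting_for_fence = false;	/* fence completed */
		return false;				/* still suppressed */
	}
	return true;
}

int main(void)
{
	struct sketch_q sq = {
		.max_elements = 1024,
		.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1,	/* "128 + 1" */
	};
	bool waiting = true;

	sq.prod = sq.max_elements - sq.q_full_delta;	/* only reserved slots left */
	printf("queue full: %d\n", sketch_queue_full(&sq));
	printf("report 0x42: %d\n", sketch_report_completion(0x42, &waiting));
	printf("report FENC: %d\n",
	       sketch_report_completion(BNXT_QPLIB_FENCE_WRID, &waiting));
	printf("report 0x43: %d\n", sketch_report_completion(0x43, &waiting));
	return 0;
}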
Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent cc1ec769
@@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
 	u32 refcnt;
 };
 
+#define BNXT_RE_FENCE_BYTES	64
+struct bnxt_re_fence_data {
+	u32 size;
+	u8 va[BNXT_RE_FENCE_BYTES];
+	dma_addr_t dma_addr;
+	struct bnxt_re_mr *mr;
+	struct ib_mw *mw;
+	struct bnxt_qplib_swqe bind_wqe;
+	u32 bind_rkey;
+};
+
 struct bnxt_re_pd {
 	struct bnxt_re_dev *rdev;
 	struct ib_pd ib_pd;
 	struct bnxt_qplib_pd qplib_pd;
 	struct bnxt_qplib_dpi dpi;
+	struct bnxt_re_fence_data fence;
 };
 
 struct bnxt_re_ah {
@@ -181,6 +193,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
 			       u32 max_num_sg);
 int bnxt_re_dereg_mr(struct ib_mr *mr);
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+			       struct ib_udata *udata);
+int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 				 struct ib_fmr_attr *fmr_attr);
 int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
...
@@ -1083,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 		rc = -EINVAL;
 		goto done;
 	}
-	if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) ==
-	    HWQ_CMP(sq->hwq.cons, &sq->hwq)) {
+
+	if (bnxt_qplib_queue_full(sq)) {
+		dev_err(&sq->hwq.pdev->dev,
+			"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
+			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
+			sq->q_full_delta);
 		rc = -ENOMEM;
 		goto done;
 	}
@@ -1332,8 +1336,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 		rc = -EINVAL;
 		goto done;
 	}
-	if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) ==
-	    HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
+	if (bnxt_qplib_queue_full(rq)) {
 		dev_err(&rq->hwq.pdev->dev,
 			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
 		rc = -EINVAL;
@@ -1551,14 +1554,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
 	return rc;
 }
 
+/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
+ * CQE is track from sw_cq_cons to max_element but valid only if VALID=1
+ */
+static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
+{
+	struct bnxt_qplib_q *sq = &qp->sq;
+	struct bnxt_qplib_swq *swq;
+	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
+	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
+	struct cq_req *peek_req_hwcqe;
+	struct bnxt_qplib_qp *peek_qp;
+	struct bnxt_qplib_q *peek_sq;
+	int i, rc = 0;
+
+	/* Normal mode */
+	/* Check for the psn_search marking before completing */
+	swq = &sq->swq[sw_sq_cons];
+	if (swq->psn_search &&
+	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
+		/* Unmark */
+		swq->psn_search->flags_next_psn = cpu_to_le32
+			(le32_to_cpu(swq->psn_search->flags_next_psn)
+			 & ~0x80000000);
+		dev_dbg(&cq->hwq.pdev->dev,
+			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
+			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+		sq->condition = true;
+		sq->send_phantom = true;
+
+		/* TODO: Only ARM if the previous SQE is ARMALL */
+		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
+
+		rc = -EAGAIN;
+		goto out;
+	}
+	if (sq->condition) {
+		/* Peek at the completions */
+		peek_raw_cq_cons = cq->hwq.cons;
+		peek_sw_cq_cons = cq_cons;
+		i = cq->hwq.max_elements;
+		while (i--) {
+			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
+			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
+						     [CQE_IDX(peek_sw_cq_cons)];
+			/* If the next hwcqe is VALID */
+			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
+					  cq->hwq.max_elements)) {
+				/* If the next hwcqe is a REQ */
+				if ((peek_hwcqe->cqe_type_toggle &
+				    CQ_BASE_CQE_TYPE_MASK) ==
+				    CQ_BASE_CQE_TYPE_REQ) {
+					peek_req_hwcqe = (struct cq_req *)
+							 peek_hwcqe;
+					peek_qp = (struct bnxt_qplib_qp *)
+						((unsigned long)
+						 le64_to_cpu
+						 (peek_req_hwcqe->qp_handle));
+					peek_sq = &peek_qp->sq;
+					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
+						peek_req_hwcqe->sq_cons_idx) - 1
+						, &sq->hwq);
+					/* If the hwcqe's sq's wr_id matches */
+					if (peek_sq == sq &&
+					    sq->swq[peek_sq_cons_idx].wr_id ==
+					    BNXT_QPLIB_FENCE_WRID) {
+						/*
+						 * Unbreak only if the phantom
+						 * comes back
+						 */
+						dev_dbg(&cq->hwq.pdev->dev,
+							"FP:Got Phantom CQE");
+						sq->condition = false;
+						sq->single = true;
+						rc = 0;
+						goto out;
+					}
+				}
+				/* Valid but not the phantom, so keep looping */
+			} else {
+				/* Not valid yet, just exit and wait */
+				rc = -EINVAL;
+				goto out;
+			}
+			peek_sw_cq_cons++;
+			peek_raw_cq_cons++;
+		}
+		dev_err(&cq->hwq.pdev->dev,
+			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
+			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+		rc = -EINVAL;
+	}
+out:
+	return rc;
+}
+
 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 				     struct cq_req *hwcqe,
-				     struct bnxt_qplib_cqe **pcqe, int *budget)
+				     struct bnxt_qplib_cqe **pcqe, int *budget,
+				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
 {
 	struct bnxt_qplib_qp *qp;
 	struct bnxt_qplib_q *sq;
 	struct bnxt_qplib_cqe *cqe;
-	u32 sw_cons, cqe_cons;
+	u32 sw_sq_cons, cqe_sq_cons;
+	struct bnxt_qplib_swq *swq;
 	int rc = 0;
 
 	qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -1570,13 +1672,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 	}
 	sq = &qp->sq;
 
-	cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
-	if (cqe_cons > sq->hwq.max_elements) {
+	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
+	if (cqe_sq_cons > sq->hwq.max_elements) {
 		dev_err(&cq->hwq.pdev->dev,
 			"QPLIB: FP: CQ Process req reported ");
 		dev_err(&cq->hwq.pdev->dev,
 			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
-			cqe_cons, sq->hwq.max_elements);
+			cqe_sq_cons, sq->hwq.max_elements);
 		return -EINVAL;
 	}
 	/* If we were in the middle of flushing the SQ, continue */
@@ -1585,53 +1687,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 	/* Require to walk the sq's swq to fabricate CQEs for all previously
 	 * signaled SWQEs due to CQE aggregation from the current sq cons
-	 * to the cqe_cons
+	 * to the cqe_sq_cons
 	 */
 	cqe = *pcqe;
 	while (*budget) {
-		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
-		if (sw_cons == cqe_cons)
+		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
+		if (sw_sq_cons == cqe_sq_cons)
+			/* Done */
 			break;
+		swq = &sq->swq[sw_sq_cons];
 		memset(cqe, 0, sizeof(*cqe));
 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
 		cqe->qp_handle = (u64)(unsigned long)qp;
 		cqe->src_qp = qp->id;
-		cqe->wr_id = sq->swq[sw_cons].wr_id;
-		cqe->type = sq->swq[sw_cons].type;
+		cqe->wr_id = swq->wr_id;
+		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
+			goto skip;
+		cqe->type = swq->type;
 
 		/* For the last CQE, check for status. For errors, regardless
 		 * of the request being signaled or not, it must complete with
 		 * the hwcqe error status
 		 */
-		if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons &&
+		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
 		    hwcqe->status != CQ_REQ_STATUS_OK) {
 			cqe->status = hwcqe->status;
 			dev_err(&cq->hwq.pdev->dev,
 				"QPLIB: FP: CQ Processed Req ");
 			dev_err(&cq->hwq.pdev->dev,
 				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
-				sw_cons, cqe->wr_id, cqe->status);
+				sw_sq_cons, cqe->wr_id, cqe->status);
 			cqe++;
 			(*budget)--;
 			sq->flush_in_progress = true;
 			/* Must block new posting of SQ and RQ */
 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+			sq->condition = false;
+			sq->single = false;
 		} else {
-			if (sq->swq[sw_cons].flags &
-			    SQ_SEND_FLAGS_SIGNAL_COMP) {
+			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+				/* Before we complete, do WA 9060 */
+				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+					      cqe_sq_cons)) {
+					*lib_qp = qp;
+					goto out;
+				}
 				cqe->status = CQ_REQ_STATUS_OK;
 				cqe++;
 				(*budget)--;
 			}
 		}
+skip:
 		sq->hwq.cons++;
+		if (sq->single)
+			break;
 	}
+out:
 	*pcqe = cqe;
-	if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) {
+	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
 		/* Out of budget */
 		rc = -EAGAIN;
 		goto done;
 	}
+	/*
+	 * Back to normal completion mode only after it has completed all of
+	 * the WC for this CQE
+	 */
+	sq->single = false;
 	if (!sq->flush_in_progress)
 		goto done;
 flush:
@@ -1961,7 +2084,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
 }
 
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
-		       int num_cqes)
+		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
 {
 	struct cq_base *hw_cqe, **hw_cqe_ptr;
 	unsigned long flags;
@@ -1986,7 +2109,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		case CQ_BASE_CQE_TYPE_REQ:
 			rc = bnxt_qplib_cq_process_req(cq,
 						       (struct cq_req *)hw_cqe,
-						       &cqe, &budget);
+						       &cqe, &budget,
+						       sw_cons, lib_qp);
 			break;
 		case CQ_BASE_CQE_TYPE_RES_RC:
 			rc = bnxt_qplib_cq_process_res_rc(cq,
...
@@ -88,6 +88,7 @@ struct bnxt_qplib_swq {
 
 struct bnxt_qplib_swqe {
 	/* General */
+#define	BNXT_QPLIB_FENCE_WRID	0x46454E43	/* "FENC" */
 	u64 wr_id;
 	u8 reqs_type;
 	u8 type;
@@ -216,9 +217,16 @@ struct bnxt_qplib_q {
 	struct scatterlist *sglist;
 	u32 nmap;
 	u32 max_wqe;
+	u16 q_full_delta;
 	u16 max_sge;
 	u32 psn;
 	bool flush_in_progress;
+	bool condition;
+	bool single;
+	bool send_phantom;
+	u32 phantom_wqe_cnt;
+	u32 phantom_cqe_cnt;
+	u32 next_cq_cons;
 };
 
 struct bnxt_qplib_qp {
@@ -301,6 +309,13 @@ struct bnxt_qplib_qp {
 	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
 	   !((raw_cons) & (cp_bit)))
 
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
+{
+	return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
+		       &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
+						 &qplib_q->hwq);
+}
+
 struct bnxt_qplib_cqe {
 	u8 status;
 	u8 type;
@@ -432,7 +447,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
-		       int num);
+		       int num, struct bnxt_qplib_qp **qp);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
...
@@ -52,7 +52,6 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
 				((HWQ_CMP(hwq->prod, hwq)\
 				- HWQ_CMP(hwq->cons, hwq))\
 				& (hwq->max_elements - 1)))
-
 enum bnxt_qplib_hwq_type {
 	HWQ_TYPE_CTX,
 	HWQ_TYPE_QUEUE,
...
@@ -88,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
 	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
+	/*
+	 * 128 WQEs needs to be reserved for the HW (8916). Prevent
+	 * reporting the max number
+	 */
+	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
 	attr->max_qp_sges = sb->max_sge;
 	attr->max_cq = le32_to_cpu(sb->max_cq);
 	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
...
@@ -40,6 +40,8 @@
 #ifndef __BNXT_QPLIB_SP_H__
 #define __BNXT_QPLIB_SP_H__
 
+#define BNXT_QPLIB_RESERVED_QP_WRS	128
+
 struct bnxt_qplib_dev_attr {
 	char fw_ver[32];
 	u16 max_sgid;
...
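Editor's note: bnxt_qplib_poll_cq() now takes a struct bnxt_qplib_qp ** out-parameter so the caller can learn which QP is waiting on the phantom/Fence completion. The sketch below only illustrates that caller-side pattern; every type and function here (the mock_* names, the simulated poll) is hypothetical, and the real consumer lives in the driver's verbs layer, which is not shown in the hunks above.

/*
 * Illustrative sketch of the caller-side pattern implied by the new lib_qp
 * out-parameter -- NOT driver code.
 */
#include <stdio.h>

struct mock_qp { int id; };
struct mock_cqe { int opcode; };

/* Pretend poll: returns the number of CQEs reported and may hand back a QP
 * that needs a fence/phantom WQE before its completions can be reported. */
static int mock_poll_cq(struct mock_cqe *cqe, int num, struct mock_qp **lib_qp)
{
	static struct mock_qp stalled = { .id = 7 };

	(void)cqe;
	(void)num;
	*lib_qp = &stalled;	/* simulate the WA-9060 condition */
	return 0;
}

static void mock_post_fence_wqe(struct mock_qp *qp)
{
	printf("posting fence WQE on QP %d\n", qp->id);
}

int main(void)
{
	struct mock_cqe cqe[16];
	struct mock_qp *lib_qp = NULL;
	int ncqe = mock_poll_cq(cqe, 16, &lib_qp);

	/* The pattern: after each poll, check whether the library flagged a
	 * QP that is waiting on the phantom/fence completion and post one. */
	if (lib_qp)
		mock_post_fence_wqe(lib_qp);

	printf("reported %d CQEs\n", ncqe);
	return 0;
}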