Commit d109d83f authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "This is a pretty small pull request. Only 6 patches in total. There
  are no outstanding -rc patches on the mailing list after this pull
  request, so only if some new issues are discovered in the remainder of
  the rc cycles will you hear from me again.

  Summary:
   - a fix for iwpm netlink usage
   - a fix for error unwinding in mlx5
   - two fixes to vlan handling in qedr
   - a couple small i40iw fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  i40iw: Fix port number for query QP
  i40iw: Add missing memory barriers
  RDMA/qedr: Parse vlan priority as sl
  RDMA/qedr: Parse VLAN ID correctly and ignore the value of zero
  IB/mlx5: Fix label order in error path handling
  RDMA/iwpm: Properly mark end of NL messages
parents bf2db0b9 789f903f
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 	if (ret)
 		goto pid_query_error;
 
+	nlmsg_end(skb, nlh);
+
 	pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
 		 __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 		      &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
 	if (ret)
 		goto add_mapping_error;
+
+	nlmsg_end(skb, nlh);
 	nlmsg_request->req_buffer = pm_msg;
 
 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 		      &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
 	if (ret)
 		goto query_mapping_error;
+
+	nlmsg_end(skb, nlh);
 	nlmsg_request->req_buffer = pm_msg;
 
 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
 	if (ret)
 		goto remove_mapping_error;
 
+	nlmsg_end(skb, nlh);
+
 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
 	if (ret) {
 		skb = NULL; /* skb is freed in the netlink send-op handling */
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
 		      &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
 	if (ret)
 		goto mapinfo_num_error;
+
+	nlmsg_end(skb, nlh);
+
 	ret = rdma_nl_unicast(skb, iwpm_pid);
 	if (ret) {
 		skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
 		if (ret)
 			goto send_mapping_info_unlock;
 
+		nlmsg_end(skb, nlh);
+
 		iwpm_print_sockaddr(&map_info->local_sockaddr,
 				    "send_mapping_info: Local sockaddr:");
 		iwpm_print_sockaddr(&map_info->mapped_sockaddr,
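
All six hunks above add the same missing step: a netlink message built with nlmsg_put() carries a provisional length, and nlmsg_end() must stamp the final nlmsg_len once the attributes are in place, before rdma_nl_unicast()/multicast hands the skb off. Without it the receiver sees a header-only message and ignores every attribute that follows. Below is a minimal userspace sketch of that construct/finalize pattern; the fake_ types and msg_* helpers are illustrative stand-ins for struct sk_buff, struct nlmsghdr and the kernel helpers, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_nlmsghdr {
	uint32_t nlmsg_len;	/* total message length, header included */
	uint16_t nlmsg_type;
};

struct fake_skb {
	unsigned char data[256];
	unsigned char *tail;	/* one past the last byte written */
};

/* analogue of nlmsg_put(): reserve the header; length is unknown so far */
static struct fake_nlmsghdr *msg_begin(struct fake_skb *skb, uint16_t type)
{
	struct fake_nlmsghdr *nlh = (struct fake_nlmsghdr *)skb->tail;

	nlh->nlmsg_type = type;
	nlh->nlmsg_len = 0;	/* not final until msg_end() */
	skb->tail += sizeof(*nlh);
	return nlh;
}

/* analogue of nla_put(): append one attribute payload
 * (real netlink also pads attributes to 4-byte alignment; elided here) */
static void msg_put_attr(struct fake_skb *skb, const void *p, size_t n)
{
	memcpy(skb->tail, p, n);
	skb->tail += n;
}

/* analogue of nlmsg_end(): stamp the real length now that it is known */
static void msg_end(struct fake_skb *skb, struct fake_nlmsghdr *nlh)
{
	nlh->nlmsg_len = (uint32_t)(skb->tail - (unsigned char *)nlh);
}

int main(void)
{
	struct fake_skb skb;
	struct fake_nlmsghdr *nlh;
	uint32_t attr = 0xabcd;

	skb.tail = skb.data;
	nlh = msg_begin(&skb, 42);
	msg_put_attr(&skb, &attr, sizeof(attr));
	msg_end(&skb, nlh);	/* the step the iwpm paths were missing */
	printf("nlmsg_len = %u\n", nlh->nlmsg_len);	/* 8 + 4 = 12 */
	return 0;
}

The finalize step cannot happen any earlier: the length is only known once the last attribute has been appended, which is why it belongs immediately before the send in each fixed path.
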
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -48,7 +48,7 @@
  * @wqe: cqp wqe for header
  * @header: header for the cqp wqe
  */
-static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
 {
 	wmb();			/* make sure WQE is populated before polarity is set */
 	set_64bit_val(wqe, 24, header);
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
 					      struct i40iw_fast_reg_stag_info *info,
 					      bool post_sq);
 
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
+
 /* HMC/FPM functions */
 enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
 					    u8 hmc_fn_id);
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
 	get_64bit_val(wqe, 24, &offset24);
 
 	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
-	set_64bit_val(wqe, 24, offset24);
 
 	set_64bit_val(wqe, 0, buf->mem.pa);
 	set_64bit_val(wqe, 8,
 		      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
-	set_64bit_val(wqe, 24, offset24);
+	i40iw_insert_wqe_hdr(wqe, offset24);
 }
 
 /**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
 	set_64bit_val(wqe, 16, header[0]);
 
-	/* Ensure all data is written before writing valid bit */
-	wmb();
-	set_64bit_val(wqe, 24, header[1]);
+	i40iw_insert_wqe_hdr(wqe, header[1]);
 
 	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
 	i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
 		 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
 		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 
-	set_64bit_val(wqe, 24, header);
+	i40iw_insert_wqe_hdr(wqe, header);
 
 	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
 	i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
 		 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
 		 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
 		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 
-	set_64bit_val(wqe, 24, header);
+	i40iw_insert_wqe_hdr(wqe, header);
 
 	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
 			wqe, I40IW_CQP_WQE_SIZE * 8);
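
The i40iw hunks all funnel header writes through i40iw_insert_wqe_hdr(), whose wmb() guarantees the WQE body is globally visible before the word carrying the valid/polarity bit that the hardware polls; the open-coded set_64bit_val(wqe, 24, ...) stores had no such ordering. A hedged userspace sketch of the same publish pattern follows, with C11 release/acquire ordering standing in for wmb() and a reader standing in for the hardware; the fake_ names are illustrative, not the driver's.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_VALID (1ULL << 63)		/* stand-in for the valid/polarity bit */

struct fake_wqe {
	uint64_t frag[3];		/* words 0, 8, 16: address, length, op */
	_Atomic uint64_t header;	/* word 24: the word the consumer polls */
};

/* analogue of i40iw_insert_wqe_hdr(): barrier first, then the header */
static void insert_wqe_hdr(struct fake_wqe *wqe, uint64_t header)
{
	/* release ordering plays the role of wmb(): every frag[] store
	 * issued before this call is visible no later than the header */
	atomic_store_explicit(&wqe->header, header, memory_order_release);
}

static void post_send(struct fake_wqe *wqe, uint64_t pa, uint64_t len)
{
	wqe->frag[0] = pa;
	wqe->frag[1] = len;
	wqe->frag[2] = 0;
	insert_wqe_hdr(wqe, FAKE_VALID);	/* never a plain final store */
}

int main(void)
{
	static struct fake_wqe wqe;	/* zero-initialized */

	post_send(&wqe, 0x1000, 64);

	/* consumer side: acquire pairs with the release above */
	if (atomic_load_explicit(&wqe.header, memory_order_acquire) & FAKE_VALID)
		printf("WQE visible: pa=%#llx len=%llu\n",
		       (unsigned long long)wqe.frag[0],
		       (unsigned long long)wqe.frag[1]);
	return 0;
}

The bug being fixed is the reordered case: if the header store can pass the body stores, the hardware may consume a WQE whose address and length words are stale.
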
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
 	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
 	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+	attr->port_num = 1;
 	init_attr->event_handler = iwqp->ibqp.event_handler;
 	init_attr->qp_context = iwqp->ibqp.qp_context;
 	init_attr->send_cq = iwqp->ibqp.send_cq;
 	init_attr->recv_cq = iwqp->ibqp.recv_cq;
 	init_attr->srq = iwqp->ibqp.srq;
 	init_attr->cap = attr->cap;
+	init_attr->port_num = 1;
 
 	return 0;
 }
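
The query-QP hunk is two one-liners because verbs port numbers are 1-based, so a zeroed port_num is invalid, and i40iw hardware exposes exactly one port; both structures that ib_query_qp() returns, the current attributes and the creation attributes, carry their own copy of the field. A pared-down sketch of that contract, with fake_ types standing in for the verbs structs:

#include <stdio.h>

/* pared-down stand-ins for struct ib_qp_attr / struct ib_qp_init_attr */
struct fake_qp_attr      { unsigned int port_num; };
struct fake_qp_init_attr { unsigned int port_num; };

/* a single-port device reports port 1 on every query;
 * verbs ports are 1-based, so a zeroed port_num is invalid */
static int fake_query_qp(struct fake_qp_attr *attr,
			 struct fake_qp_init_attr *init_attr)
{
	attr->port_num = 1;		/* left at 0 before the fix */
	init_attr->port_num = 1;
	return 0;
}

int main(void)
{
	struct fake_qp_attr attr = { 0 };
	struct fake_qp_init_attr init_attr = { 0 };

	fake_query_qp(&attr, &init_attr);
	printf("attr.port_num=%u init_attr.port_num=%u\n",
	       attr.port_num, init_attr.port_num);
	return 0;
}
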
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4174,9 +4174,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 err_uar_page:
 	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
 
-err_cnt:
-	mlx5_ib_cleanup_cong_debugfs(dev);
 err_cong:
+	mlx5_ib_cleanup_cong_debugfs(dev);
+err_cnt:
 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
 		mlx5_ib_dealloc_counters(dev);
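
The mlx5 hunk only swaps two labels, but that restores the invariant behind kernel goto unwinding: cleanup labels appear in the reverse order of the setup steps, so a failure after step N falls through exactly the teardowns for steps N..1. Before the fix, a jump to err_cong bypassed the cong-debugfs teardown it is named for while a jump to err_cnt ran it spuriously. A schematic sketch of the convention; the setup/teardown functions are illustrative stand-ins for the counters, cong-debugfs and UAR steps in mlx5_ib_add():

#include <stdio.h>

static int setup_counters(void)      { return 0; }
static int setup_cong_debugfs(void)  { return 0; }
static int setup_uar_page(void)      { return -1; }	/* fails here */
static void teardown_counters(void)     { puts("undo counters"); }
static void teardown_cong_debugfs(void) { puts("undo cong debugfs"); }

static int init_all(void)
{
	int err;

	err = setup_counters();
	if (err)
		goto err_out;
	err = setup_cong_debugfs();
	if (err)
		goto err_cnt;		/* undo counters only */
	err = setup_uar_page();
	if (err)
		goto err_cong;		/* undo cong debugfs, then counters */
	return 0;

err_cong:				/* labels in reverse setup order */
	teardown_cong_debugfs();
err_cnt:
	teardown_counters();
err_out:
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}

With the labels transposed, the fall-through chain still ran both teardowns, which is why the bug only bit on the jump entry points, not on the err_uar_page path.
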
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -387,7 +387,7 @@ struct qedr_qp {
 		u8 wqe_size;
 
 		u8 smac[ETH_ALEN];
-		u16 vlan_id;
+		u16 vlan;
 		int rc;
 	} *rqe_wr_id;
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
 	qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
 		-EINVAL : 0;
-	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+	qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
 	/* note: length stands for data length i.e. GRH is excluded */
 	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
 		data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
 	struct qedr_qp *qp = dev->gsi_qp;
 	unsigned long flags;
+	u16 vlan_id;
 	int i = 0;
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
 		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
 		wc[i].wc_flags |= IB_WC_WITH_SMAC;
-		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+
+		vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
+			  VLAN_VID_MASK;
+		if (vlan_id) {
 			wc[i].wc_flags |= IB_WC_WITH_VLAN;
-			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+			wc[i].vlan_id = vlan_id;
+			wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
+				    VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 		}
 
 		qedr_inc_sw_cons(&qp->rq);
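
The qedr hunks hinge on the fact that the 16 bits the hardware reports are a whole 802.1Q TCI, laid out as PCP (3 bits), DEI (1 bit), VID (12 bits), not a bare VLAN ID. Comparing the raw value against zero therefore conflates "untagged" with "VID 0 plus a nonzero priority", and throws the priority away instead of reporting it as the service level. A small self-contained sketch of the split; the mask constants match include/linux/if_vlan.h, the rest is illustrative:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff	/* VLAN Identifier (VID) */
#define VLAN_PRIO_MASK	0xe000	/* Priority Code Point (PCP) */
#define VLAN_PRIO_SHIFT	13

int main(void)
{
	uint16_t tci = 0xa005;			/* PCP = 5, DEI = 0, VID = 5 */
	uint16_t vid = tci & VLAN_VID_MASK;
	uint16_t sl  = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	/* only a nonzero VID marks the completion IB_WC_WITH_VLAN */
	if (vid)
		printf("vlan_id=%u sl=%u\n", (unsigned)vid, (unsigned)sl);
	return 0;
}

This is exactly the split the poll-CQ hunk performs: vlan & VLAN_VID_MASK feeds wc->vlan_id, and (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT feeds wc->sl.
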