Commit 2eb7f910 authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull infiniband/RDMA updates from Roland Dreier:
 - large set of iSER initiator improvements
 - hardware driver fixes for cxgb4, mlx5 and ocrdma
 - small fixes to core midlayer

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (47 commits)
  RDMA/cxgb4: Fix ntuple calculation for ipv6 and remove duplicate line
  RDMA/cxgb4: Add missing neigh_release in find_route
  RDMA/cxgb4: Take IPv6 into account for best_mtu and set_emss
  RDMA/cxgb4: Make c4iw_wr_log_size_order static
  IB/core: Fix XRC race condition in ib_uverbs_open_qp
  IB/core: Clear AH attr variable to prevent garbage data
  RDMA/ocrdma: Save the bit environment, spare unncessary parenthesis
  RDMA/ocrdma: The kernel has a perfectly good BIT() macro - use it
  RDMA/ocrdma: Don't memset() buffers we just allocated with kzalloc()
  RDMA/ocrdma: Remove a unused-label warning
  RDMA/ocrdma: Convert kernel VA to PA for mmap in user
  RDMA/ocrdma: Get vlan tag from ib_qp_attrs
  RDMA/ocrdma: Add default GID at index 0
  IB/mlx5, iser, isert: Add Signature API additions
  Target/iser: Centralize ib_sig_domain setting
  IB/iser: Centralize ib_sig_domain settings
  IB/mlx5: Use extended internal signature layout
  IB/iser: Set IP_CSUM as default guard type
  IB/iser: Remove redundant assignment
  IB/mlx5: Use enumerations for PI copy mask
  ...
parents 1f6075f9 7b909bb4
@@ -5112,6 +5112,7 @@ F: include/scsi/*iscsi*
 ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
 M:	Or Gerlitz <ogerlitz@mellanox.com>
+M:	Sagi Grimberg <sagig@mellanox.com>
 M:	Roi Dayan <roid@mellanox.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
...
@@ -2518,6 +2518,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
 	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
 	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
+	attr.vlan_id = 0;
+	memset(&attr.dmac, 0, sizeof(attr.dmac));
 	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
 	ah = ib_create_ah(pd, &attr);
...
@@ -477,6 +477,7 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
 	entry->desc.async.element = element;
 	entry->desc.async.event_type = event;
+	entry->desc.async.reserved = 0;
 	entry->counter = counter;
 	list_add_tail(&entry->list, &file->async_file->event_list);
@@ -502,6 +503,10 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
 {
 	struct ib_uevent_object *uobj;
+	/* for XRC target qp's, check that qp is live */
+	if (!event->element.qp->uobject || !event->element.qp->uobject->live)
+		return;
+
 	uobj = container_of(event->element.qp->uobject,
 			    struct ib_uevent_object, uobject);
...
@@ -236,10 +236,12 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 static void set_emss(struct c4iw_ep *ep, u16 opt)
 {
 	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
-		   sizeof(struct iphdr) - sizeof(struct tcphdr);
+		   ((AF_INET == ep->com.remote_addr.ss_family) ?
+		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
+		   sizeof(struct tcphdr);
 	ep->mss = ep->emss;
 	if (GET_TCPOPT_TSTAMP(opt))
-		ep->emss -= 12;
+		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
 	if (ep->emss < 128)
 		ep->emss = 128;
 	if (ep->emss & 7)
@@ -415,6 +417,7 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
 		return NULL;
 	if (!our_interface(dev, n->dev) &&
 	    !(n->dev->flags & IFF_LOOPBACK)) {
+		neigh_release(n);
 		dst_release(&rt->dst);
 		return NULL;
 	}
@@ -581,11 +584,14 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
 }
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
-		     unsigned int *idx, int use_ts)
+		     unsigned int *idx, int use_ts, int ipv6)
 {
-	unsigned short hdr_size = sizeof(struct iphdr) +
+	unsigned short hdr_size = (ipv6 ?
+				   sizeof(struct ipv6hdr) :
+				   sizeof(struct iphdr)) +
 				  sizeof(struct tcphdr) +
-				  (use_ts ? 12 : 0);
+				  (use_ts ?
+				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
 	unsigned short data_size = mtu - hdr_size;
 	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
@@ -634,7 +640,8 @@ static int send_connect(struct c4iw_ep *ep)
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-		 enable_tcp_timestamps);
+		 enable_tcp_timestamps,
+		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
 	wscale = compute_wscale(rcv_win);
 	/*
@@ -668,6 +675,7 @@ static int send_connect(struct c4iw_ep *ep)
 		if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
 			opt2 |= T5_OPT_2_VALID;
 			opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+			opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
 		}
 		t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -713,8 +721,6 @@ static int send_connect(struct c4iw_ep *ep)
 		} else {
 			u32 isn = (prandom_u32() & ~7UL) - 1;
-			opt2 |= T5_OPT_2_VALID;
-			opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
 			if (peer2peer)
 				isn += 4;
@@ -756,10 +762,10 @@ static int send_connect(struct c4iw_ep *ep)
 			t5_req6->peer_ip_lo = *((__be64 *)
 						(ra6->sin6_addr.s6_addr + 8));
 			t5_req6->opt0 = cpu_to_be64(opt0);
-			t5_req6->params = (__force __be64)cpu_to_be32(
+			t5_req6->params = cpu_to_be64(V_FILTER_TUPLE(
 							cxgb4_select_ntuple(
 					ep->com.dev->rdev.lldi.ports[0],
-					ep->l2t));
+					ep->l2t)));
 			t5_req6->rsvd = cpu_to_be32(isn);
 			PDBG("%s snd_isn %u\n", __func__,
 			     be32_to_cpu(t5_req6->rsvd));
@@ -1763,7 +1769,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	req->tcb.tx_max = (__force __be32) jiffies;
 	req->tcb.rcv_adv = htons(1);
 	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-		 enable_tcp_timestamps);
+		 enable_tcp_timestamps,
+		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
 	wscale = compute_wscale(rcv_win);
 	/*
@@ -2162,7 +2169,8 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		       ep->hwtid));
 	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-		 enable_tcp_timestamps && req->tcpopt.tstamp);
+		 enable_tcp_timestamps && req->tcpopt.tstamp,
+		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
 	wscale = compute_wscale(rcv_win);
 	/*
...
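The recurring change in these cxgb4 hunks is one piece of arithmetic: the usable MSS is the path MTU minus the IP and TCP headers, where the IP header is 20 bytes for IPv4 but 40 for IPv6, and the TCP timestamp option costs another 12 bytes (TCPOLEN_TIMESTAMP is 10, rounded up to a 4-byte boundary). A minimal standalone model of that accounting, with the kernel struct sizes written out as constants (illustrative, not driver code):

    #include <stdio.h>

    /* 20 = sizeof(struct iphdr), 40 = sizeof(struct ipv6hdr),
     * 20 = sizeof(struct tcphdr), 12 = round_up(TCPOLEN_TIMESTAMP, 4) */
    static unsigned short tcp_overhead(int ipv6, int use_ts)
    {
        return (ipv6 ? 40 : 20) + 20 + (use_ts ? 12 : 0);
    }

    int main(void)
    {
        printf("IPv4 emss at MTU 1500: %d\n", 1500 - tcp_overhead(0, 1)); /* 1448 */
        printf("IPv6 emss at MTU 1500: %d\n", 1500 - tcp_overhead(1, 1)); /* 1428 */
        return 0;
    }

Before this series the driver always subtracted the IPv4 header, so IPv6 connections overstated their MSS by the 20-byte difference.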
@@ -60,7 +60,7 @@ int c4iw_wr_log = 0;
 module_param(c4iw_wr_log, int, 0444);
 MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
-int c4iw_wr_log_size_order = 12;
+static int c4iw_wr_log_size_order = 12;
 module_param(c4iw_wr_log_size_order, int, 0444);
 MODULE_PARM_DESC(c4iw_wr_log_size_order,
 		 "Number of entries (log2) in the work request timing log.");
...
@@ -657,13 +657,13 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 		return -EINVAL;
 	idx = get_index(vma->vm_pgoff);
+	if (idx >= uuari->num_uars)
+		return -EINVAL;
 	pfn = uar_index2pfn(dev, uuari->uars[idx].index);
 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
 		    (unsigned long long)pfn);
-	if (idx >= uuari->num_uars)
-		return -EINVAL;
 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	if (io_remap_pfn_range(vma, vma->vm_start, pfn,
 			       PAGE_SIZE, vma->vm_page_prot))
@@ -1425,8 +1425,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 {
 	struct mlx5_ib_dev *dev = context;
-	destroy_umrc_res(dev);
 	ib_unregister_device(&dev->ib_dev);
+	destroy_umrc_res(dev);
 	destroy_dev_resources(&dev->devr);
 	free_comp_eqs(dev);
 	ib_dealloc_device(&dev->ib_dev);
...
@@ -55,16 +55,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 	u64 pfn;
 	struct scatterlist *sg;
 	int entry;
-	addr = addr >> PAGE_SHIFT;
+	unsigned long page_shift = ilog2(umem->page_size);
+	addr = addr >> page_shift;
 	tmp = (unsigned long)addr;
 	m = find_first_bit(&tmp, sizeof(tmp));
 	skip = 1 << m;
 	mask = skip - 1;
 	i = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		len = sg_dma_len(sg) >> PAGE_SHIFT;
-		pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+		len = sg_dma_len(sg) >> page_shift;
+		pfn = sg_dma_address(sg) >> page_shift;
 		for (k = 0; k < len; k++) {
 			if (!(i & mask)) {
 				tmp = (unsigned long)pfn;
@@ -103,14 +104,15 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 		*ncont = 0;
 	}
-	*shift = PAGE_SHIFT + m;
+	*shift = page_shift + m;
 	*count = i;
 }
 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			  int page_shift, __be64 *pas, int umr)
 {
-	int shift = page_shift - PAGE_SHIFT;
+	unsigned long umem_page_shift = ilog2(umem->page_size);
+	int shift = page_shift - umem_page_shift;
 	int mask = (1 << shift) - 1;
 	int i, k;
 	u64 cur = 0;
@@ -121,11 +123,11 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 	i = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		len = sg_dma_len(sg) >> umem_page_shift;
 		base = sg_dma_address(sg);
 		for (k = 0; k < len; k++) {
 			if (!(i & mask)) {
-				cur = base + (k << PAGE_SHIFT);
+				cur = base + (k << umem_page_shift);
 				if (umr)
 					cur |= 3;
@@ -134,7 +136,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 				    i >> shift, be64_to_cpu(pas[i >> shift]));
 			} else
 				mlx5_ib_dbg(dev, "=====> 0x%llx\n",
-					    base + (k << umem_page_shift));
 			i++;
 		}
 	}
...
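The mem.c hunks drop the assumption that every umem is built from PAGE_SIZE pages and instead derive the shift from umem->page_size itself. A tiny userspace model of that arithmetic — ilog2() of a power of two is modeled here with __builtin_ctzl; values are illustrative:

    #include <stdio.h>

    int main(void)
    {
        unsigned long dma_len = 1UL << 20;              /* one 1 MiB SG entry */
        unsigned long shift4k = __builtin_ctzl(4096);   /* ilog2(4096)  == 12 */
        unsigned long shift64k = __builtin_ctzl(65536); /* ilog2(65536) == 16 */

        printf("4K pages:  shift %lu -> %lu pages\n", shift4k, dma_len >> shift4k);
        printf("64K pages: shift %lu -> %lu pages\n", shift64k, dma_len >> shift64k);
        return 0;
    }

Shifting by the umem's own page shift rather than PAGE_SHIFT keeps the page counts and addresses correct when the registered memory uses a page size other than the CPU's.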
@@ -881,12 +881,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	int order;
 	int err;
-	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n",
-		    start, virt_addr, length);
+	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
+		    start, virt_addr, length, access_flags);
 	umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
 			   0);
 	if (IS_ERR(umem)) {
-		mlx5_ib_dbg(dev, "umem get failed\n");
+		mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
 		return (void *)umem;
 	}
...
@@ -1317,6 +1317,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 	path->rlid = cpu_to_be16(ah->dlid);
 	if (ah->ah_flags & IB_AH_GRH) {
+		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
+			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
+			return -EINVAL;
+		}
 		path->grh_mlid |= 1 << 7;
 		path->mgid_index = ah->grh.sgid_index;
 		path->hop_limit = ah->grh.hop_limit;
@@ -1332,22 +1337,6 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 	path->static_rate = err;
 	path->port = port;
-	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
-			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
-			return -EINVAL;
-		}
-		path->grh_mlid |= 1 << 7;
-		path->mgid_index = ah->grh.sgid_index;
-		path->hop_limit = ah->grh.hop_limit;
-		path->tclass_flowlabel =
-			cpu_to_be32((ah->grh.traffic_class << 20) |
-				    (ah->grh.flow_label));
-		memcpy(path->rgid, ah->grh.dgid.raw, 16);
-	}
 	if (attr_mask & IB_QP_TIMEOUT)
 		path->ackto_lt = attr->timeout << 3;
@@ -2039,56 +2028,31 @@ static u8 bs_selector(int block_size)
 	}
 }
-static int format_selector(struct ib_sig_attrs *attr,
-			   struct ib_sig_domain *domain,
-			   int *selector)
+static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
+			      struct mlx5_bsf_inl *inl)
 {
-#define FORMAT_DIF_NONE		0
-#define FORMAT_DIF_CRC_INC	8
-#define FORMAT_DIF_CRC_NO_INC	12
-#define FORMAT_DIF_CSUM_INC	13
-#define FORMAT_DIF_CSUM_NO_INC	14
-	switch (domain->sig.dif.type) {
-	case IB_T10DIF_NONE:
-		/* No DIF */
-		*selector = FORMAT_DIF_NONE;
-		break;
-	case IB_T10DIF_TYPE1: /* Fall through */
-	case IB_T10DIF_TYPE2:
-		switch (domain->sig.dif.bg_type) {
-		case IB_T10DIF_CRC:
-			*selector = FORMAT_DIF_CRC_INC;
-			break;
-		case IB_T10DIF_CSUM:
-			*selector = FORMAT_DIF_CSUM_INC;
-			break;
-		default:
-			return 1;
-		}
-		break;
-	case IB_T10DIF_TYPE3:
-		switch (domain->sig.dif.bg_type) {
-		case IB_T10DIF_CRC:
-			*selector = domain->sig.dif.type3_inc_reftag ?
-					   FORMAT_DIF_CRC_INC :
-					   FORMAT_DIF_CRC_NO_INC;
-			break;
-		case IB_T10DIF_CSUM:
-			*selector = domain->sig.dif.type3_inc_reftag ?
-					   FORMAT_DIF_CSUM_INC :
-					   FORMAT_DIF_CSUM_NO_INC;
-			break;
-		default:
-			return 1;
-		}
-		break;
-	default:
-		return 1;
-	}
-	return 0;
+	/* Valid inline section and allow BSF refresh */
+	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
+				       MLX5_BSF_REFRESH_DIF);
+	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
+	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
+	/* repeating block */
+	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
+	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
+		MLX5_DIF_CRC : MLX5_DIF_IPCS;
+	if (domain->sig.dif.ref_remap)
+		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
+	if (domain->sig.dif.app_escape) {
+		if (domain->sig.dif.ref_escape)
+			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
+		else
+			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
+	}
+	inl->dif_app_bitmask_check =
+		cpu_to_be16(domain->sig.dif.apptag_check_mask);
 }
 static int mlx5_set_bsf(struct ib_mr *sig_mr,
@@ -2099,45 +2063,49 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
 	struct mlx5_bsf_basic *basic = &bsf->basic;
 	struct ib_sig_domain *mem = &sig_attrs->mem;
 	struct ib_sig_domain *wire = &sig_attrs->wire;
-	int ret, selector;
 	memset(bsf, 0, sizeof(*bsf));
+	/* Basic + Extended + Inline */
+	basic->bsf_size_sbs = 1 << 7;
+	/* Input domain check byte mask */
+	basic->check_byte_mask = sig_attrs->check_mask;
+	basic->raw_data_size = cpu_to_be32(data_size);
+	/* Memory domain */
 	switch (sig_attrs->mem.sig_type) {
+	case IB_SIG_TYPE_NONE:
+		break;
 	case IB_SIG_TYPE_T10_DIF:
-		if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
-			return -EINVAL;
-		/* Input domain check byte mask */
-		basic->check_byte_mask = sig_attrs->check_mask;
+		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
+		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Wire domain */
+	switch (sig_attrs->wire.sig_type) {
+	case IB_SIG_TYPE_NONE:
+		break;
+	case IB_SIG_TYPE_T10_DIF:
 		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
-		    mem->sig.dif.type == wire->sig.dif.type) {
+		    mem->sig_type == wire->sig_type) {
 			/* Same block structure */
-			basic->bsf_size_sbs = 1 << 4;
+			basic->bsf_size_sbs |= 1 << 4;
 			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
-				basic->wire.copy_byte_mask |= 0xc0;
+				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
 			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
-				basic->wire.copy_byte_mask |= 0x30;
+				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
 			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
-				basic->wire.copy_byte_mask |= 0x0f;
+				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
 		} else
 			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
-		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
-		basic->raw_data_size = cpu_to_be32(data_size);
-		ret = format_selector(sig_attrs, mem, &selector);
-		if (ret)
-			return -EINVAL;
-		basic->m_bfs_psv = cpu_to_be32(selector << 24 |
-					       msig->psv_memory.psv_idx);
-		ret = format_selector(sig_attrs, wire, &selector);
-		if (ret)
-			return -EINVAL;
-		basic->w_bfs_psv = cpu_to_be32(selector << 24 |
-					       msig->psv_wire.psv_idx);
+		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
+		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
 		break;
 	default:
 		return -EINVAL;
 	}
@@ -2336,20 +2304,21 @@ static int set_psv_wr(struct ib_sig_domain *domain,
 	memset(psv_seg, 0, sizeof(*psv_seg));
 	psv_seg->psv_num = cpu_to_be32(psv_idx);
 	switch (domain->sig_type) {
+	case IB_SIG_TYPE_NONE:
+		break;
 	case IB_SIG_TYPE_T10_DIF:
 		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
 						     domain->sig.dif.app_tag);
 		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
-		*seg += sizeof(*psv_seg);
-		*size += sizeof(*psv_seg) / 16;
 		break;
 	default:
 		pr_err("Bad signature type given.\n");
 		return 1;
 	}
+	*seg += sizeof(*psv_seg);
+	*size += sizeof(*psv_seg) / 16;
 	return 0;
 }
...
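The MLX5_CPY_* constants introduced in this series replace the magic values 0xc0/0x30/0x0f. Reading them against MLX5_DIF_SIZE (8), each set bit appears to let one byte of the 8-byte T10-DIF tuple (2-byte guard, 2-byte application tag, 4-byte reference tag) pass from the memory domain to the wire domain when both sides share a block structure — an interpretation, not something the patch states. A small illustration of how the mask composes:

    #include <stdio.h>

    #define MLX5_CPY_GRD_MASK 0xc0	/* 2 guard bytes */
    #define MLX5_CPY_APP_MASK 0x30	/* 2 app-tag bytes */
    #define MLX5_CPY_REF_MASK 0x0f	/* 4 ref-tag bytes */

    int main(void)
    {
        unsigned char copy_byte_mask = 0;

        copy_byte_mask |= MLX5_CPY_GRD_MASK;	/* guard seeds match */
        copy_byte_mask |= MLX5_CPY_APP_MASK;	/* app tags match */
        copy_byte_mask |= MLX5_CPY_REF_MASK;	/* ref tags match */
        printf("pass the whole DIF tuple through: 0x%02x\n", copy_byte_mask); /* 0xff */
        return 0;
    }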
@@ -348,11 +348,6 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
 	return mqe;
 }
-static void *ocrdma_alloc_mqe(void)
-{
-	return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
-}
 static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
 {
 	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
@@ -566,8 +561,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
 	cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
 	cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
-	cmd->async_event_bitmap = Bit(OCRDMA_ASYNC_GRP5_EVE_CODE);
-	cmd->async_event_bitmap |= Bit(OCRDMA_ASYNC_RDMA_EVE_CODE);
+	cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
+	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
 	cmd->async_cqid_ringsize = cq->id;
 	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
@@ -1189,10 +1184,10 @@ int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
 {
 	struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
 	struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
-	struct ocrdma_rdma_stats_resp *old_stats = NULL;
+	struct ocrdma_rdma_stats_resp *old_stats;
 	int status;
-	old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL);
+	old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
 	if (old_stats == NULL)
 		return -ENOMEM;
@@ -1235,10 +1230,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
 	struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
 	struct mgmt_hba_attribs *hba_attribs;
-	mqe = ocrdma_alloc_mqe();
+	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
 	if (!mqe)
 		return status;
-	memset(mqe, 0, sizeof(*mqe));
 	dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
 	dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
@@ -2279,7 +2273,8 @@ int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
 static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 				struct ocrdma_modify_qp *cmd,
-				struct ib_qp_attr *attrs)
+				struct ib_qp_attr *attrs,
+				int attr_mask)
 {
 	int status;
 	struct ib_ah_attr *ah_attr = &attrs->ah_attr;
@@ -2319,8 +2314,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
 	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
 	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
-	vlan_id = ah_attr->vlan_id;
-	if (vlan_id && (vlan_id < 0x1000)) {
+	if (attr_mask & IB_QP_VID) {
+		vlan_id = attrs->vlan_id;
 		cmd->params.vlan_dmac_b4_to_b5 |=
 			vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
 		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
@@ -2347,7 +2342,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
 	}
 	if (attr_mask & IB_QP_AV) {
-		status = ocrdma_set_av_params(qp, cmd, attrs);
+		status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
 		if (status)
 			return status;
 	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
...
@@ -388,6 +388,15 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
 		device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
 }
+static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
+{
+	/* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
+	union ib_gid *sgid = &dev->sgid_tbl[0];
+	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	ocrdma_get_guid(dev, &sgid->raw[8]);
+}
 static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev,
 				  struct net_device *net)
 {
@@ -434,6 +443,7 @@ static void ocrdma_init_gid_table(struct ocrdma_dev *dev)
 				rdma_vlan_dev_real_dev(net_dev) : net_dev;
 			if (real_dev == dev->nic_info.netdev) {
+				ocrdma_add_default_sgid(dev);
 				ocrdma_init_ipv4_gids(dev, net_dev);
 				ocrdma_init_ipv6_gids(dev, net_dev);
 			}
@@ -646,8 +656,10 @@ static int __init ocrdma_init_module(void)
 	return 0;
 err_be_reg:
+#if IS_ENABLED(CONFIG_IPV6)
 	ocrdma_unregister_inet6addr_notifier();
 err_notifier6:
+#endif
 	ocrdma_unregister_inetaddr_notifier();
 	return status;
 }
...
...
@@ -388,7 +388,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	memset(&resp, 0, sizeof(resp));
 	resp.ah_tbl_len = ctx->ah_tbl.len;
-	resp.ah_tbl_page = ctx->ah_tbl.pa;
+	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
 	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
 	if (status)
@@ -870,7 +870,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 	uresp.page_size = PAGE_ALIGN(cq->len);
 	uresp.num_pages = 1;
 	uresp.max_hw_cqe = cq->max_hw_cqe;
-	uresp.page_addr[0] = cq->pa;
+	uresp.page_addr[0] = virt_to_phys(cq->va);
 	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
 	uresp.db_page_size = dev->nic_info.db_page_size;
 	uresp.phase_change = cq->phase_change ? 1 : 0;
@@ -1123,13 +1123,13 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
 	uresp.sq_dbid = qp->sq.dbid;
 	uresp.num_sq_pages = 1;
 	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
-	uresp.sq_page_addr[0] = qp->sq.pa;
+	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
 	uresp.num_wqe_allocated = qp->sq.max_cnt;
 	if (!srq) {
 		uresp.rq_dbid = qp->rq.dbid;
 		uresp.num_rq_pages = 1;
 		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
-		uresp.rq_page_addr[0] = qp->rq.pa;
+		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
 		uresp.num_rqe_allocated = qp->rq.max_cnt;
 	}
 	uresp.db_page_addr = usr_db;
@@ -1680,7 +1680,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
 	memset(&uresp, 0, sizeof(uresp));
 	uresp.rq_dbid = srq->rq.dbid;
 	uresp.num_rq_pages = 1;
-	uresp.rq_page_addr[0] = srq->rq.pa;
+	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
 	uresp.rq_page_size = srq->rq.len;
 	uresp.db_page_addr = dev->nic_info.unmapped_db +
 		(srq->pd->id * dev->nic_info.db_page_size);
...
...
...
@@ -49,7 +49,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 				 struct iser_data_buf *data_copy,
 				 enum iser_data_dir cmd_dir)
 {
-	struct ib_device *dev = iser_task->ib_conn->device->ib_device;
+	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
 	struct scatterlist *sgl = (struct scatterlist *)data->buf;
 	struct scatterlist *sg;
 	char *mem = NULL;
@@ -116,7 +116,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 	struct ib_device *dev;
 	unsigned long cmd_data_len;
-	dev = iser_task->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 	ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
 			(cmd_dir == ISER_DIR_OUT) ?
@@ -322,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 	struct ib_device *dev;
 	iser_task->dir[iser_dir] = 1;
-	dev = iser_task->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
@@ -337,7 +337,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
 {
 	struct ib_device *dev;
-	dev = iser_task->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 }
@@ -348,7 +348,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 			      enum iser_data_dir cmd_dir,
 			      int aligned_len)
 {
-	struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn;
+	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
 	iscsi_conn->fmr_unalign_cnt++;
 	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
@@ -377,7 +377,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 			  enum iser_data_dir cmd_dir)
 {
-	struct iser_conn *ib_conn = iser_task->ib_conn;
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
@@ -432,7 +432,7 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 			 ib_conn->fmr.page_vec->offset);
 		for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
 			iser_err("page_vec[%d] = 0x%llx\n", i,
-				 (unsigned long long) ib_conn->fmr.page_vec->pages[i]);
+				 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
 	}
 	if (err)
 		return err;
@@ -440,77 +440,74 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 	return 0;
 }
-static inline enum ib_t10_dif_type
-scsi2ib_prot_type(unsigned char prot_type)
+static inline void
+iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
+		    struct ib_sig_domain *domain)
 {
-	switch (prot_type) {
-	case SCSI_PROT_DIF_TYPE0:
-		return IB_T10DIF_NONE;
-	case SCSI_PROT_DIF_TYPE1:
-		return IB_T10DIF_TYPE1;
-	case SCSI_PROT_DIF_TYPE2:
-		return IB_T10DIF_TYPE2;
-	case SCSI_PROT_DIF_TYPE3:
-		return IB_T10DIF_TYPE3;
-	default:
-		return IB_T10DIF_NONE;
-	}
-}
+	domain->sig_type = IB_SIG_TYPE_T10_DIF;
+	domain->sig.dif.pi_interval = sc->device->sector_size;
+	domain->sig.dif.ref_tag = scsi_get_lba(sc) & 0xffffffff;
+	/*
+	 * At the moment we hard code those, but in the future
+	 * we will take them from sc.
+	 */
+	domain->sig.dif.apptag_check_mask = 0xffff;
+	domain->sig.dif.app_escape = true;
+	domain->sig.dif.ref_escape = true;
+	if (scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE1 ||
+	    scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE2)
+		domain->sig.dif.ref_remap = true;
+};
 static int
 iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
 {
-	unsigned char scsi_ptype = scsi_get_prot_type(sc);
-	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
-	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
-	sig_attrs->mem.sig.dif.pi_interval = sc->device->sector_size;
-	sig_attrs->wire.sig.dif.pi_interval = sc->device->sector_size;
 	switch (scsi_get_prot_op(sc)) {
 	case SCSI_PROT_WRITE_INSERT:
 	case SCSI_PROT_READ_STRIP:
-		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
-		sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
+		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
 		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
-		sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
-						  0xffffffff;
 		break;
 	case SCSI_PROT_READ_INSERT:
 	case SCSI_PROT_WRITE_STRIP:
-		sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
-		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
-		sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
-						 0xffffffff;
-		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
+		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+		/*
+		 * At the moment we use this modparam to tell what is
+		 * the memory bg_type, in the future we will take it
+		 * from sc.
+		 */
+		sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
+						 IB_T10DIF_CRC;
 		break;
 	case SCSI_PROT_READ_PASS:
 	case SCSI_PROT_WRITE_PASS:
-		sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
-		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
-		sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
-						 0xffffffff;
-		sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
 		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
-		sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
-						  0xffffffff;
+		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+		/*
+		 * At the moment we use this modparam to tell what is
+		 * the memory bg_type, in the future we will take it
+		 * from sc.
+		 */
+		sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
						 IB_T10DIF_CRC;
 		break;
 	default:
 		iser_err("Unsupported PI operation %d\n",
 			 scsi_get_prot_op(sc));
 		return -EINVAL;
 	}
 	return 0;
 }
 static int
 iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
 {
 	switch (scsi_get_prot_type(sc)) {
 	case SCSI_PROT_DIF_TYPE0:
-		*mask = 0x0;
 		break;
 	case SCSI_PROT_DIF_TYPE1:
 	case SCSI_PROT_DIF_TYPE2:
@@ -533,7 +530,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
 		struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
 		struct ib_sge *prot_sge, struct ib_sge *sig_sge)
 {
-	struct iser_conn *ib_conn = iser_task->ib_conn;
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_pi_context *pi_ctx = desc->pi_ctx;
 	struct ib_send_wr sig_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
@@ -609,7 +606,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 			    struct ib_sge *sge)
 {
 	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
-	struct iser_conn *ib_conn = iser_task->ib_conn;
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct ib_mr *mr;
@@ -700,7 +697,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 			      enum iser_data_dir cmd_dir)
 {
-	struct iser_conn *ib_conn = iser_task->ib_conn;
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
...
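The rewritten iser_set_sig_attrs() reduces to one rule: INSERT/STRIP operations carry protection information on only one side of the transfer, PASS operations on both. A userspace sketch of that mapping — the enum values and the domains_for() helper are hypothetical names modeled on the SCSI protection ops, not kernel API:

    #include <stdio.h>

    enum prot_op { WRITE_INSERT, READ_STRIP, READ_INSERT, WRITE_STRIP,
                   READ_PASS, WRITE_PASS };

    /* 1 = domain protected (T10-DIF), 0 = unprotected (NONE) */
    static void domains_for(enum prot_op op, int *mem_dif, int *wire_dif)
    {
        *mem_dif = *wire_dif = 0;
        switch (op) {
        case WRITE_INSERT: case READ_STRIP:	/* PI exists on the wire only */
            *wire_dif = 1; break;
        case READ_INSERT: case WRITE_STRIP:	/* PI exists in memory only */
            *mem_dif = 1; break;
        case READ_PASS: case WRITE_PASS:	/* PI on both sides */
            *mem_dif = *wire_dif = 1; break;
        }
    }

    int main(void)
    {
        int m, w;

        domains_for(WRITE_INSERT, &m, &w);
        printf("WRITE_INSERT: mem=%s wire=%s\n",
               m ? "T10_DIF" : "NONE", w ? "T10_DIF" : "NONE");
        return 0;
    }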
...
@@ -2609,58 +2609,45 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 	return ret;
 }
-static inline enum ib_t10_dif_type
-se2ib_prot_type(enum target_prot_type prot_type)
+static inline void
+isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
+		     struct ib_sig_domain *domain)
 {
-	switch (prot_type) {
-	case TARGET_DIF_TYPE0_PROT:
-		return IB_T10DIF_NONE;
-	case TARGET_DIF_TYPE1_PROT:
-		return IB_T10DIF_TYPE1;
-	case TARGET_DIF_TYPE2_PROT:
-		return IB_T10DIF_TYPE2;
-	case TARGET_DIF_TYPE3_PROT:
-		return IB_T10DIF_TYPE3;
-	default:
-		return IB_T10DIF_NONE;
-	}
-}
+	domain->sig_type = IB_SIG_TYPE_T10_DIF;
+	domain->sig.dif.bg_type = IB_T10DIF_CRC;
+	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
+	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
+	/*
+	 * At the moment we hard code those, but if in the future
+	 * the target core would like to use it, we will take it
+	 * from se_cmd.
+	 */
+	domain->sig.dif.apptag_check_mask = 0xffff;
+	domain->sig.dif.app_escape = true;
+	domain->sig.dif.ref_escape = true;
+	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
+	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
+		domain->sig.dif.ref_remap = true;
+};
 static int
 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
 {
-	enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
-	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
-	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
-	sig_attrs->mem.sig.dif.pi_interval =
-				se_cmd->se_dev->dev_attrib.block_size;
-	sig_attrs->wire.sig.dif.pi_interval =
-				se_cmd->se_dev->dev_attrib.block_size;
 	switch (se_cmd->prot_op) {
 	case TARGET_PROT_DIN_INSERT:
 	case TARGET_PROT_DOUT_STRIP:
-		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
-		sig_attrs->wire.sig.dif.type = ib_prot_type;
-		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
-		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
+		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
 		break;
 	case TARGET_PROT_DOUT_INSERT:
 	case TARGET_PROT_DIN_STRIP:
-		sig_attrs->mem.sig.dif.type = ib_prot_type;
-		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
-		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
-		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
+		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
 		break;
 	case TARGET_PROT_DIN_PASS:
 	case TARGET_PROT_DOUT_PASS:
-		sig_attrs->mem.sig.dif.type = ib_prot_type;
-		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
-		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
-		sig_attrs->wire.sig.dif.type = ib_prot_type;
-		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
-		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
+		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
 		break;
 	default:
 		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
...
@@ -40,6 +40,15 @@
 #define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
 #define MLX5_DIF_SIZE		8
 #define MLX5_STRIDE_BLOCK_OP	0x400
+#define MLX5_CPY_GRD_MASK	0xc0
+#define MLX5_CPY_APP_MASK	0x30
+#define MLX5_CPY_REF_MASK	0x0f
+#define MLX5_BSF_INC_REFTAG	(1 << 6)
+#define MLX5_BSF_INL_VALID	(1 << 15)
+#define MLX5_BSF_REFRESH_DIF	(1 << 14)
+#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
+#define MLX5_BSF_APPTAG_ESCAPE	0x1
+#define MLX5_BSF_APPREF_ESCAPE	0x2
 enum mlx5_qp_optpar {
 	MLX5_QP_OPTPAR_ALT_ADDR_PATH	= 1 << 0,
@@ -287,6 +296,22 @@ struct mlx5_wqe_inline_seg {
 	__be32	byte_count;
 };
+enum mlx5_sig_type {
+	MLX5_DIF_CRC = 0x1,
+	MLX5_DIF_IPCS = 0x2,
+};
+
+struct mlx5_bsf_inl {
+	__be16		vld_refresh;
+	__be16		dif_apptag;
+	__be32		dif_reftag;
+	u8		sig_type;
+	u8		rp_inv_seed;
+	u8		rsvd[3];
+	u8		dif_inc_ref_guard_check;
+	__be16		dif_app_bitmask_check;
+};
+
 struct mlx5_bsf {
 	struct mlx5_bsf_basic {
 		u8		bsf_size_sbs;
@@ -310,14 +335,8 @@ struct mlx5_bsf {
 		__be32		w_tfs_psv;
 		__be32		m_tfs_psv;
 	} ext;
-	struct mlx5_bsf_inl {
-		__be32		w_inl_vld;
-		__be32		w_rsvd;
-		__be64		w_block_format;
-		__be32		m_inl_vld;
-		__be32		m_rsvd;
-		__be64		m_block_format;
-	} inl;
+	struct mlx5_bsf_inl	w_inl;
+	struct mlx5_bsf_inl	m_inl;
 };
 struct mlx5_klm {
...
@@ -491,20 +491,14 @@ struct ib_mr_init_attr {
 	u32	    flags;
 };
-enum ib_signature_type {
-	IB_SIG_TYPE_T10_DIF,
-};
 /**
- * T10-DIF Signature types
- * T10-DIF types are defined by SCSI
- * specifications.
+ * Signature types
+ * IB_SIG_TYPE_NONE: Unprotected.
+ * IB_SIG_TYPE_T10_DIF: Type T10-DIF
  */
-enum ib_t10_dif_type {
-	IB_T10DIF_NONE,
-	IB_T10DIF_TYPE1,
-	IB_T10DIF_TYPE2,
-	IB_T10DIF_TYPE3
+enum ib_signature_type {
+	IB_SIG_TYPE_NONE,
+	IB_SIG_TYPE_T10_DIF,
 };
 /**
@@ -520,24 +514,26 @@ enum ib_t10_dif_bg_type {
 /**
  * struct ib_t10_dif_domain - Parameters specific for T10-DIF
  *     domain.
- * @type: T10-DIF type (0|1|2|3)
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
- * @type3_inc_reftag: T10-DIF type 3 does not state
- *     about the reference tag, it is the user
- *     choice to increment it or not.
+ * @ref_remap: Indicate wethear the reftag increments each block
+ * @app_escape: Indicate to skip block check if apptag=0xffff
+ * @ref_escape: Indicate to skip block check if reftag=0xffffffff
+ * @apptag_check_mask: check bitmask of application tag.
 */
 struct ib_t10_dif_domain {
-	enum ib_t10_dif_type	type;
 	enum ib_t10_dif_bg_type bg_type;
 	u16			pi_interval;
 	u16			bg;
 	u16			app_tag;
 	u32			ref_tag;
-	bool			type3_inc_reftag;
+	bool			ref_remap;
+	bool			app_escape;
+	bool			ref_escape;
+	u16			apptag_check_mask;
 };
 /**
...
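With the per-domain type enum gone and the escape/remap flags added, a caller now describes each signature domain directly. A minimal sketch of filling the new layout for a Type 1-style protected wire domain, using the structures declared in the hunk above — the field values (512-byte interval, ref tag 0x1234) are illustrative, not taken from the patch, and this fragment assumes the kernel ib_verbs.h context rather than compiling standalone:

    struct ib_sig_domain wire = {
        .sig_type = IB_SIG_TYPE_T10_DIF,
        .sig.dif = {
            .bg_type           = IB_T10DIF_CRC,
            .pi_interval       = 512,      /* sector size */
            .ref_tag           = 0x1234,   /* lower 32 bits of the starting LBA */
            .apptag_check_mask = 0xffff,   /* check every app-tag bit */
            .app_escape        = true,     /* skip checks when apptag == 0xffff */
            .ref_escape        = true,     /* skip checks when reftag == 0xffffffff */
            .ref_remap         = true,     /* Type 1/2: reftag increments per block */
        },
    };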