Commit 6dd7abae authored by Doug Ledford

Merge branch 'k.o/for-4.10-rc' into HEAD

parents 6df6b4a9 646ebd41
@@ -2851,7 +2851,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (!src_addr || !src_addr->sa_family) {
 		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
 		src_addr->sa_family = dst_addr->sa_family;
-		if (dst_addr->sa_family == AF_INET6) {
+		if (IS_ENABLED(CONFIG_IPV6) &&
+		    dst_addr->sa_family == AF_INET6) {
 			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
 			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
 			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
......
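Note: the cma_bind_addr() hunk above wraps the AF_INET6 branch in IS_ENABLED(CONFIG_IPV6), so the whole IPv6 path becomes compile-time dead code when IPv6 is configured out. A minimal userspace sketch of the same constant-folding pattern (the macro and values here are illustrative, not the kernel's):

```c
#include <stdio.h>

/* Hypothetical stand-in for the kernel's IS_ENABLED(): a macro that
 * expands to a compile-time constant 0 or 1, so the disabled branch is
 * discarded by the compiler instead of referencing symbols (here, IPv6
 * handling) that may not exist in the build. */
#define MY_CONFIG_IPV6 0	/* flip to 1 to compile the IPv6 branch in */

int main(void)
{
	int sa_family = 10;	/* AF_INET6 is 10 on Linux */

	if (MY_CONFIG_IPV6 && sa_family == 10)
		printf("copying sin6_scope_id for the IPv6 destination\n");
	else
		printf("IPv6 path compiled out or not applicable\n");
	return 0;
}
```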
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 					    IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));

 	if (access & IB_ACCESS_ON_DEMAND) {
+		put_pid(umem->pid);
 		ret = ib_umem_odp_get(context, umem);
 		if (ret) {
 			kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
+		put_pid(umem->pid);
 		kfree(umem);
 		return ERR_PTR(-ENOMEM);
 	}
......
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 	memset(props, 0, sizeof(struct ib_port_attr));
 	props->max_mtu = IB_MTU_4096;
-	if (netdev->mtu >= 4096)
-		props->active_mtu = IB_MTU_4096;
-	else if (netdev->mtu >= 2048)
-		props->active_mtu = IB_MTU_2048;
-	else if (netdev->mtu >= 1024)
-		props->active_mtu = IB_MTU_1024;
-	else if (netdev->mtu >= 512)
-		props->active_mtu = IB_MTU_512;
-	else
-		props->active_mtu = IB_MTU_256;
+	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

 	if (!netif_carrier_ok(netdev))
 		props->state = IB_PORT_DOWN;
......
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	skb_trim(skb, dlen);
 	mutex_lock(&ep->com.mutex);
-	/* update RX credits */
-	update_rx_credits(ep, dlen);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
+		update_rx_credits(ep, dlen);
 		ep->rcv_seq += dlen;
 		disconnect = process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
+		update_rx_credits(ep, dlen);
 		ep->rcv_seq += dlen;
 		disconnect = process_mpa_request(ep, skb);
 		break;
 	case FPDU_MODE: {
 		struct c4iw_qp_attributes attrs;
+		update_rx_credits(ep, dlen);
 		BUG_ON(!ep->com.qp);
 		if (status)
 			pr_err("%s Unexpected streaming data." \
......
@@ -504,6 +504,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		goto skip_cqe;
 	}

+	/*
+	 * Special cqe for drain WR completions...
+	 */
+	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
+		*cqe = *hw_cqe;
+		goto skip_cqe;
+	}
+
 	/*
 	 * Gotta tweak READ completions:
 	 *	1) the cqe doesn't contain the sq_wptr from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 			c4iw_invalidate_mr(qhp->rhp,
 					   CQE_WRID_FR_STAG(&cqe));
 			break;
+		case C4IW_DRAIN_OPCODE:
+			wc->opcode = IB_WC_SEND;
+			break;
 		default:
 			printk(KERN_ERR MOD "Unexpected opcode %d "
 			       "in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 		}
 	}
 out:
-	if (wq) {
-		if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
-			if (t4_sq_empty(wq))
-				complete(&qhp->sq_drained);
-			if (t4_rq_empty(wq))
-				complete(&qhp->rq_drained);
-		}
+	if (wq)
 		spin_unlock(&qhp->lock);
-	}
 	return ret;
 }
......
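Note: the poll_cq()/c4iw_poll_cq_one() hunks above consume the special drain CQEs that complete_sq_drain_wr()/complete_rq_drain_wr() (in the qp.c hunk further down) insert into the software CQ with the caller's wr_id as cookie. A standalone sketch of that software produce/poll idea, with deliberately simplified structures that are not the driver's real ones:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified sketch of the software-CQE mechanism: the provider
 * fabricates a completion entry carrying the caller's wr_id as a cookie
 * and queues it on a software ring, and the poll path hands it back like
 * any hardware completion. */
#define SWCQ_DEPTH 16

struct sw_cqe {
	uint64_t cookie;	/* stands in for CQE_DRAIN_COOKIE() */
};

static struct sw_cqe sw_queue[SWCQ_DEPTH];
static unsigned int sw_pidx, sw_cidx;

static void swcq_produce(uint64_t wr_id)
{
	sw_queue[sw_pidx++ % SWCQ_DEPTH].cookie = wr_id;
}

static int swcq_poll(uint64_t *wr_id)
{
	if (sw_cidx == sw_pidx)
		return 0;	/* ring empty */
	*wr_id = sw_queue[sw_cidx++ % SWCQ_DEPTH].cookie;
	return 1;
}

int main(void)
{
	uint64_t id;

	swcq_produce(0xabcdull);	/* complete_sq_drain_wr() analogue */
	while (swcq_poll(&id))		/* poll_cq() analogue */
		printf("drain completion, wr_id=0x%llx\n",
		       (unsigned long long)id);
	return 0;
}
```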
@@ -881,9 +881,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		}
 	}

+	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+	if (!rdev->free_workq) {
+		err = -ENOMEM;
+		goto err_free_status_page;
+	}
+
 	rdev->status_page->db_off = 0;

 	return 0;
+err_free_status_page:
+	free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
 	c4iw_ocqp_pool_destroy(rdev);
 destroy_rqtpool:
@@ -897,6 +905,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+	destroy_workqueue(rdev->free_workq);
 	kfree(rdev->wr_log);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
......
@@ -45,6 +45,7 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/workqueue.h>

 #include <asm/byteorder.h>
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
 	struct list_head qpids;
 	struct list_head cqids;
 	struct mutex lock;
+	struct kref kref;
 };

 enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
 	atomic_t wr_log_idx;
 	struct wr_log_entry *wr_log;
 	int wr_log_size;
+	struct workqueue_struct *free_workq;
 };

 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
 	wait_queue_head_t wait;
 	struct timer_list timer;
 	int sq_sig_all;
-	struct completion rq_drained;
-	struct completion sq_drained;
+	struct work_struct free_work;
+	struct c4iw_ucontext *ucontext;
 };

 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
 	u32 key;
 	spinlock_t mmap_lock;
 	struct list_head mmaps;
+	struct kref kref;
 };

 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
 	return container_of(c, struct c4iw_ucontext, ibucontext);
 }

+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+	kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+	kref_get(&ucontext->kref);
+}
+
 struct c4iw_mm_entry {
 	struct list_head entry;
 	u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
 	return IB_QPS_ERR;
 }

+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);

 #endif
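Note: the kref added to struct c4iw_ucontext above means the context is released by whoever drops the last reference, whether that is ucontext deallocation or the deferred QP free. A userspace sketch of the last-put-frees pattern (the kernel's kref uses an atomic counter; the plain int here is only for illustration):

```c
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the kref pattern: the final put invokes the release
 * callback, so whichever holder drops the last reference (QP free work
 * or ucontext dealloc) frees the object. */
struct ctx {
	int refcount;
};

static void ctx_release(struct ctx *c)
{
	printf("last reference dropped, freeing context\n");
	free(c);
}

static void ctx_get(struct ctx *c) { c->refcount++; }

static void ctx_put(struct ctx *c)
{
	if (--c->refcount == 0)
		ctx_release(c);
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	c->refcount = 1;	/* kref_init() analogue */
	ctx_get(c);		/* QP takes a reference at create time */
	ctx_put(c);		/* userspace deallocates the ucontext */
	ctx_put(c);		/* deferred QP free drops the last reference */
	return 0;
}
```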
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
 	return -ENOSYS;
 }

-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
 {
-	struct c4iw_dev *rhp = to_c4iw_dev(context->device);
-	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+	struct c4iw_ucontext *ucontext;
+	struct c4iw_dev *rhp;
 	struct c4iw_mm_entry *mm, *tmp;

-	PDBG("%s context %p\n", __func__, context);
+	ucontext = container_of(kref, struct c4iw_ucontext, kref);
+	rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+	PDBG("%s ucontext %p\n", __func__, ucontext);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
 	kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+	PDBG("%s context %p\n", __func__, context);
+	c4iw_put_ucontext(ucontext);
 	return 0;
 }
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
 	INIT_LIST_HEAD(&context->mmaps);
 	spin_lock_init(&context->mmap_lock);
+	kref_init(&context->kref);

 	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
 		if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 	memset(props, 0, sizeof(struct ib_port_attr));
 	props->max_mtu = IB_MTU_4096;
-	if (netdev->mtu >= 4096)
-		props->active_mtu = IB_MTU_4096;
-	else if (netdev->mtu >= 2048)
-		props->active_mtu = IB_MTU_2048;
-	else if (netdev->mtu >= 1024)
-		props->active_mtu = IB_MTU_1024;
-	else if (netdev->mtu >= 512)
-		props->active_mtu = IB_MTU_512;
-	else
-		props->active_mtu = IB_MTU_256;
+	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

 	if (!netif_carrier_ok(netdev))
 		props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
 	dev->ibdev.get_port_immutable = c4iw_port_immutable;
 	dev->ibdev.get_dev_fw_str = get_dev_fw_str;
-	dev->ibdev.drain_sq = c4iw_drain_sq;
-	dev->ibdev.drain_rq = c4iw_drain_rq;

 	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
 	if (!dev->ibdev.iwcm)
......
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 	return 0;
 }

-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+	struct c4iw_ucontext *ucontext;
+	struct c4iw_qp *qhp;
+	struct c4iw_dev *rhp;
+
+	qhp = container_of(work, struct c4iw_qp, free_work);
+	ucontext = qhp->ucontext;
+	rhp = qhp->rhp;
+
+	PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+	destroy_qp(&rhp->rdev, &qhp->wq,
+		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+	if (ucontext)
+		c4iw_put_ucontext(ucontext);
+	kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
 {
 	struct c4iw_qp *qhp;

 	qhp = container_of(kref, struct c4iw_qp, kref);
 	PDBG("%s qhp %p\n", __func__, qhp);
-	kfree(qhp);
+	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }

 void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
 	PDBG("%s ib_qp %p\n", __func__, qp);
-	kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }

 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
 	return 0;
 }

+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *schp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	schp = to_c4iw_cq(qhp->ibqp.send_cq);
+	cq = &schp->cq;
+
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+				 CQE_TYPE_V(1) |
+				 CQE_SWCQE_V(1) |
+				 CQE_QPID_V(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&schp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&schp->lock, flag);
+
+	spin_lock_irqsave(&schp->comp_handler_lock, flag);
+	(*schp->ibcq.comp_handler)(&schp->ibcq,
+				   schp->ibcq.cq_context);
+	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *rchp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+	cq = &rchp->cq;
+
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+				 CQE_TYPE_V(0) |
+				 CQE_SWCQE_V(1) |
+				 CQE_QPID_V(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&rchp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&rchp->lock, flag);
+
+	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+				   rchp->ibcq.cq_context);
+	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		   struct ib_send_wr **bad_wr)
 {
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		*bad_wr = wr;
-		return -EINVAL;
+		complete_sq_drain_wr(qhp, wr);
+		return err;
 	}
 	num_wrs = t4_sq_avail(&qhp->wq);
 	if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		*bad_wr = wr;
-		return -EINVAL;
+		complete_rq_drain_wr(qhp, wr);
+		return err;
 	}
 	num_wrs = t4_rq_avail(&qhp->wq);
 	if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		}
 		break;
 	case C4IW_QP_STATE_CLOSING:
-		if (!internal) {
+
+		/*
+		 * Allow kernel users to move to ERROR for qp draining.
+		 */
+		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+				  C4IW_QP_STATE_ERROR)) {
 			ret = -EINVAL;
 			goto out;
 		}
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	struct c4iw_dev *rhp;
 	struct c4iw_qp *qhp;
 	struct c4iw_qp_attributes attrs;
-	struct c4iw_ucontext *ucontext;

 	qhp = to_c4iw_qp(ib_qp);
 	rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	spin_unlock_irq(&rhp->lock);
 	free_ird(rhp, qhp->attr.max_ird);

-	ucontext = ib_qp->uobject ?
-		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
-	destroy_qp(&rhp->rdev, &qhp->wq,
-		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
 	c4iw_qp_rem_ref(ib_qp);

 	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->attr.max_ird = 0;
 	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 	spin_lock_init(&qhp->lock);
-	init_completion(&qhp->sq_drained);
-	init_completion(&qhp->rq_drained);
 	mutex_init(&qhp->mutex);
 	init_waitqueue_head(&qhp->wait);
 	kref_init(&qhp->kref);
+	INIT_WORK(&qhp->free_work, free_qp_work);

 	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 	if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 			ma_sync_key_mm->len = PAGE_SIZE;
 			insert_mmap(ucontext, ma_sync_key_mm);
 		}
+
+		c4iw_get_ucontext(ucontext);
+		qhp->ucontext = ucontext;
 	}
 	qhp->ibqp.qp_num = qhp->wq.sq.qid;
 	init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
 	return 0;
 }
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
-	struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
-	(void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
-	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-	unsigned long flag;
-	bool need_to_wait;
-
-	move_qp_to_err(qp);
-	spin_lock_irqsave(&qp->lock, flag);
-	need_to_wait = !t4_sq_empty(&qp->wq);
-	spin_unlock_irqrestore(&qp->lock, flag);
-
-	if (need_to_wait)
-		wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
-	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-	unsigned long flag;
-	bool need_to_wait;
-
-	move_qp_to_err(qp);
-	spin_lock_irqsave(&qp->lock, flag);
-	need_to_wait = !t4_rq_empty(&qp->wq);
-	spin_unlock_irqrestore(&qp->lock, flag);
-
-	if (need_to_wait)
-		wait_for_completion(&qp->rq_drained);
-}
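Note: with c4iw_drain_sq()/c4iw_drain_rq() deleted, a post against a QP in error now completes the WR as a software flush completion instead of failing with -EINVAL, which is the behavior a generic drain (post a marker WR, then wait for its flushed completion) relies on. A hedged userspace sketch of that wait-for-marker idea; all names are illustrative and none of this is kernel API:

```c
#include <pthread.h>
#include <stdio.h>

/* Sketch: a marker "drain" work request is queued behind all outstanding
 * work on the flushed queue, and the waiter blocks until the marker's
 * completion is observed, at which point everything posted before it has
 * completed too. */
struct drain_ctx {
	pthread_mutex_t lock;
	pthread_cond_t done_cv;
	int done;
};

static void drain_completion(struct drain_ctx *d)
{
	/* called from the CQ polling side when the marker WR's flush
	 * completion is seen */
	pthread_mutex_lock(&d->lock);
	d->done = 1;
	pthread_cond_signal(&d->done_cv);
	pthread_mutex_unlock(&d->lock);
}

static void wait_for_drain(struct drain_ctx *d)
{
	pthread_mutex_lock(&d->lock);
	while (!d->done)
		pthread_cond_wait(&d->done_cv, &d->lock);
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct drain_ctx d = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};

	drain_completion(&d);	/* poller sees the marker's flush CQE */
	wait_for_drain(&d);	/* returns once the queue is known empty */
	printf("queue drained\n");
	return 0;
}
```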
@@ -179,6 +179,7 @@ struct t4_cqe {
 			__be32 wrid_hi;
 			__be32 wrid_low;
 		} gen;
+		u64 drain_cookie;
 	} u;
 	__be64 reserved;
 	__be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
 /* generic accessor macros */
 #define CQE_WRID_HI(x)		(be32_to_cpu((x)->u.gen.wrid_hi))
 #define CQE_WRID_LOW(x)		(be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x)	((x)->u.drain_cookie)

 /* macros for flit 3 of the cqe */
 #define CQE_GENBIT_S	63
......
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
 	memset(props, 0, sizeof(*props));
 	props->max_mtu = IB_MTU_4096;
-	if (netdev->mtu >= 4096)
-		props->active_mtu = IB_MTU_4096;
-	else if (netdev->mtu >= 2048)
-		props->active_mtu = IB_MTU_2048;
-	else if (netdev->mtu >= 1024)
-		props->active_mtu = IB_MTU_1024;
-	else if (netdev->mtu >= 512)
-		props->active_mtu = IB_MTU_512;
-	else
-		props->active_mtu = IB_MTU_256;
+	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

 	props->lid = 1;
 	if (netif_carrier_ok(iwdev->netdev))
......
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
 	memset(props, 0, sizeof(*props));

 	props->max_mtu = IB_MTU_4096;
-	if (netdev->mtu >= 4096)
-		props->active_mtu = IB_MTU_4096;
-	else if (netdev->mtu >= 2048)
-		props->active_mtu = IB_MTU_2048;
-	else if (netdev->mtu >= 1024)
-		props->active_mtu = IB_MTU_1024;
-	else if (netdev->mtu >= 512)
-		props->active_mtu = IB_MTU_512;
-	else
-		props->active_mtu = IB_MTU_256;
+	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

 	props->lid = 1;
 	props->lmc = 0;
......
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
 	return 0;
 }

-void qedr_unaffiliated_event(void *context,
-			     u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
 {
 	pr_err("unaffiliated event not implemented yet\n");
 }
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
 		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
 			goto sysfs_err;

+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
 	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
 	return dev;
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
 	ib_dealloc_device(&dev->ibdev);
 }

-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-	return 0;
+	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }

 static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
 	qedr_remove(dev);
 }

+static void qedr_open(struct qedr_dev *dev)
+{
+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
 	union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

 	if (rc)
 		DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
 {
 	switch (event) {
 	case QEDE_UP:
-		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+		qedr_open(dev);
 		break;
 	case QEDE_DOWN:
 		qedr_close(dev);
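Note: qedr_open()/qedr_close() above use test_and_set_bit()/test_and_clear_bit() on enet_state so each link transition dispatches exactly one IB_EVENT_PORT_ACTIVE or IB_EVENT_PORT_ERR, even if QEDE_UP/QEDE_DOWN notifications repeat. A small C11 sketch of the same edge-triggering, with atomic_exchange standing in for the kernel bitops:

```c
#include <stdatomic.h>
#include <stdio.h>

/* atomic_exchange returns the previous value, just like
 * test_and_set_bit()/test_and_clear_bit(), so only the first transition
 * in each direction dispatches an event. */
static atomic_int enet_up;

static void on_qede_up(void)
{
	if (!atomic_exchange(&enet_up, 1))	/* test_and_set_bit analogue */
		printf("dispatch IB_EVENT_PORT_ACTIVE\n");
}

static void on_qede_down(void)
{
	if (atomic_exchange(&enet_up, 0))	/* test_and_clear_bit analogue */
		printf("dispatch IB_EVENT_PORT_ERR\n");
}

int main(void)
{
	on_qede_up();
	on_qede_up();	/* duplicate QEDE_UP: no second event */
	on_qede_down();
	on_qede_down();	/* duplicate QEDE_DOWN: no second event */
	return 0;
}
```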
......
@@ -113,6 +113,8 @@ struct qedr_device_attr {
 	struct qed_rdma_events events;
 };

+#define QEDR_ENET_STATE_BIT	(0)
+
 struct qedr_dev {
 	struct ib_device	ibdev;
 	struct qed_dev		*cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
 	struct qedr_cq		*gsi_sqcq;
 	struct qedr_cq		*gsi_rqcq;
 	struct qedr_qp		*gsi_qp;
+
+	unsigned long enet_state;
 };

 #define QEDR_MAX_SQ_PBL			(0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
 #define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)

 #define QEDR_MAX_PORT			(1)
+#define QEDR_PORT			(1)

 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
@@ -251,9 +256,6 @@ struct qedr_cq {
 	u16 icid;

-	/* Lock to protect completion handler */
-	spinlock_t comp_handler_lock;
-
 	/* Lock to protect multiplem CQ's */
 	spinlock_t cq_lock;
 	u8 arm_flags;
......
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
 	qedr_inc_sw_gsi_cons(&qp->sq);
 	spin_unlock_irqrestore(&qp->q_lock, flags);

-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+	if (cq->ibcq.comp_handler)
 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-	}
 }

 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
 	spin_unlock_irqrestore(&qp->q_lock, flags);

-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+	if (cq->ibcq.comp_handler)
 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-	}
 }

 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
 	}

 	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-	else
 		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+	else
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;

 	packet->roce_mode = roce_mode;
 	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
......
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 			    struct ib_ucontext *context, struct ib_udata *udata)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
-	struct qedr_ucontext *uctx = NULL;
-	struct qedr_alloc_pd_uresp uresp;
 	struct qedr_pd *pd;
 	u16 pd_id;
 	int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 	if (!pd)
 		return ERR_PTR(-ENOMEM);

-	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	if (rc)
+		goto err;

-	uresp.pd_id = pd_id;
 	pd->pd_id = pd_id;

 	if (udata && context) {
+		struct qedr_alloc_pd_uresp uresp;
+
+		uresp.pd_id = pd_id;
+
 		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-		if (rc)
+		if (rc) {
 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-		uctx = get_qedr_ucontext(context);
-		uctx->pd = pd;
-		pd->uctx = uctx;
+			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+			goto err;
+		}
+
+		pd->uctx = get_qedr_ucontext(context);
+		pd->uctx->pd = pd;
 	}

 	return &pd->ibpd;
+
+err:
+	kfree(pd);
+	return ERR_PTR(rc);
 }

 int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1516,7 +1526,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 		return ERR_PTR(-EFAULT);
 }

-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 {
 	switch (qp_state) {
 	case QED_ROCE_QP_STATE_RESET:
@@ -1537,7 +1547,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 	return IB_QPS_ERR;
 }

-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+					enum ib_qp_state qp_state)
 {
 	switch (qp_state) {
 	case IB_QPS_RESET:
@@ -1573,7 +1584,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 	int status = 0;

 	if (new_state == qp->state)
-		return 1;
+		return 0;

 	switch (qp->state) {
 	case QED_ROCE_QP_STATE_RESET:
@@ -1649,6 +1660,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 		/* ERR->XXX */
 		switch (new_state) {
 		case QED_ROCE_QP_STATE_RESET:
+			if ((qp->rq.prod != qp->rq.cons) ||
+			    (qp->sq.prod != qp->sq.cons)) {
+				DP_NOTICE(dev,
+					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
+					  qp->sq.cons);
+				status = -EINVAL;
+			}
 			break;
 		default:
 			status = -EINVAL;
@@ -1781,7 +1800,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
 		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
 			 qp_params.remote_mac_addr);
-;

 		qp_params.mtu = qp->mtu;
 		qp_params.lb_indication = false;
@@ -1932,7 +1950,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 	qp_attr->qp_state = qedr_get_ibqp_state(params.state);
 	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
-	qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+	qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
 	qp_attr->rq_psn = params.rq_psn;
 	qp_attr->sq_psn = params.sq_psn;
@@ -1944,7 +1962,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-	qp_attr->cap.max_inline_data = qp->max_inline_data;
+	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
 	qp_init_attr->cap = qp_attr->cap;

 	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2225,7 +2243,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
 	return rc;
 }

-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+				       int max_page_list_len)
 {
 	struct qedr_pd *pd = get_qedr_pd(ibpd);
 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2627,7 +2646,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
 	return 0;
 }

-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 {
 	switch (opcode) {
 	case IB_WR_RDMA_WRITE:
@@ -2652,7 +2671,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 	}
 }

-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 {
 	int wq_is_full, err_wr, pbl_is_full;
 	struct qedr_dev *dev = qp->dev;
@@ -2689,7 +2708,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 	return true;
 }

-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		     struct ib_send_wr **bad_wr)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3157,9 +3176,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
 				  IB_WC_SUCCESS, 0);
 		break;
 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-		DP_ERR(dev,
-		       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-		       cq->icid, qp->icid);
+		if (qp->state != QED_ROCE_QP_STATE_ERR)
+			DP_ERR(dev,
+			       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+			       cq->icid, qp->icid);
 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
 				  IB_WC_WR_FLUSH_ERR, 1);
 		break;
......
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	if (ret) {
 		dev_err(&pdev->dev, "failed to allocate interrupts\n");
 		ret = -ENOMEM;
-		goto err_netdevice;
+		goto err_free_cq_ring;
 	}

 	/* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 err_free_intrs:
 	pvrdma_free_irq(dev);
 	pvrdma_disable_msi_all(dev);
-err_netdevice:
-	unregister_netdevice_notifier(&dev->nb_netdev);
 err_free_cq_ring:
 	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
 err_free_async_ring:
......
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
 	union pvrdma_cmd_resp rsp;
 	struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
 	struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
-	struct pvrdma_alloc_ucontext_resp uresp;
+	struct pvrdma_alloc_ucontext_resp uresp = {0};
 	int ret;
 	void *ptr;
......
@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
 	case RXE_MEM_TYPE_MR:
 	case RXE_MEM_TYPE_FMR:
-		return ((iova < mem->iova) ||
-			((iova + length) > (mem->iova + mem->length))) ?
-			-EFAULT : 0;
+		if (iova < mem->iova ||
+		    length > mem->length ||
+		    iova > mem->iova + mem->length - length)
+			return -EFAULT;
+		return 0;

 	default:
 		return -EFAULT;
......
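Note: the rewritten mem_check_range() avoids the unsigned wrap-around in the old iova + length comparison: rejecting length > mem->length first guarantees that mem->iova + mem->length - length cannot underflow, so an oversized length can no longer wrap past the end of the region and pass the check. A standalone sketch showing a wrapping request that the old form could have accepted:

```c
#include <stdint.h>
#include <stdio.h>

/* Standalone copy of the corrected bounds check. With the old form,
 * iova + length could wrap around 2^64 and compare as "in range". */
static int check_range(uint64_t mem_iova, uint64_t mem_length,
		       uint64_t iova, uint64_t length)
{
	if (iova < mem_iova ||
	    length > mem_length ||
	    iova > mem_iova + mem_length - length)
		return -1;	/* -EFAULT analogue */
	return 0;
}

int main(void)
{
	/* Wrapping request: iova + length overflows to a tiny value, so
	 * the old "(iova + length) > (mem_iova + mem_length)" test passed. */
	printf("huge length -> %d\n",
	       check_range(0x1000, 0x1000, 0x1800, UINT64_MAX));
	/* A request fully inside the region is still accepted. */
	printf("valid range -> %d\n",
	       check_range(0x1000, 0x1000, 0x1800, 0x100));
	return 0;
}
```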
@@ -538,7 +538,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
 	}

 	spin_lock_bh(&dev_list_lock);
-	list_add_tail(&rxe_dev_list, &rxe->list);
+	list_add_tail(&rxe->list, &rxe_dev_list);
 	spin_unlock_bh(&dev_list_lock);

 	return rxe;
 }
......
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
 	del_timer_sync(&qp->rnr_nak_timer);

 	rxe_cleanup_task(&qp->req.task);
-	if (qp_type(qp) == IB_QPT_RC)
-		rxe_cleanup_task(&qp->comp.task);
+	rxe_cleanup_task(&qp->comp.task);

 	/* flush out any receive wr's or pending requests */
 	__rxe_do_task(&qp->req.task);
......
@@ -479,7 +479,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 			goto err;
 		}

-		resid = mtu;
+		qp->resp.resid = mtu;
 	} else {
 		if (pktlen != resid) {
 			state = RESPST_ERR_LENGTH;
......
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 					   SHOST_DIX_GUARD_CRC);
 	}

-	/*
-	 * Limit the sg_tablesize and max_sectors based on the device
-	 * max fastreg page list length.
-	 */
-	shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
-		ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
 	if (iscsi_host_add(shost,
 			   ib_conn->device->ib_device->dma_device)) {
 		mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
 	shost->max_sectors = min(iser_max_sectors, max_fr_sectors);

+	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+		 iser_conn, shost->sg_tablesize,
+		 shost->max_sectors);
+
 	if (cmds_max > max_cmds) {
 		iser_info("cmds_max changed from %u to %u\n",
 			  cmds_max, max_cmds);
......
@@ -496,7 +496,6 @@ struct ib_conn {
  * @rx_descs:         rx buffers array (cyclic buffer)
  * @num_rx_descs:     number of rx descriptors
  * @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
  */
 struct iser_conn {
 	struct ib_conn		ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
 	struct iser_rx_desc	*rx_descs;
 	u32			num_rx_descs;
 	unsigned short		scsi_sg_tablesize;
-	unsigned int		scsi_max_sectors;
 	bool			snd_w_inv;
 };
......
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
 	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
 				 device->ib_device->attrs.max_fast_reg_page_list_len);

-	if (sg_tablesize > sup_sg_tablesize) {
-		sg_tablesize = sup_sg_tablesize;
-		iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
-	} else {
-		iser_conn->scsi_max_sectors = max_sectors;
-	}
-
-	iser_conn->scsi_sg_tablesize = sg_tablesize;
-
-	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
-		 iser_conn, iser_conn->scsi_sg_tablesize,
-		 iser_conn->scsi_max_sectors);
+	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 }

 /**
......
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 	struct srp_fr_desc *d;
 	struct ib_mr *mr;
 	int i, ret = -EINVAL;
+	enum ib_mr_type mr_type;

 	if (pool_size <= 0)
 		goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 	spin_lock_init(&pool->lock);
 	INIT_LIST_HEAD(&pool->free_list);

+	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+		mr_type = IB_MR_TYPE_SG_GAPS;
+	else
+		mr_type = IB_MR_TYPE_MEM_REG;
+
 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-				 max_page_list_len);
+		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
 		if (IS_ERR(mr)) {
 			ret = PTR_ERR(mr);
 			if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
 		indirect_sg_entries = cmd_sg_entries;
 	}

+	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+		pr_warn("Clamping indirect_sg_entries to %u\n",
+			SG_MAX_SEGMENTS);
+		indirect_sg_entries = SG_MAX_SEGMENTS;
+	}
+
 	srp_remove_wq = create_workqueue("srp_remove");
 	if (!srp_remove_wq) {
 		ret = -ENOMEM;
......
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 	}
 }

+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+	if (mtu >= 4096)
+		return IB_MTU_4096;
+	else if (mtu >= 2048)
+		return IB_MTU_2048;
+	else if (mtu >= 1024)
+		return IB_MTU_1024;
+	else if (mtu >= 512)
+		return IB_MTU_512;
+	else
+		return IB_MTU_256;
+}
+
 enum ib_port_state {
 	IB_PORT_NOP		= 0,
 	IB_PORT_DOWN		= 1,
......
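Note: ib_mtu_int_to_enum() above is what replaces the open-coded MTU ladders in the cxgb3, cxgb4, i40iw, and nes hunks earlier in this merge. A standalone copy of the helper (the enum values match the existing IB_MTU definitions) showing the one-line call site it enables:

```c
#include <stdio.h>

/* Values below match the existing IB_MTU definitions in ib_verbs.h. */
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

int main(void)
{
	int netdev_mtu = 1500;	/* typical Ethernet MTU */

	/* replaces the per-driver if/else ladder:
	 * props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); */
	printf("active_mtu enum = %d\n", ib_mtu_int_to_enum(netdev_mtu));
	return 0;
}
```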
@@ -17,3 +17,4 @@ header-y += nes-abi.h
 header-y += ocrdma-abi.h
 header-y += hns-abi.h
 header-y += vmw_pvrdma-abi.h
+header-y += qedr-abi.h
@@ -30,7 +30,7 @@
  * SOFTWARE.
  */
 #ifndef CXGB3_ABI_USER_H
-#define CXBG3_ABI_USER_H
+#define CXGB3_ABI_USER_H

 #include <linux/types.h>
......
@@ -37,7 +37,6 @@
 #define IB_USER_VERBS_H

 #include <linux/types.h>
-#include <rdma/ib_verbs.h>

 /*
  * Increment this value if any changes that break userspace ABI
@@ -548,11 +547,17 @@ enum {
 };

 enum {
-	IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
+	/*
+	 * This value is equal to IB_QP_DEST_QPN.
+	 */
+	IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
 };

 enum {
-	IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
+	/*
+	 * This value is equal to IB_QP_RATE_LIMIT.
+	 */
+	IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
 };

 struct ib_uverbs_ex_create_qp {
......