Commit 4c171acc authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cma: Save PID of ID's owner
  RDMA/cma: Add support for netlink statistics export
  RDMA/cma: Pass QP type into rdma_create_id()
  RDMA: Update exported headers list
  RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>
  RDMA/nes: Add a check for strict_strtoul()
  RDMA/cxgb3: Don't post zero-byte read if endpoint is going away
  RDMA/cxgb4: Use completion objects for event blocking
  IB/srp: Fix integer -> pointer cast warnings
  IB: Add devnode methods to cm_class and umad_class
  IB/mad: Return EPROTONOSUPPORT when an RDMA device lacks the QP required
  IB/uverbs: Add devnode method to set path/mode
  RDMA/ucma: Add .nodename/.mode to tell userspace where to create device node
  RDMA: Add netlink infrastructure
  RDMA: Add error handling to ib_core_init()
parents 20e0ec11 8dc4abdf
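The API change driving most of the fan-out below is rdma_create_id() growing a QP type argument, so every caller (9P, RDS, NFS/RDMA, iSER, ucma) now states whether the ID is for a connected (RC) or datagram (UD) QP. A minimal sketch of the new calling convention follows; the event handler and context names here are placeholders of my own, not code from this merge:

#include <linux/err.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

/* my_event_handler and my_ctx are hypothetical names for illustration. */
static int my_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	return 0;
}

static int my_create_cm_id(void *my_ctx)
{
	struct rdma_cm_id *id;

	/* The connected (TCP-like) port space now pairs with an RC QP type. */
	id = rdma_create_id(my_event_handler, my_ctx, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	rdma_destroy_id(id);
	return 0;
}

Callers in the RDMA_PS_UDP or RDMA_PS_IPOIB port spaces pass IB_QPT_UD instead, which is the same mapping ucma_get_qp_type() below applies on behalf of userspace.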
@@ -2,6 +2,7 @@ menuconfig INFINIBAND
	tristate "InfiniBand support"
	depends on PCI || BROKEN
	depends on HAS_IOMEM
+	depends on NET
	---help---
	  Core support for InfiniBand (IB).  Make sure to also select
	  any protocols you wish to use as well as drivers for your
......
@@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
				$(user_access-y)

ib_core-y :=		packer.o ud_header.o verbs.o sysfs.o \
-			device.o fmr_pool.o cache.o
+			device.o fmr_pool.o cache.o netlink.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o

ib_mad-y :=		mad.o smi.o agent.o mad_rmpp.o
......
@@ -3639,8 +3639,16 @@ static struct kobj_type cm_port_obj_type = {
	.release = cm_release_port_obj
};

+static char *cm_devnode(struct device *dev, mode_t *mode)
+{
+	*mode = 0666;
+	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
struct class cm_class = {
+	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
+	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);
......
This diff is collapsed.
@@ -38,6 +38,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
+#include <rdma/rdma_netlink.h>

#include "core_priv.h"
@@ -725,22 +726,40 @@ static int __init ib_core_init(void)
		return -ENOMEM;

	ret = ib_sysfs_setup();
-	if (ret)
+	if (ret) {
		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+		goto err;
+	}
+
+	ret = ibnl_init();
+	if (ret) {
+		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+		goto err_sysfs;
+	}

	ret = ib_cache_setup();
	if (ret) {
		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
-		ib_sysfs_cleanup();
-		destroy_workqueue(ib_wq);
+		goto err_nl;
	}

+	return 0;
+
+err_nl:
+	ibnl_cleanup();
+
+err_sysfs:
+	ib_sysfs_cleanup();
+
+err:
+	destroy_workqueue(ib_wq);
+
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
+	ibnl_cleanup();
	ib_sysfs_cleanup();
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
......
@@ -276,6 +276,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
		goto error1;
	}

+	/* Verify the QP requested is supported.  For example, Ethernet devices
+	 * will not have QP0 */
+	if (!port_priv->qp_info[qpn].qp) {
+		ret = ERR_PTR(-EPROTONOSUPPORT);
+		goto error1;
+	}
+
	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
......
/*
* Copyright (c) 2010 Voltaire Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
struct ibnl_client {
struct list_head list;
int index;
int nops;
const struct ibnl_client_cbs *cb_table;
};
static DEFINE_MUTEX(ibnl_mutex);
static struct sock *nls;
static LIST_HEAD(client_list);
int ibnl_add_client(int index, int nops,
const struct ibnl_client_cbs cb_table[])
{
struct ibnl_client *cur;
struct ibnl_client *nl_client;
nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL);
if (!nl_client)
return -ENOMEM;
nl_client->index = index;
nl_client->nops = nops;
nl_client->cb_table = cb_table;
mutex_lock(&ibnl_mutex);
list_for_each_entry(cur, &client_list, list) {
if (cur->index == index) {
pr_warn("Client for %d already exists\n", index);
mutex_unlock(&ibnl_mutex);
kfree(nl_client);
return -EINVAL;
}
}
list_add_tail(&nl_client->list, &client_list);
mutex_unlock(&ibnl_mutex);
return 0;
}
EXPORT_SYMBOL(ibnl_add_client);
int ibnl_remove_client(int index)
{
struct ibnl_client *cur, *next;
mutex_lock(&ibnl_mutex);
list_for_each_entry_safe(cur, next, &client_list, list) {
if (cur->index == index) {
list_del(&(cur->list));
mutex_unlock(&ibnl_mutex);
kfree(cur);
return 0;
}
}
pr_warn("Can't remove callback for client idx %d. Not found\n", index);
mutex_unlock(&ibnl_mutex);
return -EINVAL;
}
EXPORT_SYMBOL(ibnl_remove_client);
void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
int len, int client, int op)
{
unsigned char *prev_tail;
prev_tail = skb_tail_pointer(skb);
*nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
len, NLM_F_MULTI);
(*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
return NLMSG_DATA(*nlh);
nlmsg_failure:
nlmsg_trim(skb, prev_tail);
return NULL;
}
EXPORT_SYMBOL(ibnl_put_msg);
int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
int len, void *data, int type)
{
unsigned char *prev_tail;
prev_tail = skb_tail_pointer(skb);
NLA_PUT(skb, type, len, data);
nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
return 0;
nla_put_failure:
nlmsg_trim(skb, prev_tail - nlh->nlmsg_len);
return -EMSGSIZE;
}
EXPORT_SYMBOL(ibnl_put_attr);
static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct ibnl_client *client;
int type = nlh->nlmsg_type;
int index = RDMA_NL_GET_CLIENT(type);
int op = RDMA_NL_GET_OP(type);
list_for_each_entry(client, &client_list, list) {
if (client->index == index) {
if (op < 0 || op >= client->nops ||
!client->cb_table[RDMA_NL_GET_OP(op)].dump)
return -EINVAL;
return netlink_dump_start(nls, skb, nlh,
client->cb_table[op].dump,
NULL);
}
}
pr_info("Index %d wasn't found in client list\n", index);
return -EINVAL;
}
static void ibnl_rcv(struct sk_buff *skb)
{
mutex_lock(&ibnl_mutex);
netlink_rcv_skb(skb, &ibnl_rcv_msg);
mutex_unlock(&ibnl_mutex);
}
int __init ibnl_init(void)
{
nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv,
NULL, THIS_MODULE);
if (!nls) {
pr_warn("Failed to create netlink socket\n");
return -ENOMEM;
}
return 0;
}
void ibnl_cleanup(void)
{
struct ibnl_client *cur, *next;
mutex_lock(&ibnl_mutex);
list_for_each_entry_safe(cur, next, &client_list, list) {
list_del(&(cur->list));
kfree(cur);
}
mutex_unlock(&ibnl_mutex);
netlink_kernel_release(nls);
}
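The exporter side of this infrastructure is a client index plus a table of dump callbacks registered with ibnl_add_client(). A minimal, hypothetical registration sketch follows; the function and table names are placeholders, and the real RDMA CM registration lives in the collapsed cma.c diff above:

#include <linux/module.h>
#include <rdma/rdma_netlink.h>

/* Placeholder dump callback: a real client would fill the skb here. */
static int my_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return skb->len;
}

static const struct ibnl_client_cbs my_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = my_stats_dump },
};

static int __init my_client_init(void)
{
	/* Register one op table under the RDMA CM client index. */
	return ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS,
			       my_cb_table);
}

static void __exit my_client_exit(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
}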
@@ -367,13 +367,28 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
	return ret;
}

-static ssize_t ucma_create_id(struct ucma_file *file,
-			      const char __user *inbuf,
+static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
+{
+	switch (cmd->ps) {
+	case RDMA_PS_TCP:
+		*qp_type = IB_QPT_RC;
+		return 0;
+	case RDMA_PS_UDP:
+	case RDMA_PS_IPOIB:
+		*qp_type = IB_QPT_UD;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
+	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
@@ -382,6 +397,10 @@ static ssize_t ucma_create_id(struct ucma_file *file,
	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

+	ret = ucma_get_qp_type(&cmd, &qp_type);
+	if (ret)
+		return ret;
+
	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
@@ -389,7 +408,7 @@ static ssize_t ucma_create_id(struct ucma_file *file,
		return -ENOMEM;

	ctx->uid = cmd.uid;
-	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
+	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
@@ -1340,6 +1359,8 @@ static const struct file_operations ucma_fops = {
static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
+	.nodename	= "infiniband/rdma_cm",
+	.mode		= 0666,
	.fops		= &ucma_fops,
};
......
@@ -1176,6 +1176,11 @@ static void ib_umad_remove_one(struct ib_device *device)
	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

+static char *umad_devnode(struct device *dev, mode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
static int __init ib_umad_init(void)
{
	int ret;
@@ -1194,6 +1199,8 @@ static int __init ib_umad_init(void)
		goto out_chrdev;
	}

+	umad_class->devnode = umad_devnode;
+
	ret = class_create_file(umad_class, &class_attr_abi_version.attr);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
......
@@ -824,6 +824,12 @@ static void ib_uverbs_remove_one(struct ib_device *device)
	kfree(uverbs_dev);
}

+static char *uverbs_devnode(struct device *dev, mode_t *mode)
+{
+	*mode = 0666;
+	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
static int __init ib_uverbs_init(void)
{
	int ret;
@@ -842,6 +848,8 @@ static int __init ib_uverbs_init(void)
		goto out_chrdev;
	}

+	uverbs_class->devnode = uverbs_devnode;
+
	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
......
@@ -914,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
-		iwch_post_zb_read(ep->com.qp);
+		iwch_post_zb_read(ep);
	}

	goto out;
@@ -1078,6 +1078,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
+	unsigned long flags;
+	int post_zb = 0;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
@@ -1087,28 +1089,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
		return CPL_RET_BUF_DONE;
	}

+	spin_lock_irqsave(&ep->com.lock, flags);
	BUG_ON(credits != 1);
	dst_confirm(ep->dst);

	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
-		     __func__, ep, state_read(&ep->com));
+		     __func__, ep, ep->com.state);
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
-			     __func__, ep, state_read(&ep->com));
-			if (peer2peer)
-				iwch_post_zb_read(ep->com.qp);
+			     __func__, ep, ep->com.state);
+			if (peer2peer && ep->com.state == FPDU_MODE)
+				post_zb = 1;
		} else {
			PDBG("%s responder ep %p state %u\n",
-			     __func__, ep, state_read(&ep->com));
+			     __func__, ep, ep->com.state);
+			if (ep->com.state == MPA_REQ_RCVD) {
				ep->com.rpl_done = 1;
				wake_up(&ep->com.waitq);
			}
+		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
-		     __func__, ep, state_read(&ep->com));
+		     __func__, ep, ep->com.state);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
+	spin_unlock_irqrestore(&ep->com.lock, flags);
+	if (post_zb)
+		iwch_post_zb_read(ep);
	return CPL_RET_BUF_DONE;
}
......
@@ -332,7 +332,7 @@ int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw_bind *mw_bind);
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_qp *qhp);
+int iwch_post_zb_read(struct iwch_ep *ep);
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
void stop_read_rep_timer(struct iwch_qp *qhp);
......
@@ -738,7 +738,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
	}
}

-int iwch_post_zb_read(struct iwch_qp *qhp)
+int iwch_post_zb_read(struct iwch_ep *ep)
{
	union t3_wr *wqe;
	struct sk_buff *skb;
@@ -761,10 +761,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
	wqe->read.local_len = cpu_to_be32(0);
	wqe->read.local_to = cpu_to_be64(1);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
-	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
+	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
					V_FW_RIWR_LEN(flit_cnt));
	skb->priority = CPL_PRIORITY_DATA;
-	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
+	return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
}

/*
......
@@ -35,7 +35,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
-#include <linux/workqueue.h>
+#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -131,28 +131,21 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)

#define C4IW_WR_TO (10*HZ)

-enum {
-	REPLY_READY = 0,
-};
-
struct c4iw_wr_wait {
-	wait_queue_head_t wait;
-	unsigned long status;
+	struct completion completion;
	int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
-	wr_waitp->status = 0;
-	init_waitqueue_head(&wr_waitp->wait);
+	init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
-	set_bit(REPLY_READY, &wr_waitp->status);
-	wake_up(&wr_waitp->wait);
+	complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@@ -164,8 +157,7 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
	int ret;

	do {
-		ret = wait_event_timeout(wr_waitp->wait,
-				test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
+		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
		if (!ret) {
			printk(KERN_ERR MOD "%s - Device %s not responding - "
			       "tid %u qpid %u\n", func,
......
@@ -1138,7 +1138,9 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
	u32 i = 0;
	struct nes_device *nesdev;

-	strict_strtoul(buf, 0, &wqm_quanta_value);
+	if (kstrtoul(buf, 0, &wqm_quanta_value) < 0)
+		return -EINVAL;
+
	list_for_each_entry(nesdev, &nes_dev_list, list) {
		if (i == ee_flsh_adapter) {
			nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
......
config INFINIBAND_QIB
	tristate "QLogic PCIe HCA support"
-	depends on 64BIT && NET
+	depends on 64BIT
	---help---
	This is a low-level driver for QLogic PCIe QLE InfiniBand host
	channel adapters.  This driver does not support the QLogic
......
@@ -548,7 +548,7 @@ int iser_connect(struct iser_conn *ib_conn,

	iser_conn_get(ib_conn); /* ref ib conn's cma id */
	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					     (void *)ib_conn,
-					     RDMA_PS_TCP);
+					     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
......
@@ -1147,7 +1147,7 @@ static void srp_process_aer_req(struct srp_target_port *target,
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
-	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
+	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;
@@ -1231,7 +1231,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
			break;
		}

-		iu = (struct srp_iu *) wc.wr_id;
+		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
		list_add(&iu->list, &target->free_tx);
	}
}
......
@@ -24,6 +24,7 @@
/* leave room for NETLINK_DM (DM Events) */
#define NETLINK_SCSITRANSPORT	18	/* SCSI Transports */
#define NETLINK_ECRYPTFS	19
+#define NETLINK_RDMA		20

#define MAX_LINKS 32
......
+header-y += ib_user_cm.h
header-y += ib_user_mad.h
+header-y += ib_user_sa.h
+header-y += ib_user_verbs.h
+header-y += rdma_netlink.h
+header-y += rdma_user_cm.h
@@ -34,6 +34,7 @@
#ifndef IB_USER_CM_H
#define IB_USER_CM_H

+#include <linux/types.h>
#include <rdma/ib_user_sa.h>

#define IB_USER_CM_ABI_VERSION 5
......
@@ -111,6 +111,20 @@ struct rdma_cm_event {
	} param;
};

+enum rdma_cm_state {
+	RDMA_CM_IDLE,
+	RDMA_CM_ADDR_QUERY,
+	RDMA_CM_ADDR_RESOLVED,
+	RDMA_CM_ROUTE_QUERY,
+	RDMA_CM_ROUTE_RESOLVED,
+	RDMA_CM_CONNECT,
+	RDMA_CM_DISCONNECT,
+	RDMA_CM_ADDR_BOUND,
+	RDMA_CM_LISTEN,
+	RDMA_CM_DEVICE_REMOVAL,
+	RDMA_CM_DESTROYING
+};
+
struct rdma_cm_id;

/**
@@ -130,6 +144,7 @@ struct rdma_cm_id {
	rdma_cm_event_handler	event_handler;
	struct rdma_route	route;
	enum rdma_port_space	ps;
+	enum ib_qp_type		qp_type;
	u8			port_num;
};

@@ -140,9 +155,11 @@ struct rdma_cm_id {
 * returned rdma_id.
 * @context: User specified context associated with the id.
 * @ps: RDMA port space.
+ * @qp_type: type of queue pair associated with the id.
 */
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
-				  void *context, enum rdma_port_space ps);
+				  void *context, enum rdma_port_space ps,
+				  enum ib_qp_type qp_type);

/**
 * rdma_destroy_id - Destroys an RDMA identifier.
......
#ifndef _RDMA_NETLINK_H
#define _RDMA_NETLINK_H
#include <linux/types.h>
enum {
RDMA_NL_RDMA_CM = 1
};
#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)
enum {
RDMA_NL_RDMA_CM_ID_STATS = 0,
RDMA_NL_RDMA_CM_NUM_OPS
};
enum {
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1,
RDMA_NL_RDMA_CM_ATTR_DST_ADDR,
RDMA_NL_RDMA_CM_NUM_ATTR,
};
struct rdma_cm_id_stats {
__u32 qp_num;
__u32 bound_dev_if;
__u32 port_space;
__s32 pid;
__u8 cm_state;
__u8 node_type;
__u8 port_num;
__u8 qp_type;
};
#ifdef __KERNEL__
#include <linux/netlink.h>
struct ibnl_client_cbs {
int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
};
int ibnl_init(void);
void ibnl_cleanup(void);
/**
 * Add a client to the list of IB netlink exporters.
* @index: Index of the added client
* @nops: Number of supported ops by the added client.
* @cb_table: A table for op->callback
*
* Returns 0 on success or a negative error code.
*/
int ibnl_add_client(int index, int nops,
const struct ibnl_client_cbs cb_table[]);
/**
* Remove a client from IB netlink.
* @index: Index of the removed IB client.
*
* Returns 0 on success or a negative error code.
*/
int ibnl_remove_client(int index);
/**
* Put a new message in a supplied skb.
* @skb: The netlink skb.
* @nlh: Pointer to put the header of the new netlink message.
* @seq: The message sequence number.
* @len: The requested message length to allocate.
* @client: Calling IB netlink client.
* @op: message content op.
* Returns the allocated buffer on success and NULL on failure.
*/
void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
int len, int client, int op);
/**
* Put a new attribute in a supplied skb.
* @skb: The netlink skb.
* @nlh: Header of the netlink message to append the attribute to.
* @len: The length of the attribute data.
* @data: The attribute data to put.
* @type: The attribute type.
 * Returns 0 on success and a negative error code on failure.
*/
int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
int len, void *data, int type);
#endif /* __KERNEL__ */
#endif /* _RDMA_NETLINK_H */
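Putting the helpers together, a dump callback reserves a message with ibnl_put_msg(), fills a struct rdma_cm_id_stats, and appends address attributes with ibnl_put_attr(). A rough sketch under assumed placeholder names and values (the real statistics exporter is the RDMA/cma change collapsed earlier in this merge):

#include <linux/socket.h>
#include <linux/string.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

/* Sketch only: skb/cb come from the netlink dump machinery; my_src is a
 * made-up source address standing in for a real CM ID's bound address. */
static int my_dump_one(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rdma_cm_id_stats *id_stats;
	struct sockaddr_storage my_src = { .ss_family = AF_INET };
	struct nlmsghdr *nlh;

	id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
				sizeof(*id_stats), RDMA_NL_RDMA_CM,
				RDMA_NL_RDMA_CM_ID_STATS);
	if (!id_stats)
		return -EMSGSIZE;

	memset(id_stats, 0, sizeof(*id_stats));
	id_stats->port_space = RDMA_PS_TCP;
	id_stats->cm_state   = RDMA_CM_IDLE;

	if (ibnl_put_attr(skb, nlh, sizeof(my_src), &my_src,
			  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
		return -EMSGSIZE;

	return skb->len;
}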
@@ -589,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
		return -ENOMEM;

	/* Create the RDMA CM ID */
-	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
+	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
+				     IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;
......
@@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr)
	/* Create a CMA ID and try to bind it. This catches both
	 * IB and iWARP capable NICs.
	 */
-	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
+	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
......
@@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn)
	/* XXX I wonder what affect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
-				     RDMA_PS_TCP);
+				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
......
@@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr)
	/* Create a CMA ID and try to bind it. This catches both
	 * IB and iWARP capable NICs.
	 */
-	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
+	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
......
@@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn)
	/* XXX I wonder what affect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
-				     RDMA_PS_TCP);
+				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
......
@@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void)
	struct rdma_cm_id *cm_id;
	int ret;

-	cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP);
+	cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP,
+			       IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
......
@@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

-	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
+	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
+				   IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
......
@@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
	init_completion(&ia->ri_done);

-	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP);
+	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
......