Commit db392219 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (65 commits)
  IB: Fix typo in kerneldoc for ib_set_client_data()
  IPoIB: Add some likely/unlikely annotations in hot path
  IPoIB: Remove unused include of vmalloc.h
  IPoIB: Rejoin all multicast groups after a port event
  IPoIB: Create MCGs with all attributes required by RFC
  IB/sa: fix ib_sa_selector names
  IB/iser: INFINIBAND_ISER depends on INET
  IB/mthca: Simplify calls to mthca_cq_clean()
  RDMA/cma: Document rdma_accept() error handling
  IB/mthca: Recover from catastrophic errors
  RDMA/cma: Document rdma_destroy_id() function
  IB/cm: Do not track remote QPN in timewait state
  IB/sa: Require SA registration
  IPoIB: Refactor completion handling
  IB/iser: Do not use FMR for a single dma entry sg
  IB/iser: fix some debug prints
  IB/iser: make FMR "page size" be 4K and not PAGE_SIZE
  IB/iser: Limit the max size of a scsi command
  IB/iser: fix a check of SG alignment for RDMA
  RDMA/cma: Protect against adding device during destruction
  ...
parents 3e2ab46d 9cd330d3
@@ -298,6 +298,14 @@ L: info-linux@geode.amd.com
 W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
 S: Supported
 
+AMSO1100 RNIC DRIVER
+P: Tom Tucker
+M: tom@opengridcomputing.com
+P: Steve Wise
+M: swise@opengridcomputing.com
+L: openib-general@openib.org
+S: Maintained
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 P: Johannes Berg
 M: johannes@sipsolutions.net
@@ -991,6 +999,14 @@ EFS FILESYSTEM
 W: http://aeschi.ch.eu.org/efs/
 S: Orphan
 
+EHCA (IBM GX bus InfiniBand adapter) DRIVER:
+P: Hoang-Nam Nguyen
+M: hnguyen@de.ibm.com
+P: Christoph Raisch
+M: raisch@de.ibm.com
+L: openib-general@openib.org
+S: Supported
+
 EMU10K1 SOUND DRIVER
 P: James Courtier-Dutton
 M: James@superbug.demon.co.uk
......
@@ -14,7 +14,7 @@ config INFINIBAND_USER_MAD
 	---help---
 	  Userspace InfiniBand Management Datagram (MAD) support.  This
 	  is the kernel side of the userspace MAD support, which allows
 	  userspace processes to send and receive MADs. You will also
 	  need libibumad from <http://www.openib.org>.
 
 config INFINIBAND_USER_ACCESS
@@ -36,6 +36,8 @@ config INFINIBAND_ADDR_TRANS
 source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/ehca/Kconfig"
+source "drivers/infiniband/hw/amso1100/Kconfig"
 
 source "drivers/infiniband/ulp/ipoib/Kconfig"
......
 obj-$(CONFIG_INFINIBAND)		+= core/
 obj-$(CONFIG_INFINIBAND_MTHCA)		+= hw/mthca/
-obj-$(CONFIG_IPATH_CORE)		+= hw/ipath/
+obj-$(CONFIG_INFINIBAND_IPATH)		+= hw/ipath/
+obj-$(CONFIG_INFINIBAND_EHCA)		+= hw/ehca/
+obj-$(CONFIG_INFINIBAND_AMSO1100)	+= hw/amso1100/
 obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)		+= ulp/srp/
 obj-$(CONFIG_INFINIBAND_ISER)		+= ulp/iser/
 
 infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
 
 obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_mad.o ib_sa.o \
-					ib_cm.o $(infiniband-y)
+					ib_cm.o iw_cm.o $(infiniband-y)
 obj-$(CONFIG_INFINIBAND_USER_MAD) +=	ib_umad.o
 obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
@@ -14,6 +14,8 @@ ib_sa-y :=			sa_query.o
 
 ib_cm-y :=			cm.o
 
+iw_cm-y :=			iwcm.o
+
 rdma_cm-y :=			cma.o
 
 ib_addr-y :=			addr.o
......
@@ -61,12 +61,15 @@ static LIST_HEAD(req_list);
 static DECLARE_WORK(work, process_req, NULL);
 static struct workqueue_struct *addr_wq;
 
-static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
-		     unsigned char *dst_dev_addr)
+int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
+		   const unsigned char *dst_dev_addr)
 {
 	switch (dev->type) {
 	case ARPHRD_INFINIBAND:
-		dev_addr->dev_type = IB_NODE_CA;
+		dev_addr->dev_type = RDMA_NODE_IB_CA;
+		break;
+	case ARPHRD_ETHER:
+		dev_addr->dev_type = RDMA_NODE_RNIC;
 		break;
 	default:
 		return -EADDRNOTAVAIL;
@@ -78,6 +81,7 @@ static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 	memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
 	return 0;
 }
+EXPORT_SYMBOL(rdma_copy_addr);
 
 int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 {
@@ -89,7 +93,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 	if (!dev)
 		return -EADDRNOTAVAIL;
 
-	ret = copy_addr(dev_addr, dev, NULL);
+	ret = rdma_copy_addr(dev_addr, dev, NULL);
 	dev_put(dev);
 	return ret;
 }
@@ -161,7 +165,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
 	/* If the device does ARP internally, return 'done' */
 	if (rt->idev->dev->flags & IFF_NOARP) {
-		copy_addr(addr, rt->idev->dev, NULL);
+		rdma_copy_addr(addr, rt->idev->dev, NULL);
 		goto put;
 	}
@@ -181,7 +185,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
 		src_in->sin_addr.s_addr = rt->rt_src;
 	}
 
-	ret = copy_addr(addr, neigh->dev, neigh->ha);
+	ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
 release:
 	neigh_release(neigh);
 put:
@@ -245,7 +249,7 @@ static int addr_resolve_local(struct sockaddr_in *src_in,
 	if (ZERONET(src_ip)) {
 		src_in->sin_family = dst_in->sin_family;
 		src_in->sin_addr.s_addr = dst_ip;
-		ret = copy_addr(addr, dev, dev->dev_addr);
+		ret = rdma_copy_addr(addr, dev, dev->dev_addr);
 	} else if (LOOPBACK(src_ip)) {
 		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr);
 		if (!ret)
@@ -327,10 +331,10 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
 static int netevent_callback(struct notifier_block *self, unsigned long event,
			     void *ctx)
 {
 	if (event == NETEVENT_NEIGH_UPDATE) {
 		struct neighbour *neigh = ctx;
 
 		if (neigh->dev->type == ARPHRD_INFINIBAND &&
......
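The key behavioral change in addr.c above: the renamed and exported rdma_copy_addr() now accepts Ethernet devices and tags them as RNICs, so the RDMA CM can resolve addresses for iWARP adapters as well as IB HCAs. A minimal user-space sketch of that type dispatch follows; the enum values and names are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's ARP hardware types and the new
 * RDMA node types; the numeric values here are illustrative only. */
enum arphrd { ARPHRD_ETHER = 1, ARPHRD_INFINIBAND = 32 };
enum rdma_node { RDMA_NODE_IB_CA, RDMA_NODE_RNIC };

/* Mirrors the switch in rdma_copy_addr(): IB ports stay IB CAs, Ethernet
 * ports are classified as iWARP RNICs, everything else is rejected. */
static int classify(enum arphrd type, enum rdma_node *out)
{
	switch (type) {
	case ARPHRD_INFINIBAND: *out = RDMA_NODE_IB_CA; return 0;
	case ARPHRD_ETHER:      *out = RDMA_NODE_RNIC;  return 0;
	default:                return -1; /* -EADDRNOTAVAIL in the kernel */
	}
}

int main(void)
{
	enum rdma_node n;
	printf("ether -> %s\n", classify(ARPHRD_ETHER, &n) ? "fail" : "RNIC");
	return 0;
}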
@@ -62,12 +62,13 @@ struct ib_update_work {
 
 static inline int start_port(struct ib_device *device)
 {
-	return device->node_type == IB_NODE_SWITCH ? 0 : 1;
+	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
 }
 
 static inline int end_port(struct ib_device *device)
 {
-	return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
+	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
+		0 : device->phys_port_cnt;
 }
 
 int ib_get_cached_gid(struct ib_device *device,
......
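The cache.c change above keeps the same port-numbering rule under the new node-type names: a switch exposes only management port 0, while a CA's ports run from 1 to phys_port_cnt. A small stand-alone sketch of that convention, with a plain struct standing in for struct ib_device:

#include <stdio.h>

/* Illustrative model of start_port()/end_port() above. */
struct dev { int is_switch; int phys_port_cnt; };

static int start_port(const struct dev *d) { return d->is_switch ? 0 : 1; }
static int end_port(const struct dev *d)
{
	return d->is_switch ? 0 : d->phys_port_cnt;
}

int main(void)
{
	struct dev ca = { 0, 2 }, sw = { 1, 24 };
	for (int p = start_port(&ca); p <= end_port(&ca); ++p)
		printf("CA port %d\n", p);     /* ports 1 and 2 */
	for (int p = start_port(&sw); p <= end_port(&sw); ++p)
		printf("switch port %d\n", p); /* port 0 only */
	return 0;
}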
 /*
- * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
@@ -41,6 +41,7 @@
 #include <linux/idr.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/random.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
@@ -73,6 +74,7 @@ static struct ib_cm {
 	struct rb_root remote_id_table;
 	struct rb_root remote_sidr_table;
 	struct idr local_id_table;
+	__be32 random_id_operand;
 	struct workqueue_struct *wq;
 } cm;
@@ -177,7 +179,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	if (IS_ERR(ah))
 		return PTR_ERR(ah);
 
 	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
@@ -299,15 +301,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
 	unsigned long flags;
-	int ret;
+	int ret, id;
 	static int next_id;
 
 	do {
 		spin_lock_irqsave(&cm.lock, flags);
-		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
-					(__force int *) &cm_id_priv->id.local_id);
+		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
+					next_id++, &id);
 		spin_unlock_irqrestore(&cm.lock, flags);
 	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
 
+	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
 	return ret;
 }
@@ -316,7 +320,8 @@ static void cm_free_id(__be32 local_id)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cm.lock, flags);
-	idr_remove(&cm.local_id_table, (__force int) local_id);
+	idr_remove(&cm.local_id_table,
+		   (__force int) (local_id ^ cm.random_id_operand));
 	spin_unlock_irqrestore(&cm.lock, flags);
 }
@@ -324,7 +329,8 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
 {
 	struct cm_id_private *cm_id_priv;
 
-	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
+	cm_id_priv = idr_find(&cm.local_id_table,
+			      (__force int) (local_id ^ cm.random_id_operand));
 	if (cm_id_priv) {
 		if (cm_id_priv->id.remote_id == remote_id)
 			atomic_inc(&cm_id_priv->refcount);
@@ -679,6 +685,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 {
 	int wait_time;
 
+	cm_cleanup_timewait(cm_id_priv->timewait_info);
+
 	/*
 	 * The cm_id could be destroyed by the user before we exit timewait.
 	 * To protect against this, we search for the cm_id after exiting
@@ -1354,7 +1362,7 @@ static int cm_req_handler(struct cm_work *work)
							    id.local_id);
 	if (IS_ERR(cm_id_priv->timewait_info)) {
 		ret = PTR_ERR(cm_id_priv->timewait_info);
-		goto error1;
+		goto destroy;
 	}
 	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
 	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
@@ -1363,7 +1371,8 @@ static int cm_req_handler(struct cm_work *work)
 	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
 	if (!listen_cm_id_priv) {
 		ret = -EINVAL;
-		goto error2;
+		kfree(cm_id_priv->timewait_info);
+		goto destroy;
 	}
 
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1373,12 +1382,22 @@ static int cm_req_handler(struct cm_work *work)
 
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
 	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
-	if (ret)
-		goto error3;
+	if (ret) {
+		ib_get_cached_gid(work->port->cm_dev->device,
+				  work->port->port_num, 0, &work->path[0].sgid);
+		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+			       &work->path[0].sgid, sizeof work->path[0].sgid,
+			       NULL, 0);
+		goto rejected;
+	}
 	if (req_msg->alt_local_lid) {
 		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
-		if (ret)
-			goto error3;
+		if (ret) {
+			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+				       &work->path[0].sgid,
+				       sizeof work->path[0].sgid, NULL, 0);
+			goto rejected;
+		}
 	}
 	cm_id_priv->tid = req_msg->hdr.tid;
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
@@ -1400,12 +1419,11 @@ static int cm_req_handler(struct cm_work *work)
 	cm_deref_id(listen_cm_id_priv);
 	return 0;
 
-error3:	atomic_dec(&cm_id_priv->refcount);
+rejected:
+	atomic_dec(&cm_id_priv->refcount);
 	cm_deref_id(listen_cm_id_priv);
-	cm_cleanup_timewait(cm_id_priv->timewait_info);
-error2:	kfree(cm_id_priv->timewait_info);
-	cm_id_priv->timewait_info = NULL;
-error1:	ib_destroy_cm_id(&cm_id_priv->id);
+destroy:
+	ib_destroy_cm_id(cm_id);
 	return ret;
 }
@@ -2072,8 +2090,9 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
 		spin_unlock_irqrestore(&cm.lock, flags);
 		return NULL;
 	}
-	cm_id_priv = idr_find(&cm.local_id_table,
-			      (__force int) timewait_info->work.local_id);
+	cm_id_priv = idr_find(&cm.local_id_table, (__force int)
+			      (timewait_info->work.local_id ^
+			       cm.random_id_operand));
 	if (cm_id_priv) {
 		if (cm_id_priv->id.remote_id == remote_id)
 			atomic_inc(&cm_id_priv->refcount);
@@ -3125,7 +3144,8 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
 		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
 		if (cm_id_priv->responder_resources)
-			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
+			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
+						    IB_ACCESS_REMOTE_ATOMIC;
 		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
 		qp_attr->port_num = cm_id_priv->av.port->port_num;
 		ret = 0;
@@ -3262,6 +3282,9 @@ static void cm_add_one(struct ib_device *device)
 	int ret;
 	u8 i;
 
+	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+		return;
+
 	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
 	if (!cm_dev)
@@ -3349,6 +3372,7 @@ static int __init ib_cm_init(void)
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
 	idr_init(&cm.local_id_table);
+	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
 	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
 	cm.wq = create_workqueue("ib_cm");
......
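The most interesting piece of the cm.c diff is the ID randomization: idr_get_new_above() still hands out sequential slots, but the communication ID visible on the wire is that slot XORed with a boot-time random operand, and every lookup undoes the XOR, so remote peers can no longer guess the next connection's ID. A user-space sketch of the scheme, with a toy pointer table standing in for the kernel IDR and rand() standing in for get_random_bytes():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define TABLE_SIZE 16
static void *table[TABLE_SIZE];
static uint32_t random_id_operand;
static int next_id;

/* Allocate a slot, but publish slot ^ operand as the visible ID. */
static uint32_t alloc_id(void *obj)
{
	int slot = next_id++ % TABLE_SIZE;  /* idr_get_new_above() stand-in */
	table[slot] = obj;
	return (uint32_t)slot ^ random_id_operand;
}

/* Lookups undo the mask, as cm_get_id()/cm_free_id() do above. */
static void *find_id(uint32_t public_id)
{
	uint32_t slot = public_id ^ random_id_operand;
	return slot < TABLE_SIZE ? table[slot] : NULL;
}

int main(void)
{
	int obj = 42;
	srand((unsigned)time(NULL));
	random_id_operand = (uint32_t)rand();
	uint32_t id = alloc_id(&obj);
	printf("public id %#x -> %d\n", id, *(int *)find_id(id));
	return 0;
}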
@@ -385,7 +385,7 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
 EXPORT_SYMBOL(ib_get_client_data);
 
 /**
- * ib_set_client_data - Get IB client context
+ * ib_set_client_data - Set IB client context
  * @device:Device to set context for
  * @client:Client to set context for
  * @data:Context to set
@@ -505,7 +505,7 @@ int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
 {
-	if (device->node_type == IB_NODE_SWITCH) {
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		if (port_num)
 			return -EINVAL;
 	} else if (port_num < 1 || port_num > device->phys_port_cnt)
@@ -580,7 +580,7 @@ int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
 {
-	if (device->node_type == IB_NODE_SWITCH) {
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		if (port_num)
 			return -EINVAL;
 	} else if (port_num < 1 || port_num > device->phys_port_cnt)
......
/*
* Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef IWCM_H
#define IWCM_H
enum iw_cm_state {
IW_CM_STATE_IDLE, /* unbound, inactive */
IW_CM_STATE_LISTEN, /* listen waiting for connect */
IW_CM_STATE_CONN_RECV, /* inbound waiting for user accept */
IW_CM_STATE_CONN_SENT, /* outbound waiting for peer accept */
IW_CM_STATE_ESTABLISHED, /* established */
IW_CM_STATE_CLOSING, /* disconnect */
IW_CM_STATE_DESTROYING /* object being deleted */
};
struct iwcm_id_private {
struct iw_cm_id id;
enum iw_cm_state state;
unsigned long flags;
struct ib_qp *qp;
struct completion destroy_comp;
wait_queue_head_t connect_wait;
struct list_head work_list;
spinlock_t lock;
atomic_t refcount;
struct list_head work_free_list;
};
#define IWCM_F_CALLBACK_DESTROY 1
#define IWCM_F_CONNECT_WAIT 2
#endif /* IWCM_H */
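The enum above encodes both connection directions of the iWARP CM: the passive side moves IDLE -> LISTEN -> CONN_RECV -> ESTABLISHED, the active side IDLE -> CONN_SENT -> ESTABLISHED, and both tear down through CLOSING or jump to DESTROYING. The transition table below is one illustrative reading of those comments, not code from the patch:

#include <stdio.h>

/* Mirrors the iw_cm_state values in iwcm.h above. */
enum iw_cm_state {
	IW_CM_STATE_IDLE, IW_CM_STATE_LISTEN, IW_CM_STATE_CONN_RECV,
	IW_CM_STATE_CONN_SENT, IW_CM_STATE_ESTABLISHED, IW_CM_STATE_CLOSING,
	IW_CM_STATE_DESTROYING, IW_CM_STATE_MAX
};

/* allowed[from][to]: a hypothetical legality table for the two paths. */
static const int allowed[IW_CM_STATE_MAX][IW_CM_STATE_MAX] = {
	[IW_CM_STATE_IDLE][IW_CM_STATE_LISTEN]           = 1,
	[IW_CM_STATE_IDLE][IW_CM_STATE_CONN_SENT]        = 1,
	[IW_CM_STATE_LISTEN][IW_CM_STATE_CONN_RECV]      = 1,
	[IW_CM_STATE_CONN_RECV][IW_CM_STATE_ESTABLISHED] = 1,
	[IW_CM_STATE_CONN_SENT][IW_CM_STATE_ESTABLISHED] = 1,
	[IW_CM_STATE_ESTABLISHED][IW_CM_STATE_CLOSING]   = 1,
	[IW_CM_STATE_CLOSING][IW_CM_STATE_IDLE]          = 1,
};

static int valid_transition(enum iw_cm_state from, enum iw_cm_state to)
{
	/* The object may be destroyed from any state. */
	return to == IW_CM_STATE_DESTROYING || allowed[from][to];
}

int main(void)
{
	printf("LISTEN -> CONN_RECV: %d\n",
	       valid_transition(IW_CM_STATE_LISTEN, IW_CM_STATE_CONN_RECV));
	printf("LISTEN -> ESTABLISHED: %d\n",
	       valid_transition(IW_CM_STATE_LISTEN, IW_CM_STATE_ESTABLISHED));
	return 0;
}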
@@ -1246,8 +1246,8 @@ static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
 	int i;
 
 	for (i = 0; i < MAX_MGMT_OUI; i++)
 		/* Is there matching OUI for this vendor class ? */
 		if (!memcmp(vendor_class->oui[i], oui, 3))
 			return i;
 
 	return -1;
@@ -2237,7 +2237,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
 	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
 		if (mad_send_wr->status == IB_WC_SUCCESS) {
 			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
 			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
 		}
 	}
@@ -2528,10 +2528,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 			}
 		}
 		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
 		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
@@ -2606,7 +2606,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	struct ib_qp *qp;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
 		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
 		return -ENOMEM;
 	}
@@ -2876,7 +2876,10 @@ static void ib_mad_init_device(struct ib_device *device)
 {
 	int start, end, i;
 
-	if (device->node_type == IB_NODE_SWITCH) {
+	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+		return;
+
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		start = 0;
 		end   = 0;
 	} else {
@@ -2923,7 +2926,7 @@ static void ib_mad_remove_device(struct ib_device *device)
 {
 	int i, num_ports, cur_port;
 
-	if (device->node_type == IB_NODE_SWITCH) {
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		num_ports = 1;
 		cur_port = 0;
 	} else {
......
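The same guard recurs across cm.c, mad.c, sa_query.c, ucm.c and user_mad.c in this merge: now that iWARP RNICs register as ib_devices, each IB-only client maps the node type to a transport and returns early for non-IB devices. A compact stand-alone model of the pattern, with illustrative enum values rather than the kernel's:

#include <stdio.h>

enum rdma_node_type { RDMA_NODE_IB_CA = 1, RDMA_NODE_IB_SWITCH,
		      RDMA_NODE_IB_ROUTER, RDMA_NODE_RNIC };
enum rdma_transport_type { RDMA_TRANSPORT_IB, RDMA_TRANSPORT_IWARP };

/* Stand-in for rdma_node_get_transport(): only RNICs speak iWARP. */
static enum rdma_transport_type node_get_transport(enum rdma_node_type t)
{
	return t == RDMA_NODE_RNIC ? RDMA_TRANSPORT_IWARP : RDMA_TRANSPORT_IB;
}

/* Shape of the early return added to ib_mad_init_device() and friends. */
static void ib_only_client_add_one(enum rdma_node_type node_type)
{
	if (node_get_transport(node_type) != RDMA_TRANSPORT_IB) {
		printf("skipping non-IB device\n");
		return;
	}
	printf("setting up IB device\n");
}

int main(void)
{
	ib_only_client_add_one(RDMA_NODE_IB_CA);
	ib_only_client_add_one(RDMA_NODE_RNIC);
	return 0;
}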
@@ -39,7 +39,6 @@
 #include <linux/completion.h>
 #include <linux/pci.h>
-#include <linux/kthread.h>
 #include <linux/workqueue.h>
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
......
@@ -33,8 +33,6 @@
  * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
  */
 
-#include <linux/dma-mapping.h>
-
 #include "mad_priv.h"
 #include "mad_rmpp.h"
@@ -60,6 +58,7 @@ struct mad_rmpp_recv {
 	int last_ack;
 	int seg_num;
 	int newwin;
+	int repwin;
 
 	__be64 tid;
 	u32 src_qp;
@@ -170,6 +169,32 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
 	return msg;
 }
 
+static void ack_ds_ack(struct ib_mad_agent_private *agent,
+		       struct ib_mad_recv_wc *recv_wc)
+{
+	struct ib_mad_send_buf *msg;
+	struct ib_rmpp_mad *rmpp_mad;
+	int ret;
+
+	msg = alloc_response_msg(&agent->agent, recv_wc);
+	if (IS_ERR(msg))
+		return;
+
+	rmpp_mad = msg->mad;
+	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);
+
+	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+	rmpp_mad->rmpp_hdr.seg_num = 0;
+	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);
+
+	ret = ib_post_send_mad(msg, NULL);
+	if (ret) {
+		ib_destroy_ah(msg->ah);
+		ib_free_send_mad(msg);
+	}
+}
+
 void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
 {
 	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
@@ -271,6 +296,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
 	rmpp_recv->newwin = 1;
 	rmpp_recv->seg_num = 1;
 	rmpp_recv->last_ack = 0;
+	rmpp_recv->repwin = 1;
 
 	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
 	rmpp_recv->tid = mad_hdr->tid;
@@ -365,7 +391,7 @@ static inline int window_size(struct ib_mad_agent_private *agent)
 static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
 {
 	struct ib_mad_recv_buf *seg_buf;
 	int cur_seg_num;
 
 	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
@@ -591,6 +617,16 @@ static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
 			break;
 }
 
+static void process_ds_ack(struct ib_mad_agent_private *agent,
+			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
+{
+	struct mad_rmpp_recv *rmpp_recv;
+
+	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
+	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
+		rmpp_recv->repwin = newwin;
+}
+
 static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
 {
@@ -616,8 +652,18 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 	spin_lock_irqsave(&agent->lock, flags);
 	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
-	if (!mad_send_wr)
-		goto out;	/* Unmatched ACK */
+	if (!mad_send_wr) {
+		if (!seg_num)
+			process_ds_ack(agent, mad_recv_wc, newwin);
+		goto out;	/* Unmatched or DS RMPP ACK */
+	}
+
+	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
+	    (mad_send_wr->timeout)) {
+		spin_unlock_irqrestore(&agent->lock, flags);
+		ack_ds_ack(agent, mad_recv_wc);
+		return;	/* Repeated ACK for DS RMPP transaction */
+	}
 
 	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
@@ -656,6 +702,9 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 		if (mad_send_wr->refcount == 1)
 			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
+		spin_unlock_irqrestore(&agent->lock, flags);
+		ack_ds_ack(agent, mad_recv_wc);
+		return;
 	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
@@ -772,6 +821,39 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 	return NULL;
 }
 
+static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
+{
+	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
+	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
+	struct mad_rmpp_recv *rmpp_recv;
+	struct ib_ah_attr ah_attr;
+	unsigned long flags;
+	int newwin = 1;
+
+	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
+		goto out;
+
+	spin_lock_irqsave(&agent->lock, flags);
+	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
+		if (rmpp_recv->tid != mad_hdr->tid ||
+		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
+		    rmpp_recv->class_version != mad_hdr->class_version ||
+		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
+			continue;
+
+		if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
+			continue;
+
+		if (rmpp_recv->slid == ah_attr.dlid) {
+			newwin = rmpp_recv->repwin;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&agent->lock, flags);
+out:
+	return newwin;
+}
+
 int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_rmpp_mad *rmpp_mad;
@@ -787,7 +869,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 		return IB_RMPP_RESULT_INTERNAL;
 	}
 
-	mad_send_wr->newwin = 1;
+	mad_send_wr->newwin = init_newwin(mad_send_wr);
 
 	/* We need to wait for the final ACK even if there isn't a response */
 	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
......
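The mad_rmpp.c changes implement the send side of dual-sided RMPP: the receive path records the flow-control window a requester advertised (repwin), and init_newwin() seeds a response's send window from it instead of the old fixed value of 1, saving an ACK round trip before the sender may stream segments. A toy model of that lookup, with a plain array in place of the agent's rmpp_list and illustrative numbers:

#include <stdio.h>

struct rmpp_recv { unsigned long tid; int repwin; };

/* Sketch of init_newwin(): a response to a known transaction resumes with
 * the peer's recorded window; anything else falls back to 1. */
static int init_newwin(const struct rmpp_recv *recvs, int n, unsigned long tid)
{
	for (int i = 0; i < n; i++)
		if (recvs[i].tid == tid)
			return recvs[i].repwin;
	return 1;
}

int main(void)
{
	struct rmpp_recv table[] = { { 0x1234, 8 }, { 0x5678, 32 } };
	printf("newwin for tid 0x5678: %d\n", init_newwin(table, 2, 0x5678));
	printf("newwin for tid 0x9999: %d\n", init_newwin(table, 2, 0x9999));
	return 0;
}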
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
+ * Copyright (c) 2006 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -75,6 +76,7 @@ struct ib_sa_device {
 struct ib_sa_query {
 	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
 	void (*release)(struct ib_sa_query *);
+	struct ib_sa_client    *client;
 	struct ib_sa_port      *port;
 	struct ib_mad_send_buf *mad_buf;
 	struct ib_sa_sm_ah     *sm_ah;
@@ -415,6 +417,31 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
 	}
 }
 
+void ib_sa_register_client(struct ib_sa_client *client)
+{
+	atomic_set(&client->users, 1);
+	init_completion(&client->comp);
+}
+EXPORT_SYMBOL(ib_sa_register_client);
+
+static inline void ib_sa_client_get(struct ib_sa_client *client)
+{
+	atomic_inc(&client->users);
+}
+
+static inline void ib_sa_client_put(struct ib_sa_client *client)
+{
+	if (atomic_dec_and_test(&client->users))
+		complete(&client->comp);
+}
+
+void ib_sa_unregister_client(struct ib_sa_client *client)
+{
+	ib_sa_client_put(client);
+	wait_for_completion(&client->comp);
+}
+EXPORT_SYMBOL(ib_sa_unregister_client);
+
 /**
  * ib_sa_cancel_query - try to cancel an SA query
  * @id:ID of query to cancel
@@ -557,6 +584,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 
 /**
  * ib_sa_path_rec_get - Start a Path get query
+ * @client:SA client
  * @device:device to send query on
  * @port_num: port number to send query on
  * @rec:Path Record to send in query
@@ -579,7 +607,8 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
  * error code.  Otherwise it is a query ID that can be used to cancel
  * the query.
  */
-int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
+int ib_sa_path_rec_get(struct ib_sa_client *client,
+		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
@@ -614,8 +643,10 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 		goto err1;
 	}
 
-	query->callback = callback;
-	query->context  = context;
+	ib_sa_client_get(client);
+	query->sa_query.client = client;
+	query->callback        = callback;
+	query->context         = context;
 
 	mad = query->sa_query.mad_buf->mad;
 	init_mad(mad, agent);
@@ -639,6 +670,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 
 err2:
 	*sa_query = NULL;
+	ib_sa_client_put(query->sa_query.client);
 	ib_free_send_mad(query->sa_query.mad_buf);
 
 err1:
@@ -671,6 +703,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
 
 /**
  * ib_sa_service_rec_query - Start Service Record operation
+ * @client:SA client
  * @device:device to send request on
  * @port_num: port number to send request on
  * @method:SA method - should be get, set, or delete
@@ -695,7 +728,8 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
  * error code.  Otherwise it is a request ID that can be used to cancel
  * the query.
  */
-int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
+int ib_sa_service_rec_query(struct ib_sa_client *client,
+			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
@@ -735,8 +769,10 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
 		goto err1;
 	}
 
-	query->callback = callback;
-	query->context  = context;
+	ib_sa_client_get(client);
+	query->sa_query.client = client;
+	query->callback        = callback;
+	query->context         = context;
 
 	mad = query->sa_query.mad_buf->mad;
 	init_mad(mad, agent);
@@ -761,6 +797,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
 
 err2:
 	*sa_query = NULL;
+	ib_sa_client_put(query->sa_query.client);
 	ib_free_send_mad(query->sa_query.mad_buf);
 
 err1:
@@ -791,7 +828,8 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
 	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
 }
 
-int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
+int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
+			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
@@ -827,8 +865,10 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 		goto err1;
 	}
 
-	query->callback = callback;
-	query->context  = context;
+	ib_sa_client_get(client);
+	query->sa_query.client = client;
+	query->callback        = callback;
+	query->context         = context;
 
 	mad = query->sa_query.mad_buf->mad;
 	init_mad(mad, agent);
@@ -853,6 +893,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 
 err2:
 	*sa_query = NULL;
+	ib_sa_client_put(query->sa_query.client);
 	ib_free_send_mad(query->sa_query.mad_buf);
 
 err1:
@@ -887,8 +928,9 @@ static void send_handler(struct ib_mad_agent *agent,
 	idr_remove(&query_idr, query->id);
 	spin_unlock_irqrestore(&idr_lock, flags);
 
 	ib_free_send_mad(mad_send_wc->send_buf);
 	kref_put(&query->sm_ah->ref, free_sm_ah);
+	ib_sa_client_put(query->client);
 	query->release(query);
 }
 
@@ -919,7 +961,10 @@ static void ib_sa_add_one(struct ib_device *device)
 	struct ib_sa_device *sa_dev;
 	int s, e, i;
 
-	if (device->node_type == IB_NODE_SWITCH)
+	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+		return;
+
+	if (device->node_type == RDMA_NODE_IB_SWITCH)
 		s = e = 0;
 	else {
 		s = 1;
......
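The new ib_sa_client API above exists so the SA module can be torn down safely: registration takes an initial reference, every outstanding query takes another, and ib_sa_unregister_client() drops the initial reference and then blocks until the last query's send_handler() runs. A user-space sketch of the same lifetime rule, using a pthread mutex and condition variable where the kernel uses atomic_t and struct completion:

#include <pthread.h>
#include <stdio.h>

struct sa_client {
	int users;
	pthread_mutex_t lock;
	pthread_cond_t done;
};

static void client_register(struct sa_client *c)   /* ib_sa_register_client() */
{
	c->users = 1;
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->done, NULL);
}

static void client_get(struct sa_client *c)        /* query submitted */
{
	pthread_mutex_lock(&c->lock);
	c->users++;
	pthread_mutex_unlock(&c->lock);
}

static void client_put(struct sa_client *c)        /* query completed */
{
	pthread_mutex_lock(&c->lock);
	if (--c->users == 0)
		pthread_cond_signal(&c->done);
	pthread_mutex_unlock(&c->lock);
}

static void client_unregister(struct sa_client *c) /* ib_sa_unregister_client() */
{
	pthread_mutex_lock(&c->lock);
	c->users--;                 /* drop the registration reference */
	while (c->users)            /* wait out any in-flight queries */
		pthread_cond_wait(&c->done, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct sa_client c;
	client_register(&c);
	client_get(&c);        /* pretend a path record query is in flight */
	client_put(&c);        /* its completion handler fires */
	client_unregister(&c); /* returns immediately: nothing pending */
	printf("client torn down safely\n");
	return 0;
}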
@@ -64,7 +64,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
 
 	/* C14-9:2 */
 	if (hop_ptr && hop_ptr < hop_cnt) {
-		if (node_type != IB_NODE_SWITCH)
+		if (node_type != RDMA_NODE_IB_SWITCH)
 			return 0;
 
 		/* smp->return_path set when received */
@@ -77,7 +77,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
 	if (hop_ptr == hop_cnt) {
 		/* smp->return_path set when received */
 		smp->hop_ptr++;
-		return (node_type == IB_NODE_SWITCH ||
+		return (node_type == RDMA_NODE_IB_SWITCH ||
			smp->dr_dlid == IB_LID_PERMISSIVE);
 	}
@@ -95,7 +95,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
 
 	/* C14-13:2 */
 	if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
-		if (node_type != IB_NODE_SWITCH)
+		if (node_type != RDMA_NODE_IB_SWITCH)
 			return 0;
 
 		smp->hop_ptr--;
@@ -107,7 +107,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
 	if (hop_ptr == 1) {
 		smp->hop_ptr--;
 		/* C14-13:3 -- SMPs destined for SM shouldn't be here */
-		return (node_type == IB_NODE_SWITCH ||
+		return (node_type == RDMA_NODE_IB_SWITCH ||
			smp->dr_slid == IB_LID_PERMISSIVE);
 	}
@@ -142,7 +142,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
 
 	/* C14-9:2 -- intermediate hop */
 	if (hop_ptr && hop_ptr < hop_cnt) {
-		if (node_type != IB_NODE_SWITCH)
+		if (node_type != RDMA_NODE_IB_SWITCH)
 			return 0;
 
 		smp->return_path[hop_ptr] = port_num;
@@ -156,7 +156,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
 		smp->return_path[hop_ptr] = port_num;
 		/* smp->hop_ptr updated when sending */
 
-		return (node_type == IB_NODE_SWITCH ||
+		return (node_type == RDMA_NODE_IB_SWITCH ||
			smp->dr_dlid == IB_LID_PERMISSIVE);
 	}
@@ -175,7 +175,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
 
 	/* C14-13:2 */
 	if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
-		if (node_type != IB_NODE_SWITCH)
+		if (node_type != RDMA_NODE_IB_SWITCH)
 			return 0;
 
 		/* smp->hop_ptr updated when sending */
@@ -190,7 +190,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
 			return 1;
 		}
 		/* smp->hop_ptr updated when sending */
-		return (node_type == IB_NODE_SWITCH);
+		return (node_type == RDMA_NODE_IB_SWITCH);
 	}
 
 	/* C14-13:4 -- hop_ptr = 0 -> give to SM */
......
@@ -68,7 +68,7 @@ struct port_table_attribute {
 	int			index;
 };
 
 static inline int ibdev_is_alive(const struct ib_device *dev)
 {
 	return dev->reg_state == IB_DEV_REGISTERED;
 }
 
@@ -589,10 +589,11 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf)
 		return -ENODEV;
 
 	switch (dev->node_type) {
-	case IB_NODE_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
-	case IB_NODE_SWITCH:	  return sprintf(buf, "%d: switch\n", dev->node_type);
-	case IB_NODE_ROUTER:	  return sprintf(buf, "%d: router\n", dev->node_type);
-	default:		  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
+	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
+	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
+	case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
+	case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
+	default:		  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
 	}
 }
 
@@ -708,7 +709,7 @@ int ib_device_register_sysfs(struct ib_device *device)
 	if (ret)
 		goto err_put;
 
-	if (device->node_type == IB_NODE_SWITCH) {
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		ret = add_port(device, 0);
 		if (ret)
 			goto err_put;
......
@@ -309,9 +309,9 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
 		info	      = evt->param.apr_rcvd.apr_info;
 		break;
 	case IB_CM_SIDR_REQ_RECEIVED:
 		uvt->resp.u.sidr_req_resp.pkey =
					evt->param.sidr_req_rcvd.pkey;
 		uvt->resp.u.sidr_req_resp.port =
					evt->param.sidr_req_rcvd.port;
 		uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
 		break;
@@ -1237,7 +1237,7 @@ static struct class ucm_class = {
 static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
 {
 	struct ib_ucm_device *dev;
 
 	dev = container_of(class_dev, struct ib_ucm_device, class_dev);
 	return sprintf(buf, "%s\n", dev->ib_dev->name);
 }
@@ -1247,7 +1247,8 @@ static void ib_ucm_add_one(struct ib_device *device)
 {
 	struct ib_ucm_device *ucm_dev;
 
-	if (!device->alloc_ucontext)
+	if (!device->alloc_ucontext ||
+	    rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
 		return;
 
 	ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
......
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -1032,7 +1032,10 @@ static void ib_umad_add_one(struct ib_device *device)
 	struct ib_umad_device *umad_dev;
 	int s, e, i;
 
-	if (device->node_type == IB_NODE_SWITCH)
+	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+		return;
+
+	if (device->node_type == RDMA_NODE_IB_SWITCH)
 		s = e = 0;
 	else {
 		s = 1;
......
@@ -155,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
 }
 
 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
-					struct ib_ucontext *context)
+					struct ib_ucontext *context, int nested)
 {
 	struct ib_uobject *uobj;
 
@@ -163,7 +163,10 @@ static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
 	if (!uobj)
 		return NULL;
 
-	down_read(&uobj->mutex);
+	if (nested)
+		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
+	else
+		down_read(&uobj->mutex);
 	if (!uobj->live) {
 		put_uobj_read(uobj);
 		return NULL;
@@ -190,17 +193,18 @@ static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
 	return uobj;
 }
 
-static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context)
+static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
+			  int nested)
 {
 	struct ib_uobject *uobj;
 
-	uobj = idr_read_uobj(idr, id, context);
+	uobj = idr_read_uobj(idr, id, context, nested);
 	return uobj ? uobj->object : NULL;
 }
 
 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
 {
-	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context);
+	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
 }
 
 static void put_pd_read(struct ib_pd *pd)
@@ -208,9 +212,9 @@ static void put_pd_read(struct ib_pd *pd)
 	put_uobj_read(pd->uobject);
 }
 
-static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context)
+static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
 {
-	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context);
+	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
 }
 
 static void put_cq_read(struct ib_cq *cq)
@@ -220,7 +224,7 @@ static void put_cq_read(struct ib_cq *cq)
 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
 {
-	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context);
+	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
 }
 
 static void put_ah_read(struct ib_ah *ah)
@@ -230,7 +234,7 @@ static void put_ah_read(struct ib_ah *ah)
 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
 {
-	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context);
+	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
 }
 
 static void put_qp_read(struct ib_qp *qp)
@@ -240,7 +244,7 @@ static void put_qp_read(struct ib_qp *qp)
 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
 {
-	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context);
+	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
 }
 
 static void put_srq_read(struct ib_srq *srq)
@@ -837,7 +841,6 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 err_copy:
 	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
 
 err_free:
 	ib_destroy_cq(cq);
@@ -867,7 +870,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);
 
-	cq = idr_read_cq(cmd.cq_handle, file->ucontext);
+	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
 	if (!cq)
 		return -EINVAL;
@@ -875,11 +878,10 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 	if (ret)
 		goto out;
 
-	memset(&resp, 0, sizeof resp);
 	resp.cqe = cq->cqe;
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+			 &resp, sizeof resp.cqe))
 		ret = -EFAULT;
 
 out:
@@ -894,7 +896,6 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 {
 	struct ib_uverbs_poll_cq       cmd;
 	struct ib_uverbs_poll_cq_resp *resp;
-	struct ib_uobject             *uobj;
 	struct ib_cq                  *cq;
 	struct ib_wc                  *wc;
 	int                            ret = 0;
@@ -915,16 +916,15 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 		goto out_wc;
 	}
 
-	uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
-	if (!uobj) {
+	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
+	if (!cq) {
 		ret = -EINVAL;
 		goto out;
 	}
-	cq = uobj->object;
 
 	resp->count = ib_poll_cq(cq, cmd.ne, wc);
 
-	put_uobj_read(uobj);
+	put_cq_read(cq);
 
 	for (i = 0; i < resp->count; i++) {
 		resp->wc[i].wr_id = wc[i].wr_id;
@@ -959,21 +959,19 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				int out_len)
 {
 	struct ib_uverbs_req_notify_cq cmd;
-	struct ib_uobject             *uobj;
 	struct ib_cq                  *cq;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
-	if (!uobj)
+	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
+	if (!cq)
 		return -EINVAL;
-	cq = uobj->object;
 
 	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
 
-	put_uobj_read(uobj);
+	put_cq_read(cq);
 
 	return in_len;
 }
@@ -1064,9 +1062,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 
 	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
 	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
-	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
+	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
 	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
-		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
+		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
 
 	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
 		ret = -EINVAL;
@@ -1274,6 +1272,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    int out_len)
 {
 	struct ib_uverbs_modify_qp cmd;
+	struct ib_udata            udata;
 	struct ib_qp              *qp;
 	struct ib_qp_attr         *attr;
 	int                        ret;
@@ -1281,6 +1280,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
+		   out_len);
+
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr)
 		return -ENOMEM;
@@ -1337,7 +1339,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
 	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
 
-	ret = ib_modify_qp(qp, attr, cmd.attr_mask);
+	ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);
 
 	put_qp_read(qp);
 
@@ -1674,7 +1676,6 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			break;
 	}
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
 		ret = -EFAULT;
 
@@ -1724,7 +1725,6 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
			break;
 	}
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
 		ret = -EFAULT;
 
@@ -2055,6 +2055,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
int out_len) int out_len)
{ {
struct ib_uverbs_modify_srq cmd; struct ib_uverbs_modify_srq cmd;
struct ib_udata udata;
struct ib_srq *srq; struct ib_srq *srq;
struct ib_srq_attr attr; struct ib_srq_attr attr;
int ret; int ret;
...@@ -2062,6 +2063,9 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, ...@@ -2062,6 +2063,9 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd)) if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT; return -EFAULT;
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
srq = idr_read_srq(cmd.srq_handle, file->ucontext); srq = idr_read_srq(cmd.srq_handle, file->ucontext);
if (!srq) if (!srq)
return -EINVAL; return -EINVAL;
...@@ -2069,7 +2073,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, ...@@ -2069,7 +2073,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
attr.max_wr = cmd.max_wr; attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit; attr.srq_limit = cmd.srq_limit;
ret = ib_modify_srq(srq, &attr, cmd.attr_mask); ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
put_srq_read(srq); put_srq_read(srq);
......
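Two changes run through the hunks above. First, idr_read_cq() gains a third argument: 0 in the common case, 1 when ib_uverbs_create_qp() reads a recv CQ distinct from the send CQ, which is most plausibly a lock-nesting annotation so that taking a second CQ read lock does not trip lock-dependency checking. Second, ib_uverbs_modify_qp() and ib_uverbs_modify_srq() now wrap the bytes that follow the fixed command header in a struct ib_udata and call the driver's method directly, so a provider that needs extra per-verb userspace data can consume it. A minimal sketch of the consuming side, assuming a hypothetical private command layout (example_modify_srq_cmd is illustrative, not a real driver ABI):

#include <rdma/ib_verbs.h>

/* Hypothetical provider-private command that a matching userspace
 * library would append after the common modify_srq header. */
struct example_modify_srq_cmd {
	__u32 flags;
	__u32 reserved;
};

static int example_modify_srq(struct ib_srq *srq,
			      struct ib_srq_attr *attr,
			      enum ib_srq_attr_mask mask,
			      struct ib_udata *udata)
{
	struct example_modify_srq_cmd cmd = { 0 };

	/* In-kernel callers pass udata == NULL (see verbs.c below). */
	if (udata && udata->inlen >= sizeof cmd &&
	    ib_copy_from_udata(&cmd, udata, sizeof cmd))
		return -EFAULT;

	/* ... program the hardware from attr, mask and cmd ... */
	return 0;
}

The in-kernel entry points keep their old behavior by passing NULL for udata, as the verbs.c hunks below show.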
@@ -79,6 +79,23 @@ enum ib_rate mult_to_ib_rate(int mult)
 }
 EXPORT_SYMBOL(mult_to_ib_rate);
+enum rdma_transport_type
+rdma_node_get_transport(enum rdma_node_type node_type)
+{
+	switch (node_type) {
+	case RDMA_NODE_IB_CA:
+	case RDMA_NODE_IB_SWITCH:
+	case RDMA_NODE_IB_ROUTER:
+		return RDMA_TRANSPORT_IB;
+	case RDMA_NODE_RNIC:
+		return RDMA_TRANSPORT_IWARP;
+	default:
+		BUG();
+		return 0;
+	}
+}
+EXPORT_SYMBOL(rdma_node_get_transport);
 /* Protection domains */
 struct ib_pd *ib_alloc_pd(struct ib_device *device)
@@ -231,7 +248,7 @@ int ib_modify_srq(struct ib_srq *srq,
 		  struct ib_srq_attr *srq_attr,
 		  enum ib_srq_attr_mask srq_attr_mask)
 {
-	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
+	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_srq);
@@ -547,7 +564,7 @@ int ib_modify_qp(struct ib_qp *qp,
 		 struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask)
 {
-	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
+	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);
......
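The new rdma_node_get_transport() gives consumers a single switch point for behavior that differs between InfiniBand and iWARP devices; the Ammasso RNIC driver added below registers as RDMA_NODE_RNIC. A hedged caller sketch (example_setup() is hypothetical; the function and enums are the ones exported above):

#include <rdma/ib_verbs.h>

static int example_setup(struct ib_device *device)
{
	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		/* IB fabric: SA path records, LID addressing, CM MADs */
		return 0;
	case RDMA_TRANSPORT_IWARP:
		/* iWARP: connections ride on TCP via the RNIC */
		return 0;
	default:
		return -ENOSYS;	/* unreachable: the helper BUG()s first */
	}
}

The return 0 after BUG() in the helper itself exists only to keep the compiler's control-flow analysis quiet.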
ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG
EXTRA_CFLAGS += -DDEBUG
endif
obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
......
config INFINIBAND_AMSO1100
tristate "Ammasso 1100 HCA support"
depends on PCI && INET && INFINIBAND
---help---
This is a low-level driver for the Ammasso 1100 host
channel adapter (HCA).
config INFINIBAND_AMSO1100_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_AMSO1100
default n
---help---
	  This option causes the amso1100 driver to produce copious
	  debug messages.  Select this if you are developing the driver
	  or trying to diagnose a problem.
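The Makefile/Kconfig pair above works because defining DEBUG flips pr_debug() from a no-op into a real printk; in kernels of this vintage the macro in <linux/kernel.h> is essentially the following (paraphrased, not copied from this tree):

/* Paraphrased from <linux/kernel.h> of this era. */
#ifdef DEBUG
#define pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
#else
#define pr_debug(fmt, arg...) do { } while (0)
#endif

So building with CONFIG_INFINIBAND_AMSO1100_DEBUG=y compiles in every pr_debug() call in the driver, such as the one in c2_alloc_mqsp() below.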
......
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_AE_H_
#define _C2_AE_H_
/*
* WARNING: If you change this file, also bump C2_IVN_BASE
* in common/include/clustercore/c2_ivn.h.
*/
/*
* Asynchronous Event Identifiers
*
* These start at 0x80 only so it's obvious from inspection that
* they are not work-request statuses. This isn't critical.
*
 * NOTE: these event IDs must fit in eight bits.
*/
enum c2_event_id {
CCAE_REMOTE_SHUTDOWN = 0x80,
CCAE_ACTIVE_CONNECT_RESULTS,
CCAE_CONNECTION_REQUEST,
CCAE_LLP_CLOSE_COMPLETE,
CCAE_TERMINATE_MESSAGE_RECEIVED,
CCAE_LLP_CONNECTION_RESET,
CCAE_LLP_CONNECTION_LOST,
CCAE_LLP_SEGMENT_SIZE_INVALID,
CCAE_LLP_INVALID_CRC,
CCAE_LLP_BAD_FPDU,
CCAE_INVALID_DDP_VERSION,
CCAE_INVALID_RDMA_VERSION,
CCAE_UNEXPECTED_OPCODE,
CCAE_INVALID_DDP_QUEUE_NUMBER,
CCAE_RDMA_READ_NOT_ENABLED,
CCAE_RDMA_WRITE_NOT_ENABLED,
CCAE_RDMA_READ_TOO_SMALL,
CCAE_NO_L_BIT,
CCAE_TAGGED_INVALID_STAG,
CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
CCAE_TAGGED_INVALID_PD,
CCAE_WRAP_ERROR,
CCAE_BAD_CLOSE,
CCAE_BAD_LLP_CLOSE,
CCAE_INVALID_MSN_RANGE,
CCAE_INVALID_MSN_GAP,
CCAE_IRRQ_OVERFLOW,
CCAE_IRRQ_MSN_GAP,
CCAE_IRRQ_MSN_RANGE,
CCAE_IRRQ_INVALID_STAG,
CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
CCAE_IRRQ_INVALID_PD,
CCAE_IRRQ_WRAP_ERROR,
CCAE_CQ_SQ_COMPLETION_OVERFLOW,
CCAE_CQ_RQ_COMPLETION_ERROR,
CCAE_QP_SRQ_WQE_ERROR,
CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
CCAE_CQ_OVERFLOW,
CCAE_CQ_OPERATION_ERROR,
CCAE_SRQ_LIMIT_REACHED,
CCAE_QP_RQ_LIMIT_REACHED,
CCAE_SRQ_CATASTROPHIC_ERROR,
CCAE_RNIC_CATASTROPHIC_ERROR
/* WARNING: If you add more IDs, make sure their values fit in eight bits. */
};
/*
* Resource Indicators and Identifiers
*/
enum c2_resource_indicator {
C2_RES_IND_QP = 1,
C2_RES_IND_EP,
C2_RES_IND_CQ,
C2_RES_IND_SRQ,
};
#endif /* _C2_AE_H_ */
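Because the header's stated invariants are only that AE codes begin at 0x80 and fit in eight bits, a consumer can separate async events from work-request statuses with a single compare. A sketch relying on exactly that invariant (c2_is_async_event() is hypothetical, not part of the driver):

/* Anything at or above CCAE_REMOTE_SHUTDOWN (0x80) is an async
 * event; smaller codes are work-request statuses. */
static inline int c2_is_async_event(u8 code)
{
	return code >= CCAE_REMOTE_SHUTDOWN;
}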
......
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include "c2.h"
static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
struct sp_chunk **head)
{
int i;
struct sp_chunk *new_head;
new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
if (new_head == NULL)
return -ENOMEM;
new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
PAGE_SIZE, DMA_FROM_DEVICE);
pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
new_head->next = NULL;
new_head->head = 0;
/* build list where each index is the next free slot */
for (i = 0;
i < (PAGE_SIZE - sizeof(struct sp_chunk) -
sizeof(u16)) / sizeof(u16) - 1;
i++) {
new_head->shared_ptr[i] = i + 1;
}
/* terminate list */
new_head->shared_ptr[i] = 0xFFFF;
*head = new_head;
return 0;
}
int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
struct sp_chunk **root)
{
return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
}
void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
{
struct sp_chunk *next;
while (root) {
next = root->next;
dma_unmap_single(c2dev->ibdev.dma_device,
pci_unmap_addr(root, mapping), PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page((struct page *) root);
root = next;
}
}
u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
		   dma_addr_t *dma_addr, gfp_t gfp_mask)
{
	u16 mqsp;
	while (head) {
		mqsp = head->head;
		if (mqsp != 0xFFFF) {
			/* Pop the first free slot off this chunk's list */
			head->head = head->shared_ptr[mqsp];
			break;
		} else if (head->next == NULL) {
			/* Every chunk is full; try to grow the pool */
			if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
			    0) {
				head = head->next;
				mqsp = head->head;
				head->head = head->shared_ptr[mqsp];
				break;
			} else
				return NULL;
		} else
			head = head->next;
	}
	if (head) {
		/* Convert the slot's offset within the chunk into the
		 * bus address the adapter will DMA to. */
		*dma_addr = head->dma_addr +
			    ((unsigned long) &(head->shared_ptr[mqsp]) -
			     (unsigned long) head);
		pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__,
			 &(head->shared_ptr[mqsp]), (u64)*dma_addr);
		return &(head->shared_ptr[mqsp]);
	}
	return NULL;
}
void c2_free_mqsp(u16 * mqsp)
{
struct sp_chunk *head;
u16 idx;
/* The chunk containing this ptr begins at the page boundary */
head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
/* Link head to new mqsp */
*mqsp = head->head;
/* Compute the shared_ptr index */
idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
/* Point this index at the head */
head->shared_ptr[idx] = head->head;
/* Point head at this index */
head->head = idx;
}
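Taken together, c2_alloc.c implements a page-granular pool of DMA-visible u16 "shared pointer" slots with an intrusive free list threaded through the slots themselves. A hedged sketch of the intended call sequence, using only the four functions defined above (example_use_mqsp() is hypothetical):

static int example_use_mqsp(struct c2_dev *c2dev)
{
	struct sp_chunk *pool;
	dma_addr_t dma_addr;
	u16 *shared;

	if (c2_init_mqsp_pool(c2dev, GFP_KERNEL, &pool))
		return -ENOMEM;

	/* One slot: the adapter DMAs to dma_addr, the host reads
	 * the same u16 through *shared. */
	shared = c2_alloc_mqsp(c2dev, pool, &dma_addr, GFP_KERNEL);
	if (shared) {
		*shared = 0;
		c2_free_mqsp(shared);
	}

	c2_free_mqsp_pool(c2dev, pool);
	return 0;
}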
......
config INFINIBAND_EHCA
tristate "eHCA support"
depends on IBMEBUS && INFINIBAND
---help---
This driver supports the IBM pSeries eHCA InfiniBand adapter.
To compile the driver as a module, choose M here. The module
will be called ib_ehca.
config INFINIBAND_EHCA_SCALING
bool "Scaling support (EXPERIMENTAL)"
depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL
---help---
	  eHCA scaling support schedules the CQ callbacks to different CPUs.
	  To enable this feature, choose Y here.
......
#  Authors: Heiko J Schick <schickhj@de.ibm.com>
# Christoph Raisch <raisch@de.ibm.com>
# Joachim Fenkes <fenkes@de.ibm.com>
#
# Copyright (c) 2005 IBM Corporation
#
# All rights reserved.
#
# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
......
-config IPATH_CORE
-	tristate "QLogic InfiniPath Driver"
-	depends on 64BIT && PCI_MSI && NET
-	---help---
-	This is a low-level driver for QLogic InfiniPath host channel
-	adapters (HCAs) based on the HT-400 and PE-800 chips.
 config INFINIBAND_IPATH
-	tristate "QLogic InfiniPath Verbs Driver"
-	depends on IPATH_CORE && INFINIBAND
+	tristate "QLogic InfiniPath Driver"
+	depends on PCI_MSI && 64BIT && INFINIBAND
 	---help---
-	This is a driver that provides InfiniBand verbs support for
-	QLogic InfiniPath host channel adapters (HCAs). This
-	allows these devices to be used with both kernel upper level
-	protocols such as IP-over-InfiniBand as well as with userspace
-	applications (in conjunction with InfiniBand userspace access).
+	This is a driver for QLogic InfiniPath host channel adapters,
+	including InfiniBand verbs support.  This driver allows these
+	devices to be used with both kernel upper level protocols such
+	as IP-over-InfiniBand as well as with userspace applications
+	(in conjunction with InfiniBand userspace access).
......