Commit b1ef951e authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Make sure struct ipoib_neigh.queue is always initialized
  IB/iser: Use the new verbs DMA mapping functions
  IB/srp: Use new verbs IB DMA mapping functions
  IPoIB: Use the new verbs DMA mapping functions
  IB/core: Use the new verbs DMA mapping functions
  IB/ipath: Implement new verbs DMA mapping functions
  IB: Add DMA mapping functions to allow device drivers to interpose
  RDMA/cma: Export rdma cm interface to userspace
  RDMA/cma: Add support for RDMA_PS_UDP
  RDMA/cma: Allow early transition to RTS to handle lost CM messages
  RDMA/cma: Report connect info with connect events
  RDMA/cma: Remove unneeded qp_type parameter from rdma_cm
  IB/ipath: Fix IRQ for PCI Express HCAs
  RDMA/amso1100: Fix memory leak in c2_qp_modify()
  IB/iser: Remove unused "write-only" variables
  IB/ipath: Remove unused "write-only" variables
  IB/fmr: ib_flush_fmr_pool() may wait too long
parents 775ba7ad 82b39913
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
ib_cm.o iw_cm.o $(infiniband-y) ib_cm.o iw_cm.o $(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
$(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o device.o fmr_pool.o cache.o
...@@ -18,6 +20,8 @@ iw_cm-y := iwcm.o ...@@ -18,6 +20,8 @@ iw_cm-y := iwcm.o
rdma_cm-y := cma.o rdma_cm-y := cma.o
rdma_ucm-y := ucma.o
ib_addr-y := addr.o ib_addr-y := addr.o
ib_umad-y := user_mad.o ib_umad-y := user_mad.o
......
...@@ -3289,6 +3289,10 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, ...@@ -3289,6 +3289,10 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
spin_lock_irqsave(&cm_id_priv->lock, flags); spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) { switch (cm_id_priv->id.state) {
/* Allow transition to RTS before sending REP */
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD: case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT: case IB_CM_MRA_REP_SENT:
case IB_CM_REP_SENT: case IB_CM_REP_SENT:
......
This diff is collapsed.
...@@ -394,20 +394,12 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool); ...@@ -394,20 +394,12 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
*/ */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool) int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{ {
int serial; int serial = atomic_inc_return(&pool->req_ser);
atomic_inc(&pool->req_ser);
/*
* It's OK if someone else bumps req_ser again here -- we'll
* just wait a little longer.
*/
serial = atomic_read(&pool->req_ser);
wake_up_process(pool->thread); wake_up_process(pool->thread);
if (wait_event_interruptible(pool->force_wait, if (wait_event_interruptible(pool->force_wait,
atomic_read(&pool->flush_ser) - atomic_read(&pool->flush_ser) - serial >= 0))
atomic_read(&pool->req_ser) >= 0))
return -EINTR; return -EINTR;
return 0; return 0;
......
...@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) ...@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_agent = mad_send_wr->send_buf.mad_agent; mad_agent = mad_send_wr->send_buf.mad_agent;
sge = mad_send_wr->sg_list; sge = mad_send_wr->sg_list;
sge[0].addr = dma_map_single(mad_agent->device->dma_device, sge[0].addr = ib_dma_map_single(mad_agent->device,
mad_send_wr->send_buf.mad, mad_send_wr->send_buf.mad,
sge[0].length, sge[0].length,
DMA_TO_DEVICE); DMA_TO_DEVICE);
pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr); mad_send_wr->header_mapping = sge[0].addr;
sge[1].addr = dma_map_single(mad_agent->device->dma_device, sge[1].addr = ib_dma_map_single(mad_agent->device,
ib_get_payload(mad_send_wr), ib_get_payload(mad_send_wr),
sge[1].length, sge[1].length,
DMA_TO_DEVICE); DMA_TO_DEVICE);
pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr); mad_send_wr->payload_mapping = sge[1].addr;
spin_lock_irqsave(&qp_info->send_queue.lock, flags); spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) { if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
...@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) ...@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
} }
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
if (ret) { if (ret) {
dma_unmap_single(mad_agent->device->dma_device, ib_dma_unmap_single(mad_agent->device,
pci_unmap_addr(mad_send_wr, header_mapping), mad_send_wr->header_mapping,
sge[0].length, DMA_TO_DEVICE); sge[0].length, DMA_TO_DEVICE);
dma_unmap_single(mad_agent->device->dma_device, ib_dma_unmap_single(mad_agent->device,
pci_unmap_addr(mad_send_wr, payload_mapping), mad_send_wr->payload_mapping,
sge[1].length, DMA_TO_DEVICE); sge[1].length, DMA_TO_DEVICE);
} }
return ret; return ret;
} }
...@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, ...@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
mad_list); mad_list);
recv = container_of(mad_priv_hdr, struct ib_mad_private, header); recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
dma_unmap_single(port_priv->device->dma_device, ib_dma_unmap_single(port_priv->device,
pci_unmap_addr(&recv->header, mapping), recv->header.mapping,
sizeof(struct ib_mad_private) - sizeof(struct ib_mad_private) -
sizeof(struct ib_mad_private_header), sizeof(struct ib_mad_private_header),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
/* Setup MAD receive work completion from "normal" work completion */ /* Setup MAD receive work completion from "normal" work completion */
recv->header.wc = *wc; recv->header.wc = *wc;
...@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, ...@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
qp_info = send_queue->qp_info; qp_info = send_queue->qp_info;
retry: retry:
dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
pci_unmap_addr(mad_send_wr, header_mapping), mad_send_wr->header_mapping,
mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
pci_unmap_addr(mad_send_wr, payload_mapping), mad_send_wr->payload_mapping,
mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
queued_send_wr = NULL; queued_send_wr = NULL;
spin_lock_irqsave(&send_queue->lock, flags); spin_lock_irqsave(&send_queue->lock, flags);
list_del(&mad_list->list); list_del(&mad_list->list);
...@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, ...@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
break; break;
} }
} }
sg_list.addr = dma_map_single(qp_info->port_priv-> sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
device->dma_device, &mad_priv->grh,
&mad_priv->grh, sizeof *mad_priv -
sizeof *mad_priv - sizeof mad_priv->header,
sizeof mad_priv->header, DMA_FROM_DEVICE);
DMA_FROM_DEVICE); mad_priv->header.mapping = sg_list.addr;
pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue; mad_priv->header.mad_list.mad_queue = recv_queue;
...@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, ...@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
list_del(&mad_priv->header.mad_list.list); list_del(&mad_priv->header.mad_list.list);
recv_queue->count--; recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags); spin_unlock_irqrestore(&recv_queue->lock, flags);
dma_unmap_single(qp_info->port_priv->device->dma_device, ib_dma_unmap_single(qp_info->port_priv->device,
pci_unmap_addr(&mad_priv->header, mad_priv->header.mapping,
mapping), sizeof *mad_priv -
sizeof *mad_priv - sizeof mad_priv->header,
sizeof mad_priv->header, DMA_FROM_DEVICE);
DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, mad_priv); kmem_cache_free(ib_mad_cache, mad_priv);
printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
break; break;
...@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) ...@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
/* Remove from posted receive MAD list */ /* Remove from posted receive MAD list */
list_del(&mad_list->list); list_del(&mad_list->list);
dma_unmap_single(qp_info->port_priv->device->dma_device, ib_dma_unmap_single(qp_info->port_priv->device,
pci_unmap_addr(&recv->header, mapping), recv->header.mapping,
sizeof(struct ib_mad_private) - sizeof(struct ib_mad_private) -
sizeof(struct ib_mad_private_header), sizeof(struct ib_mad_private_header),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, recv); kmem_cache_free(ib_mad_cache, recv);
} }
......
...@@ -73,7 +73,7 @@ struct ib_mad_private_header { ...@@ -73,7 +73,7 @@ struct ib_mad_private_header {
struct ib_mad_list_head mad_list; struct ib_mad_list_head mad_list;
struct ib_mad_recv_wc recv_wc; struct ib_mad_recv_wc recv_wc;
struct ib_wc wc; struct ib_wc wc;
DECLARE_PCI_UNMAP_ADDR(mapping) u64 mapping;
} __attribute__ ((packed)); } __attribute__ ((packed));
struct ib_mad_private { struct ib_mad_private {
...@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private { ...@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
struct list_head agent_list; struct list_head agent_list;
struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_buf send_buf; struct ib_mad_send_buf send_buf;
DECLARE_PCI_UNMAP_ADDR(header_mapping) u64 header_mapping;
DECLARE_PCI_UNMAP_ADDR(payload_mapping) u64 payload_mapping;
struct ib_send_wr send_wr; struct ib_send_wr send_wr;
struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
__be64 tid; __be64 tid;
......
This diff is collapsed.
...@@ -32,8 +32,8 @@ ...@@ -32,8 +32,8 @@
#include <rdma/ib_marshall.h> #include <rdma/ib_marshall.h>
static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
struct ib_ah_attr *src) struct ib_ah_attr *src)
{ {
memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid); memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid);
dst->grh.flow_label = src->grh.flow_label; dst->grh.flow_label = src->grh.flow_label;
...@@ -47,6 +47,7 @@ static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, ...@@ -47,6 +47,7 @@ static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0; dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
dst->port_num = src->port_num; dst->port_num = src->port_num;
} }
EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src) struct ib_qp_attr *src)
......
...@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d ...@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
int i; int i;
list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) { list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
dma_unmap_sg(dev->dma_device, chunk->page_list, ib_dma_unmap_sg(dev, chunk->page_list,
chunk->nents, DMA_BIDIRECTIONAL); chunk->nents, DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->nents; ++i) { for (i = 0; i < chunk->nents; ++i) {
if (umem->writable && dirty) if (umem->writable && dirty)
set_page_dirty_lock(chunk->page_list[i].page); set_page_dirty_lock(chunk->page_list[i].page);
...@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem, ...@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
chunk->page_list[i].length = PAGE_SIZE; chunk->page_list[i].length = PAGE_SIZE;
} }
chunk->nmap = dma_map_sg(dev->dma_device, chunk->nmap = ib_dma_map_sg(dev,
&chunk->page_list[0], &chunk->page_list[0],
chunk->nents, chunk->nents,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) { if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i) for (i = 0; i < chunk->nents; ++i)
put_page(chunk->page_list[i].page); put_page(chunk->page_list[i].page);
......
...@@ -161,8 +161,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, ...@@ -161,8 +161,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
if (attr_mask & IB_QP_STATE) { if (attr_mask & IB_QP_STATE) {
/* Ensure the state is valid */ /* Ensure the state is valid */
if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
return -EINVAL; err = -EINVAL;
goto bail0;
}
wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state)); wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
...@@ -184,9 +186,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, ...@@ -184,9 +186,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
if (attr->cur_qp_state != IB_QPS_RTR && if (attr->cur_qp_state != IB_QPS_RTR &&
attr->cur_qp_state != IB_QPS_RTS && attr->cur_qp_state != IB_QPS_RTS &&
attr->cur_qp_state != IB_QPS_SQD && attr->cur_qp_state != IB_QPS_SQD &&
attr->cur_qp_state != IB_QPS_SQE) attr->cur_qp_state != IB_QPS_SQE) {
return -EINVAL; err = -EINVAL;
else goto bail0;
} else
wr.next_qp_state = wr.next_qp_state =
cpu_to_be32(to_c2_state(attr->cur_qp_state)); cpu_to_be32(to_c2_state(attr->cur_qp_state));
......
...@@ -6,6 +6,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o ...@@ -6,6 +6,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
ib_ipath-y := \ ib_ipath-y := \
ipath_cq.o \ ipath_cq.o \
ipath_diag.o \ ipath_diag.o \
ipath_dma.o \
ipath_driver.o \ ipath_driver.o \
ipath_eeprom.o \ ipath_eeprom.o \
ipath_file_ops.o \ ipath_file_ops.o \
......
/*
* Copyright (c) 2006 QLogic, Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_verbs.h>
#include "ipath_verbs.h"
#define BAD_DMA_ADDRESS ((u64) 0)
/*
* The following functions implement driver specific replacements
* for the ib_dma_*() functions.
*
* These functions return kernel virtual addresses instead of
* device bus addresses since the driver uses the CPU to copy
* data instead of using hardware DMA.
*/
/*
 * Report whether a DMA address produced by the map functions below
 * denotes a failed mapping.
 */
static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return (dma_addr == BAD_DMA_ADDRESS) ? 1 : 0;
}
/*
 * "Map" a single buffer for DMA.  Since this driver copies data with
 * the CPU rather than using hardware DMA, the kernel virtual address
 * itself doubles as the DMA address.
 */
static u64 ipath_dma_map_single(struct ib_device *dev,
				void *cpu_addr, size_t size,
				enum dma_data_direction direction)
{
	u64 addr;

	BUG_ON(!valid_dma_direction(direction));
	addr = (u64) cpu_addr;
	return addr;
}
/*
 * Undo ipath_dma_map_single().  Nothing to release: the "mapping" was
 * just the kernel virtual address, so only sanity-check the direction.
 */
static void ipath_dma_unmap_single(struct ib_device *dev,
				   u64 addr, size_t size,
				   enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
/*
 * "Map" part of a page for DMA.  Returns the page's kernel virtual
 * address plus @offset, or BAD_DMA_ADDRESS if the range does not fit
 * in one page or the page has no lowmem mapping.
 */
static u64 ipath_dma_map_page(struct ib_device *dev,
			      struct page *page,
			      unsigned long offset,
			      size_t size,
			      enum dma_data_direction direction)
{
	u64 addr;

	BUG_ON(!valid_dma_direction(direction));

	if (offset + size > PAGE_SIZE)
		return BAD_DMA_ADDRESS;

	/* TODO: handle highmem pages */
	addr = (u64) page_address(page);
	if (!addr)
		return BAD_DMA_ADDRESS;

	return addr + offset;
}
/*
 * Undo ipath_dma_map_page().  No resources were consumed by the map,
 * so only sanity-check the direction.
 */
static void ipath_dma_unmap_page(struct ib_device *dev,
				 u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
u64 addr;
int i;
int ret = nents;
BUG_ON(!valid_dma_direction(direction));
for (i = 0; i < nents; i++) {
addr = (u64) page_address(sg[i].page);
/* TODO: handle highmem pages */
if (!addr) {
ret = 0;
break;
}
}
return ret;
}
/*
 * Undo ipath_map_sg().  The map consumed no resources, so only
 * sanity-check the direction.
 */
static void ipath_unmap_sg(struct ib_device *dev,
			   struct scatterlist *sg, int nents,
			   enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
/*
 * Return the "DMA address" of a scatterlist entry: the page's kernel
 * virtual address plus the entry's offset, or 0 if the page has no
 * lowmem mapping.
 */
static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
	u64 addr = (u64) page_address(sg->page);

	if (!addr)
		return 0;

	return addr + sg->offset;
}
/* Length of a scatterlist entry is unchanged by this driver's "mapping". */
static unsigned int ipath_sg_dma_len(struct ib_device *dev,
				     struct scatterlist *sg)
{
	return sg->length;
}
/*
 * Intentionally a no-op: the CPU already sees the data directly since
 * no hardware DMA or bounce buffering is involved.
 */
static void ipath_sync_single_for_cpu(struct ib_device *dev,
				      u64 addr,
				      size_t size,
				      enum dma_data_direction dir)
{
}
/*
 * Intentionally a no-op: there is no device-visible copy of the data
 * to synchronize; the driver copies with the CPU.
 */
static void ipath_sync_single_for_device(struct ib_device *dev,
					 u64 addr,
					 size_t size,
					 enum dma_data_direction dir)
{
}
/*
 * Allocate a "coherent" buffer.  Ordinary pages suffice because no
 * hardware DMA is performed; the kernel virtual address is reported
 * as the DMA handle (0 on allocation failure).
 */
static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
				      u64 *dma_handle, gfp_t flag)
{
	struct page *p = alloc_pages(flag, get_order(size));
	void *addr = p ? page_address(p) : NULL;

	if (dma_handle)
		*dma_handle = (u64) addr;

	return addr;
}
/*
 * Free a buffer from ipath_dma_alloc_coherent().  The handle is
 * ignored; the CPU address alone identifies the pages.
 *
 * NOTE(review): @dma_handle is dma_addr_t here but the alloc side
 * fills in a u64 * — presumably matching the ops-table prototype;
 * confirm against struct ib_dma_mapping_ops in ib_verbs.h.
 */
static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_handle)
{
	free_pages((unsigned long) cpu_addr, get_order(size));
}
/*
 * Driver-specific replacements for the ib_dma_*() verbs helpers,
 * installed into ib_device->dma_ops at registration time.
 *
 * NOTE(review): positional initializers — the order must match the
 * field order of struct ib_dma_mapping_ops in ib_verbs.h exactly;
 * designated initializers would make this robust against reordering.
 */
struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
	ipath_mapping_error,
	ipath_dma_map_single,
	ipath_dma_unmap_single,
	ipath_dma_map_page,
	ipath_dma_unmap_page,
	ipath_map_sg,
	ipath_unmap_sg,
	ipath_sg_dma_address,
	ipath_sg_dma_len,
	ipath_sync_single_for_cpu,
	ipath_sync_single_for_device,
	ipath_dma_alloc_coherent,
	ipath_dma_free_coherent
};
...@@ -1825,8 +1825,6 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno, ...@@ -1825,8 +1825,6 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
*/ */
void ipath_shutdown_device(struct ipath_devdata *dd) void ipath_shutdown_device(struct ipath_devdata *dd)
{ {
u64 val;
ipath_dbg("Shutting down the device\n"); ipath_dbg("Shutting down the device\n");
dd->ipath_flags |= IPATH_LINKUNK; dd->ipath_flags |= IPATH_LINKUNK;
...@@ -1849,7 +1847,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd) ...@@ -1849,7 +1847,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
*/ */
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL); ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
/* flush it */ /* flush it */
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
/* /*
* enough for anything that's going to trickle out to have actually * enough for anything that's going to trickle out to have actually
* done so. * done so.
......
...@@ -699,7 +699,6 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, ...@@ -699,7 +699,6 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
int start_stop) int start_stop)
{ {
struct ipath_devdata *dd = pd->port_dd; struct ipath_devdata *dd = pd->port_dd;
u64 tval;
ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
start_stop ? "en" : "dis", dd->ipath_unit, start_stop ? "en" : "dis", dd->ipath_unit,
...@@ -729,7 +728,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, ...@@ -729,7 +728,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl); dd->ipath_rcvctrl);
/* now be sure chip saw it before we return */ /* now be sure chip saw it before we return */
tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
if (start_stop) { if (start_stop) {
/* /*
* And try to be sure that tail reg update has happened too. * And try to be sure that tail reg update has happened too.
...@@ -738,7 +737,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, ...@@ -738,7 +737,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
* in memory copy, since we could overwrite an update by the * in memory copy, since we could overwrite an update by the
* chip if we did. * chip if we did.
*/ */
tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
} }
/* always; new head should be equal to new tail; see above */ /* always; new head should be equal to new tail; see above */
bail: bail:
......
...@@ -1447,7 +1447,7 @@ static void ipath_ht_tidtemplate(struct ipath_devdata *dd) ...@@ -1447,7 +1447,7 @@ static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
static int ipath_ht_early_init(struct ipath_devdata *dd) static int ipath_ht_early_init(struct ipath_devdata *dd)
{ {
u32 __iomem *piobuf; u32 __iomem *piobuf;
u32 pioincr, val32, egrsize; u32 pioincr, val32;
int i; int i;
/* /*
...@@ -1467,7 +1467,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) ...@@ -1467,7 +1467,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
* errors interrupts if we ever see one). * errors interrupts if we ever see one).
*/ */
dd->ipath_rcvegrbufsize = dd->ipath_piosize2k; dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
egrsize = dd->ipath_rcvegrbufsize;
/* /*
* the min() check here is currently a nop, but it may not * the min() check here is currently a nop, but it may not
......
...@@ -602,7 +602,7 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) ...@@ -602,7 +602,7 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
*/ */
static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
{ {
u64 val, tmp, config1, prev_val; u64 val, config1, prev_val;
int ret = 0; int ret = 0;
ipath_dbg("Trying to bringup serdes\n"); ipath_dbg("Trying to bringup serdes\n");
...@@ -633,7 +633,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) ...@@ -633,7 +633,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
| INFINIPATH_SERDC0_L1PWR_DN; | INFINIPATH_SERDC0_L1PWR_DN;
ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
/* be sure chip saw it */ /* be sure chip saw it */
tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
udelay(5); /* need pll reset set at least for a bit */ udelay(5); /* need pll reset set at least for a bit */
/* /*
* after PLL is reset, set the per-lane Resets and TxIdle and * after PLL is reset, set the per-lane Resets and TxIdle and
...@@ -647,7 +647,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) ...@@ -647,7 +647,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
"and txidle (%llx)\n", (unsigned long long) val); "and txidle (%llx)\n", (unsigned long long) val);
ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
/* be sure chip saw it */ /* be sure chip saw it */
tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
/* need PLL reset clear for at least 11 usec before lane /* need PLL reset clear for at least 11 usec before lane
* resets cleared; give it a few more to be sure */ * resets cleared; give it a few more to be sure */
udelay(15); udelay(15);
...@@ -851,12 +851,12 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd, ...@@ -851,12 +851,12 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
int pos, ret; int pos, ret;
dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
dd->ipath_irq = pdev->irq;
ret = pci_enable_msi(dd->pcidev); ret = pci_enable_msi(dd->pcidev);
if (ret) if (ret)
ipath_dev_err(dd, "pci_enable_msi failed: %d, " ipath_dev_err(dd, "pci_enable_msi failed: %d, "
"interrupts may not work\n", ret); "interrupts may not work\n", ret);
/* continue even if it fails, we may still be OK... */ /* continue even if it fails, we may still be OK... */
dd->ipath_irq = pdev->irq;
if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
u16 control; u16 control;
......
...@@ -347,10 +347,9 @@ static int init_chip_first(struct ipath_devdata *dd, ...@@ -347,10 +347,9 @@ static int init_chip_first(struct ipath_devdata *dd,
static int init_chip_reset(struct ipath_devdata *dd, static int init_chip_reset(struct ipath_devdata *dd,
struct ipath_portdata **pdp) struct ipath_portdata **pdp)
{ {
struct ipath_portdata *pd;
u32 rtmp; u32 rtmp;
*pdp = pd = dd->ipath_pd[0]; *pdp = dd->ipath_pd[0];
/* ensure chip does no sends or receives while we re-initialize */ /* ensure chip does no sends or receives while we re-initialize */
dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U; dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
......
...@@ -598,10 +598,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) ...@@ -598,10 +598,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
* on close * on close
*/ */
if (errs & INFINIPATH_E_RRCVHDRFULL) { if (errs & INFINIPATH_E_RRCVHDRFULL) {
int any;
u32 hd, tl; u32 hd, tl;
ipath_stats.sps_hdrqfull++; ipath_stats.sps_hdrqfull++;
for (any = i = 0; i < dd->ipath_cfgports; i++) { for (i = 0; i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i]; struct ipath_portdata *pd = dd->ipath_pd[i];
if (i == 0) { if (i == 0) {
hd = dd->ipath_port0head; hd = dd->ipath_port0head;
......
...@@ -134,7 +134,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, ...@@ -134,7 +134,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
*/ */
if (sge->lkey == 0) { if (sge->lkey == 0) {
isge->mr = NULL; isge->mr = NULL;
isge->vaddr = bus_to_virt(sge->addr); isge->vaddr = (void *) sge->addr;
isge->length = sge->length; isge->length = sge->length;
isge->sge_length = sge->length; isge->sge_length = sge->length;
ret = 1; ret = 1;
...@@ -202,12 +202,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, ...@@ -202,12 +202,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
int ret; int ret;
/* /*
* We use RKEY == zero for physical addresses * We use RKEY == zero for kernel virtual addresses
* (see ipath_get_dma_mr). * (see ipath_get_dma_mr and ipath_dma.c).
*/ */
if (rkey == 0) { if (rkey == 0) {
sge->mr = NULL; sge->mr = NULL;
sge->vaddr = phys_to_virt(vaddr); sge->vaddr = (void *) vaddr;
sge->length = len; sge->length = len;
sge->sge_length = len; sge->sge_length = len;
ss->sg_list = NULL; ss->sg_list = NULL;
......
...@@ -54,6 +54,8 @@ static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr) ...@@ -54,6 +54,8 @@ static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
* @acc: access flags * @acc: access flags
* *
* Returns the memory region on success, otherwise returns an errno. * Returns the memory region on success, otherwise returns an errno.
* Note that all DMA addresses should be created via the
* struct ib_dma_mapping_ops functions (see ipath_dma.c).
*/ */
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc) struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
{ {
...@@ -149,8 +151,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd, ...@@ -149,8 +151,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
m = 0; m = 0;
n = 0; n = 0;
for (i = 0; i < num_phys_buf; i++) { for (i = 0; i < num_phys_buf; i++) {
mr->mr.map[m]->segs[n].vaddr = mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
phys_to_virt(buffer_list[i].addr);
mr->mr.map[m]->segs[n].length = buffer_list[i].size; mr->mr.map[m]->segs[n].length = buffer_list[i].size;
mr->mr.length += buffer_list[i].size; mr->mr.length += buffer_list[i].size;
n++; n++;
...@@ -347,7 +348,7 @@ int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list, ...@@ -347,7 +348,7 @@ int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
n = 0; n = 0;
ps = 1 << fmr->page_shift; ps = 1 << fmr->page_shift;
for (i = 0; i < list_len; i++) { for (i = 0; i < list_len; i++) {
fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]); fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
fmr->mr.map[m]->segs[n].length = ps; fmr->mr.map[m]->segs[n].length = ps;
if (++n == IPATH_SEGSZ) { if (++n == IPATH_SEGSZ) {
m++; m++;
......
...@@ -215,7 +215,6 @@ static ssize_t store_mlid(struct device *dev, ...@@ -215,7 +215,6 @@ static ssize_t store_mlid(struct device *dev,
size_t count) size_t count)
{ {
struct ipath_devdata *dd = dev_get_drvdata(dev); struct ipath_devdata *dd = dev_get_drvdata(dev);
int unit;
u16 mlid; u16 mlid;
int ret; int ret;
...@@ -223,8 +222,6 @@ static ssize_t store_mlid(struct device *dev, ...@@ -223,8 +222,6 @@ static ssize_t store_mlid(struct device *dev,
if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE) if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
goto invalid; goto invalid;
unit = dd->ipath_unit;
dd->ipath_mlid = mlid; dd->ipath_mlid = mlid;
goto bail; goto bail;
......
...@@ -1599,6 +1599,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) ...@@ -1599,6 +1599,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
dev->detach_mcast = ipath_multicast_detach; dev->detach_mcast = ipath_multicast_detach;
dev->process_mad = ipath_process_mad; dev->process_mad = ipath_process_mad;
dev->mmap = ipath_mmap; dev->mmap = ipath_mmap;
dev->dma_ops = &ipath_dma_mapping_ops;
snprintf(dev->node_desc, sizeof(dev->node_desc), snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s", init_utsname()->nodename); IPATH_IDSTR " %s", init_utsname()->nodename);
......
...@@ -812,4 +812,6 @@ extern unsigned int ib_ipath_max_srq_wrs; ...@@ -812,4 +812,6 @@ extern unsigned int ib_ipath_max_srq_wrs;
extern const u32 ib_ipath_rnr_table[]; extern const u32 ib_ipath_rnr_table[];
extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
#endif /* IPATH_VERBS_H */ #endif /* IPATH_VERBS_H */
...@@ -105,12 +105,12 @@ struct ipoib_mcast; ...@@ -105,12 +105,12 @@ struct ipoib_mcast;
struct ipoib_rx_buf { struct ipoib_rx_buf {
struct sk_buff *skb; struct sk_buff *skb;
dma_addr_t mapping; u64 mapping;
}; };
struct ipoib_tx_buf { struct ipoib_tx_buf {
struct sk_buff *skb; struct sk_buff *skb;
DECLARE_PCI_UNMAP_ADDR(mapping) u64 mapping;
}; };
/* /*
......
...@@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id) ...@@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
ret = ib_post_recv(priv->qp, &param, &bad_wr); ret = ib_post_recv(priv->qp, &param, &bad_wr);
if (unlikely(ret)) { if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
dma_unmap_single(priv->ca->dma_device, ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
priv->rx_ring[id].mapping, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_ring[id].skb); dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL; priv->rx_ring[id].skb = NULL;
} }
...@@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id) ...@@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb; struct sk_buff *skb;
dma_addr_t addr; u64 addr;
skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
if (!skb) if (!skb)
...@@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id) ...@@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
*/ */
skb_reserve(skb, 4); skb_reserve(skb, 4);
addr = dma_map_single(priv->ca->dma_device, addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
skb->data, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
if (unlikely(dma_mapping_error(addr))) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return -EIO; return -EIO;
} }
...@@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb; struct sk_buff *skb;
dma_addr_t addr; u64 addr;
ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
wr_id, wc->opcode, wc->status); wr_id, wc->opcode, wc->status);
...@@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_warn(priv, "failed recv event " ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n", "(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err); wc->status, wr_id, wc->vendor_err);
dma_unmap_single(priv->ca->dma_device, addr, ib_dma_unmap_single(priv->ca, addr,
IPOIB_BUF_SIZE, DMA_FROM_DEVICE); IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL; priv->rx_ring[wr_id].skb = NULL;
return; return;
...@@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid); wc->byte_len, wc->slid);
dma_unmap_single(priv->ca->dma_device, addr, ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
skb_put(skb, wc->byte_len); skb_put(skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES); skb_pull(skb, IB_GRH_BYTES);
...@@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &priv->tx_ring[wr_id]; tx_req = &priv->tx_ring[wr_id];
dma_unmap_single(priv->ca->dma_device, ib_dma_unmap_single(priv->ca, tx_req->mapping,
pci_unmap_addr(tx_req, mapping), tx_req->skb->len, DMA_TO_DEVICE);
tx_req->skb->len,
DMA_TO_DEVICE);
++priv->stats.tx_packets; ++priv->stats.tx_packets;
priv->stats.tx_bytes += tx_req->skb->len; priv->stats.tx_bytes += tx_req->skb->len;
...@@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) ...@@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
static inline int post_send(struct ipoib_dev_priv *priv, static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id, unsigned int wr_id,
struct ib_ah *address, u32 qpn, struct ib_ah *address, u32 qpn,
dma_addr_t addr, int len) u64 addr, int len)
{ {
struct ib_send_wr *bad_wr; struct ib_send_wr *bad_wr;
...@@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, ...@@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_tx_buf *tx_req; struct ipoib_tx_buf *tx_req;
dma_addr_t addr; u64 addr;
if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) { if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
...@@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, ...@@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
*/ */
tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb; tx_req->skb = skb;
addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len, addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(addr))) { if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
++priv->stats.tx_errors; ++priv->stats.tx_errors;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return; return;
} }
pci_unmap_addr_set(tx_req, mapping, addr); tx_req->mapping = addr;
if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, addr, skb->len))) { address->ah, qpn, addr, skb->len))) {
ipoib_warn(priv, "post_send failed\n"); ipoib_warn(priv, "post_send failed\n");
++priv->stats.tx_errors; ++priv->stats.tx_errors;
dma_unmap_single(priv->ca->dma_device, addr, skb->len, ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
DMA_TO_DEVICE);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} else { } else {
dev->trans_start = jiffies; dev->trans_start = jiffies;
...@@ -538,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev) ...@@ -538,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev)
while ((int) priv->tx_tail - (int) priv->tx_head < 0) { while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail & tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)]; (ipoib_sendq_size - 1)];
dma_unmap_single(priv->ca->dma_device, ib_dma_unmap_single(priv->ca,
pci_unmap_addr(tx_req, mapping), tx_req->mapping,
tx_req->skb->len, tx_req->skb->len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb); dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail; ++priv->tx_tail;
} }
for (i = 0; i < ipoib_recvq_size; ++i) for (i = 0; i < ipoib_recvq_size; ++i) {
if (priv->rx_ring[i].skb) { struct ipoib_rx_buf *rx_req;
dma_unmap_single(priv->ca->dma_device,
pci_unmap_addr(&priv->rx_ring[i], rx_req = &priv->rx_ring[i];
mapping), if (!rx_req->skb)
IPOIB_BUF_SIZE, continue;
DMA_FROM_DEVICE); ib_dma_unmap_single(priv->ca,
dev_kfree_skb_any(priv->rx_ring[i].skb); rx_req->mapping,
priv->rx_ring[i].skb = NULL; IPOIB_BUF_SIZE,
} DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_req->skb);
rx_req->skb = NULL;
}
goto timeout; goto timeout;
} }
......
...@@ -497,8 +497,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ...@@ -497,8 +497,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
return; return;
} }
skb_queue_head_init(&neigh->queue);
/* /*
* We can only be called from ipoib_start_xmit, so we're * We can only be called from ipoib_start_xmit, so we're
* inside tx_lock -- no need to save/restore flags. * inside tx_lock -- no need to save/restore flags.
...@@ -806,6 +804,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour) ...@@ -806,6 +804,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
neigh->neighbour = neighbour; neigh->neighbour = neighbour;
*to_ipoib_neigh(neighbour) = neigh; *to_ipoib_neigh(neighbour) = neigh;
skb_queue_head_init(&neigh->queue);
return neigh; return neigh;
} }
......
...@@ -182,7 +182,7 @@ struct iser_regd_buf { ...@@ -182,7 +182,7 @@ struct iser_regd_buf {
struct iser_mem_reg reg; /* memory registration info */ struct iser_mem_reg reg; /* memory registration info */
void *virt_addr; void *virt_addr;
struct iser_device *device; /* device->device for dma_unmap */ struct iser_device *device; /* device->device for dma_unmap */
dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */ u64 dma_addr; /* if non zero, addr for dma_unmap */
enum dma_data_direction direction; /* direction for dma_unmap */ enum dma_data_direction direction; /* direction for dma_unmap */
unsigned int data_size; unsigned int data_size;
atomic_t ref_count; /* refcount, freed when dec to 0 */ atomic_t ref_count; /* refcount, freed when dec to 0 */
......
...@@ -487,10 +487,8 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -487,10 +487,8 @@ int iser_send_control(struct iscsi_conn *conn,
struct iscsi_iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iser_desc *mdesc = mtask->dd_data; struct iser_desc *mdesc = mtask->dd_data;
struct iser_dto *send_dto = NULL; struct iser_dto *send_dto = NULL;
unsigned int itt;
unsigned long data_seg_len; unsigned long data_seg_len;
int err = 0; int err = 0;
unsigned char opcode;
struct iser_regd_buf *regd_buf; struct iser_regd_buf *regd_buf;
struct iser_device *device; struct iser_device *device;
...@@ -512,8 +510,6 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -512,8 +510,6 @@ int iser_send_control(struct iscsi_conn *conn,
iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
itt = ntohl(mtask->hdr->itt);
opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
data_seg_len = ntoh24(mtask->hdr->dlength); data_seg_len = ntoh24(mtask->hdr->dlength);
if (data_seg_len > 0) { if (data_seg_len > 0) {
......
This diff is collapsed.
...@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, ...@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
if (!iu->buf) if (!iu->buf)
goto out_free_iu; goto out_free_iu;
iu->dma = dma_map_single(host->dev->dev->dma_device, iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
iu->buf, size, direction); if (ib_dma_mapping_error(host->dev->dev, iu->dma))
if (dma_mapping_error(iu->dma))
goto out_free_buf; goto out_free_buf;
iu->size = size; iu->size = size;
...@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) ...@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
if (!iu) if (!iu)
return; return;
dma_unmap_single(host->dev->dev->dma_device, ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
iu->dma, iu->size, iu->direction);
kfree(iu->buf); kfree(iu->buf);
kfree(iu); kfree(iu);
} }
...@@ -482,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, ...@@ -482,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
scat = &req->fake_sg; scat = &req->fake_sg;
} }
dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents, ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
scmnd->sc_data_direction); scmnd->sc_data_direction);
} }
static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
...@@ -595,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, ...@@ -595,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
int i, j; int i, j;
int ret; int ret;
struct srp_device *dev = target->srp_host->dev; struct srp_device *dev = target->srp_host->dev;
struct ib_device *ibdev = dev->dev;
if (!dev->fmr_pool) if (!dev->fmr_pool)
return -ENODEV; return -ENODEV;
if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
return -EINVAL; return -EINVAL;
len = page_cnt = 0; len = page_cnt = 0;
for (i = 0; i < sg_cnt; ++i) { for (i = 0; i < sg_cnt; ++i) {
if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
if (i > 0) if (i > 0)
return -EINVAL; return -EINVAL;
else else
++page_cnt; ++page_cnt;
} }
if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) & if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
~dev->fmr_page_mask) { ~dev->fmr_page_mask) {
if (i < sg_cnt - 1) if (i < sg_cnt - 1)
return -EINVAL; return -EINVAL;
...@@ -619,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, ...@@ -619,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
++page_cnt; ++page_cnt;
} }
len += sg_dma_len(&scat[i]); len += dma_len;
} }
page_cnt += len >> dev->fmr_page_shift; page_cnt += len >> dev->fmr_page_shift;
...@@ -631,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, ...@@ -631,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
return -ENOMEM; return -ENOMEM;
page_cnt = 0; page_cnt = 0;
for (i = 0; i < sg_cnt; ++i) for (i = 0; i < sg_cnt; ++i) {
for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size) unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
for (j = 0; j < dma_len; j += dev->fmr_page_size)
dma_pages[page_cnt++] = dma_pages[page_cnt++] =
(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; (ib_sg_dma_address(ibdev, &scat[i]) &
dev->fmr_page_mask) + j;
}
req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
dma_pages, page_cnt, io_addr); dma_pages, page_cnt, io_addr);
...@@ -644,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, ...@@ -644,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
goto out; goto out;
} }
buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask); buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
~dev->fmr_page_mask);
buf->key = cpu_to_be32(req->fmr->fmr->rkey); buf->key = cpu_to_be32(req->fmr->fmr->rkey);
buf->len = cpu_to_be32(len); buf->len = cpu_to_be32(len);
...@@ -663,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, ...@@ -663,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_cmd *cmd = req->cmd->buf; struct srp_cmd *cmd = req->cmd->buf;
int len, nents, count; int len, nents, count;
u8 fmt = SRP_DATA_DESC_DIRECT; u8 fmt = SRP_DATA_DESC_DIRECT;
struct srp_device *dev;
struct ib_device *ibdev;
if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE) if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
return sizeof (struct srp_cmd); return sizeof (struct srp_cmd);
...@@ -687,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, ...@@ -687,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen); sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
} }
count = dma_map_sg(target->srp_host->dev->dev->dma_device, dev = target->srp_host->dev;
scat, nents, scmnd->sc_data_direction); ibdev = dev->dev;
count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
fmt = SRP_DATA_DESC_DIRECT; fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
...@@ -702,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, ...@@ -702,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
*/ */
struct srp_direct_buf *buf = (void *) cmd->add_data; struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(sg_dma_address(scat)); buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); buf->key = cpu_to_be32(dev->mr->rkey);
buf->len = cpu_to_be32(sg_dma_len(scat)); buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
} else if (srp_map_fmr(target, scat, count, req, } else if (srp_map_fmr(target, scat, count, req,
(void *) cmd->add_data)) { (void *) cmd->add_data)) {
/* /*
...@@ -722,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, ...@@ -722,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
count * sizeof (struct srp_direct_buf); count * sizeof (struct srp_direct_buf);
for (i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
buf->desc_list[i].va = buf->desc_list[i].va =
cpu_to_be64(sg_dma_address(&scat[i])); cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
buf->desc_list[i].key = buf->desc_list[i].key =
cpu_to_be32(target->srp_host->dev->mr->rkey); cpu_to_be32(dev->mr->rkey);
buf->desc_list[i].len = buf->desc_list[i].len = cpu_to_be32(dma_len);
cpu_to_be32(sg_dma_len(&scat[i])); datalen += dma_len;
datalen += sg_dma_len(&scat[i]);
} }
if (scmnd->sc_data_direction == DMA_TO_DEVICE) if (scmnd->sc_data_direction == DMA_TO_DEVICE)
...@@ -808,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) ...@@ -808,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{ {
struct ib_device *dev;
struct srp_iu *iu; struct srp_iu *iu;
u8 opcode; u8 opcode;
iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, dev = target->srp_host->dev->dev;
target->max_ti_iu_len, DMA_FROM_DEVICE); ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
DMA_FROM_DEVICE);
opcode = *(u8 *) iu->buf; opcode = *(u8 *) iu->buf;
...@@ -850,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) ...@@ -850,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
break; break;
} }
dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
target->max_ti_iu_len, DMA_FROM_DEVICE); DMA_FROM_DEVICE);
} }
static void srp_completion(struct ib_cq *cq, void *target_ptr) static void srp_completion(struct ib_cq *cq, void *target_ptr)
...@@ -969,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, ...@@ -969,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
struct srp_request *req; struct srp_request *req;
struct srp_iu *iu; struct srp_iu *iu;
struct srp_cmd *cmd; struct srp_cmd *cmd;
struct ib_device *dev;
int len; int len;
if (target->state == SRP_TARGET_CONNECTING) if (target->state == SRP_TARGET_CONNECTING)
...@@ -985,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, ...@@ -985,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
if (!iu) if (!iu)
goto err; goto err;
dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, dev = target->srp_host->dev->dev;
srp_max_iu_len, DMA_TO_DEVICE); ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
req = list_entry(target->free_reqs.next, struct srp_request, list); req = list_entry(target->free_reqs.next, struct srp_request, list);
...@@ -1018,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, ...@@ -1018,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
goto err_unmap; goto err_unmap;
} }
dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
srp_max_iu_len, DMA_TO_DEVICE); DMA_TO_DEVICE);
if (__srp_post_send(target, iu, len)) { if (__srp_post_send(target, iu, len)) {
printk(KERN_ERR PFX "Send failed\n"); printk(KERN_ERR PFX "Send failed\n");
......
...@@ -161,7 +161,7 @@ struct srp_target_port { ...@@ -161,7 +161,7 @@ struct srp_target_port {
}; };
struct srp_iu { struct srp_iu {
dma_addr_t dma; u64 dma;
void *buf; void *buf;
size_t size; size_t size;
enum dma_data_direction direction; enum dma_data_direction direction;
......
/* /*
* Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU * licenses. You may choose to be licensed under the terms of the GNU
...@@ -41,6 +41,9 @@ ...@@ -41,6 +41,9 @@
void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src); struct ib_qp_attr *src);
void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
struct ib_ah_attr *src);
void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
struct ib_sa_path_rec *src); struct ib_sa_path_rec *src);
......
...@@ -43,6 +43,8 @@ ...@@ -43,6 +43,8 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
...@@ -848,6 +850,49 @@ struct ib_cache { ...@@ -848,6 +850,49 @@ struct ib_cache {
u8 *lmc_cache; u8 *lmc_cache;
}; };
struct ib_dma_mapping_ops {
int (*mapping_error)(struct ib_device *dev,
u64 dma_addr);
u64 (*map_single)(struct ib_device *dev,
void *ptr, size_t size,
enum dma_data_direction direction);
void (*unmap_single)(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction);
u64 (*map_page)(struct ib_device *dev,
struct page *page, unsigned long offset,
size_t size,
enum dma_data_direction direction);
void (*unmap_page)(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction);
int (*map_sg)(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction);
void (*unmap_sg)(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction);
u64 (*dma_address)(struct ib_device *dev,
struct scatterlist *sg);
unsigned int (*dma_len)(struct ib_device *dev,
struct scatterlist *sg);
void (*sync_single_for_cpu)(struct ib_device *dev,
u64 dma_handle,
size_t size,
enum dma_data_direction dir);
void (*sync_single_for_device)(struct ib_device *dev,
u64 dma_handle,
size_t size,
enum dma_data_direction dir);
void *(*alloc_coherent)(struct ib_device *dev,
size_t size,
u64 *dma_handle,
gfp_t flag);
void (*free_coherent)(struct ib_device *dev,
size_t size, void *cpu_addr,
u64 dma_handle);
};
struct iw_cm_verbs; struct iw_cm_verbs;
struct ib_device { struct ib_device {
...@@ -992,6 +1037,8 @@ struct ib_device { ...@@ -992,6 +1037,8 @@ struct ib_device {
struct ib_mad *in_mad, struct ib_mad *in_mad,
struct ib_mad *out_mad); struct ib_mad *out_mad);
struct ib_dma_mapping_ops *dma_ops;
struct module *owner; struct module *owner;
struct class_device class_dev; struct class_device class_dev;
struct kobject ports_parent; struct kobject ports_parent;
...@@ -1395,9 +1442,215 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) ...@@ -1395,9 +1442,215 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
* usable for DMA. * usable for DMA.
* @pd: The protection domain associated with the memory region. * @pd: The protection domain associated with the memory region.
* @mr_access_flags: Specifies the memory access rights. * @mr_access_flags: Specifies the memory access rights.
*
* Note that the ib_dma_*() functions defined below must be used
* to create/destroy addresses used with the Lkey or Rkey returned
* by ib_get_dma_mr().
*/ */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
/**
* ib_dma_mapping_error - check a DMA addr for error
* @dev: The device for which the dma_addr was created
* @dma_addr: The DMA address to check
*/
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
return dev->dma_ops ?
dev->dma_ops->mapping_error(dev, dma_addr) :
dma_mapping_error(dma_addr);
}
/**
* ib_dma_map_single - Map a kernel virtual address to DMA address
* @dev: The device for which the dma_addr is to be created
* @cpu_addr: The kernel virtual address
* @size: The size of the region in bytes
* @direction: The direction of the DMA
*/
static inline u64 ib_dma_map_single(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
return dev->dma_ops ?
dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
/**
* ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
* @dev: The device for which the DMA address was created
* @addr: The DMA address
* @size: The size of the region in bytes
* @direction: The direction of the DMA
*/
static inline void ib_dma_unmap_single(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
dev->dma_ops ?
dev->dma_ops->unmap_single(dev, addr, size, direction) :
dma_unmap_single(dev->dma_device, addr, size, direction);
}
/**
* ib_dma_map_page - Map a physical page to DMA address
* @dev: The device for which the dma_addr is to be created
* @page: The page to be mapped
* @offset: The offset within the page
* @size: The size of the region in bytes
* @direction: The direction of the DMA
*/
static inline u64 ib_dma_map_page(struct ib_device *dev,
struct page *page,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
return dev->dma_ops ?
dev->dma_ops->map_page(dev, page, offset, size, direction) :
dma_map_page(dev->dma_device, page, offset, size, direction);
}
/**
* ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
* @dev: The device for which the DMA address was created
* @addr: The DMA address
* @size: The size of the region in bytes
* @direction: The direction of the DMA
*/
static inline void ib_dma_unmap_page(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
dev->dma_ops ?
dev->dma_ops->unmap_page(dev, addr, size, direction) :
dma_unmap_page(dev->dma_device, addr, size, direction);
}
/**
* ib_dma_map_sg - Map a scatter/gather list to DMA addresses
* @dev: The device for which the DMA addresses are to be created
* @sg: The array of scatter/gather entries
* @nents: The number of scatter/gather entries
* @direction: The direction of the DMA
*/
static inline int ib_dma_map_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
return dev->dma_ops ?
dev->dma_ops->map_sg(dev, sg, nents, direction) :
dma_map_sg(dev->dma_device, sg, nents, direction);
}
/**
* ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
* @dev: The device for which the DMA addresses were created
* @sg: The array of scatter/gather entries
* @nents: The number of scatter/gather entries
* @direction: The direction of the DMA
*/
static inline void ib_dma_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
dev->dma_ops ?
dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
dma_unmap_sg(dev->dma_device, sg, nents, direction);
}
/**
* ib_sg_dma_address - Return the DMA address from a scatter/gather entry
* @dev: The device for which the DMA addresses were created
* @sg: The scatter/gather entry
*/
static inline u64 ib_sg_dma_address(struct ib_device *dev,
struct scatterlist *sg)
{
return dev->dma_ops ?
dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
}
/**
* ib_sg_dma_len - Return the DMA length from a scatter/gather entry
* @dev: The device for which the DMA addresses were created
* @sg: The scatter/gather entry
*/
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
struct scatterlist *sg)
{
return dev->dma_ops ?
dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
}
/**
* ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
* @dev: The device for which the DMA address was created
* @addr: The DMA address
* @size: The size of the region in bytes
* @dir: The direction of the DMA
*/
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
u64 addr,
size_t size,
enum dma_data_direction dir)
{
dev->dma_ops ?
dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}
/**
* ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
* @dev: The device for which the DMA address was created
* @addr: The DMA address
* @size: The size of the region in bytes
* @dir: The direction of the DMA
*/
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
u64 addr,
size_t size,
enum dma_data_direction dir)
{
dev->dma_ops ?
dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
/**
* ib_dma_alloc_coherent - Allocate memory and map it for DMA
* @dev: The device for which the DMA address is requested
* @size: The size of the region to allocate in bytes
* @dma_handle: A pointer for returning the DMA address of the region
* @flag: memory allocator flags
*/
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
size_t size,
u64 *dma_handle,
gfp_t flag)
{
return dev->dma_ops ?
dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}
/**
* ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
* @dev: The device for which the DMA addresses were allocated
* @size: The size of the region
* @cpu_addr: the address returned by ib_dma_alloc_coherent()
* @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
*/
static inline void ib_dma_free_coherent(struct ib_device *dev,
size_t size, void *cpu_addr,
u64 dma_handle)
{
dev->dma_ops ?
dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
/** /**
* ib_reg_phys_mr - Prepares a virtually addressed memory region for use * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
* by an HCA. * by an HCA.
......
...@@ -77,11 +77,34 @@ struct rdma_route { ...@@ -77,11 +77,34 @@ struct rdma_route {
int num_paths; int num_paths;
}; };
/*
 * Connection parameters for connected (RC-style) rdma_cm_ids, passed to
 * rdma_connect()/rdma_accept() and reported in connection events.
 */
struct rdma_conn_param {
	const void *private_data;	/* opaque data carried in the CM message */
	u8 private_data_len;
	u8 responder_resources;		/* RDMA read/atomic depth offered as responder */
	u8 initiator_depth;		/* RDMA read/atomic depth used as initiator */
	u8 flow_control;
	u8 retry_count;			/* ignored when accepting */
	u8 rnr_retry_count;
	/* Fields below ignored if a QP is created on the rdma_cm_id. */
	u8 srq;				/* nonzero if the QP is attached to an SRQ */
	u32 qp_num;
};
/*
 * Parameters reported for unreliable-datagram (UD) rdma_cm_ids: the
 * remote endpoint info needed to post sends to the peer's QP.
 */
struct rdma_ud_param {
	const void *private_data;	/* opaque data carried in the CM message */
	u8 private_data_len;
	struct ib_ah_attr ah_attr;	/* address handle attributes for the peer */
	u32 qp_num;			/* remote QP number */
	u32 qkey;			/* remote Q_Key */
};
struct rdma_cm_event { struct rdma_cm_event {
enum rdma_cm_event_type event; enum rdma_cm_event_type event;
int status; int status;
void *private_data; union {
u8 private_data_len; struct rdma_conn_param conn;
struct rdma_ud_param ud;
} param;
}; };
struct rdma_cm_id; struct rdma_cm_id;
...@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id); ...@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
int *qp_attr_mask); int *qp_attr_mask);
struct rdma_conn_param {
const void *private_data;
u8 private_data_len;
u8 responder_resources;
u8 initiator_depth;
u8 flow_control;
u8 retry_count; /* ignored when accepting */
u8 rnr_retry_count;
/* Fields below ignored if a QP is created on the rdma_cm_id. */
u8 srq;
u32 qp_num;
enum ib_qp_type qp_type;
};
/** /**
* rdma_connect - Initiate an active connection request. * rdma_connect - Initiate an active connection request.
* @id: Connection identifier to connect.
* @conn_param: Connection information used for connected QPs.
* *
* Users must have resolved a route for the rdma_cm_id to connect with * Users must have resolved a route for the rdma_cm_id to connect with
* by having called rdma_resolve_route before calling this routine. * by having called rdma_resolve_route before calling this routine.
*
* This call will either connect to a remote QP or obtain remote QP
* information for unconnected rdma_cm_id's. The actual operation is
* based on the rdma_cm_id's port space.
*/ */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
...@@ -252,6 +267,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog); ...@@ -252,6 +267,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
*/ */
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
/**
* rdma_notify - Notifies the RDMA CM of an asynchronous event that has
* occurred on the connection.
* @id: Connection identifier to transition to established.
* @event: Asynchronous event.
*
* This routine should be invoked by users to notify the CM of relevant
* communication events. Events that should be reported to the CM and
* when to report them are:
*
* IB_EVENT_COMM_EST - Used when a message is received on a connected
* QP before an RTU has been received.
*/
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
/** /**
* rdma_reject - Called to reject a connection request or response. * rdma_reject - Called to reject a connection request or response.
*/ */
......
...@@ -44,4 +44,7 @@ ...@@ -44,4 +44,7 @@
int rdma_set_ib_paths(struct rdma_cm_id *id, int rdma_set_ib_paths(struct rdma_cm_id *id,
struct ib_sa_path_rec *path_rec, int num_paths); struct ib_sa_path_rec *path_rec, int num_paths);
/* Global qkey for UD QPs and multicast groups. */
#define RDMA_UD_QKEY 0x01234567
#endif /* RDMA_CM_IB_H */ #endif /* RDMA_CM_IB_H */
/*
* Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef RDMA_USER_CM_H
#define RDMA_USER_CM_H
#include <linux/types.h>
#include <linux/in6.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_user_sa.h>
#define RDMA_USER_CM_ABI_VERSION 3
#define RDMA_MAX_PRIVATE_DATA 256
/*
 * Opcodes for the rdma ucm write() command interface.  The numeric
 * values are part of the user<->kernel ABI (RDMA_USER_CM_ABI_VERSION);
 * only append new commands — never reorder or remove entries.
 */
enum {
	RDMA_USER_CM_CMD_CREATE_ID,
	RDMA_USER_CM_CMD_DESTROY_ID,
	RDMA_USER_CM_CMD_BIND_ADDR,
	RDMA_USER_CM_CMD_RESOLVE_ADDR,
	RDMA_USER_CM_CMD_RESOLVE_ROUTE,
	RDMA_USER_CM_CMD_QUERY_ROUTE,
	RDMA_USER_CM_CMD_CONNECT,
	RDMA_USER_CM_CMD_LISTEN,
	RDMA_USER_CM_CMD_ACCEPT,
	RDMA_USER_CM_CMD_REJECT,
	RDMA_USER_CM_CMD_DISCONNECT,
	RDMA_USER_CM_CMD_INIT_QP_ATTR,
	RDMA_USER_CM_CMD_GET_EVENT,
	RDMA_USER_CM_CMD_GET_OPTION,
	RDMA_USER_CM_CMD_SET_OPTION,
	RDMA_USER_CM_CMD_NOTIFY
};
/*
 * Command ABI structures.  Every write() to the rdma ucm device begins
 * with this header identifying the command and its payload sizes.
 */
struct rdma_ucm_cmd_hdr {
	__u32 cmd;	/* one of the RDMA_USER_CM_CMD_* opcodes */
	__u16 in;	/* size of the input payload following this header */
	__u16 out;	/* size of the user buffer for the response, if any */
};
/* Create an rdma_cm_id; the kernel returns its index via @response. */
struct rdma_ucm_create_id {
	__u64 uid;		/* userspace context echoed back in events */
	__u64 response;		/* user address of rdma_ucm_create_id_resp */
	__u16 ps;		/* RDMA port space for the id */
	__u8 reserved[6];	/* pad to 8-byte alignment; must be zero */
};

struct rdma_ucm_create_id_resp {
	__u32 id;		/* kernel-assigned identifier for the new id */
};

/* Destroy an rdma_cm_id previously returned by CREATE_ID. */
struct rdma_ucm_destroy_id {
	__u64 response;		/* user address of rdma_ucm_destroy_id_resp */
	__u32 id;
	__u32 reserved;		/* pad; must be zero */
};

struct rdma_ucm_destroy_id_resp {
	__u32 events_reported;	/* events delivered for the id before destroy */
};
/* Bind an id to a local address (sockaddr_in6 also carries IPv4). */
struct rdma_ucm_bind_addr {
	__u64 response;
	struct sockaddr_in6 addr;
	__u32 id;
};

/* Resolve @dst_addr to an RDMA device/route within @timeout_ms. */
struct rdma_ucm_resolve_addr {
	struct sockaddr_in6 src_addr;
	struct sockaddr_in6 dst_addr;
	__u32 id;
	__u32 timeout_ms;
};

struct rdma_ucm_resolve_route {
	__u32 id;
	__u32 timeout_ms;
};

/* Query the route resolved for an id; results via @response. */
struct rdma_ucm_query_route {
	__u64 response;		/* user address of rdma_ucm_query_route_resp */
	__u32 id;
	__u32 reserved;		/* pad; must be zero */
};

struct rdma_ucm_query_route_resp {
	__u64 node_guid;
	struct ib_user_path_rec ib_route[2];	/* primary and alternate paths */
	struct sockaddr_in6 src_addr;
	struct sockaddr_in6 dst_addr;
	__u32 num_paths;	/* number of valid entries in ib_route[] */
	__u8 port_num;
	__u8 reserved[3];
};
/*
 * Userspace mirror of struct rdma_conn_param, with the private data
 * carried inline.  Field layout is ABI — do not reorder.
 */
struct rdma_ucm_conn_param {
	__u32 qp_num;
	__u32 reserved;		/* pad; must be zero */
	__u8 private_data[RDMA_MAX_PRIVATE_DATA];
	__u8 private_data_len;	/* valid bytes in private_data[] */
	__u8 srq;
	__u8 responder_resources;
	__u8 initiator_depth;
	__u8 flow_control;
	__u8 retry_count;
	__u8 rnr_retry_count;
	__u8 valid;		/* nonzero when these parameters were supplied
				 * (presumably: accept may omit them) — TODO
				 * confirm against the ucma implementation */
};

/* Userspace mirror of struct rdma_ud_param (UD port spaces). */
struct rdma_ucm_ud_param {
	__u32 qp_num;
	__u32 qkey;
	struct ib_uverbs_ah_attr ah_attr;
	__u8 private_data[RDMA_MAX_PRIVATE_DATA];
	__u8 private_data_len;	/* valid bytes in private_data[] */
	__u8 reserved[7];	/* pad to 8-byte alignment; must be zero */
};
/* Initiate an active connection on a resolved id. */
struct rdma_ucm_connect {
	struct rdma_ucm_conn_param conn_param;
	__u32 id;
	__u32 reserved;		/* pad; must be zero */
};

struct rdma_ucm_listen {
	__u32 id;
	__u32 backlog;		/* max pending connection requests */
};

/* Accept a connection request; @uid becomes the new id's user context. */
struct rdma_ucm_accept {
	__u64 uid;
	struct rdma_ucm_conn_param conn_param;
	__u32 id;
	__u32 reserved;		/* pad; must be zero */
};

/* Reject a connection request, optionally with private data. */
struct rdma_ucm_reject {
	__u32 id;
	__u8 private_data_len;	/* valid bytes in private_data[] */
	__u8 reserved[3];	/* pad; must be zero */
	__u8 private_data[RDMA_MAX_PRIVATE_DATA];
};

struct rdma_ucm_disconnect {
	__u32 id;
};

/* Get the QP attributes/mask needed to move the QP to @qp_state. */
struct rdma_ucm_init_qp_attr {
	__u64 response;		/* user address of an ib_uverbs_qp_attr */
	__u32 id;
	__u32 qp_state;
};

/* Notify the CM of an asynchronous event (see rdma_notify()). */
struct rdma_ucm_notify {
	__u32 id;
	__u32 event;		/* ib_event_type value */
};

struct rdma_ucm_get_event {
	__u64 response;		/* user address of rdma_ucm_event_resp */
};
/*
 * Event reported to userspace by GET_EVENT.  Which member of @param is
 * valid depends on the id's port space: conn for connected services,
 * ud for UD — mirrors the kernel's struct rdma_cm_event union.
 */
struct rdma_ucm_event_resp {
	__u64 uid;		/* user context supplied at create/accept */
	__u32 id;
	__u32 event;		/* rdma_cm_event_type value */
	__u32 status;
	union {
		struct rdma_ucm_conn_param conn;
		struct rdma_ucm_ud_param ud;
	} param;
};
#endif /* RDMA_USER_CM_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment