Commit 32708e8e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA: Add __init/__exit macros to addr.c and cma.c
  IB/ehca: Bump version number
  mlx4_core: Fix dma_sync_single_for_cpu() with matching for_device() calls
  IB/mthca: Replace dma_sync_single() use with proper functions
  RDMA/nes: Fix FIN state handling under error conditions
  RDMA/nes: Fix max_qp_init_rd_atom returned from query device
  IB/ehca: Ensure that guid_entry index is not negative
  IB/ehca: Tolerate dynamic memory operations before driver load
parents f5bcf5f4 4a7eca82
@@ -514,7 +514,7 @@ static struct notifier_block nb = {
 	.notifier_call = netevent_callback
 };
 
-static int addr_init(void)
+static int __init addr_init(void)
 {
 	addr_wq = create_singlethread_workqueue("ib_addr");
 	if (!addr_wq)
@@ -524,7 +524,7 @@ static int addr_init(void)
 	return 0;
 }
 
-static void addr_cleanup(void)
+static void __exit addr_cleanup(void)
 {
 	unregister_netevent_notifier(&nb);
 	destroy_workqueue(addr_wq);
...
@@ -2960,7 +2960,7 @@ static void cma_remove_one(struct ib_device *device)
 	kfree(cma_dev);
 }
 
-static int cma_init(void)
+static int __init cma_init(void)
 {
 	int ret, low, high, remaining;
 
@@ -2990,7 +2990,7 @@ static int cma_init(void)
 	return ret;
 }
 
-static void cma_cleanup(void)
+static void __exit cma_cleanup(void)
 {
 	ib_unregister_client(&cma_client);
 	unregister_netdevice_notifier(&cma_nb);
...
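The addr.c and cma.c hunks above apply the standard annotation for module entry points: __init lets the kernel discard the function's text after initialization, and __exit drops the cleanup function entirely when the code is built into the kernel image. A minimal sketch of the same pattern, using a hypothetical module name and workqueue rather than the ib_addr/cma code itself:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical example module; only the __init/__exit usage mirrors the patch. */
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* Placed in .init.text and freed once initialization is done. */
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* Omitted entirely when the code is built in rather than modular. */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("Dual BSD/GPL");

The annotation is safe here because addr_init()/cma_init() are only reached through module_init() and the cleanup functions only through module_exit().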
@@ -319,7 +319,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
 					      ib_device);
 	struct hipz_query_port *rblock;
 
-	if (index > 255) {
+	if (index < 0 || index > 255) {
 		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
 		return -EINVAL;
 	}
...
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0027"
+#define HCAD_VERSION "0028"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -506,6 +506,7 @@ static int ehca_init_device(struct ehca_shca *shca)
 	shca->ib_device.detach_mcast	= ehca_detach_mcast;
 	shca->ib_device.process_mad	= ehca_process_mad;
 	shca->ib_device.mmap		= ehca_mmap;
+	shca->ib_device.dma_ops		= &ehca_dma_mapping_ops;
 
 	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
 		shca->ib_device.uverbs_cmd_mask |=
@@ -1028,17 +1029,23 @@ static int __init ehca_module_init(void)
 		goto module_init1;
 	}
 
+	ret = ehca_create_busmap();
+	if (ret) {
+		ehca_gen_err("Cannot create busmap.");
+		goto module_init2;
+	}
+
 	ret = ibmebus_register_driver(&ehca_driver);
 	if (ret) {
 		ehca_gen_err("Cannot register eHCA device driver");
 		ret = -EINVAL;
-		goto module_init2;
+		goto module_init3;
 	}
 
 	ret = register_memory_notifier(&ehca_mem_nb);
 	if (ret) {
 		ehca_gen_err("Failed registering memory add/remove notifier");
-		goto module_init3;
+		goto module_init4;
 	}
 
 	if (ehca_poll_all_eqs != 1) {
@@ -1053,9 +1060,12 @@ static int __init ehca_module_init(void)
 	return 0;
 
-module_init3:
+module_init4:
 	ibmebus_unregister_driver(&ehca_driver);
 
+module_init3:
+	ehca_destroy_busmap();
+
 module_init2:
 	ehca_destroy_slab_caches();
@@ -1073,6 +1083,8 @@ static void __exit ehca_module_exit(void)
 	unregister_memory_notifier(&ehca_mem_nb);
 
+	ehca_destroy_busmap();
+
 	ehca_destroy_slab_caches();
 
 	ehca_destroy_comp_pool();
...
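The ehca_module_init() hunks show why the module_initN labels were renumbered: the new ehca_create_busmap() step now sits between slab-cache creation and driver registration, so every later failure path must jump through a label that also tears the busmap down. A stripped-down sketch of that goto-unwinding pattern, with hypothetical step names standing in for the ehca helpers:

/* Sketch only: hypothetical steps illustrating the label-unwinding used in
 * ehca_module_init(); create_busmap() plays the role of the inserted step. */
static int create_caches(void)   { return 0; }  /* like ehca_create_slab_caches() */
static int create_busmap(void)   { return 0; }  /* the newly inserted step */
static int register_drv(void)    { return 0; }  /* like ibmebus_register_driver() */
static void destroy_busmap(void) { }
static void destroy_caches(void) { }

static int example_module_init(void)
{
	int ret;

	ret = create_caches();
	if (ret)
		return ret;

	ret = create_busmap();
	if (ret)
		goto err_caches;

	ret = register_drv();
	if (ret)
		goto err_busmap;	/* later failures must now undo the busmap too */

	return 0;

err_busmap:
	destroy_busmap();
err_caches:
	destroy_caches();
	return ret;
}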
(The diff for one file is collapsed and not shown here.)
@@ -42,6 +42,11 @@
 #ifndef _EHCA_MRMW_H_
 #define _EHCA_MRMW_H_
 
+enum ehca_reg_type {
+	EHCA_REG_MR,
+	EHCA_REG_BUSMAP_MR
+};
+
 int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_mr *e_mr,
 		u64 *iova_start,
@@ -50,7 +55,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_pd *e_pd,
 		struct ehca_mr_pginfo *pginfo,
 		u32 *lkey,
-		u32 *rkey);
+		u32 *rkey,
+		enum ehca_reg_type reg_type);
 
 int ehca_reg_mr_rpages(struct ehca_shca *shca,
 		       struct ehca_mr *e_mr,
@@ -118,4 +124,9 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
 void ehca_mr_deletenew(struct ehca_mr *mr);
 
+int ehca_create_busmap(void);
+
+void ehca_destroy_busmap(void);
+
+extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
 #endif /*_EHCA_MRMW_H_*/
@@ -352,10 +352,14 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
 	BUG_ON(!mtts);
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+				list_len * sizeof (u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < list_len; ++i)
 		mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);
 
-	dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
+	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+				   list_len * sizeof (u64), DMA_TO_DEVICE);
 }
 
 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
@@ -803,11 +807,14 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
 	wmb();
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+				list_len * sizeof(u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < list_len; ++i)
 		fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
 						     MTHCA_MTT_FLAG_PRESENT);
 
-	dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
-			list_len * sizeof(u64), DMA_TO_DEVICE);
+	dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+				   list_len * sizeof(u64), DMA_TO_DEVICE);
 
 	fmr->mem.arbel.mpt->key = cpu_to_be32(key);
...
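The mthca change replaces the old one-shot dma_sync_single() with the proper ownership handshake for a streaming DMA_TO_DEVICE mapping: sync for the CPU before the driver writes the MTT entries, then sync for the device so the adapter sees them. A generic sketch of that bracketing, using a hypothetical table-writing helper rather than the mthca code:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* Hypothetical example: the CPU fills a table that was dma_map_single()'d
 * with DMA_TO_DEVICE, then hands ownership back to the device. */
static void example_write_table(struct device *dev, dma_addr_t dma_handle,
				__be64 *table, u64 *values, int n)
{
	int i;

	/* Give the buffer back to the CPU before writing to it. */
	dma_sync_single_for_cpu(dev, dma_handle, n * sizeof(u64),
				DMA_TO_DEVICE);

	for (i = 0; i < n; ++i)
		table[i] = cpu_to_be64(values[i]);

	/* Return ownership to the device so it observes the new entries. */
	dma_sync_single_for_device(dev, dma_handle, n * sizeof(u64),
				   DMA_TO_DEVICE);
}

The mlx4 hunks below apply the same pairing; there the closing sync had mistakenly been a second dma_sync_single_for_cpu() call.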
@@ -472,6 +472,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
+	struct iw_cm_id *cm_id = cm_node->cm_id;
 	switch (cm_node->state) {
 	case NES_CM_STATE_SYN_RCVD:
 	case NES_CM_STATE_CLOSING:
@@ -479,7 +480,9 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
 		break;
 	case NES_CM_STATE_LAST_ACK:
 	case NES_CM_STATE_FIN_WAIT1:
-	case NES_CM_STATE_MPAREJ_RCVD:
+		if (cm_node->cm_id)
+			cm_id->rem_ref(cm_id);
+		cm_node->state = NES_CM_STATE_CLOSED;
 		send_reset(cm_node, NULL);
 		break;
 	default:
@@ -1406,6 +1409,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	case NES_CM_STATE_CLOSED:
 		drop_packet(skb);
 		break;
+	case NES_CM_STATE_FIN_WAIT1:
 	case NES_CM_STATE_LAST_ACK:
 		cm_node->cm_id->rem_ref(cm_node->cm_id);
 	case NES_CM_STATE_TIME_WAIT:
@@ -1413,8 +1417,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		rem_ref_cm_node(cm_node->cm_core, cm_node);
 		drop_packet(skb);
 		break;
-	case NES_CM_STATE_FIN_WAIT1:
-		nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
 	default:
 		drop_packet(skb);
 		break;
...
@@ -654,7 +654,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
 	default:
 		props->max_qp_rd_atom = 0;
 	}
-	props->max_qp_init_rd_atom = props->max_qp_wr;
+	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
 	props->atomic_cap = IB_ATOMIC_NONE;
 	props->max_map_per_fmr = 1;
...
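The nes fix matters to upper-layer consumers because max_qp_init_rd_atom advertises how many outstanding RDMA reads/atomics a QP may initiate; reporting max_qp_wr there could lead a ULP to request an unsupported depth. A hedged sketch (assuming the 2.6.31-era in-kernel verbs API and a hypothetical helper name) of how a ULP might clamp its initiator depth to the corrected attribute:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: limit a QP's outgoing RDMA read/atomic depth
 * (max_rd_atomic) to the device's advertised max_qp_init_rd_atom. */
static int example_set_initiator_depth(struct ib_device *device,
				       struct ib_qp *qp, u8 wanted_depth)
{
	struct ib_device_attr attr;
	struct ib_qp_attr qp_attr = { 0 };
	int ret;

	ret = ib_query_device(device, &attr);
	if (ret)
		return ret;

	qp_attr.max_rd_atomic = min_t(unsigned int, wanted_depth,
				      attr.max_qp_init_rd_atom);

	/* In real code this is set as part of the RTS transition's attributes. */
	return ib_modify_qp(qp, &qp_attr, IB_QP_MAX_QP_RD_ATOMIC);
}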
@@ -399,10 +399,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	if (!mtts)
 		return -ENOMEM;
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+				npages * sizeof (u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < npages; ++i)
 		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
-				npages * sizeof (u64), DMA_TO_DEVICE);
+	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+				   npages * sizeof (u64), DMA_TO_DEVICE);
 
 	return 0;
@@ -547,10 +550,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
 	/* Make sure MPT status is visible before writing MTT entries */
 	wmb();
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+				npages * sizeof(u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < npages; ++i)
 		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
-				npages * sizeof(u64), DMA_TO_DEVICE);
+	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+				   npages * sizeof(u64), DMA_TO_DEVICE);
 
 	fmr->mpt->key = cpu_to_be32(key);
...