Commit 2d630d1a authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  mlx4_core: Add helper to move QP to ready-to-send
  mlx4_core: Add HW queues allocation helpers
  RDMA/nes: Remove volatile qualifier from struct nes_hw_cq.cq_vbase
  mlx4_core: CQ resizing should pass a 0 opcode modifier to MODIFY_CQ
  mlx4_core: Move kernel doorbell management into core
  IB/ehca: Bump version number to 0026
  IB/ehca: Make some module parameters bool, update descriptions
  IB/ehca: Remove mr_largepage parameter
  IB/ehca: Move high-volume debug output to higher debug levels
  IB/ehca: Prevent posting of SQ WQEs if QP not in RTS
  IPoIB: Handle 4K IB MTU for UD (datagram) mode
  RDMA/nes: Fix adapter reset after PXE boot
  RDMA/nes: Print IPv4 addresses in a readable format
  RDMA/nes: Use print_mac() to format ethernet addresses for printing
parents f375d558 ed4d3c10
@@ -160,6 +160,7 @@ struct ehca_qp {
     };
     u32 qp_type;
     enum ehca_ext_qp_type ext_type;
+    enum ib_qp_state state;
     struct ipz_queue ipz_squeue;
     struct ipz_queue ipz_rqueue;
     struct h_galpas galpas;
......
@@ -633,7 +633,7 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
     unsigned long flags;

     WARN_ON_ONCE(!in_interrupt());
-    if (ehca_debug_level)
+    if (ehca_debug_level >= 3)
         ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

     spin_lock_irqsave(&pool->last_cpu_lock, flags);
......
@@ -50,7 +50,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"

-#define HCAD_VERSION "0025"
+#define HCAD_VERSION "0026"

 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -60,7 +60,6 @@ MODULE_VERSION(HCAD_VERSION);
 static int ehca_open_aqp1    = 0;
 static int ehca_hw_level     = 0;
 static int ehca_poll_all_eqs = 1;
-static int ehca_mr_largepage = 1;

 int ehca_debug_level = 0;
 int ehca_nr_ports    = 2;
@@ -70,45 +69,40 @@ int ehca_static_rate = -1;
 int ehca_scaling_code = 0;
 int ehca_lock_hcalls  = -1;

-module_param_named(open_aqp1,     ehca_open_aqp1,     int, S_IRUGO);
+module_param_named(open_aqp1,     ehca_open_aqp1,     bool, S_IRUGO);
 module_param_named(debug_level,   ehca_debug_level,   int, S_IRUGO);
 module_param_named(hw_level,      ehca_hw_level,      int, S_IRUGO);
 module_param_named(nr_ports,      ehca_nr_ports,      int, S_IRUGO);
-module_param_named(use_hp_mr,     ehca_use_hp_mr,     int, S_IRUGO);
+module_param_named(use_hp_mr,     ehca_use_hp_mr,     bool, S_IRUGO);
 module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
-module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  int, S_IRUGO);
+module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  bool, S_IRUGO);
 module_param_named(static_rate,   ehca_static_rate,   int, S_IRUGO);
-module_param_named(scaling_code,  ehca_scaling_code,  int, S_IRUGO);
-module_param_named(mr_largepage,  ehca_mr_largepage,  int, S_IRUGO);
+module_param_named(scaling_code,  ehca_scaling_code,  bool, S_IRUGO);
 module_param_named(lock_hcalls,   ehca_lock_hcalls,   bool, S_IRUGO);

 MODULE_PARM_DESC(open_aqp1,
-         "AQP1 on startup (0: no (default), 1: yes)");
+         "Open AQP1 on startup (default: no)");
 MODULE_PARM_DESC(debug_level,
-         "debug level"
-         " (0: no debug traces (default), 1: with debug traces)");
+         "Amount of debug output (0: none (default), 1: traces, "
+         "2: some dumps, 3: lots)");
 MODULE_PARM_DESC(hw_level,
-         "hardware level"
-         " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
+         "Hardware level (0: autosensing (default), "
+         "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
 MODULE_PARM_DESC(nr_ports,
         "number of connected ports (-1: autodetect, 1: port one only, "
         "2: two ports (default)");
 MODULE_PARM_DESC(use_hp_mr,
-         "high performance MRs (0: no (default), 1: yes)");
+         "Use high performance MRs (default: no)");
 MODULE_PARM_DESC(port_act_time,
-         "time to wait for port activation (default: 30 sec)");
+         "Time to wait for port activation (default: 30 sec)");
 MODULE_PARM_DESC(poll_all_eqs,
-         "polls all event queues periodically"
-         " (0: no, 1: yes (default))");
+         "Poll all event queues periodically (default: yes)");
 MODULE_PARM_DESC(static_rate,
-         "set permanent static rate (default: disabled)");
+         "Set permanent static rate (default: no static rate)");
 MODULE_PARM_DESC(scaling_code,
-         "set scaling code (0: disabled/default, 1: enabled)");
+         "Enable scaling code (default: no)");
-MODULE_PARM_DESC(mr_largepage,
-         "use large page for MR (0: use PAGE_SIZE (default), "
-         "1: use large page depending on MR size");
 MODULE_PARM_DESC(lock_hcalls,
-         "serialize all hCalls made by the driver "
+         "Serialize all hCalls made by the driver "
          "(default: autodetect)");

 DEFINE_RWLOCK(ehca_qp_idr_lock);
@@ -275,6 +269,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
     u64 h_ret;
     struct hipz_query_hca *rblock;
     struct hipz_query_port *port;
+    const char *loc_code;

     static const u32 pgsize_map[] = {
         HCA_CAP_MR_PGSIZE_4K,  0x1000,
@@ -283,6 +278,12 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
         HCA_CAP_MR_PGSIZE_16M, 0x1000000,
     };

+    ehca_gen_dbg("Probing adapter %s...",
+             shca->ofdev->node->full_name);
+    loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+    if (loc_code)
+        ehca_gen_dbg(" ... location code=%s", loc_code);
+
     rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
     if (!rblock) {
         ehca_gen_err("Cannot allocate rblock memory.");
@@ -350,11 +351,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
     /* translate supported MR page sizes; always support 4K */
     shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
-    if (ehca_mr_largepage) { /* support extra sizes only if enabled */
-        for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
-            if (rblock->memory_page_size_supported & pgsize_map[i])
-                shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
-    }
+    for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
+        if (rblock->memory_page_size_supported & pgsize_map[i])
+            shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];

     /* query max MTU from first port -- it's the same for all ports */
     port = (struct hipz_query_port *)rblock;
@@ -567,8 +566,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)

 static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
 {
-    return snprintf(buf, PAGE_SIZE, "%d\n",
-            ehca_debug_level);
+    return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
 }

 static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -657,14 +655,6 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);

-static ssize_t ehca_show_mr_largepage(struct device *dev,
-                      struct device_attribute *attr,
-                      char *buf)
-{
-    return sprintf(buf, "%d\n", ehca_mr_largepage);
-}
-static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL);
-
 static struct attribute *ehca_dev_attrs[] = {
     &dev_attr_adapter_handle.attr,
     &dev_attr_num_ports.attr,
@@ -681,7 +671,6 @@ static struct attribute *ehca_dev_attrs[] = {
     &dev_attr_cur_mw.attr,
     &dev_attr_max_pd.attr,
     &dev_attr_max_ah.attr,
-    &dev_attr_mr_largepage.attr,
     NULL
 };
......
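The ehca_main.c hunks above redefine debug_level as a verbosity threshold rather than an on/off flag, and the hunks that follow convert every bare `if (ehca_debug_level)` test into an explicit level check. A minimal sketch of the convention, assuming only the exported ehca_debug_level variable and the ehca_dmp() helper that appear in this diff:

    /* 0: none (default), 1: error traces, 2: some dumps, 3: lots */
    extern int ehca_debug_level;

    if (ehca_debug_level >= 2)    /* medium volume: control-block dumps */
        ehca_dmp(mqpcb, 4 * 70, "qp_num=%x", qp_num);

    if (ehca_debug_level >= 3)    /* high volume: per-WQE dumps */
        ehca_dmp(wqe_p, 16 * (6 + wqe_p->nr_of_data_seg), "send wqe");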
@@ -1794,8 +1794,9 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
     int t;
     for (t = start_idx; t <= end_idx; t++) {
         u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
-        ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
-                 *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
+        if (ehca_debug_level >= 3)
+            ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+                     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
         if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
             ehca_gen_err("uncontiguous page found pgaddr=%lx "
                      "prev_pgaddr=%lx page_list_i=%x",
@@ -1862,10 +1863,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                           pgaddr &
                           ~(pginfo->hwpage_size - 1));
                 }
-                ehca_gen_dbg("kpage=%lx chunk_page=%lx "
-                         "value=%016lx", *kpage, pgaddr,
-                         *(u64 *)abs_to_virt(
-                             phys_to_abs(pgaddr)));
+                if (ehca_debug_level >= 3) {
+                    u64 val = *(u64 *)abs_to_virt(
+                        phys_to_abs(pgaddr));
+                    ehca_gen_dbg("kpage=%lx chunk_page=%lx "
+                             "value=%016lx",
+                             *kpage, pgaddr, val);
+                }
                 prev_pgaddr = pgaddr;
                 i++;
                 pginfo->kpage_cnt++;
......
@@ -550,6 +550,7 @@ static struct ehca_qp *internal_create_qp(
     spin_lock_init(&my_qp->spinlock_r);
     my_qp->qp_type = qp_type;
     my_qp->ext_type = parms.ext_type;
+    my_qp->state = IB_QPS_RESET;

     if (init_attr->recv_cq)
         my_qp->recv_cq =
@@ -965,7 +966,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
          qp_num, bad_send_wqe_p);
     /* convert wqe pointer to vadr */
     bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
-    if (ehca_debug_level)
+    if (ehca_debug_level >= 2)
         ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
     squeue = &my_qp->ipz_squeue;
     if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
@@ -978,7 +979,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
     wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
     *bad_wqe_cnt = 0;
     while (wqe->optype != 0xff && wqe->wqef != 0xff) {
-        if (ehca_debug_level)
+        if (ehca_debug_level >= 2)
             ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
         wqe->nr_of_data_seg = 0; /* suppress data access */
         wqe->wqef = WQEF_PURGE; /* WQE to be purged */
@@ -1450,7 +1451,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
         /* no support for max_send/recv_sge yet */
     }

-    if (ehca_debug_level)
+    if (ehca_debug_level >= 2)
         ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

     h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
@@ -1508,6 +1509,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
     if (attr_mask & IB_QP_QKEY)
         my_qp->qkey = attr->qkey;

+    my_qp->state = qp_new_state;
+
 modify_qp_exit2:
     if (squeue_locked) { /* this means: sqe -> rts */
         spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1763,7 +1766,7 @@ int ehca_query_qp(struct ib_qp *qp,
     if (qp_init_attr)
         *qp_init_attr = my_qp->init_attr;

-    if (ehca_debug_level)
+    if (ehca_debug_level >= 2)
         ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

 query_qp_exit1:
@@ -1811,7 +1814,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
         goto modify_srq_exit0;
     }

-    if (ehca_debug_level)
+    if (ehca_debug_level >= 2)
         ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

     h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
@@ -1864,7 +1867,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
     srq_attr->srq_limit = EHCA_BMASK_GET(
         MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);

-    if (ehca_debug_level)
+    if (ehca_debug_level >= 2)
         ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

 query_srq_exit1:
......
@@ -81,7 +81,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
             recv_wr->sg_list[cnt_ds].length;
     }

-    if (ehca_debug_level) {
+    if (ehca_debug_level >= 3) {
         ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
                  ipz_rqueue);
         ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
@@ -281,7 +281,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
         return -EINVAL;
     }

-    if (ehca_debug_level) {
+    if (ehca_debug_level >= 3) {
         ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
         ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
     }
@@ -421,6 +421,11 @@ int ehca_post_send(struct ib_qp *qp,
     int ret = 0;
     unsigned long flags;

+    if (unlikely(my_qp->state != IB_QPS_RTS)) {
+        ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num);
+        return -EINVAL;
+    }
+
     /* LOCK the QUEUE */
     spin_lock_irqsave(&my_qp->spinlock_s, flags);
@@ -454,13 +459,14 @@ int ehca_post_send(struct ib_qp *qp,
             goto post_send_exit0;
         }
         wqe_cnt++;
-        ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
-             my_qp, qp->qp_num, wqe_cnt);
     } /* eof for cur_send_wr */

 post_send_exit0:
     iosync(); /* serialize GAL register access */
     hipz_update_sqa(my_qp, wqe_cnt);
+    if (unlikely(ret || ehca_debug_level >= 2))
+        ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+             my_qp, qp->qp_num, wqe_cnt, ret);
     my_qp->message_count += wqe_cnt;
     spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
     return ret;
@@ -520,13 +526,14 @@ static int internal_post_recv(struct ehca_qp *my_qp,
             goto post_recv_exit0;
         }
         wqe_cnt++;
-        ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
-             my_qp, my_qp->real_qp_num, wqe_cnt);
     } /* eof for cur_recv_wr */

 post_recv_exit0:
     iosync(); /* serialize GAL register access */
     hipz_update_rqa(my_qp, wqe_cnt);
+    if (unlikely(ret || ehca_debug_level >= 2))
+        ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+             my_qp, my_qp->real_qp_num, wqe_cnt, ret);
     spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
     return ret;
 }
@@ -570,16 +577,17 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
     struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
     struct ehca_cqe *cqe;
     struct ehca_qp *my_qp;
-    int cqe_count = 0;
+    int cqe_count = 0, is_error;

 poll_cq_one_read_cqe:
     cqe = (struct ehca_cqe *)
         ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
     if (!cqe) {
         ret = -EAGAIN;
-        ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
-             "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret);
+        if (ehca_debug_level >= 3)
+            ehca_dbg(cq->device, "Completion queue is empty "
+                 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
         goto poll_cq_one_exit0;
     }

     /* prevents loads being reordered across this point */
@@ -609,7 +617,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
             ehca_dbg(cq->device,
                  "Got CQE with purged bit qp_num=%x src_qp=%x",
                  cqe->local_qp_number, cqe->remote_qp_number);
-            if (ehca_debug_level)
+            if (ehca_debug_level >= 2)
                 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
                      cqe->local_qp_number,
                      cqe->remote_qp_number);
@@ -622,11 +630,13 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
         }
     }

-    /* tracing cqe */
-    if (unlikely(ehca_debug_level)) {
+    is_error = cqe->status & WC_STATUS_ERROR_BIT;
+
+    /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
+    if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
         ehca_dbg(cq->device,
-             "Received COMPLETION ehca_cq=%p cq_num=%x -----",
-             my_cq, my_cq->cq_number);
+             "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
+             is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
         ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
              my_cq, my_cq->cq_number);
         ehca_dbg(cq->device,
@@ -649,8 +659,9 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
         /* update also queue adder to throw away this entry!!! */
         goto poll_cq_one_exit0;
     }

     /* eval ib_wc_status */
-    if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
+    if (unlikely(is_error)) {
         /* complete with errors */
         map_ib_wc_status(cqe->status, &wc->status);
         wc->vendor_err = wc->status;
@@ -671,14 +682,6 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
     wc->imm_data = cpu_to_be32(cqe->immediate_data);
     wc->sl = cqe->service_level;

-    if (unlikely(wc->status != IB_WC_SUCCESS))
-        ehca_dbg(cq->device,
-             "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
-             "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
-             "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
-             cqe->status, cqe->local_qp_number,
-             cqe->remote_qp_number, cqe->work_request_id, cqe);
-
 poll_cq_one_exit0:
     if (cqe_count > 0)
         hipz_update_feca(my_cq, cqe_count);
......
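Because ehca_post_send() now fails fast when the QP is not in RTS, a consumer has to complete the usual RESET -> INIT -> RTR -> RTS transitions before posting. A hedged sketch of the final step using the standard ib_modify_qp() verb; the attribute sets for the earlier transitions are omitted, and a real RC QP needs more attributes than shown here:

    struct ib_qp_attr attr = {
        .qp_state = IB_QPS_RTS,
    };

    /* Move the QP to ready-to-send; until this succeeds,
     * ehca_post_send() now returns -EINVAL. */
    if (ib_modify_qp(qp, &attr, IB_QP_STATE))
        printk(KERN_ERR "failed to move QP to RTS\n");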
@@ -211,8 +211,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
         break;

     case 1: /* qp rqueue_addr */
-        ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
-             qp->ib_qp.qp_num);
+        ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
         ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
                       &qp->mm_count_rqueue);
         if (unlikely(ret)) {
@@ -224,8 +223,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
         break;

     case 2: /* qp squeue_addr */
-        ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
-             qp->ib_qp.qp_num);
+        ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
         ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
                       &qp->mm_count_squeue);
         if (unlikely(ret)) {
......
@@ -123,8 +123,9 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
     int i, sleep_msecs;
     unsigned long flags = 0;

-    ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
-             opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+    if (unlikely(ehca_debug_level >= 2))
+        ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
+                 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

     for (i = 0; i < 5; i++) {
         /* serialize hCalls to work around firmware issue */
@@ -148,7 +149,8 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
                      opcode, ret, arg1, arg2, arg3,
                      arg4, arg5, arg6, arg7);
         else
-            ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
+            if (unlikely(ehca_debug_level >= 2))
+                ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);

         return ret;
     }
@@ -172,8 +174,10 @@ static long ehca_plpar_hcall9(unsigned long opcode,
     int i, sleep_msecs;
     unsigned long flags = 0;

-    ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
-             arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+    if (unlikely(ehca_debug_level >= 2))
+        ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
+                 arg1, arg2, arg3, arg4, arg5,
+                 arg6, arg7, arg8, arg9);

     for (i = 0; i < 5; i++) {
         /* serialize hCalls to work around firmware issue */
@@ -201,7 +205,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
                      ret, outs[0], outs[1], outs[2], outs[3],
                      outs[4], outs[5], outs[6], outs[7],
                      outs[8]);
-        } else
+        } else if (unlikely(ehca_debug_level >= 2))
             ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
                      ret, outs[0], outs[1], outs[2], outs[3],
                      outs[4], outs[5], outs[6], outs[7],
@@ -381,7 +385,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
                       r_cb,                  /* r6 */
                       0, 0, 0, 0);

-    if (ehca_debug_level)
+    if (ehca_debug_level >= 2)
         ehca_dmp(query_port_response_block, 64, "response_block");

     return ret;
@@ -731,9 +735,6 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
     u64 ret;
     u64 outs[PLPAR_HCALL9_BUFSIZE];

-    ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
-             "vaddr=%lx length=%lx",
-             (u32)PAGE_SIZE, access_ctrl, vaddr, length);
     ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                 adapter_handle.handle,            /* r4 */
                 5,                                /* r5 */
@@ -758,7 +759,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
 {
     u64 ret;

-    if (unlikely(ehca_debug_level >= 2)) {
+    if (unlikely(ehca_debug_level >= 3)) {
         if (count > 1) {
             u64 *kpage;
             int i;
......
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
         uar = &to_mucontext(context)->uar;
     } else {
-        err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+        err = mlx4_db_alloc(dev->dev, &cq->db, 1);
         if (err)
             goto err_cq;
@@ -250,7 +250,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 err_db:
     if (!context)
-        mlx4_ib_db_free(dev, &cq->db);
+        mlx4_db_free(dev->dev, &cq->db);

 err_cq:
     kfree(cq);
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
         ib_umem_release(mcq->umem);
     } else {
         mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
-        mlx4_ib_db_free(dev, &mcq->db);
+        mlx4_db_free(dev->dev, &mcq->db);
     }

     kfree(mcq);
......
@@ -34,124 +34,6 @@

 #include "mlx4_ib.h"

-struct mlx4_ib_db_pgdir {
-    struct list_head list;
-    DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
-    DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
-    unsigned long *bits[2];
-    __be32 *db_page;
-    dma_addr_t db_dma;
-};
-
-static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
-{
-    struct mlx4_ib_db_pgdir *pgdir;
-
-    pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
-    if (!pgdir)
-        return NULL;
-
-    bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
-    pgdir->bits[0] = pgdir->order0;
-    pgdir->bits[1] = pgdir->order1;
-    pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
-                        PAGE_SIZE, &pgdir->db_dma,
-                        GFP_KERNEL);
-    if (!pgdir->db_page) {
-        kfree(pgdir);
-        return NULL;
-    }
-
-    return pgdir;
-}
-
-static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
-                       struct mlx4_ib_db *db, int order)
-{
-    int o;
-    int i;
-
-    for (o = order; o <= 1; ++o) {
-        i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
-        if (i < MLX4_IB_DB_PER_PAGE >> o)
-            goto found;
-    }
-
-    return -ENOMEM;
-
-found:
-    clear_bit(i, pgdir->bits[o]);
-
-    i <<= o;
-
-    if (o > order)
-        set_bit(i ^ 1, pgdir->bits[order]);
-
-    db->u.pgdir = pgdir;
-    db->index   = i;
-    db->db      = pgdir->db_page + db->index;
-    db->dma     = pgdir->db_dma + db->index * 4;
-    db->order   = order;
-
-    return 0;
-}
-
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
-{
-    struct mlx4_ib_db_pgdir *pgdir;
-    int ret = 0;
-
-    mutex_lock(&dev->pgdir_mutex);
-
-    list_for_each_entry(pgdir, &dev->pgdir_list, list)
-        if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
-            goto out;
-
-    pgdir = mlx4_ib_alloc_db_pgdir(dev);
-    if (!pgdir) {
-        ret = -ENOMEM;
-        goto out;
-    }
-
-    list_add(&pgdir->list, &dev->pgdir_list);
-
-    /* This should never fail -- we just allocated an empty page: */
-    WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
-    mutex_unlock(&dev->pgdir_mutex);
-
-    return ret;
-}
-
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
-{
-    int o;
-    int i;
-
-    mutex_lock(&dev->pgdir_mutex);
-
-    o = db->order;
-    i = db->index;
-
-    if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
-        clear_bit(i ^ 1, db->u.pgdir->order0);
-        ++o;
-    }
-
-    i >>= o;
-    set_bit(i, db->u.pgdir->bits[o]);
-
-    if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
-        dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
-                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
-        list_del(&db->u.pgdir->list);
-        kfree(db->u.pgdir);
-    }
-
-    mutex_unlock(&dev->pgdir_mutex);
-}
-
 struct mlx4_ib_user_db_page {
     struct list_head list;
     struct ib_umem *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
 };

 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-            struct mlx4_ib_db *db)
+            struct mlx4_db *db)
 {
     struct mlx4_ib_user_db_page *page;
     struct ib_umem_chunk *chunk;
@@ -202,7 +84,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
     return err;
 }

-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
 {
     mutex_lock(&context->db_page_mutex);
......
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         goto err_uar;
     MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

-    INIT_LIST_HEAD(&ibdev->pgdir_list);
-    mutex_init(&ibdev->pgdir_mutex);
-
     ibdev->dev = dev;
     strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
......
@@ -43,24 +43,6 @@
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>

-enum {
-    MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
-};
-
-struct mlx4_ib_db_pgdir;
-struct mlx4_ib_user_db_page;
-
-struct mlx4_ib_db {
-    __be32 *db;
-    union {
-        struct mlx4_ib_db_pgdir     *pgdir;
-        struct mlx4_ib_user_db_page *user_page;
-    } u;
-    dma_addr_t dma;
-    int index;
-    int order;
-};
-
 struct mlx4_ib_ucontext {
     struct ib_ucontext ibucontext;
     struct mlx4_uar uar;
@@ -88,7 +70,7 @@ struct mlx4_ib_cq {
     struct mlx4_cq mcq;
     struct mlx4_ib_cq_buf buf;
     struct mlx4_ib_cq_resize *resize_buf;
-    struct mlx4_ib_db db;
+    struct mlx4_db db;
     spinlock_t lock;
     struct mutex resize_mutex;
     struct ib_umem *umem;
@@ -127,7 +109,7 @@ struct mlx4_ib_qp {
     struct mlx4_qp mqp;
     struct mlx4_buf buf;
-    struct mlx4_ib_db db;
+    struct mlx4_db db;
     struct mlx4_ib_wq rq;

     u32 doorbell_qpn;
@@ -154,7 +136,7 @@ struct mlx4_ib_srq {
     struct ib_srq ibsrq;
     struct mlx4_srq msrq;
     struct mlx4_buf buf;
-    struct mlx4_ib_db db;
+    struct mlx4_db db;
     u64 *wrid;
     spinlock_t lock;
     int head;
@@ -175,9 +157,6 @@ struct mlx4_ib_dev {
     struct mlx4_dev *dev;
     void __iomem *uar_map;

-    struct list_head pgdir_list;
-    struct mutex pgdir_mutex;
-
     struct mlx4_uar priv_uar;
     u32 priv_pdn;
     MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
     return container_of(ibah, struct mlx4_ib_ah, ibah);
 }

-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-            struct mlx4_ib_db *db);
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db);
+            struct mlx4_db *db);
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
......
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
             goto err;

         if (!init_attr->srq) {
-            err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+            err = mlx4_db_alloc(dev->dev, &qp->db, 0);
             if (err)
                 goto err;
@@ -580,7 +580,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 err_db:
     if (!pd->uobject && !init_attr->srq)
-        mlx4_ib_db_free(dev, &qp->db);
+        mlx4_db_free(dev->dev, &qp->db);

 err:
     return err;
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
         kfree(qp->rq.wrid);
         mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
         if (!qp->ibqp.srq)
-            mlx4_ib_db_free(dev, &qp->db);
+            mlx4_db_free(dev->dev, &qp->db);
     }
 }
......
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
         if (err)
             goto err_mtt;
     } else {
-        err = mlx4_ib_db_alloc(dev, &srq->db, 0);
+        err = mlx4_db_alloc(dev->dev, &srq->db, 0);
         if (err)
             goto err_srq;
@@ -200,7 +200,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 err_db:
     if (!pd->uobject)
-        mlx4_ib_db_free(dev, &srq->db);
+        mlx4_db_free(dev->dev, &srq->db);

 err_srq:
     kfree(srq);
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
         kfree(msrq->wrid);
         mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
                   &msrq->buf);
-        mlx4_ib_db_free(dev, &msrq->db);
+        mlx4_db_free(dev->dev, &msrq->db);
     }
     kfree(msrq);
......
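The mlx4 hunks above are all fallout from moving doorbell management into mlx4_core: the driver-private struct mlx4_ib_db and its page-directory allocator are gone, and kernel consumers call the core helpers directly. A short sketch of the new pattern as it appears in this diff (the wrapper function name is illustrative):

    static int example_cq_db(struct mlx4_ib_dev *dev, struct mlx4_db *db)
    {
        int err;

        /* order 1: a CQ doorbell spans two dwords;
         * QPs and SRQs allocate with order 0 */
        err = mlx4_db_alloc(dev->dev, db, 1);
        if (err)
            return err;

        /* db->db is the CPU pointer; db->dma is the bus address
         * programmed into the hardware context */
        *db->db = 0;

        mlx4_db_free(dev->dev, db);
        return 0;
    }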
@@ -139,8 +139,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
     addr = ntohl(ifa->ifa_address);
     mask = ntohl(ifa->ifa_mask);
-    nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
-            addr, mask);
+    nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT
+          ", netmask " NIPQUAD_FMT ".\n",
+          HIPQUAD(addr), HIPQUAD(mask));
     list_for_each_entry(nesdev, &nes_dev_list, list) {
         nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
                 nesdev, nesdev->netdev[0]->name);
@@ -353,13 +354,11 @@ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
  */
 static void nes_print_macaddr(struct net_device *netdev)
 {
-    nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
-            netdev->name,
-            netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-            netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
-            netdev->irq);
-}
+    DECLARE_MAC_BUF(mac);

+    nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n",
+          netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
+}

 /**
  * nes_interrupt - handle interrupts
......
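DECLARE_MAC_BUF()/print_mac() replace the hand-rolled six-way %02X format strings throughout the nes driver. A minimal sketch of the pair:

    static void example_show_mac(struct net_device *netdev)
    {
        DECLARE_MAC_BUF(mac);    /* scratch buffer for "xx:xx:xx:xx:xx:xx" */

        /* print_mac() formats the address into mac and returns it */
        printk(KERN_INFO "%s: %s\n",
               netdev->name, print_mac(mac, netdev->dev_addr));
    }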
@@ -852,8 +852,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
     /* get a handle on the hte */
     hte = &cm_core->connected_nodes;

-    nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
-            loc_addr, loc_port, cm_core, hte);
+    nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n",
+          HIPQUAD(loc_addr), loc_port, cm_core, hte);

     /* walk list and find cm_node associated with this session ID */
     spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -902,8 +902,8 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
     }
     spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

-    nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
-            dst_addr, dst_port);
+    nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n",
+          HIPQUAD(dst_addr), dst_port);

     /* no listener */
     return NULL;
@@ -1054,6 +1054,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
     int arpindex = 0;
     struct nes_device *nesdev;
     struct nes_adapter *nesadapter;
+    DECLARE_MAC_BUF(mac);

     /* create an hte and cm_node for this instance */
     cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
@@ -1066,8 +1067,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
     cm_node->loc_port = cm_info->loc_port;
     cm_node->rem_port = cm_info->rem_port;
     cm_node->send_write0 = send_first;
-    nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
-            cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port);
+    nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
+          HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
+          HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
     cm_node->listener = listener;
     cm_node->netdev = nesvnic->netdev;
     cm_node->cm_id = cm_info->cm_id;
@@ -1116,11 +1118,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,

     /* copy the mac addr to node context */
     memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
-    nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
-            " %02x, %02x, %02x, %02x, %02x\n",
-            cm_node->rem_mac[0], cm_node->rem_mac[1],
-            cm_node->rem_mac[2], cm_node->rem_mac[3],
-            cm_node->rem_mac[4], cm_node->rem_mac[5]);
+    nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n",
+          print_mac(mac, cm_node->rem_mac));

     add_hte_node(cm_core, cm_node);
     atomic_inc(&cm_nodes_created);
@@ -1850,8 +1849,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
     nfo.rem_addr = ntohl(iph->saddr);
     nfo.rem_port = ntohs(tcph->source);

-    nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
-            iph->daddr, tcph->dest, iph->saddr, tcph->source);
+    nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT
+          ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n",
+          NIPQUAD(iph->daddr), tcph->dest,
+          NIPQUAD(iph->saddr), tcph->source);

     /* note: this call is going to increment cm_node ref count */
     cm_node = find_node(cm_core,
......
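NIPQUAD_FMT expands to a dotted-quad format string; NIPQUAD() breaks a network-order __be32 into its four bytes, while HIPQUAD() does the same for a host-order value (nes stores addresses host-order after ntohl(), hence the mix above). A sketch:

    static void example_show_ipv4(__be32 net_addr)
    {
        u32 host_addr = ntohl(net_addr);

        /* NIPQUAD() for network byte order, HIPQUAD() for host order */
        printk(KERN_DEBUG "net=" NIPQUAD_FMT " host=" NIPQUAD_FMT "\n",
               NIPQUAD(net_addr), HIPQUAD(host_addr));
    }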
@@ -636,6 +636,15 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
             nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
             return 0;
         }
+
+        i = 0;
+        while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
+            mdelay(1);
+        if (i >= 10000) {
+            printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
+                   nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
+            return 0;
+        }
     }

     /* port reset */
@@ -684,17 +693,6 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
         }
     }

-    i = 0;
-    while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
-        mdelay(1);
-    if (i >= 10000) {
-        printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
-               nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
-        return 0;
-    }
-
     return port_count;
 }
......
@@ -905,7 +905,7 @@ struct nes_hw_qp {
 };

 struct nes_hw_cq {
-    struct nes_hw_cqe volatile *cq_vbase;    /* PCI memory for host rings */
+    struct nes_hw_cqe *cq_vbase;    /* PCI memory for host rings */
     void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
     dma_addr_t cq_pbase;    /* PCI memory for host rings */
     u16 cq_head;
......
@@ -787,16 +787,14 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
     int i;
     u32 macaddr_low;
     u16 macaddr_high;
+    DECLARE_MAC_BUF(mac);

     if (!is_valid_ether_addr(mac_addr->sa_data))
         return -EADDRNOTAVAIL;

     memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
-    printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
-           __func__, netdev->addr_len,
-           mac_addr->sa_data[0], mac_addr->sa_data[1],
-           mac_addr->sa_data[2], mac_addr->sa_data[3],
-           mac_addr->sa_data[4], mac_addr->sa_data[5]);
+    printk(PFX "%s: Address length = %d, Address = %s\n",
+           __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data));
     macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
     macaddr_high += (u16)netdev->dev_addr[1];
     macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
@@ -878,11 +876,11 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
             if (mc_nic_index < 0)
                 mc_nic_index = nesvnic->nic_index;
             if (multicast_addr) {
-                nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
-                          multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
-                          multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
-                          multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
-                          perfect_filter_register_address+(mc_index * 8), mc_nic_index);
+                DECLARE_MAC_BUF(mac);
+                nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
+                      print_mac(mac, multicast_addr->dmi_addr),
+                      perfect_filter_register_address+(mc_index * 8),
+                      mc_nic_index);
                 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
                 macaddr_high += (u16)multicast_addr->dmi_addr[1];
                 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
......
@@ -660,7 +660,9 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
     /* DELETE or RESOLVE */
     if (arp_index == nesadapter->arp_table_size) {
-        nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n");
+        nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n",
+              HIPQUAD(ip_addr),
+              action == NES_ARP_RESOLVE ? "resolve" : "delete");
         return -1;
     }
......
@@ -1976,7 +1976,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)

     if (nescq->cq_mem_size)
         pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
-                (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
+                nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
     kfree(nescq);

     return ret;
@@ -3610,6 +3610,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
     while (cqe_count < num_entries) {
         if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
                 NES_CQE_VALID) {
+            /*
+             * Make sure we read CQ entry contents *after*
+             * we've checked the valid bit.
+             */
+            rmb();
+
             cqe = nescq->hw_cq.cq_vbase[head];
             nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
             u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
......
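Dropping the volatile qualifier from cq_vbase is safe only because the poll loop above now carries an explicit read barrier: rmb() keeps the CQE payload loads from being reordered before the valid-bit check. A sketch of the pattern, with hypothetical names standing in for the nes CQE layout:

    /* hypothetical DMA ring: hardware sets VALID after filling an entry */
    while (le32_to_cpu(ring[head].words[OPCODE_IDX]) & CQE_VALID) {
        rmb();        /* order payload loads after the valid-bit check */
        entry = ring[head];                 /* snapshot the contents */
        ring[head].words[OPCODE_IDX] = 0;   /* hand the slot back */
        head = (head + 1) % ring_size;
    }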
@@ -56,11 +56,11 @@

 /* constants */

 enum {
-    IPOIB_PACKET_SIZE     = 2048,
-    IPOIB_BUF_SIZE        = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
     IPOIB_ENCAP_LEN       = 4,

+    IPOIB_UD_HEAD_SIZE    = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+    IPOIB_UD_RX_SG        = 2, /* max buffer needed for 4K mtu */
+
     IPOIB_CM_MTU          = 0x10000 - 0x10, /* padding to align header to 16 */
     IPOIB_CM_BUF_SIZE     = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
     IPOIB_CM_HEAD_SIZE    = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -139,7 +139,7 @@ struct ipoib_mcast {

 struct ipoib_rx_buf {
     struct sk_buff *skb;
-    u64 mapping;
+    u64 mapping[IPOIB_UD_RX_SG];
 };

 struct ipoib_tx_buf {
@@ -294,6 +294,7 @@ struct ipoib_dev_priv {

     unsigned int admin_mtu;
     unsigned int mcast_mtu;
+    unsigned int max_ib_mtu;

     struct ipoib_rx_buf *rx_ring;
@@ -305,6 +306,9 @@ struct ipoib_dev_priv {
     struct ib_send_wr tx_wr;
     unsigned          tx_outstanding;

+    struct ib_recv_wr rx_wr;
+    struct ib_sge     rx_sge[IPOIB_UD_RX_SG];
+
     struct ib_wc ibwc[IPOIB_NUM_WC];

     struct list_head dead_ahs;
@@ -366,6 +370,14 @@ struct ipoib_neigh {
     struct list_head list;
 };

+#define IPOIB_UD_MTU(ib_mtu)        (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu)   (ib_mtu + IB_GRH_BYTES)
+
+static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
+{
+    return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
+}
+
 /*
  * We stash a pointer to our private neighbour information after our
  * hardware address in neigh->ha.  The ALIGN() expression here makes
......
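The macros above make the UD sizing rule explicit: a receive buffer must cover the IB MTU plus the 40-byte GRH, and two scatter/gather entries are needed once that exceeds one page. Worked numbers for a 4K IB MTU on a 4K-page machine, a sketch assuming IB_GRH_BYTES = 40 and IPOIB_ENCAP_LEN = 4 as in this header:

    /* IPOIB_UD_MTU(4096)      = 4096 - 4  = 4092  (netdev MTU)
     * IPOIB_UD_BUF_SIZE(4096) = 4096 + 40 = 4136  > PAGE_SIZE,
     * so ipoib_ud_need_sg() is true: the first 44 bytes (GRH + encap,
     * IPOIB_UD_HEAD_SIZE) land in the linear skb head and the rest in
     * one page fragment -- hence IPOIB_UD_RX_SG = 2. */
    int need_sg = ipoib_ud_need_sg(4096);    /* 1 when PAGE_SIZE == 4096 */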
...@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref) ...@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref)
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
} }
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
u64 mapping[IPOIB_UD_RX_SG])
{
if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
DMA_FROM_DEVICE);
ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
DMA_FROM_DEVICE);
} else
ib_dma_unmap_single(priv->ca, mapping[0],
IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
DMA_FROM_DEVICE);
}
static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
struct sk_buff *skb,
unsigned int length)
{
if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int size;
/*
* There is only two buffers needed for max_payload = 4K,
* first buf size is IPOIB_UD_HEAD_SIZE
*/
skb->tail += IPOIB_UD_HEAD_SIZE;
skb->len += length;
size = length - IPOIB_UD_HEAD_SIZE;
frag->size = size;
skb->data_len += size;
skb->truesize += size;
} else
skb_put(skb, length);
}
static int ipoib_ib_post_receive(struct net_device *dev, int id) static int ipoib_ib_post_receive(struct net_device *dev, int id)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_sge list;
struct ib_recv_wr param;
struct ib_recv_wr *bad_wr; struct ib_recv_wr *bad_wr;
int ret; int ret;
list.addr = priv->rx_ring[id].mapping; priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
list.length = IPOIB_BUF_SIZE; priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
list.lkey = priv->mr->lkey; priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
param.next = NULL;
param.wr_id = id | IPOIB_OP_RECV;
param.sg_list = &list;
param.num_sge = 1;
ret = ib_post_recv(priv->qp, &param, &bad_wr); ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
if (unlikely(ret)) { if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping, ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_ring[id].skb); dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL; priv->rx_ring[id].skb = NULL;
} }
...@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id) ...@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
return ret; return ret;
} }
static int ipoib_alloc_rx_skb(struct net_device *dev, int id) static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb; struct sk_buff *skb;
u64 addr; int buf_size;
u64 *mapping;
skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); if (ipoib_ud_need_sg(priv->max_ib_mtu))
if (!skb) buf_size = IPOIB_UD_HEAD_SIZE;
return -ENOMEM; else
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
skb = dev_alloc_skb(buf_size + 4);
if (unlikely(!skb))
return NULL;
/* /*
* IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
...@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id) ...@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
*/ */
skb_reserve(skb, 4);
-addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
-DMA_FROM_DEVICE);
-if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-dev_kfree_skb_any(skb);
-return -EIO;
-}
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
goto error;
if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
struct page *page = alloc_page(GFP_ATOMIC);
if (!page)
goto partial_error;
skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
mapping[1] =
ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
goto partial_error;
}
priv->rx_ring[id].skb = skb;
-priv->rx_ring[id].mapping = addr;
-return 0;
return skb;
partial_error:
ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
dev_kfree_skb_any(skb);
return NULL;
}
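
Note that priv->rx_ring[id].mapping is now an array rather than a single u64: the matching ipoib.h change (also outside this view) turns the receive ring entry into roughly:

struct ipoib_rx_buf {
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG]; /* [0] head buffer, [1] optional page */
};
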
static int ipoib_ib_post_receives(struct net_device *dev)
@@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
int i;
for (i = 0; i < ipoib_recvq_size; ++i) {
-if (ipoib_alloc_rx_skb(dev, i)) {
if (!ipoib_alloc_rx_skb(dev, i)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
return -ENOMEM;
}
@@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
-u64 addr;
u64 mapping[IPOIB_UD_RX_SG];
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
skb = priv->rx_ring[wr_id].skb;
-addr = priv->rx_ring[wr_id].mapping;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
-ib_dma_unmap_single(priv->ca, addr,
-IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
@@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
goto repost;
memcpy(mapping, priv->rx_ring[wr_id].mapping,
IPOIB_UD_RX_SG * sizeof *mapping);
/*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
-if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
++dev->stats.rx_dropped;
goto repost;
}
@@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
-ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-skb_put(skb, wc->byte_len);
ipoib_ud_dma_unmap_rx(priv, mapping);
ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -733,10 +786,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
rx_req = &priv->rx_ring[i];
if (!rx_req->skb)
continue;
-ib_dma_unmap_single(priv->ca,
-rx_req->mapping,
-IPOIB_BUF_SIZE,
-DMA_FROM_DEVICE);
ipoib_ud_dma_unmap_rx(priv,
priv->rx_ring[i].mapping);
dev_kfree_skb_any(rx_req->skb);
rx_req->skb = NULL;
}
...
@@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
@@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev)
NETIF_F_LLTX |
NETIF_F_HIGHDMA);
-/* MTU will be reset when mcast join happens */
-dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
-priv->mcast_mtu = priv->admin_mtu = dev->mtu;
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
netif_carrier_off(dev);
@@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format,
{
struct ipoib_dev_priv *priv;
struct ib_device_attr *device_attr;
struct ib_port_attr attr;
int result = -ENOMEM;
priv = ipoib_intf_alloc(format);
@@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format,
SET_NETDEV_DEV(priv->dev, hca->dma_device);
if (!ib_query_port(hca, port, &attr))
priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
else {
printk(KERN_WARNING "%s: ib_query_port %d failed\n",
hca->name, port);
goto device_init_failed;
}
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
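
ib_query_port() reports the port's maximum MTU as an enum rather than in bytes; ib_mtu_enum_to_int() from <rdma/ib_verbs.h> performs the conversion, along the lines of:

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:          return -1;
	}
}
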
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
...
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
return;
}
-priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
-IPOIB_ENCAP_LEN;
priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
if (!ipoib_cm_admin_enabled(dev))
dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
...
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
.max_send_sge = 1,
-.max_recv_sge = 1
.max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_UD
@@ -215,6 +215,19 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->tx_wr.sg_list = priv->tx_sge;
priv->tx_wr.send_flags = IB_SEND_SIGNALED;
priv->rx_sge[0].lkey = priv->mr->lkey;
if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
priv->rx_sge[1].length = PAGE_SIZE;
priv->rx_sge[1].lkey = priv->mr->lkey;
priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
} else {
priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
priv->rx_wr.num_sge = 1;
}
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
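
Assuming the ipoib.h constants sketched earlier, the two fixed-length SGEs cover the largest UD receive: with a 4K IB MTU the HCA delivers IB_GRH_BYTES + 4096 = 4136 bytes, and the head SGE (IPOIB_UD_HEAD_SIZE = 40 + 4 = 44 bytes) plus one PAGE_SIZE fragment provide 44 + 4096 = 4140 bytes on 4K-page systems. With a 2K MTU the whole 2048 + 40 = 2088 bytes fit in the single head buffer, so num_sge stays 1.
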
return 0;
out_free_cq:
...
@@ -89,6 +89,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
goto err;
}
priv->max_ib_mtu = ppriv->max_ib_mtu;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
priv->pkey = pkey;
...
@@ -196,3 +196,160 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
struct mlx4_db_pgdir *pgdir;
pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
if (!pgdir)
return NULL;
bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
pgdir->bits[0] = pgdir->order0;
pgdir->bits[1] = pgdir->order1;
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
&pgdir->db_dma, GFP_KERNEL);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
}
return pgdir;
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
struct mlx4_db *db, int order)
{
int o;
int i;
for (o = order; o <= 1; ++o) {
i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
if (i < MLX4_DB_PER_PAGE >> o)
goto found;
}
return -ENOMEM;
found:
clear_bit(i, pgdir->bits[o]);
i <<= o;
if (o > order)
set_bit(i ^ 1, pgdir->bits[order]);
db->u.pgdir = pgdir;
db->index = i;
db->db = pgdir->db_page + db->index;
db->dma = pgdir->db_dma + db->index * 4;
db->order = order;
return 0;
}
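
mlx4_alloc_db_from_pgdir() is a two-level buddy allocator over one page of 4-byte doorbell records (MLX4_DB_PER_PAGE = PAGE_SIZE / 4, i.e. 1024 on 4K pages): an order-1 request takes an aligned pair, and an order-0 request satisfied from a free pair releases its buddy (i ^ 1) back at order 0. A worked example on a fresh pgdir:

/* First order-0 allocation:
 *   o = 0: order0 bitmap is empty, nothing found
 *   o = 1: bit 0 of order1 is set -> found, i = 0
 *   clear_bit(0, order1); i <<= 1;  -> doorbell index 0
 *   set_bit(0 ^ 1, order0);         -> index 1 becomes a free order-0 buddy
 *   db->db = db_page + 0; db->dma = db_dma + 0 * 4;
 * The next order-0 request finds bit 1 in order0 directly and uses index 1.
 */
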
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
int ret = 0;
mutex_lock(&priv->pgdir_mutex);
list_for_each_entry(pgdir, &priv->pgdir_list, list)
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
goto out;
pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
if (!pgdir) {
ret = -ENOMEM;
goto out;
}
list_add(&pgdir->list, &priv->pgdir_list);
/* This should never fail -- we just allocated an empty page: */
WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
out:
mutex_unlock(&priv->pgdir_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int o;
int i;
mutex_lock(&priv->pgdir_mutex);
o = db->order;
i = db->index;
if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
clear_bit(i ^ 1, db->u.pgdir->order0);
++o;
}
i >>= o;
set_bit(i, db->u.pgdir->bits[o]);
if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
kfree(db->u.pgdir);
}
mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
int size, int max_direct)
{
int err;
err = mlx4_db_alloc(dev, &wqres->db, 1);
if (err)
return err;
*wqres->db.db = 0;
err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
if (err)
goto err_db;
err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
&wqres->mtt);
if (err)
goto err_buf;
err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
if (err)
goto err_mtt;
return 0;
err_mtt:
mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
mlx4_buf_free(dev, size, &wqres->buf);
err_db:
mlx4_db_free(dev, &wqres->db);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
int size)
{
mlx4_mtt_cleanup(dev, &wqres->mtt);
mlx4_buf_free(dev, size, &wqres->buf);
mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
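
A consumer of the new helper gets the doorbell record, the queue buffer, and its MTT entries in one call and unwinds them symmetrically with mlx4_free_hwq_res(); a hypothetical sketch (example_create_hwq, mdev, and the size/max_direct values are invented for illustration):

/* Hypothetical consumer, not part of this commit. */
static int example_create_hwq(struct mlx4_dev *mdev, int size,
			      struct mlx4_hwq_resources *wqres)
{
	/* allocates an order-1 doorbell, the queue buffer, the MTT,
	 * and writes the buffer's pages into the MTT in one call */
	int err = mlx4_alloc_hwq_res(mdev, wqres, size, 2 * PAGE_SIZE);
	if (err)
		return err;

	/* wqres->buf is the queue memory, wqres->mtt its translation
	 * entries, and *wqres->db.db the zeroed doorbell record whose
	 * bus address (wqres->db.dma) is handed to the HCA; undo with
	 * mlx4_free_hwq_res(mdev, wqres, size) */
	return 0;
}
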
@@ -180,7 +180,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
cq_context->mtt_base_addr_h = mtt_addr >> 32;
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
-err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
...
@@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
...
@@ -257,6 +257,9 @@ struct mlx4_priv {
struct list_head ctx_list;
spinlock_t ctx_lock;
struct list_head pgdir_list;
struct mutex pgdir_mutex;
struct mlx4_fw fw;
struct mlx4_cmd cmd;
...
@@ -299,3 +299,34 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_qp_context *context,
struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
int err;
int i;
enum mlx4_qp_state states[] = {
MLX4_QP_STATE_RST,
MLX4_QP_STATE_INIT,
MLX4_QP_STATE_RTR,
MLX4_QP_STATE_RTS
};
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
context->flags &= cpu_to_be32(~(0xf << 28));
context->flags |= cpu_to_be32(states[i + 1] << 28);
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
mlx4_err(dev, "Failed to bring QP to state: "
"%d with error: %d\n",
states[i + 1], err);
return err;
}
*qp_state = states[i + 1];
}
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
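
mlx4_qp_to_ready() drives a reset QP through the mandatory RST -> INIT -> RTR -> RTS ladder, rewriting the target-state field in bits 31:28 of context->flags before each mlx4_qp_modify() call. A caller fills in the QP context once and lets the helper perform all three transitions; a hypothetical sketch (example_bring_up_qp is an invented name):

/* Hypothetical caller, not part of this commit. */
static int example_bring_up_qp(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			       struct mlx4_qp *qp)
{
	struct mlx4_qp_context context;
	enum mlx4_qp_state state;

	memset(&context, 0, sizeof context);
	/* device-specific context setup (MTU, CQ numbers, doorbell
	 * record address) would go here */

	/* on success, state == MLX4_QP_STATE_RTS */
	return mlx4_qp_to_ready(dev, mtt, &context, qp, &state);
}
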
@@ -208,6 +208,38 @@ struct mlx4_mtt {
int page_shift;
};
enum {
MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};
struct mlx4_db_pgdir {
struct list_head list;
DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
unsigned long *bits[2];
__be32 *db_page;
dma_addr_t db_dma;
};
struct mlx4_ib_user_db_page;
struct mlx4_db {
__be32 *db;
union {
struct mlx4_db_pgdir *pgdir;
struct mlx4_ib_user_db_page *user_page;
} u;
dma_addr_t dma;
int index;
int order;
};
struct mlx4_hwq_resources {
struct mlx4_db db;
struct mlx4_mtt mtt;
struct mlx4_buf buf;
};
struct mlx4_mr {
struct mlx4_mtt mtt;
u64 iova;
@@ -341,6 +373,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf);
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int size);
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
...
@@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
struct mlx4_qp_context *context);
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_qp_context *context,
struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
...