Commit a16152c8 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "One ocrdma fix:

   - The new CQ API support was added to ocrdma, but the arming logic
     was wrong, so without this fix transfers eventually fail under
     load once the driver fails to re-arm the interrupt properly

  Two related fixes for mlx4:

   - When we added the 64bit extended counters support to the core IB
     code, the RoCE side of the mlx4 driver was never updated (the IB
     side was updated properly).

     I debated whether or not to include these patches as they could be
     considered feature enablement patches, but the existing code will
     blindly copy the 32bit counters whether or not any counters were
     requested at all (a bug).

     These two patches make it (a) check that counters were actually
     requested and (b) copy the right counters (the 64bit support is
     new, the 32bit is not).  For that reason I went ahead and took
     them"
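
The distinction called out above -- saturate into the 32-bit PortCounters fields versus copy the full value into the 64-bit PortCountersExt fields -- is the core of the mlx4 change in the diff below. The following is a minimal, self-contained userspace sketch of that distinction only; it is not the driver code: the struct layouts, the sample value, and the clamping definition of ASSIGN_32BIT_COUNTER are assumptions for illustration, and the real driver additionally converts the fields to big-endian wire format.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy stand-ins for the two PMA attribute layouts: the classic
 * PortCounters fields are 32 bits wide, PortCountersExt fields are 64.
 * (Byte-order handling is omitted to keep the sketch small.)
 */
struct pma_portcounters     { uint32_t port_xmit_data; };
struct pma_portcounters_ext { uint64_t port_xmit_data; };

/* Assumed behaviour of a saturating 32-bit assignment: clamp instead of
 * wrapping once the hardware value no longer fits in 32 bits.          */
#define ASSIGN_32BIT_COUNTER(dst, val) \
	((dst) = (val) > UINT32_MAX ? UINT32_MAX : (uint32_t)(val))

int main(void)
{
	uint64_t hw_tx_bytes = 0x800000000ULL;	/* 32 GiB counted by the HCA */
	struct pma_portcounters cnt32 = { 0 };
	struct pma_portcounters_ext cnt64 = { 0 };

	/* Classic attribute: data is reported in 4-byte units and saturates. */
	ASSIGN_32BIT_COUNTER(cnt32.port_xmit_data, hw_tx_bytes >> 2);

	/* Extended attribute: the full 64-bit value fits, no clamping needed. */
	cnt64.port_xmit_data = hw_tx_bytes >> 2;

	printf("32-bit view: %" PRIu32 " (stuck at the ceiling)\n",
	       cnt32.port_xmit_data);
	printf("64-bit view: %" PRIu64 "\n", cnt64.port_xmit_data);
	return 0;
}

Once the 32-bit view has saturated, only the extended attribute still carries usable data, which is why the diff also answers the PMA class-port-info query with IB_PMA_CLASS_CAP_EXT_WIDTH: that capability bit tells consumers the extended counters are available.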

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/mlx4: Add support for the port info class for RoCE ports
  IB/mlx4: Add support for extended counters over RoCE ports
  RDMA/ocrdma: Fix arm logic to align with new cq API
parents 7ee302f6 c2bab619
@@ -817,9 +817,15 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
-static void edit_counter(struct mlx4_counter *cnt,
-			 struct ib_pma_portcounters *pma_cnt)
+static void edit_counter(struct mlx4_counter *cnt, void *counters,
+			 __be16 attr_id)
 {
+	switch (attr_id) {
+	case IB_PMA_PORT_COUNTERS:
+	{
+		struct ib_pma_portcounters *pma_cnt =
+			(struct ib_pma_portcounters *)counters;
+
 	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
 			     (be64_to_cpu(cnt->tx_bytes) >> 2));
 	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
@@ -828,6 +834,31 @@ static void edit_counter(struct mlx4_counter *cnt,
 			     be64_to_cpu(cnt->tx_frames));
 	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
 			     be64_to_cpu(cnt->rx_frames));
+		break;
+	}
+	case IB_PMA_PORT_COUNTERS_EXT:
+	{
+		struct ib_pma_portcounters_ext *pma_cnt_ext =
+			(struct ib_pma_portcounters_ext *)counters;
+
+		pma_cnt_ext->port_xmit_data =
+			cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
+		pma_cnt_ext->port_rcv_data =
+			cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
+		pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
+		pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
+		break;
+	}
+	}
+}
+
+static int iboe_process_mad_port_info(void *out_mad)
+{
+	struct ib_class_port_info cpi = {};
+
+	cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+	memcpy(out_mad, &cpi, sizeof(cpi));
+	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -842,6 +873,9 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
 		return -EINVAL;
 
+	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
+		return iboe_process_mad_port_info((void *)(out_mad->data + 40));
+
 	memset(&counter_stats, 0, sizeof(counter_stats));
 	mutex_lock(&dev->counters_table[port_num - 1].mutex);
 	list_for_each_entry(tmp_counter,
@@ -863,7 +897,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	switch (counter_stats.counter_mode & 0xf) {
 	case 0:
 		edit_counter(&counter_stats,
-			     (void *)(out_mad->data + 40));
+			     (void *)(out_mad->data + 40),
+			     in_mad->mad_hdr.attr_id);
 		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 		break;
 	default:
@@ -894,8 +929,10 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	 */
 	if (link == IB_LINK_LAYER_INFINIBAND) {
 		if (mlx4_is_slave(dev->dev) &&
-		    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
-		    in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+		    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+		     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+		      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
 			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
 						in_grh, in_mad, out_mad);
 
......
@@ -323,9 +323,6 @@ struct ocrdma_cq {
			 */
 	u32 max_hw_cqe;
 	bool phase_change;
-	bool deferred_arm, deferred_sol;
-	bool first_arm;
-
 	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
						   * to cq polling
						   */
......
@@ -1094,7 +1094,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
 	spin_lock_init(&cq->comp_handler_lock);
 	INIT_LIST_HEAD(&cq->sq_head);
 	INIT_LIST_HEAD(&cq->rq_head);
-	cq->first_arm = true;
 
 	if (ib_ctx) {
 		uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2910,12 +2909,9 @@ static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
 	}
 stop_cqe:
 	cq->getp = cur_getp;
-	if (cq->deferred_arm || polled_hw_cqes) {
-		ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm,
-				  cq->deferred_sol, polled_hw_cqes);
-		cq->deferred_arm = false;
-		cq->deferred_sol = false;
-	}
+
+	if (polled_hw_cqes)
+		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
 
 	return i;
 }
@@ -2999,13 +2995,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
 	if (cq_flags & IB_CQ_SOLICITED)
 		sol_needed = true;
 
-	if (cq->first_arm) {
-		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
-		cq->first_arm = false;
-	}
-
-	cq->deferred_arm = true;
-	cq->deferred_sol = sol_needed;
+	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 	return 0;
 }
......
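
The ocrdma hunks above remove the deferred_arm/first_arm bookkeeping and ring the CQ doorbell directly in ocrdma_arm_cq(). The sketch below is a toy userspace model, not driver code: the structure, function names, and flow are invented purely to illustrate why deferring the arm until the next poll can stall the new CQ API's poll-until-empty-then-request-notification flow.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a completion queue whose hardware only raises an
 * interrupt for a new completion while it is currently armed.        */
struct toy_cq {
	int	pending;	/* completions waiting to be polled    */
	bool	hw_armed;	/* doorbell actually rung with "arm"   */
	bool	deferred_arm;	/* old-style software-only arm flag    */
};

/* Deferred scheme: req_notify only records the wish; the doorbell is
 * rung on the *next* poll.  Fixed scheme: ring the doorbell now.     */
static void req_notify(struct toy_cq *cq, bool deferred)
{
	if (deferred)
		cq->deferred_arm = true;	/* arm later, at poll time */
	else
		cq->hw_armed = true;		/* arm right now           */
}

/* Drain the queue; with the deferred scheme this is also the only
 * place where a pending arm request reaches the hardware.            */
static int poll(struct toy_cq *cq)
{
	int n = cq->pending;

	cq->pending = 0;
	if (cq->deferred_arm) {
		cq->hw_armed = true;
		cq->deferred_arm = false;
	}
	return n;
}

/* A completion arrives; it only generates an interrupt (and hence a
 * later poll) if the hardware happens to be armed at that moment.    */
static bool completion_arrives(struct toy_cq *cq)
{
	cq->pending++;
	if (cq->hw_armed) {
		cq->hw_armed = false;	/* one-shot arm, like a real CQ */
		return true;		/* interrupt -> consumer polls  */
	}
	return false;			/* silent: nobody will poll     */
}

static void run(bool deferred)
{
	struct toy_cq cq = { 0 };

	/* New-CQ-API style flow: poll until empty, then ask for the
	 * next notification and return without polling again.        */
	poll(&cq);
	req_notify(&cq, deferred);

	printf("%s arming: next completion %s an interrupt\n",
	       deferred ? "deferred" : "immediate",
	       completion_arrives(&cq) ? "raises" : "never raises");
}

int main(void)
{
	run(true);	/* old behaviour: stalls under this flow */
	run(false);	/* fixed behaviour                       */
	return 0;
}

With the deferred scheme the arm request is only flushed by a later poll, but that poll never happens because the unarmed hardware never raises the interrupt that would trigger it; arming the hardware immediately in the notify path breaks that circular dependency.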