Commit f54c77dd authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull infiniband fixes from Roland Dreier:
 "Small batch of fixes for 3.7:
   - Fix crash in error path in cxgb4
   - Fix build error on 32 bits in mlx4
   - Fix SR-IOV bugs in mlx4"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  mlx4_core: Perform correct resource cleanup if mlx4_QUERY_ADAPTER() fails
  mlx4_core: Remove annoying debug messages from SR-IOV flow
  RDMA/cxgb4: Don't free chunk that we have failed to allocate
  IB/mlx4: Synchronize cleanup of MCGs in MCG paravirtualization
  IB/mlx4: Fix QP1 P_Key processing in the Primary Physical Function (PPF)
  IB/mlx4: Fix build error on platforms where UL is not 64 bits
parents 1d47091a 1e3474d1
@@ -468,7 +468,7 @@ struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
 	ret = alloc_pbl(mhp, npages);
 	if (ret) {
 		kfree(page_list);
-		goto err_pbl;
+		goto err;
 	}
 
 	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
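The cxgb4 hunk above follows the usual goto-unwind idiom: when alloc_pbl() itself fails there is no PBL chunk to release, so the error path must jump past the label that frees it. A minimal standalone sketch of that idiom, with illustrative names rather than the driver's code:

#include <stdlib.h>

struct thing {
	void *a;
	void *b;
};

/* Cleanup labels are ordered so a failed allocation skips its own free. */
static int thing_init(struct thing *t)
{
	t->a = malloc(64);
	if (!t->a)
		goto err;		/* nothing allocated yet: plain exit */

	t->b = malloc(64);
	if (!t->b)
		goto err_free_a;	/* only 'a' exists, so free only 'a' */

	return 0;

err_free_a:
	free(t->a);
err:
	return -1;
}

int main(void)
{
	struct thing t;

	return thing_init(&t);
}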
@@ -107,7 +107,7 @@ static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index
 {
 	if (index >= NUM_ALIAS_GUID_PER_PORT) {
 		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
-		return (__force __be64) ((u64) 0xFFFFFFFFFFFFFFFFUL);
+		return (__force __be64) -1;
 	}
 	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
 }
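The get_cached_alias_guid() hunk above is the 32-bit build fix: on 32-bit platforms unsigned long is 32 bits wide, so the UL-suffixed all-ones constant does not fit its suffixed type and the compiler complains, while -1 converted to a 64-bit unsigned type yields the same all-ones bit pattern on every architecture. A small userspace sketch of the difference, with uint64_t standing in for __be64:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* May warn or break a 32-bit build: constant exceeds unsigned long */
	uint64_t old_style = (uint64_t) 0xFFFFFFFFFFFFFFFFUL;

	/* Portable: -1 converted to an unsigned 64-bit type is all ones */
	uint64_t new_style = (uint64_t) -1;

	printf("%llx %llx\n",
	       (unsigned long long) old_style,
	       (unsigned long long) new_style);
	return 0;
}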
@@ -409,38 +409,45 @@ int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
 }
 
-static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix,
-				 u8 *full_pk_ix, u8 *partial_pk_ix,
-				 int *is_full_member)
-{
-	u16 search_pkey;
-	int fm;
-	int err = 0;
-	u16 pk;
-
-	err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey);
-	if (err)
-		return err;
-
-	fm = (search_pkey & 0x8000) ? 1 : 0;
-	if (fm) {
-		*full_pk_ix = ph_pkey_ix;
-		search_pkey &= 0x7FFF;
-	} else {
-		*partial_pk_ix = ph_pkey_ix;
-		search_pkey |= 0x8000;
-	}
-
-	if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk))
-		pk = 0xFFFF;
-
-	if (fm)
-		*partial_pk_ix = (pk & 0xFF);
-	else
-		*full_pk_ix = (pk & 0xFF);
-
-	*is_full_member = fm;
-	return err;
-}
+static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
+				   u8 port, u16 pkey, u16 *ix)
+{
+	int i, ret;
+	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
+	u16 slot_pkey;
+
+	if (slave == mlx4_master_func_num(dev->dev))
+		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
+
+	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
+
+	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
+		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
+			continue;
+
+		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
+
+		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
+		if (ret)
+			continue;
+		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
+			if (slot_pkey & 0x8000) {
+				*ix = (u16) pkey_ix;
+				return 0;
+			} else {
+				/* take first partial pkey index found */
+				if (partial_ix == 0xFF)
+					partial_ix = pkey_ix;
+			}
+		}
+	}
+
+	if (partial_ix < 0xFF) {
+		*ix = (u16) partial_ix;
+		return 0;
+	}
+
+	return -EINVAL;
+}
 
 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
@@ -458,10 +465,8 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	unsigned tun_tx_ix = 0;
 	int dqpn;
 	int ret = 0;
-	int i;
-	int is_full_member = 0;
 	u16 tun_pkey_ix;
-	u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0;
+	u16 cached_pkey;
 
 	if (dest_qpt > IB_QPT_GSI)
 		return -EINVAL;
@@ -481,27 +486,17 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	else
 		tun_qp = &tun_ctx->qp[1];
 
-	/* compute pkey index for slave */
-	/* get physical pkey -- virtualized Dom0 pkey to phys*/
+	/* compute P_Key index to put in tunnel header for slave */
 	if (dest_qpt) {
-		ph_pkey_ix =
-			dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index];
-
-		/* now, translate this to the slave pkey index */
-		ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
-					    &partial_pk_ix, &is_full_member);
+		u16 pkey_ix;
+		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
 		if (ret)
 			return -EINVAL;
 
-		for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
-			if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) ||
-			    (is_full_member &&
-			     (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
-				break;
-		}
-
-		if (i == dev->dev->caps.pkey_table_len[port])
+		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
+		if (ret)
 			return -EINVAL;
-		tun_pkey_ix = i;
+
+		tun_pkey_ix = pkey_ix;
 	} else
 		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
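The P_Key handling above relies on the InfiniBand P_Key layout: bit 15 is the membership bit (set for full members, clear for limited members) and the low 15 bits identify the partition. That is why find_slave_port_pkey_ix() compares the low 15 bits, prefers a full-membership slot, and keeps the first limited-membership slot as a fallback, and why the next hunk looks up IB_DEFAULT_PKEY_FULL (0xFFFF) rather than assuming index 0. A small sketch of just that convention (illustrative names, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PKEY_MEMBERSHIP_BIT 0x8000	/* bit 15: full (1) vs limited (0) member */
#define PKEY_BASE_MASK      0x7FFF	/* low 15 bits: partition number */

static bool pkey_full_member(uint16_t pkey)
{
	return pkey & PKEY_MEMBERSHIP_BIT;
}

static bool pkey_same_partition(uint16_t a, uint16_t b)
{
	return (a & PKEY_BASE_MASK) == (b & PKEY_BASE_MASK);
}

int main(void)
{
	uint16_t full = 0xFFFF;		/* default partition, full member */
	uint16_t limited = 0x7FFF;	/* default partition, limited member */

	printf("same partition: %d, full member: %d %d\n",
	       pkey_same_partition(full, limited),
	       pkey_full_member(full), pkey_full_member(limited));
	return 0;
}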
@@ -233,7 +233,8 @@ static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
 	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
 
-	wc.pkey_index = 0;
+	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
+		return -EINVAL;
 	wc.sl = 0;
 	wc.dlid_path_bits = 0;
 	wc.port_num = ctx->port;
@@ -1074,10 +1075,6 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
 	unsigned long end;
 	int count;
 
-	if (ctx->flushing)
-		return;
-
-	ctx->flushing = 1;
 	for (i = 0; i < MAX_VFS; ++i)
 		clean_vf_mcast(ctx, i);
@@ -1107,9 +1104,6 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
 		force_clean_group(group);
 	}
 	mutex_unlock(&ctx->mcg_table_lock);
-
-	if (!destroy_wq)
-		ctx->flushing = 0;
 }
 
 struct clean_work {
@@ -1123,6 +1117,7 @@ static void mcg_clean_task(struct work_struct *work)
 	struct clean_work *cw = container_of(work, struct clean_work, work);
 
 	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
+	cw->ctx->flushing = 0;
 	kfree(cw);
 }
 
@@ -1130,13 +1125,20 @@ void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
 {
 	struct clean_work *work;
 
+	if (ctx->flushing)
+		return;
+
+	ctx->flushing = 1;
+
 	if (destroy_wq) {
 		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
+		ctx->flushing = 0;
 		return;
 	}
 
 	work = kmalloc(sizeof *work, GFP_KERNEL);
 	if (!work) {
+		ctx->flushing = 0;
 		mcg_warn("failed allocating work for cleanup\n");
 		return;
 	}
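The mcg cleanup hunks above move the flushing guard out of _mlx4_ib_mcg_port_cleanup() and into mlx4_ib_mcg_port_cleanup(): the flag is now set once before choosing between the synchronous and the deferred path, and cleared only after the cleanup has actually run (or when the work item cannot be allocated). A standalone sketch of that guard pattern, with the work queue reduced to an immediate call and all names illustrative, not the mlx4 code:

#include <stdbool.h>
#include <stdio.h>

struct demux_ctx {
	int flushing;
};

static void do_cleanup(struct demux_ctx *ctx)
{
	printf("cleanup ran (flushing=%d)\n", ctx->flushing);
}

/* Deferred path: performs the cleanup, then drops the guard. */
static void cleanup_task(struct demux_ctx *ctx)
{
	do_cleanup(ctx);
	ctx->flushing = 0;
}

/* Stand-in for queueing onto a workqueue; here it simply runs inline. */
static bool queue_cleanup(void (*fn)(struct demux_ctx *), struct demux_ctx *ctx)
{
	fn(ctx);
	return true;
}

/* Entry point: the only place that tests and sets the guard. */
static void port_cleanup(struct demux_ctx *ctx, bool synchronous)
{
	if (ctx->flushing)
		return;
	ctx->flushing = 1;

	if (synchronous) {
		do_cleanup(ctx);
		ctx->flushing = 0;
		return;
	}

	if (!queue_cleanup(cleanup_task, ctx))
		ctx->flushing = 0;	/* could not defer: drop the guard */
}

int main(void)
{
	struct demux_ctx ctx = { 0 };

	port_cleanup(&ctx, false);
	return 0;
}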
@@ -329,9 +329,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 	ctx = &priv->mfunc.master.slave_state[slave];
 	spin_lock_irqsave(&ctx->lock, flags);
 
-	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
-		 __func__, slave, cur_state, event);
-
 	switch (cur_state) {
 	case SLAVE_PORT_DOWN:
 		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
@@ -366,9 +363,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 		goto out;
 	}
 	ret = mlx4_get_slave_port_state(dev, slave, port);
-	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
-		 " :%d gen_event: %d\n",
-		 __func__, slave, cur_state, event, *gen_event);
 
 out:
 	spin_unlock_irqrestore(&ctx->lock, flags);
@@ -1405,7 +1405,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 	unmap_bf_area(dev);
 
 err_close:
-	mlx4_close_hca(dev);
+	if (mlx4_is_slave(dev))
+		mlx4_slave_exit(dev);
+	else
+		mlx4_CLOSE_HCA(dev, 0);
 
 err_free_icm:
 	if (!mlx4_is_slave(dev))
@@ -330,9 +330,6 @@ static void update_pkey_index(struct mlx4_dev *dev, int slave,
 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
 	*(u8 *)(inbox->buf + 35) = new_index;
-
-	mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
-		 "new pkey index = %d\n", port, orig_index, new_index);
 }
 
 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
@@ -351,9 +348,6 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
 			qp_ctx->alt_path.mgid_index = slave & 0x7F;
 	}
-
-	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
-		 slave, qp_ctx->pri_path.mgid_index);
 }
 
 static int mpt_mask(struct mlx4_dev *dev)