Commit 2ade0b7f authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull InfiniBand/RDMA fixes from Roland Dreier:
 - A couple more IPoIB fixes for regressions introduced by the path
   database conversion
 - Minor other fixes to low-level drivers (cxgb4, mlx4, qib, ocrdma)

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/qib: Fix failure of compliance test C14-024#06_LocalPortNum
  RDMA/ocrdma: Fix CQE expansion of unsignaled WQE
  mlx4_core: Fix integer overflows so 8TBs of memory registration works
  IPoIB: Fix AB-BA deadlock when deleting neighbours
  IPoIB: Fix memory leak in the neigh table deletion flow
  RDMA/cxgb4: Move dereference below NULL test
parents 6bf61045 2116fe4e
drivers/infiniband/hw/cxgb4/cm.c
@@ -1361,11 +1361,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct tid_info *t = dev->rdev.lldi.tids;
 
 	ep = lookup_tid(t, tid);
-	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	if (!ep) {
 		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
 		return 0;
 	}
+	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
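The cxgb4 change above is the classic dereference-before-NULL-test bug: the endpoint returned by the tid lookup was dereferenced in a debug print before the lookup result was checked, so a lookup miss crashed before the warning could fire. A minimal standalone sketch of the pattern (hypothetical names, plain userspace C rather than the driver code):

#include <stdio.h>
#include <stddef.h>

struct endpoint { unsigned int hwtid; };

/* stand-in for lookup_tid(); returns NULL on a miss */
static struct endpoint *lookup(struct endpoint *tbl, unsigned int tid)
{
        return tid == 0 ? tbl : NULL;
}

static int handle_abort_rpl(struct endpoint *tbl, unsigned int tid)
{
        struct endpoint *ep = lookup(tbl, tid);

        /* WRONG (pre-fix): dereferences ep before the NULL test
         * printf("ep %p tid %u\n", (void *)ep, ep->hwtid);
         */
        if (!ep) {
                fprintf(stderr, "abort rpl for freed endpoint\n");
                return 0;
        }
        printf("ep %p tid %u\n", (void *)ep, ep->hwtid); /* safe here */
        return 1;
}

int main(void)
{
        struct endpoint ep = { .hwtid = 42 };

        handle_abort_rpl(&ep, 0);       /* hit */
        handle_abort_rpl(&ep, 7);       /* miss: warns instead of crashing */
        return 0;
}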
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -2219,7 +2219,6 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
 	u32 wqe_idx;
 
 	if (!qp->wqe_wr_id_tbl[tail].signaled) {
-		expand = true;	/* CQE cannot be consumed yet */
 		*polled = false;	/* WC cannot be consumed yet */
 	} else {
 		ibwc->status = IB_WC_SUCCESS;
@@ -2227,10 +2226,11 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
 		ibwc->qp = &qp->ibqp;
 		ocrdma_update_wc(qp, ibwc, tail);
 		*polled = true;
-	}
-	wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
-	if (tail != wqe_idx)
-		expand = true; /* Coalesced CQE can't be consumed yet */
+		wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+		if (tail != wqe_idx)
+			expand = true; /* Coalesced CQE can't be consumed yet */
+	}
+
 	ocrdma_hwq_inc_tail(&qp->sq);
 	return expand;
 }
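The ocrdma fix concerns coalesced completions: one hardware CQE can report the last of several software WQEs, and only signaled WQEs may surface a work completion to the caller. The sketch below is a deliberately simplified model of that expansion loop, with assumed names and reduced control flow, not the driver's exact logic:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct wqe { bool signaled; unsigned long wr_id; };

static unsigned int tail;
static struct wqe sq[RING_SIZE];

/* Returns true while the CQE still covers more WQEs (keep expanding). */
static bool poll_one(unsigned int cqe_wqe_idx, bool *polled, unsigned long *wr_id)
{
        bool expand = false;

        if (!sq[tail].signaled) {
                *polled = false;                /* no completion for this WQE */
        } else {
                *wr_id = sq[tail].wr_id;        /* hand a completion to the user */
                *polled = true;
        }
        if (tail != cqe_wqe_idx)
                expand = true;                  /* CQE covers further WQEs */
        tail = (tail + 1) % RING_SIZE;          /* retire one WQE either way */
        return expand;
}

int main(void)
{
        bool polled, expand;
        unsigned long wr_id;

        sq[0] = (struct wqe){ .signaled = false, .wr_id = 100 };
        sq[1] = (struct wqe){ .signaled = false, .wr_id = 101 };
        sq[2] = (struct wqe){ .signaled = true,  .wr_id = 102 };

        do {    /* one CQE arrived for wqe_idx == 2; expand until consumed */
                expand = poll_one(2, &polled, &wr_id);
                if (polled)
                        printf("completion for wr_id %lu\n", wr_id);
        } while (expand);
        return 0;
}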
drivers/infiniband/hw/qib/qib_mad.c
@@ -471,11 +471,12 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		if (port_num != port) {
 			ibp = to_iport(ibdev, port_num);
 			ret = check_mkey(ibp, smp, 0);
-			if (ret)
+			if (ret) {
 				ret = IB_MAD_RESULT_FAILURE;
 				goto bail;
+			}
 		}
 	}
 
 	dd = dd_from_ibdev(ibdev);
 	/* IB numbers ports from 1, hdw from 0 */
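The qib hunk is a missing-braces fix: the indentation suggested both statements were guarded by the if, but only the first was, so the goto ran unconditionally and the function bailed out even on success. A minimal sketch of the bug class (hypothetical values standing in for the MAD result codes):

#include <stdio.h>

static int check(int fail) { return fail ? -1 : 0; }

static int get_portinfo(int fail)
{
        int ret = check(fail);

        /* WRONG (pre-fix): "goto bail" executes even when ret == 0
         *
         *      if (ret)
         *              ret = -100;
         *              goto bail;
         */
        if (ret) {
                ret = -100;     /* report failure */
                goto bail;
        }
        ret = 1;                /* normal result */
bail:
        return ret;
}

int main(void)
{
        printf("ok path: %d, fail path: %d\n", get_portinfo(0), get_portinfo(1));
        return 0;
}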
drivers/infiniband/ulp/ipoib/ipoib.h
@@ -262,7 +262,10 @@ struct ipoib_ethtool_st {
 	u16 max_coalesced_frames;
 };
 
+struct ipoib_neigh_table;
+
 struct ipoib_neigh_hash {
+	struct ipoib_neigh_table *ntbl;
 	struct ipoib_neigh __rcu **buckets;
 	struct rcu_head rcu;
 	u32 mask;
@@ -271,9 +274,9 @@ struct ipoib_neigh_hash {
 
 struct ipoib_neigh_table {
 	struct ipoib_neigh_hash __rcu *htbl;
-	rwlock_t rwlock;
 	atomic_t entries;
 	struct completion flushed;
+	struct completion deleted;
 };
 
 /*
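The two header changes work together: the hash gains a back-pointer to its owning table because the RCU free callback receives only the embedded rcu_head, yet must still reach the table to signal the new "deleted" completion. A userspace sketch of the container_of pattern involved (an int stands in for struct completion; all names are hypothetical):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

struct neigh_table;             /* forward declaration, as in the patch */

struct neigh_hash {
        struct neigh_table *ntbl;       /* back-pointer added by the fix */
        struct rcu_head rcu;
};

struct neigh_table {
        struct neigh_hash *htbl;
        int deleted;                    /* stands in for struct completion */
};

static void hash_free_rcu(struct rcu_head *head)
{
        struct neigh_hash *htbl = container_of(head, struct neigh_hash, rcu);

        htbl->ntbl->deleted = 1;        /* complete(&ntbl->deleted) analog */
        printf("table teardown can proceed\n");
}

int main(void)
{
        struct neigh_table tbl = { 0 };
        struct neigh_hash hash = { .ntbl = &tbl };

        tbl.htbl = &hash;
        hash_free_rcu(&hash.rcu);       /* normally deferred via call_rcu() */
        return 0;
}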
drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -546,15 +546,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
+	spin_lock_irqsave(&priv->lock, flags);
 	neigh = ipoib_neigh_alloc(daddr, dev);
 	if (!neigh) {
+		spin_unlock_irqrestore(&priv->lock, flags);
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		return;
 	}
 
-	spin_lock_irqsave(&priv->lock, flags);
-
 	path = __path_find(dev, daddr + 4);
 	if (!path) {
 		path = path_rec_create(dev, daddr + 4);
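This hunk is the heart of the AB-BA fix: previously one path took ntbl->rwlock and then priv->lock while another nested them the other way around; the patch serializes everything under priv->lock alone, so the inversion cannot occur. A two-thread pthreads sketch of the deadlock that was possible before (lock names are stand-ins):

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* ~ ntbl->rwlock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* ~ priv->lock */

static void *thread1(void *arg)
{
        pthread_mutex_lock(&lock_a);    /* A ... */
        pthread_mutex_lock(&lock_b);    /* ... then B */
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
        return arg;
}

static void *thread2(void *arg)
{
        pthread_mutex_lock(&lock_b);    /* B ... */
        pthread_mutex_lock(&lock_a);    /* ... then A: can deadlock with thread1 */
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
        return arg;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, thread1, NULL);
        pthread_create(&t2, NULL, thread2, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL); /* may never return: AB-BA deadlock */
        return 0;
}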
@@ -863,10 +863,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
 		return;
 
-	write_lock_bh(&ntbl->rwlock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 
 	if (!htbl)
 		goto out_unlock;
@@ -883,16 +883,14 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
 
 		while ((neigh = rcu_dereference_protected(*np,
-							  lockdep_is_held(&ntbl->rwlock))) != NULL) {
+							  lockdep_is_held(&priv->lock))) != NULL) {
 			/* was the neigh idle for two GC periods */
 			if (time_after(neigh_obsolete, neigh->alive)) {
 				rcu_assign_pointer(*np,
 						   rcu_dereference_protected(neigh->hnext,
-									     lockdep_is_held(&ntbl->rwlock)));
+									     lockdep_is_held(&priv->lock)));
 				/* remove from path/mc list */
-				spin_lock_irqsave(&priv->lock, flags);
 				list_del(&neigh->list);
-				spin_unlock_irqrestore(&priv->lock, flags);
 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
 			} else {
 				np = &neigh->hnext;
@@ -902,7 +900,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 	}
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
static void ipoib_reap_neigh(struct work_struct *work)
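Every lockdep_is_held() annotation in these hunks is updated to match the lock conversion, since rcu_dereference_protected() both documents and, under CONFIG_PROVE_RCU, verifies which lock makes an unprotected dereference safe. An illustrative kernel-style fragment of the idiom, not taken from the patch (names are assumptions):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct bucket_table { int size; };

static struct bucket_table __rcu *tbl;
static DEFINE_SPINLOCK(tbl_lock);

/* Writer-side swap: holding tbl_lock is what makes the plain dereference
 * legal, and lockdep_is_held() lets RCU debugging verify that claim. */
static struct bucket_table *tbl_replace(struct bucket_table *new)
{
        struct bucket_table *old;

        spin_lock(&tbl_lock);
        old = rcu_dereference_protected(tbl, lockdep_is_held(&tbl_lock));
        rcu_assign_pointer(tbl, new);
        spin_unlock(&tbl_lock);
        return old;     /* caller frees it after a grace period */
}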
@@ -947,10 +945,8 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	struct ipoib_neigh *neigh;
 	u32 hash_val;
 
-	write_lock_bh(&ntbl->rwlock);
-
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 	if (!htbl) {
 		neigh = NULL;
 		goto out_unlock;
@@ -961,10 +957,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	 */
 	hash_val = ipoib_addr_hash(htbl, daddr);
 	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
-					       lockdep_is_held(&ntbl->rwlock));
+					       lockdep_is_held(&priv->lock));
 	     neigh != NULL;
 	     neigh = rcu_dereference_protected(neigh->hnext,
-					       lockdep_is_held(&ntbl->rwlock))) {
+					       lockdep_is_held(&priv->lock))) {
 		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
 			/* found, take one ref on behalf of the caller */
 			if (!atomic_inc_not_zero(&neigh->refcnt)) {
@@ -987,12 +983,11 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	/* put in hash */
 	rcu_assign_pointer(neigh->hnext,
 			   rcu_dereference_protected(htbl->buckets[hash_val],
-						     lockdep_is_held(&ntbl->rwlock)));
+						     lockdep_is_held(&priv->lock)));
 	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
 	atomic_inc(&ntbl->entries);
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
 	return neigh;
 }
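Worth noting in the lookup path retained above: atomic_inc_not_zero() takes a reference only while the count is still nonzero, so an object whose last reference is already gone cannot be resurrected mid-teardown. A C11 sketch of that idiom:

#include <stdatomic.h>
#include <stdbool.h>

static bool inc_not_zero(atomic_int *refcnt)
{
        int old = atomic_load(refcnt);

        while (old != 0) {
                /* try to move old -> old + 1; on failure 'old' is reloaded */
                if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                        return true;
        }
        return false;   /* object already dying; caller must not use it */
}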
@@ -1040,35 +1035,29 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
 	struct ipoib_neigh *n;
 	u32 hash_val;
 
-	write_lock_bh(&ntbl->rwlock);
-
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					lockdep_is_held(&ntbl->rwlock));
+					lockdep_is_held(&priv->lock));
 	if (!htbl)
-		goto out_unlock;
+		return;
 
 	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
 	np = &htbl->buckets[hash_val];
 	for (n = rcu_dereference_protected(*np,
-					   lockdep_is_held(&ntbl->rwlock));
+					   lockdep_is_held(&priv->lock));
 	     n != NULL;
 	     n = rcu_dereference_protected(*np,
-					lockdep_is_held(&ntbl->rwlock))) {
+					lockdep_is_held(&priv->lock))) {
 		if (n == neigh) {
 			/* found */
 			rcu_assign_pointer(*np,
 					   rcu_dereference_protected(neigh->hnext,
-								     lockdep_is_held(&ntbl->rwlock)));
+								     lockdep_is_held(&priv->lock)));
 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
-			goto out_unlock;
+			return;
 		} else {
 			np = &n->hnext;
 		}
 	}
-
-out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
 }
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
@@ -1080,7 +1069,6 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 	ntbl->htbl = NULL;
-	rwlock_init(&ntbl->rwlock);
 	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
 	if (!htbl)
 		return -ENOMEM;
@@ -1095,6 +1083,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 	htbl->mask = (size - 1);
 	htbl->buckets = buckets;
 	ntbl->htbl = htbl;
+	htbl->ntbl = ntbl;
 	atomic_set(&ntbl->entries, 0);
 
 	/* start garbage collection */
@@ -1111,9 +1100,11 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
 						    struct ipoib_neigh_hash,
 						    rcu);
 	struct ipoib_neigh __rcu **buckets = htbl->buckets;
+	struct ipoib_neigh_table *ntbl = htbl->ntbl;
 
 	kfree(buckets);
 	kfree(htbl);
+	complete(&ntbl->deleted);
 }
 
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
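This is the core of the memory-leak fix: freeing the hash from an RCU callback is asynchronous, so teardown must wait for the callback to actually run instead of merely queueing it. A condensed kernel-style sketch of the resulting shape (assumed names, not the ipoib code verbatim):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct table;

struct hash {
        struct table *owner;    /* back-pointer, as added in ipoib.h */
        struct rcu_head rcu;
};

struct table {
        struct hash __rcu *htbl;
        struct completion deleted;
};

static void hash_free_rcu(struct rcu_head *head)
{
        struct hash *h = container_of(head, struct hash, rcu);
        struct table *t = h->owner;

        kfree(h);
        complete(&t->deleted);  /* now the table itself may go away */
}

static void table_destroy(struct table *t)
{
        struct hash *h = rcu_dereference_protected(t->htbl, 1);

        init_completion(&t->deleted);
        RCU_INIT_POINTER(t->htbl, NULL);
        call_rcu(&h->rcu, hash_free_rcu);
        wait_for_completion(&t->deleted);       /* don't return while h lives */
}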
@@ -1125,10 +1116,10 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 	int i;
 
 	/* remove all neigh connected to a given path or mcast */
-	write_lock_bh(&ntbl->rwlock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 
 	if (!htbl)
 		goto out_unlock;
@@ -1138,16 +1129,14 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
 
 		while ((neigh = rcu_dereference_protected(*np,
-							  lockdep_is_held(&ntbl->rwlock))) != NULL) {
+							  lockdep_is_held(&priv->lock))) != NULL) {
 			/* delete neighs belong to this parent */
 			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
 				rcu_assign_pointer(*np,
 						   rcu_dereference_protected(neigh->hnext,
-									     lockdep_is_held(&ntbl->rwlock)));
+									     lockdep_is_held(&priv->lock)));
 				/* remove from parent list */
-				spin_lock_irqsave(&priv->lock, flags);
 				list_del(&neigh->list);
-				spin_unlock_irqrestore(&priv->lock, flags);
 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
 			} else {
 				np = &neigh->hnext;
@@ -1156,7 +1145,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 		}
 	}
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
......@@ -1164,37 +1153,44 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
unsigned long flags;
int i;
int i, wait_flushed = 0;
write_lock_bh(&ntbl->rwlock);
init_completion(&priv->ntbl.flushed);
spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl,
lockdep_is_held(&ntbl->rwlock));
lockdep_is_held(&priv->lock));
if (!htbl)
goto out_unlock;
wait_flushed = atomic_read(&priv->ntbl.entries);
if (!wait_flushed)
goto free_htbl;
for (i = 0; i < htbl->size; i++) {
struct ipoib_neigh *neigh;
struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np,
lockdep_is_held(&ntbl->rwlock))) != NULL) {
lockdep_is_held(&priv->lock))) != NULL) {
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&ntbl->rwlock)));
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
spin_lock_irqsave(&priv->lock, flags);
list_del(&neigh->list);
spin_unlock_irqrestore(&priv->lock, flags);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
}
}
free_htbl:
rcu_assign_pointer(ntbl->htbl, NULL);
call_rcu(&htbl->rcu, neigh_hash_free_rcu);
out_unlock:
write_unlock_bh(&ntbl->rwlock);
spin_unlock_irqrestore(&priv->lock, flags);
if (wait_flushed)
wait_for_completion(&priv->ntbl.flushed);
}
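One detail of ipoib_flush_neighs() after the fix deserves emphasis: the entry count is snapshotted and all unlinking happens under priv->lock, but wait_for_completion() only runs after the unlock, since sleeping while holding a spinlock is not allowed. A condensed kernel-style sketch of that shape (assumed names):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock);
static struct completion flushed;
static atomic_t entries = ATOMIC_INIT(0);

static void flush_all(void)
{
        unsigned long flags;
        int wait;

        init_completion(&flushed);

        spin_lock_irqsave(&lock, flags);
        wait = atomic_read(&entries);   /* snapshot under the lock */
        /* ... unlink all entries and queue their RCU free callbacks ... */
        spin_unlock_irqrestore(&lock, flags);

        if (wait)
                wait_for_completion(&flushed); /* sleep only after unlock */
}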
static void ipoib_neigh_hash_uninit(struct net_device *dev)
......@@ -1203,7 +1199,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
int stopped;
ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
init_completion(&priv->ntbl.flushed);
init_completion(&priv->ntbl.deleted);
set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
/* Stop GC if called at init fail need to cancel work */
......@@ -1211,10 +1207,9 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
if (!stopped)
cancel_delayed_work(&priv->neigh_reap_task);
if (atomic_read(&priv->ntbl.entries)) {
ipoib_flush_neighs(priv);
wait_for_completion(&priv->ntbl.flushed);
}
wait_for_completion(&priv->ntbl.deleted);
}
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -707,9 +707,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 		neigh = ipoib_neigh_get(dev, daddr);
 		spin_lock_irqsave(&priv->lock, flags);
 		if (!neigh) {
-			spin_unlock_irqrestore(&priv->lock, flags);
 			neigh = ipoib_neigh_alloc(daddr, dev);
-			spin_lock_irqsave(&priv->lock, flags);
 			if (neigh) {
 				kref_get(&mcast->ah->ref);
 				neigh->ah = mcast->ah;
drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -227,9 +227,10 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
 			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
 {
-	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+	u32 i = (obj & (table->num_obj - 1)) /
+		(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
 	int ret = 0;
 
 	mutex_lock(&table->mutex);
@@ -262,16 +263,18 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 	return ret;
 }
 
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
 {
-	int i;
+	u32 i;
+	u64 offset;
 
 	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
 
 	mutex_lock(&table->mutex);
 
 	if (--table->icm[i]->refcount == 0) {
-		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
+		mlx4_UNMAP_ICM(dev, table->virt + offset,
 			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
 		mlx4_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
@@ -280,9 +283,11 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 	mutex_unlock(&table->mutex);
 }
 
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
+void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
+		      dma_addr_t *dma_handle)
 {
-	int idx, offset, dma_offset, i;
+	int offset, dma_offset, i;
+	u64 idx;
 	struct mlx4_icm_chunk *chunk;
 	struct mlx4_icm *icm;
 	struct page *page = NULL;
@@ -292,7 +297,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han
 
 	mutex_lock(&table->mutex);
 
-	idx = (obj & (table->num_obj - 1)) * table->obj_size;
+	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
 	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
 	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
@@ -326,10 +331,11 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han
 }
 
 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			 int start, int end)
+			 u32 start, u32 end)
 {
 	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
-	int i, err;
+	int err;
+	u32 i;
 
 	for (i = start; i <= end; i += inc) {
 		err = mlx4_table_get(dev, table, i);
@@ -349,9 +355,9 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 }
 
 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			  int start, int end)
+			  u32 start, u32 end)
 {
-	int i;
+	u32 i;
 
 	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
 		mlx4_table_put(dev, table, i);
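The mlx4 change is entirely about 32-bit arithmetic: with int indexes, products such as i * MLX4_TABLE_CHUNK_SIZE wrap once the ICM tables grow large enough, which is what capped memory registration below 8TB. A standalone demonstration of the wrap and of widening one operand before the multiply (CHUNK_SIZE here mirrors the driver's 1 << 18; the index value is just an illustration):

#include <stdio.h>
#include <stdint.h>

#define CHUNK_SIZE (1 << 18)    /* 256 KB, as in MLX4_TABLE_CHUNK_SIZE */

int main(void)
{
        uint32_t i = 20000;     /* a large table index */

        /* 32-bit product: 20000 * 262144 = 5242880000 wraps past 2^32 */
        uint32_t bad = i * CHUNK_SIZE;

        /* widen first, then multiply: the full value survives */
        uint64_t good = (uint64_t) i * CHUNK_SIZE;

        printf("bad = %u, good = %llu\n", bad, (unsigned long long) good);
        return 0;
}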
drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -71,17 +71,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 			       gfp_t gfp_mask, int coherent);
 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
 
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			 int start, int end);
+			 u32 start, u32 end);
 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			  int start, int end);
+			  u32 start, u32 end);
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			u64 virt, int obj_size, u32 nobj, int reserved,
 			int use_lowmem, int use_coherent);
 void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
+void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
 
 static inline void mlx4_icm_first(struct mlx4_icm *icm,
 				  struct mlx4_icm_iter *iter)