Commit 3b069c5d authored by Tejun Heo, committed by Linus Torvalds

IB/core: convert to idr_alloc()

Convert to the much saner new idr interface.

v2: Mike triggered WARN_ON() in idr_preload() because send_mad(),
    which may be used from non-process context, was calling
    idr_preload() unconditionally.  Preload iff @gfp_mask has
    __GFP_WAIT.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Sean Hefty <sean.hefty@intel.com>
Reported-by: "Marciniszyn, Mike" <mike.marciniszyn@intel.com>
Cc: Roland Dreier <roland@kernel.org>
Cc: Sean Hefty <sean.hefty@intel.com>
Cc: Hal Rosenstock <hal.rosenstock@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4ae42b0f
...@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) ...@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
static int cm_alloc_id(struct cm_id_private *cm_id_priv) static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{ {
unsigned long flags; unsigned long flags;
int ret, id; int id;
static int next_id; static int next_id;
do { idr_preload(GFP_KERNEL);
spin_lock_irqsave(&cm.lock, flags); spin_lock_irqsave(&cm.lock, flags);
ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
next_id, &id); id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
if (!ret) if (id >= 0)
next_id = ((unsigned) id + 1) & MAX_IDR_MASK; next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
spin_unlock_irqrestore(&cm.lock, flags);
} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); spin_unlock_irqrestore(&cm.lock, flags);
idr_preload_end();
cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
return ret; return id < 0 ? id : 0;
} }
static void cm_free_id(__be32 local_id) static void cm_free_id(__be32 local_id)
...@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void) ...@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
cm.remote_sidr_table = RB_ROOT; cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table); idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
idr_pre_get(&cm.local_id_table, GFP_KERNEL);
INIT_LIST_HEAD(&cm.timewait_list); INIT_LIST_HEAD(&cm.timewait_list);
ret = class_register(&cm_class); ret = class_register(&cm_class);
......
...@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv, ...@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
unsigned short snum) unsigned short snum)
{ {
struct rdma_bind_list *bind_list; struct rdma_bind_list *bind_list;
int port, ret; int ret;
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list) if (!bind_list)
return -ENOMEM; return -ENOMEM;
do { ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
ret = idr_get_new_above(ps, bind_list, snum, &port); if (ret < 0)
} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); goto err;
if (ret)
goto err1;
if (port != snum) {
ret = -EADDRNOTAVAIL;
goto err2;
}
bind_list->ps = ps; bind_list->ps = ps;
bind_list->port = (unsigned short) port; bind_list->port = (unsigned short)ret;
cma_bind_port(bind_list, id_priv); cma_bind_port(bind_list, id_priv);
return 0; return 0;
err2: err:
idr_remove(ps, port);
err1:
kfree(bind_list); kfree(bind_list);
return ret; return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
} }
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
......
...@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent) ...@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{ {
bool preload = gfp_mask & __GFP_WAIT;
unsigned long flags; unsigned long flags;
int ret, id; int ret, id;
retry: if (preload)
if (!idr_pre_get(&query_idr, gfp_mask)) idr_preload(gfp_mask);
return -ENOMEM;
spin_lock_irqsave(&idr_lock, flags); spin_lock_irqsave(&idr_lock, flags);
ret = idr_get_new(&query_idr, query, &id);
id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
spin_unlock_irqrestore(&idr_lock, flags); spin_unlock_irqrestore(&idr_lock, flags);
if (ret == -EAGAIN) if (preload)
goto retry; idr_preload_end();
if (ret) if (id < 0)
return ret; return id;
query->mad_buf->timeout_ms = timeout_ms; query->mad_buf->timeout_ms = timeout_ms;
query->mad_buf->context[0] = query; query->mad_buf->context[0] = query;
......
...@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx) ...@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
{ {
struct ib_ucm_context *ctx; struct ib_ucm_context *ctx;
int result;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL); ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx) if (!ctx)
...@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) ...@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
ctx->file = file; ctx->file = file;
INIT_LIST_HEAD(&ctx->events); INIT_LIST_HEAD(&ctx->events);
do { mutex_lock(&ctx_id_mutex);
result = idr_pre_get(&ctx_id_table, GFP_KERNEL); ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
if (!result) mutex_unlock(&ctx_id_mutex);
goto error; if (ctx->id < 0)
mutex_lock(&ctx_id_mutex);
result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
mutex_unlock(&ctx_id_mutex);
} while (result == -EAGAIN);
if (result)
goto error; goto error;
list_add_tail(&ctx->file_list, &file->ctxs); list_add_tail(&ctx->file_list, &file->ctxs);
......
...@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx) ...@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx)
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{ {
struct ucma_context *ctx; struct ucma_context *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) if (!ctx)
...@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) ...@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_LIST_HEAD(&ctx->mc_list); INIT_LIST_HEAD(&ctx->mc_list);
ctx->file = file; ctx->file = file;
do { mutex_lock(&mut);
ret = idr_pre_get(&ctx_idr, GFP_KERNEL); ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
if (!ret) mutex_unlock(&mut);
goto error; if (ctx->id < 0)
mutex_lock(&mut);
ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
mutex_unlock(&mut);
} while (ret == -EAGAIN);
if (ret)
goto error; goto error;
list_add_tail(&ctx->list, &file->ctx_list); list_add_tail(&ctx->list, &file->ctx_list);
...@@ -180,23 +172,15 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) ...@@ -180,23 +172,15 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{ {
struct ucma_multicast *mc; struct ucma_multicast *mc;
int ret;
mc = kzalloc(sizeof(*mc), GFP_KERNEL); mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc) if (!mc)
return NULL; return NULL;
do { mutex_lock(&mut);
ret = idr_pre_get(&multicast_idr, GFP_KERNEL); mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
if (!ret) mutex_unlock(&mut);
goto error; if (mc->id < 0)
mutex_lock(&mut);
ret = idr_get_new(&multicast_idr, mc, &mc->id);
mutex_unlock(&mut);
} while (ret == -EAGAIN);
if (ret)
goto error; goto error;
mc->ctx = ctx; mc->ctx = ctx;
......
...@@ -125,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj) ...@@ -125,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{ {
int ret; int ret;
retry: idr_preload(GFP_KERNEL);
if (!idr_pre_get(idr, GFP_KERNEL))
return -ENOMEM;
spin_lock(&ib_uverbs_idr_lock); spin_lock(&ib_uverbs_idr_lock);
ret = idr_get_new(idr, uobj, &uobj->id);
spin_unlock(&ib_uverbs_idr_lock);
if (ret == -EAGAIN) ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
goto retry; if (ret >= 0)
uobj->id = ret;
return ret; spin_unlock(&ib_uverbs_idr_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
} }
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj) void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment