Commit e41c4253 authored by Shay Drory, committed by Jason Gunthorpe

IB/mad: Change atomics to refcount API

The refcount API provides better safety than the plain atomics API. Therefore,
convert the MAD reference counts and their helpers from atomic_t to refcount_t.

Link: https://lore.kernel.org/r/20200621104738.54850-4-leon@kernel.org
Signed-off-by: Shay Drory <shayd@mellanox.com>
Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b9af0e2d
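
The change applied throughout the diff below is a mechanical one-to-one substitution: atomic_set() becomes refcount_set(), atomic_inc() becomes refcount_inc(), atomic_inc_not_zero() becomes refcount_inc_not_zero(), and atomic_dec_and_test() becomes refcount_dec_and_test(). Unlike a bare atomic_t, refcount_t saturates and WARNs on overflow and on increment-from-zero instead of silently wrapping, which is the added safety the commit message refers to. A minimal sketch of the pattern follows; example_obj and its helpers are hypothetical stand-ins for ib_mad_agent_private and mad_rmpp_recv, while the refcount and completion calls are the real kernel APIs.

/* Minimal sketch of the atomic_t -> refcount_t conversion pattern. */
#include <linux/completion.h>
#include <linux/refcount.h>

struct example_obj {
	refcount_t refcount;			/* was: atomic_t refcount; */
	struct completion comp;
};

static void example_obj_init(struct example_obj *obj)
{
	refcount_set(&obj->refcount, 1);	/* was: atomic_set(..., 1) */
	init_completion(&obj->comp);
}

static void example_obj_get(struct example_obj *obj)
{
	refcount_inc(&obj->refcount);		/* was: atomic_inc() */
}

static bool example_obj_get_not_zero(struct example_obj *obj)
{
	/* was: atomic_inc_not_zero(); refuses to take a new reference
	 * once the count has already dropped to zero (RCU lookup path).
	 */
	return refcount_inc_not_zero(&obj->refcount);
}

static void example_obj_put(struct example_obj *obj)
{
	/* was: atomic_dec_and_test(); the final put wakes the thread
	 * waiting on the completion during teardown.
	 */
	if (refcount_dec_and_test(&obj->refcount))
		complete(&obj->comp);
}
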
@@ -402,7 +402,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 	INIT_WORK(&mad_agent_priv->local_work, local_completions);
-	atomic_set(&mad_agent_priv->refcount, 1);
+	refcount_set(&mad_agent_priv->refcount, 1);
 	init_completion(&mad_agent_priv->comp);
 
 	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
@@ -484,7 +484,7 @@ EXPORT_SYMBOL(ib_register_mad_agent);
 
 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 {
-	if (atomic_dec_and_test(&mad_agent_priv->refcount))
+	if (refcount_dec_and_test(&mad_agent_priv->refcount))
 		complete(&mad_agent_priv->comp);
 }
 
@@ -718,7 +718,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 			 * Reference MAD agent until receive
 			 * side of local completion handled
 			 */
-			atomic_inc(&mad_agent_priv->refcount);
+			refcount_inc(&mad_agent_priv->refcount);
 		} else
 			kfree(mad_priv);
 		break;
@@ -758,7 +758,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		local->return_wc_byte_len = mad_size;
 	}
 	/* Reference MAD agent until send side of local completion handled */
-	atomic_inc(&mad_agent_priv->refcount);
+	refcount_inc(&mad_agent_priv->refcount);
 	/* Queue local completion to local list */
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
@@ -916,7 +916,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 	}
 
 	mad_send_wr->send_buf.mad_agent = mad_agent;
-	atomic_inc(&mad_agent_priv->refcount);
+	refcount_inc(&mad_agent_priv->refcount);
 	return &mad_send_wr->send_buf;
 }
 EXPORT_SYMBOL(ib_create_send_mad);
@@ -1131,7 +1131,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 		mad_send_wr->status = IB_WC_SUCCESS;
 
 		/* Reference MAD agent until send completes */
-		atomic_inc(&mad_agent_priv->refcount);
+		refcount_inc(&mad_agent_priv->refcount);
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		list_add_tail(&mad_send_wr->agent_list,
 			      &mad_agent_priv->send_list);
@@ -1554,7 +1554,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
 		rcu_read_lock();
 		mad_agent = xa_load(&ib_mad_clients, hi_tid);
-		if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
+		if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
 			mad_agent = NULL;
 		rcu_read_unlock();
 	} else {
@@ -1606,7 +1606,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 			}
 		}
 		if (mad_agent)
-			atomic_inc(&mad_agent->refcount);
+			refcount_inc(&mad_agent->refcount);
 out:
 		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 	}
...
@@ -103,7 +103,7 @@ struct ib_mad_agent_private {
 	struct work_struct local_work;
 	struct list_head rmpp_list;
 
-	atomic_t refcount;
+	refcount_t refcount;
 	union {
 		struct completion comp;
 		struct rcu_head rcu;
...
@@ -52,7 +52,7 @@ struct mad_rmpp_recv {
 	struct completion comp;
 	enum rmpp_state state;
 	spinlock_t lock;
-	atomic_t refcount;
+	refcount_t refcount;
 
 	struct ib_ah *ah;
 	struct ib_mad_recv_wc *rmpp_wc;
@@ -73,7 +73,7 @@ struct mad_rmpp_recv {
 
 static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
 {
-	if (atomic_dec_and_test(&rmpp_recv->refcount))
+	if (refcount_dec_and_test(&rmpp_recv->refcount))
 		complete(&rmpp_recv->comp);
 }
 
@@ -305,7 +305,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
 	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
 	spin_lock_init(&rmpp_recv->lock);
 	rmpp_recv->state = RMPP_STATE_ACTIVE;
-	atomic_set(&rmpp_recv->refcount, 1);
+	refcount_set(&rmpp_recv->refcount, 1);
 
 	rmpp_recv->rmpp_wc = mad_recv_wc;
 	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
@@ -357,7 +357,7 @@ acquire_rmpp_recv(struct ib_mad_agent_private *agent,
 	spin_lock_irqsave(&agent->lock, flags);
 	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
 	if (rmpp_recv)
-		atomic_inc(&rmpp_recv->refcount);
+		refcount_inc(&rmpp_recv->refcount);
 	spin_unlock_irqrestore(&agent->lock, flags);
 	return rmpp_recv;
 }
@@ -553,7 +553,7 @@ start_rmpp(struct ib_mad_agent_private *agent,
 		destroy_rmpp_recv(rmpp_recv);
 		return continue_rmpp(agent, mad_recv_wc);
 	}
-	atomic_inc(&rmpp_recv->refcount);
+	refcount_inc(&rmpp_recv->refcount);
 
 	if (get_last_flag(&mad_recv_wc->recv_buf)) {
 		rmpp_recv->state = RMPP_STATE_COMPLETE;
...
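
One hunk above is worth a note: find_mad_agent() looks the agent up in the ib_mad_clients xarray under rcu_read_lock(), so refcount_inc_not_zero() is used to avoid taking a reference on an agent whose count has already reached zero. A self-contained sketch of that lookup pattern follows; example_agent, example_clients and example_lookup are illustrative names, while the xarray, RCU and refcount calls are the real kernel APIs.

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/xarray.h>

/* Hypothetical agent type standing in for ib_mad_agent_private. */
struct example_agent {
	refcount_t refcount;
};

static DEFINE_XARRAY(example_clients);	/* stands in for ib_mad_clients */

static struct example_agent *example_lookup(unsigned long hi_tid)
{
	struct example_agent *agent;

	rcu_read_lock();
	agent = xa_load(&example_clients, hi_tid);
	/*
	 * refcount_inc_not_zero() fails once the count has hit zero, so a
	 * concurrently unregistered agent is never resurrected; the old
	 * atomic_inc_not_zero() had the same semantics, but refcount_t
	 * additionally saturates and warns on over/underflow.
	 */
	if (agent && !refcount_inc_not_zero(&agent->refcount))
		agent = NULL;
	rcu_read_unlock();

	return agent;
}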