Commit 3b13c168 authored by Pavel Begunkov, committed by Jens Axboe

percpu_ref: percpu_ref_tryget_live() version holding RCU

Add percpu_ref_tryget_live_rcu(), a version of percpu_ref_tryget_live()
where the caller is responsible for enclosing the call in an RCU
read-side section.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Acked-by: Dennis Zhou <dennis@kernel.org>
Link: https://lore.kernel.org/r/3066500d7a6eb3e03f10adf98b87fdb3b1c49db8.1634822969.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6549a874
@@ -266,6 +266,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	return percpu_ref_tryget_many(ref, 1);
 }
 
+/**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count;
+	bool ret = false;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	if (likely(__ref_is_percpu(ref, &percpu_count))) {
+		this_cpu_inc(*percpu_count);
+		ret = true;
+	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->data->count);
+	}
+	return ret;
+}
+
 /**
  * percpu_ref_tryget_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get
@@ -283,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned long __percpu *percpu_count;
 	bool ret = false;
 
 	rcu_read_lock();
-
-	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
-		ret = true;
-	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
-		ret = atomic_long_inc_not_zero(&ref->data->count);
-	}
-
+	ret = percpu_ref_tryget_live_rcu(ref);
 	rcu_read_unlock();
-
 	return ret;
 }
 
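
For context, a minimal sketch of how a caller might use the new helper. The struct my_entry, its RCU-protected list, and find_entry_get() below are hypothetical and not part of this patch; the point is that the lookup already runs inside rcu_read_lock()/rcu_read_unlock(), so the reference can be taken with percpu_ref_tryget_live_rcu() instead of percpu_ref_tryget_live(), which would otherwise enter a second, nested RCU read-side critical section.

#include <linux/percpu-refcount.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Hypothetical object whose lifetime is managed by a percpu_ref. */
struct my_entry {
	struct list_head	node;
	struct percpu_ref	ref;
	int			id;
};

/*
 * Hypothetical lookup: the list walk is already under an RCU read-side
 * critical section, so percpu_ref_tryget_live_rcu() piggybacks on it
 * rather than taking its own rcu_read_lock()/rcu_read_unlock() pair.
 */
static struct my_entry *find_entry_get(struct list_head *head, int id)
{
	struct my_entry *e, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(e, head, node) {
		if (e->id != id)
			continue;
		/* Succeeds only while @e->ref is live (not yet killed). */
		if (percpu_ref_tryget_live_rcu(&e->ref))
			found = e;
		break;
	}
	rcu_read_unlock();

	return found;
}

As with percpu_ref_tryget_live(), the try-get fails once the ref has been killed, and the caller drops a successful reference with percpu_ref_put().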