
percpu_ref: percpu_ref_tryget_live() version holding RCU

Add percpu_ref_tryget_live_rcu(), a version of percpu_ref_tryget_live()
for which the caller is responsible for enclosing the call in an RCU
read-side critical section.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Acked-by: Dennis Zhou <dennis@kernel.org>
Link: https://lore.kernel.org/r/3066500d7a6eb3e03f10adf98b87fdb3b1c49db8.1634822969.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2021-10-21 14:30:51 +01:00 committed by Jens Axboe
parent 6549a874fb
commit 3b13c16818

include/linux/percpu-refcount.h

@@ -266,6 +266,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	return percpu_ref_tryget_many(ref, 1);
 }
 
+/**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count;
+	bool ret = false;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	if (likely(__ref_is_percpu(ref, &percpu_count))) {
+		this_cpu_inc(*percpu_count);
+		ret = true;
+	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->data->count);
+	}
+	return ret;
+}
+
 /**
  * percpu_ref_tryget_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get
@@ -283,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned long __percpu *percpu_count;
 	bool ret = false;
 
 	rcu_read_lock();
-
-	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
-		ret = true;
-	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
-		ret = atomic_long_inc_not_zero(&ref->data->count);
-	}
-
+	ret = percpu_ref_tryget_live_rcu(ref);
 	rcu_read_unlock();
-
 	return ret;
 }
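
For context, a minimal usage sketch of the new helper (the my_ctx structure, the slot argument and my_ctx_get_live() below are hypothetical and not part of this patch): a caller that is already inside an RCU read-side critical section, for example while dereferencing an RCU-protected pointer, can take a live reference without the nested rcu_read_lock()/rcu_read_unlock() pair that percpu_ref_tryget_live() would otherwise perform.

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>

/* Hypothetical object protected by a percpu_ref; illustration only. */
struct my_ctx {
        struct percpu_ref refs;
        /* ... payload ... */
};

/* Look up the object in an RCU-protected slot and take a live reference. */
static struct my_ctx *my_ctx_get_live(struct my_ctx __rcu **slot)
{
        struct my_ctx *ctx;

        rcu_read_lock();
        ctx = rcu_dereference(*slot);
        /*
         * We already hold the RCU read lock, so the _rcu variant skips
         * the redundant rcu_read_lock()/rcu_read_unlock() pair that
         * percpu_ref_tryget_live() would take internally.
         */
        if (ctx && !percpu_ref_tryget_live_rcu(&ctx->refs))
                ctx = NULL;
        rcu_read_unlock();

        return ctx;     /* caller drops it with percpu_ref_put(&ctx->refs) */
}

Behaviour is otherwise identical to calling percpu_ref_tryget_live(): the fast path is still a single this_cpu_inc() while the ref is in percpu mode, and the WARN_ON_ONCE(!rcu_read_lock_held()) in the new helper catches callers that forget to take RCU themselves.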