// SPDX-License-Identifier: GPL-2.0

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at REFCOUNT_SATURATED and will not
 * move once there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 */
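
/*
 * A minimal lifecycle sketch of the API this file backs (callers use the
 * refcount_*() wrappers from <linux/refcount.h>, which resolve to the
 * *_checked variants below when CONFIG_REFCOUNT_FULL=y). The 'struct foo'
 * type and its helpers are hypothetical, for illustration only:
 *
 *	struct foo {
 *		refcount_t ref;
 *	};
 *
 *	struct foo *foo_create(void)
 *	{
 *		struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (p)
 *			refcount_set(&p->ref, 1);	// initial reference
 *		return p;
 *	}
 *
 *	void foo_get(struct foo *p)	// caller must already hold a ref
 *	{
 *		refcount_inc(&p->ref);	// relaxed; ordering comes from
 *					// however 'p' was obtained
 *	}
 *
 *	void foo_put(struct foo *p)
 *	{
 *		if (refcount_dec_and_test(&p->ref))
 *			kfree(p);	// release + acquire ordering makes
 *					// prior accesses happen before free
 *	}
 */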

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

/**
 * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero_checked(int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		new = val + i;
		if (new < val)
			new = REFCOUNT_SATURATED;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == REFCOUNT_SATURATED,
		  "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero_checked);

/**
 * refcount_add_checked - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add_checked(int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add_checked);
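
/*
 * A sketch of the (rare) batched-get case this function is meant for; the
 * 'struct foo' object and queue_foo_work() helper are hypothetical. One
 * reference is taken per worker in a single operation; the matching batched
 * release is shown after refcount_sub_and_test_checked() below.
 *
 *	void foo_submit_to_workers(struct foo *p, int nr_workers)
 *	{
 *		refcount_add(nr_workers, &p->ref);	// one ref per worker
 *		queue_foo_work(p, nr_workers);		// hypothetical
 *	}
 */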

/**
 * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero_checked(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == REFCOUNT_SATURATED,
		  "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero_checked);
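
/*
 * A sketch of the classic RCU lookup pattern this function enables; the
 * 'foo_tree' radix tree and 'struct foo' are hypothetical:
 *
 *	struct foo *foo_lookup(unsigned long key)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		p = radix_tree_lookup(&foo_tree, key);
 *		if (p && !refcount_inc_not_zero(&p->ref))
 *			p = NULL;	// found a dying object; the control
 *					// dependency stops us modifying it
 *		rcu_read_unlock();
 *
 *		return p;
 *	}
 */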

/**
 * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc_checked(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc_checked);

/**
 * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test_checked(int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	if (!new) {
		smp_acquire__after_ctrl_dep();
		return true;
	}
	return false;
}
EXPORT_SYMBOL(refcount_sub_and_test_checked);
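
/*
 * A sketch of dropping the batch of references taken in the
 * refcount_add_checked() example above (hypothetical 'struct foo'):
 *
 *	void foo_cancel_workers(struct foo *p, int nr_workers)
 *	{
 *		if (refcount_sub_and_test(nr_workers, &p->ref))
 *			kfree(p);	// we dropped the last references
 *	}
 */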

/**
 * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test_checked(refcount_t *r)
{
	return refcount_sub_and_test_checked(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test_checked);

/**
 * refcount_dec_checked - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec_checked(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec_checked);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
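
/*
 * A sketch of the try-delete pattern (the cache and its foo_unhash() helper
 * are hypothetical): the object is destroyed only if we would have been the
 * sole owner, otherwise it stays put and keeps its reference.
 *
 *	bool foo_try_delete(struct foo *p)
 *	{
 *		if (!refcount_dec_if_one(&p->ref))
 *			return false;	// other references exist
 *
 *		foo_unhash(p);		// count hit 0; inc_not_zero() lookups
 *					// can no longer take a new reference
 *		kfree(p);
 *		return true;
 *	}
 */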

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
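
/*
 * A sketch of a final put that must also unlink the object from a mutex
 * protected list; 'foo_lock', 'p->node' and 'struct foo' are hypothetical:
 *
 *	void foo_put(struct foo *p)
 *	{
 *		if (refcount_dec_and_mutex_lock(&p->ref, &foo_lock)) {
 *			list_del(&p->node);	// unlink while holding the
 *						// lock, so no new lookups
 *			mutex_unlock(&foo_lock);
 *			kfree(p);
 *		}
 *	}
 */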

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);