mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-24 05:04:00 +08:00
kcsan: Reduce get_ctx() uses in kcsan_found_watchpoint()
There are a number of get_ctx() calls that are close to each other, which results in poor codegen (repeated preempt_count loads). Specifically in kcsan_found_watchpoint() (even though it's a slow-path) it is beneficial to keep the race-window small until the watchpoint has actually been consumed to avoid missed opportunities to report a race. Let's clean it up a bit before we add more code in kcsan_found_watchpoint(). Signed-off-by: Marco Elver <elver@google.com> Acked-by: Mark Rutland <mark.rutland@arm.com> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
parent
e675d2533a
commit
08cac60494
@ -301,9 +301,9 @@ static inline void reset_kcsan_skip(void)
|
||||
this_cpu_write(kcsan_skip, skip_count);
|
||||
}
|
||||
|
||||
static __always_inline bool kcsan_is_enabled(void)
|
||||
static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
|
||||
{
|
||||
return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
|
||||
return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
|
||||
}
|
||||
|
||||
/* Introduce delay depending on context and configuration. */
|
||||
@ -353,10 +353,17 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
|
||||
atomic_long_t *watchpoint,
|
||||
long encoded_watchpoint)
|
||||
{
|
||||
struct kcsan_ctx *ctx = get_ctx();
|
||||
unsigned long flags;
|
||||
bool consumed;
|
||||
|
||||
if (!kcsan_is_enabled())
|
||||
/*
|
||||
* We know a watchpoint exists. Let's try to keep the race-window
|
||||
* between here and finally consuming the watchpoint below as small as
|
||||
* possible -- avoid unneccessarily complex code until consumed.
|
||||
*/
|
||||
|
||||
if (!kcsan_is_enabled(ctx))
|
||||
return;
|
||||
|
||||
/*
|
||||
@ -364,14 +371,12 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
|
||||
* reporting a race where e.g. the writer set up the watchpoint, but the
|
||||
* reader has access_mask!=0, we have to ignore the found watchpoint.
|
||||
*/
|
||||
if (get_ctx()->access_mask != 0)
|
||||
if (ctx->access_mask)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Consume the watchpoint as soon as possible, to minimize the chances
|
||||
* of !consumed. Consuming the watchpoint must always be guarded by
|
||||
* kcsan_is_enabled() check, as otherwise we might erroneously
|
||||
* triggering reports when disabled.
|
||||
* Consuming the watchpoint must be guarded by kcsan_is_enabled() to
|
||||
* avoid erroneously triggering reports if the context is disabled.
|
||||
*/
|
||||
consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);
|
||||
|
||||
@ -409,6 +414,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
|
||||
unsigned long access_mask;
|
||||
enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
|
||||
unsigned long ua_flags = user_access_save();
|
||||
struct kcsan_ctx *ctx = get_ctx();
|
||||
unsigned long irq_flags = 0;
|
||||
|
||||
/*
|
||||
@ -417,7 +423,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
|
||||
*/
|
||||
reset_kcsan_skip();
|
||||
|
||||
if (!kcsan_is_enabled())
|
||||
if (!kcsan_is_enabled(ctx))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
@ -489,7 +495,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
|
||||
* Re-read value, and check if it is as expected; if not, we infer a
|
||||
* racy access.
|
||||
*/
|
||||
access_mask = get_ctx()->access_mask;
|
||||
access_mask = ctx->access_mask;
|
||||
new = 0;
|
||||
switch (size) {
|
||||
case 1:
|
||||
|
Loading…
Reference in New Issue
Block a user