mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-21 10:05:00 +08:00
ce5c31db36
At the moment, UBSAN report will be serialized using a spin_lock(). On RT-systems, spinlocks are turned to rt_spin_lock and may sleep. This will result to the following splat if the undefined behavior is in a context that can sleep: BUG: sleeping function called from invalid context at /src/linux/kernel/locking/rtmutex.c:968 in_atomic(): 1, irqs_disabled(): 128, pid: 3447, name: make 1 lock held by make/3447: #0: 000000009a966332 (&mm->mmap_sem){++++}, at: do_page_fault+0x140/0x4f8 irq event stamp: 6284 hardirqs last enabled at (6283): [<ffff000011326520>] _raw_spin_unlock_irqrestore+0x90/0xa0 hardirqs last disabled at (6284): [<ffff0000113262b0>] _raw_spin_lock_irqsave+0x30/0x78 softirqs last enabled at (2430): [<ffff000010088ef8>] fpsimd_restore_current_state+0x60/0xe8 softirqs last disabled at (2427): [<ffff000010088ec0>] fpsimd_restore_current_state+0x28/0xe8 Preemption disabled at: [<ffff000011324a4c>] rt_mutex_futex_unlock+0x4c/0xb0 CPU: 3 PID: 3447 Comm: make Tainted: G W 5.2.14-rt7-01890-ge6e057589653 #911 Call trace: dump_backtrace+0x0/0x148 show_stack+0x14/0x20 dump_stack+0xbc/0x104 ___might_sleep+0x154/0x210 rt_spin_lock+0x68/0xa0 ubsan_prologue+0x30/0x68 handle_overflow+0x64/0xe0 __ubsan_handle_add_overflow+0x10/0x18 __lock_acquire+0x1c28/0x2a28 lock_acquire+0xf0/0x370 _raw_spin_lock_irqsave+0x58/0x78 rt_mutex_futex_unlock+0x4c/0xb0 rt_spin_unlock+0x28/0x70 get_page_from_freelist+0x428/0x2b60 __alloc_pages_nodemask+0x174/0x1708 alloc_pages_vma+0x1ac/0x238 __handle_mm_fault+0x4ac/0x10b0 handle_mm_fault+0x1d8/0x3b0 do_page_fault+0x1c8/0x4f8 do_translation_fault+0xb8/0xe0 do_mem_abort+0x3c/0x98 el0_da+0x20/0x24 The spin_lock() will protect against multiple CPUs to output a report together, I guess to prevent them from being interleaved. However, they can still interleave with other messages (and even splat from __might_sleep). So the lock usefulness seems pretty limited. 
Rather than trying to accommodate RT-systems by switching to a raw_spin_lock(), the lock is now completely dropped. Link: http://lkml.kernel.org/r/20190920100835.14999-1-julien.grall@arm.com Signed-off-by: Julien Grall <julien.grall@arm.com> Reported-by: Andre Przywara <andre.przywara@arm.com> Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Cc: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
424 lines
10 KiB
C
424 lines
10 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* UBSAN error reporting functions
|
|
*
|
|
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
|
|
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
|
|
*/
|
|
|
|
#include <linux/bitops.h>
|
|
#include <linux/bug.h>
|
|
#include <linux/ctype.h>
|
|
#include <linux/init.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/types.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/uaccess.h>
|
|
|
|
#include "ubsan.h"
|
|
|
|
/*
 * Human-readable names for the kinds of type check the compiler can
 * instrument, indexed by the type_check_kind field of a type-mismatch
 * report.  "downcast of" appears twice on purpose — presumably the
 * compiler emits two distinct downcast check kinds that print the
 * same; verify against the UBSAN handler ABI.
 */
const char *type_check_kinds[] = {
	"load of",
	"store to",
	"reference binding to",
	"member access within",
	"member call on",
	"constructor call on",
	"downcast of",
	"downcast of"
};
|
|
|
|
/*
 * Bit 31 of source_location::reported doubles as a "this location was
 * already reported" flag (set atomically in was_reported()).  The bit
 * op works on an unsigned long, so on 64-bit big-endian kernels the
 * flag bit appears to land in the column half of the line/column pair,
 * elsewhere in the line half — mask it out of whichever field it
 * overlaps before printing.  TODO: confirm against the layout of
 * struct source_location in ubsan.h.
 */
#define REPORTED_BIT 31

#if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN)
#define COLUMN_MASK (~(1U << REPORTED_BIT))
#define LINE_MASK (~0U)
#else
#define COLUMN_MASK (~0U)
#define LINE_MASK (~(1U << REPORTED_BIT))
#endif

/* Size of the on-stack buffers used to format a single value. */
#define VALUE_LENGTH 40
|
/*
 * Atomically test-and-set the REPORTED flag for this source location,
 * so each location is reported at most once.  Returns true if a report
 * was already emitted for it.
 */
static bool was_reported(struct source_location *location)
{
	return test_and_set_bit(REPORTED_BIT, &location->reported);
}
|
|
|
|
/* Print "<prefix> file:line:column", masking out the REPORTED flag bit. */
static void print_source_location(const char *prefix,
				struct source_location *loc)
{
	pr_err("%s %s:%d:%d\n", prefix, loc->file_name,
		loc->line & LINE_MASK, loc->column & COLUMN_MASK);
}
|
|
|
|
/*
 * Decide whether to skip a report: either this task is already inside
 * a UBSAN report (current->in_ubsan, bumped in ubsan_prologue()) or
 * this source location was reported before.  Note that the call has
 * the side effect of marking the location as reported.
 */
static bool suppress_report(struct source_location *loc)
{
	return current->in_ubsan || was_reported(loc);
}
|
|
|
|
/* True if the descriptor describes an integer type. */
static bool type_is_int(struct type_descriptor *type)
{
	return type->type_kind == type_kind_int;
}
|
|
|
|
/*
 * Bit 0 of type_info encodes signedness.  Only meaningful for integer
 * types — warns if called on anything else.
 */
static bool type_is_signed(struct type_descriptor *type)
{
	WARN_ON(!type_is_int(type));
	return  type->type_info & 1;
}
|
|
|
|
/* The upper bits of type_info hold log2 of the type's width in bits. */
static unsigned type_bit_width(struct type_descriptor *type)
{
	return 1 << (type->type_info >> 1);
}
|
|
|
|
/*
 * An integer value is passed "inline" — encoded in the handler's void *
 * argument itself rather than by reference — when its type is no wider
 * than an unsigned long.  Warns if the type is not an integer type.
 */
static bool is_inline_int(struct type_descriptor *type)
{
	WARN_ON(!type_is_int(type));

	return type_bit_width(type) <= sizeof(unsigned long) * 8;
}
|
|
|
|
/*
 * Decode a handler's value argument as a signed integer.
 *
 * Values whose type fits in an unsigned long are passed inline in the
 * pointer itself: shift left then arithmetic-shift right to sign-extend
 * from the type's actual bit width up to s_max.  Wider values are
 * passed by reference.
 */
static s_max get_signed_val(struct type_descriptor *type, void *val)
{
	if (is_inline_int(type)) {
		unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
		unsigned long ulong_val = (unsigned long)val;

		return ((s_max)ulong_val) << extra_bits >> extra_bits;
	}

	/* 64-bit value passed by reference */
	if (type_bit_width(type) == 64)
		return *(s64 *)val;

	/* anything wider is stored as a full s_max */
	return *(s_max *)val;
}
|
|
|
|
/* True for a signed value that decodes to a negative number. */
static bool val_is_negative(struct type_descriptor *type, void *val)
{
	return type_is_signed(type) && get_signed_val(type, val) < 0;
}
|
|
|
|
static u_max get_unsigned_val(struct type_descriptor *type, void *val)
|
|
{
|
|
if (is_inline_int(type))
|
|
return (unsigned long)val;
|
|
|
|
if (type_bit_width(type) == 64)
|
|
return *(u64 *)val;
|
|
|
|
return *(u_max *)val;
|
|
}
|
|
|
|
/*
 * Format @value into @str (at most @size bytes; scnprintf guarantees
 * NUL termination).  Only integer types are handled — for other type
 * kinds the buffer is left untouched.  128-bit values are printed as
 * hex because printk has no native 128-bit format specifier.
 */
static void val_to_string(char *str, size_t size, struct type_descriptor *type,
			void *value)
{
	if (type_is_int(type)) {
		if (type_bit_width(type) == 128) {
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
			u_max val = get_unsigned_val(type, value);

			scnprintf(str, size, "0x%08x%08x%08x%08x",
				(u32)(val >> 96),
				(u32)(val >> 64),
				(u32)(val >> 32),
				(u32)(val));
#else
			/* 128-bit value on an arch without int128 support */
			WARN_ON(1);
#endif
		} else if (type_is_signed(type)) {
			scnprintf(str, size, "%lld",
				(s64)get_signed_val(type, value));
		} else {
			scnprintf(str, size, "%llu",
				(u64)get_unsigned_val(type, value));
		}
	}
}
|
|
|
|
/*
 * Begin a UBSAN report: bump the per-task recursion guard (checked by
 * suppress_report()) and print the banner plus source location.
 *
 * Deliberately lock-free so it is safe from any context, including
 * atomic context on RT kernels; reports from different CPUs may
 * interleave in the log (see the commit message above).
 */
static void ubsan_prologue(struct source_location *location)
{
	current->in_ubsan++;

	pr_err("========================================"
		"========================================\n");
	print_source_location("UBSAN: Undefined behaviour in", location);
}
|
|
|
|
/*
 * Finish a UBSAN report: dump the call stack, print the closing banner
 * and drop the recursion guard taken in ubsan_prologue().
 */
static void ubsan_epilogue(void)
{
	dump_stack();
	pr_err("========================================"
		"========================================\n");

	current->in_ubsan--;
}
|
|
|
|
/*
 * Common reporting path for the add/sub/mul overflow handlers.
 * @op is the operator character ('+', '-', '*') used in the message.
 */
static void handle_overflow(struct overflow_data *data, void *lhs,
			void *rhs, char op)
{

	struct type_descriptor *type = data->type;
	char lhs_val_str[VALUE_LENGTH];
	char rhs_val_str[VALUE_LENGTH];

	/* Skip if this location was already reported or we are recursing. */
	if (suppress_report(&data->location))
		return;

	ubsan_prologue(&data->location);

	val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
	val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
	pr_err("%s integer overflow:\n",
		type_is_signed(type) ? "signed" : "unsigned");
	pr_err("%s %c %s cannot be represented in type %s\n",
		lhs_val_str,
		op,
		rhs_val_str,
		type->type_name);

	ubsan_epilogue();
}
|
|
|
|
/* Compiler hook: an instrumented addition overflowed. */
void __ubsan_handle_add_overflow(struct overflow_data *data,
				void *lhs, void *rhs)
{

	handle_overflow(data, lhs, rhs, '+');
}
EXPORT_SYMBOL(__ubsan_handle_add_overflow);
|
|
|
|
/* Compiler hook: an instrumented subtraction overflowed. */
void __ubsan_handle_sub_overflow(struct overflow_data *data,
				void *lhs, void *rhs)
{
	handle_overflow(data, lhs, rhs, '-');
}
EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
|
|
|
|
/* Compiler hook: an instrumented multiplication overflowed. */
void __ubsan_handle_mul_overflow(struct overflow_data *data,
				void *lhs, void *rhs)
{
	handle_overflow(data, lhs, rhs, '*');
}
EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
|
|
|
|
/*
 * Compiler hook: negation of @old_val overflowed (e.g. -INT_MIN for a
 * signed type).
 */
void __ubsan_handle_negate_overflow(struct overflow_data *data,
				void *old_val)
{
	char old_val_str[VALUE_LENGTH];

	if (suppress_report(&data->location))
		return;

	ubsan_prologue(&data->location);

	val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);

	pr_err("negation of %s cannot be represented in type %s:\n",
		old_val_str, data->type->type_name);

	ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
|
|
|
|
|
|
/*
 * Compiler hook: division/remainder cannot be performed.  A signed rhs
 * of -1 means the <most negative value> / -1 overflow case; any other
 * trigger must have been a division by zero.
 */
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
				void *lhs, void *rhs)
{
	char rhs_val_str[VALUE_LENGTH];

	if (suppress_report(&data->location))
		return;

	ubsan_prologue(&data->location);

	val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);

	if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1)
		pr_err("division of %s by -1 cannot be represented in type %s\n",
			rhs_val_str, data->type->type_name);
	else
		pr_err("division by zero\n");

	ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
|
|
|
|
/* Report an access through a NULL pointer. */
static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
{
	if (suppress_report(data->location))
		return;

	ubsan_prologue(data->location);

	pr_err("%s null pointer of type %s\n",
		type_check_kinds[data->type_check_kind],
		data->type->type_name);

	ubsan_epilogue();
}
|
|
|
|
/* Report an access through a pointer with insufficient alignment. */
static void handle_misaligned_access(struct type_mismatch_data_common *data,
				unsigned long ptr)
{
	if (suppress_report(data->location))
		return;

	ubsan_prologue(data->location);

	pr_err("%s misaligned address %p for type %s\n",
		type_check_kinds[data->type_check_kind],
		(void *)ptr, data->type->type_name);
	pr_err("which requires %ld byte alignment\n", data->alignment);

	ubsan_epilogue();
}
|
|
|
|
/* Report an access to an object too small for the accessed type. */
static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
					unsigned long ptr)
{
	if (suppress_report(data->location))
		return;

	ubsan_prologue(data->location);
	pr_err("%s address %p with insufficient space\n",
		type_check_kinds[data->type_check_kind],
		(void *) ptr);
	pr_err("for an object of type %s\n", data->type->type_name);
	ubsan_epilogue();
}
|
|
|
|
/*
 * Dispatch a type-mismatch report to the specific handler implied by
 * the faulting pointer: NULL dereference, misaligned access, or an
 * object too small for the access (checked in that order).
 *
 * The user-access state is saved and restored around the report so
 * the check is safe even when instrumentation fires while a
 * user-space access window is open — the handlers end up in printk
 * and other generic code.
 */
static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
				unsigned long ptr)
{
	unsigned long flags = user_access_save();

	if (!ptr)
		handle_null_ptr_deref(data);
	else if (data->alignment && !IS_ALIGNED(ptr, data->alignment))
		handle_misaligned_access(data, ptr);
	else
		handle_object_size_mismatch(data, ptr);

	user_access_restore(flags);
}
|
|
|
|
void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
|
|
void *ptr)
|
|
{
|
|
struct type_mismatch_data_common common_data = {
|
|
.location = &data->location,
|
|
.type = data->type,
|
|
.alignment = data->alignment,
|
|
.type_check_kind = data->type_check_kind
|
|
};
|
|
|
|
ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
|
|
}
|
|
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
|
|
|
|
void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
|
|
void *ptr)
|
|
{
|
|
|
|
struct type_mismatch_data_common common_data = {
|
|
.location = &data->location,
|
|
.type = data->type,
|
|
.alignment = 1UL << data->log_alignment,
|
|
.type_check_kind = data->type_check_kind
|
|
};
|
|
|
|
ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
|
|
}
|
|
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
|
|
|
|
/* Compiler hook: an array index was out of range for its array type. */
void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
{
	char index_str[VALUE_LENGTH];

	if (suppress_report(&data->location))
		return;

	ubsan_prologue(&data->location);

	val_to_string(index_str, sizeof(index_str), data->index_type, index);
	pr_err("index %s is out of range for type %s\n", index_str,
		data->array_type->type_name);
	ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
|
|
|
|
/*
 * Compiler hook: an undefined shift — negative exponent, exponent >=
 * width of the left-hand type, left shift of a negative value, or a
 * left shift whose result does not fit the type (checked in that
 * order).  The user-access state is saved up front and restored via
 * the "out" label on every path, including the suppressed one.
 */
void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
					void *lhs, void *rhs)
{
	struct type_descriptor *rhs_type = data->rhs_type;
	struct type_descriptor *lhs_type = data->lhs_type;
	char rhs_str[VALUE_LENGTH];
	char lhs_str[VALUE_LENGTH];
	unsigned long ua_flags = user_access_save();

	if (suppress_report(&data->location))
		goto out;

	ubsan_prologue(&data->location);

	val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
	val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);

	if (val_is_negative(rhs_type, rhs))
		pr_err("shift exponent %s is negative\n", rhs_str);

	else if (get_unsigned_val(rhs_type, rhs) >=
		type_bit_width(lhs_type))
		pr_err("shift exponent %s is too large for %u-bit type %s\n",
			rhs_str,
			type_bit_width(lhs_type),
			lhs_type->type_name);
	else if (val_is_negative(lhs_type, lhs))
		pr_err("left shift of negative value %s\n",
			lhs_str);
	else
		pr_err("left shift of %s by %s places cannot be"
			" represented in type %s\n",
			lhs_str, rhs_str,
			lhs_type->type_name);

	ubsan_epilogue();
out:
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
|
|
|
|
|
|
/*
 * Compiler hook: control reached a __builtin_unreachable().  Never
 * suppressed, and ends in panic() since execution cannot legitimately
 * continue past this point.
 */
void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
{
	ubsan_prologue(&data->location);
	pr_err("calling __builtin_unreachable()\n");
	ubsan_epilogue();
	panic("can't return from __builtin_unreachable()");
}
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
|
|
|
|
/*
 * Compiler hook: a load produced a bit pattern that is not a valid
 * value for the destination type (e.g. a bool or enum out of range —
 * per UBSAN semantics; the exact set depends on the compiler).
 */
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
				void *val)
{
	char val_str[VALUE_LENGTH];

	if (suppress_report(&data->location))
		return;

	ubsan_prologue(&data->location);

	val_to_string(val_str, sizeof(val_str), data->type, val);

	pr_err("load of value %s is not a valid value for type %s\n",
		val_str, data->type->type_name);

	ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
|