commit 537cd89484
Commit af3b854492 ("mm/page_alloc.c: allow error injection") introduced
EI_ETYPE_TRUE, but did not extend

 * lib/error-inject.c:error_type_string(), and
 * kernel/fail_function.c:adjust_error_retval()

to accommodate this change.

Handle EI_ETYPE_TRUE in both functions appropriately by

 * returning "TRUE" in error_type_string(), and
 * adjusting the return value to true (1) in adjust_error_retval().

Furthermore, simplify the logic of handling EI_ETYPE_NULL in
adjust_error_retval().
Link: https://lkml.kernel.org/r/njB1czX0ZgWPR9h61euHIBb5bEyePw9D4D2m3i5lc9Cl96P8Q1308dTcmsEZW7Vtz3Ifz4do-rOtSfuFTyGoEDYokkK2aUqBePVptzZEWfU=@protonmail.com
Signed-off-by: Barnabás Pőcze <pobrn@protonmail.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
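For reference, the companion change lands in kernel/fail_function.c, which is
not part of the file below. A minimal sketch of how adjust_error_retval()
might handle the new EI_ETYPE_TRUE case after this patch (an illustration of
the behaviour described above, not the verbatim kernel source):

static unsigned long adjust_error_retval(unsigned long addr, unsigned long retv)
{
        switch (get_injectable_error_type(addr)) {
        case EI_ETYPE_NULL:
                /* inject a NULL pointer */
                return 0;
        case EI_ETYPE_ERRNO:
                /* clamp anything that is not a valid -errno */
                if (retv < (unsigned long)-MAX_ERRNO)
                        return (unsigned long)-EINVAL;
                break;
        case EI_ETYPE_ERRNO_NULL:
                /* allow either NULL or a valid -errno */
                if (retv != 0 && retv < (unsigned long)-MAX_ERRNO)
                        return (unsigned long)-EINVAL;
                break;
        case EI_ETYPE_TRUE:
                /* new case: force a boolean true (1) return value */
                return 1;
        }

        return retv;
}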
// SPDX-License-Identifier: GPL-2.0
// error-inject.c: Function-level error injection table
#include <linux/error-injection.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Whitelist of symbols that can be overridden for error injection. */
static LIST_HEAD(error_injection_list);
static DEFINE_MUTEX(ei_mutex);
struct ei_entry {
        struct list_head list;
        unsigned long start_addr;
        unsigned long end_addr;
        int etype;
        void *priv;
};

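/* Check whether addr lies inside any function on the error injection whitelist. */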
bool within_error_injection_list(unsigned long addr)
{
        struct ei_entry *ent;
        bool ret = false;

        mutex_lock(&ei_mutex);
        list_for_each_entry(ent, &error_injection_list, list) {
                if (addr >= ent->start_addr && addr < ent->end_addr) {
                        ret = true;
                        break;
                }
        }
        mutex_unlock(&ei_mutex);
        return ret;
}

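/* Return the EI_ETYPE_* registered for the function containing addr, or EI_ETYPE_NONE. */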
int get_injectable_error_type(unsigned long addr)
{
        struct ei_entry *ent;

        list_for_each_entry(ent, &error_injection_list, list) {
                if (addr >= ent->start_addr && addr < ent->end_addr)
                        return ent->etype;
        }
        return EI_ETYPE_NONE;
}

/*
 * Lookup and populate the error_injection_list.
 *
 * For safety reasons we only allow certain functions to be overridden with
 * bpf_error_injection, so we need to populate the list of the symbols that have
 * been marked as safe for overriding.
 */
static void populate_error_injection_list(struct error_injection_entry *start,
                                          struct error_injection_entry *end,
                                          void *priv)
{
        struct error_injection_entry *iter;
        struct ei_entry *ent;
        unsigned long entry, offset = 0, size = 0;

        mutex_lock(&ei_mutex);
        for (iter = start; iter < end; iter++) {
                entry = arch_deref_entry_point((void *)iter->addr);

                if (!kernel_text_address(entry) ||
                    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
                        pr_err("Failed to find error inject entry at %p\n",
                               (void *)entry);
                        continue;
                }

                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        break;
                ent->start_addr = entry;
                ent->end_addr = entry + size;
                ent->etype = iter->etype;
                ent->priv = priv;
                INIT_LIST_HEAD(&ent->list);
                list_add_tail(&ent->list, &error_injection_list);
        }
        mutex_unlock(&ei_mutex);
}

/* Markers of the _error_inject_whitelist section */
extern struct error_injection_entry __start_error_injection_whitelist[];
extern struct error_injection_entry __stop_error_injection_whitelist[];

static void __init populate_kernel_ei_list(void)
{
        populate_error_injection_list(__start_error_injection_whitelist,
                                      __stop_error_injection_whitelist,
                                      NULL);
}

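/* Module support: add a module's whitelist entries on load, drop them on unload. */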
#ifdef CONFIG_MODULES
static void module_load_ei_list(struct module *mod)
{
        if (!mod->num_ei_funcs)
                return;

        populate_error_injection_list(mod->ei_funcs,
                                      mod->ei_funcs + mod->num_ei_funcs, mod);
}

static void module_unload_ei_list(struct module *mod)
{
        struct ei_entry *ent, *n;

        if (!mod->num_ei_funcs)
                return;

        mutex_lock(&ei_mutex);
        list_for_each_entry_safe(ent, n, &error_injection_list, list) {
                if (ent->priv == mod) {
                        list_del_init(&ent->list);
                        kfree(ent);
                }
        }
        mutex_unlock(&ei_mutex);
}

/* Module notifier call back, checking error injection table on the module */
static int ei_module_callback(struct notifier_block *nb,
                              unsigned long val, void *data)
{
        struct module *mod = data;

        if (val == MODULE_STATE_COMING)
                module_load_ei_list(mod);
        else if (val == MODULE_STATE_GOING)
                module_unload_ei_list(mod);

        return NOTIFY_DONE;
}

static struct notifier_block ei_module_nb = {
        .notifier_call = ei_module_callback,
        .priority = 0
};

static __init int module_ei_init(void)
{
        return register_module_notifier(&ei_module_nb);
}
#else /* !CONFIG_MODULES */
#define module_ei_init() (0)
#endif

/*
 * error_injection/whitelist -- shows which functions can be overridden for
 * error injection.
 */
static void *ei_seq_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&ei_mutex);
        return seq_list_start(&error_injection_list, *pos);
}

static void ei_seq_stop(struct seq_file *m, void *v)
{
        mutex_unlock(&ei_mutex);
}

static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &error_injection_list, pos);
}

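/* Human-readable name for an EI_ETYPE_* value, as printed in the debugfs listing. */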
static const char *error_type_string(int etype)
{
        switch (etype) {
        case EI_ETYPE_NULL:
                return "NULL";
        case EI_ETYPE_ERRNO:
                return "ERRNO";
        case EI_ETYPE_ERRNO_NULL:
                return "ERRNO_NULL";
        case EI_ETYPE_TRUE:
                return "TRUE";
        default:
                return "(unknown)";
        }
}

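/* Emit one whitelist entry: symbol name and its error type. */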
static int ei_seq_show(struct seq_file *m, void *v)
{
        struct ei_entry *ent = list_entry(v, struct ei_entry, list);

        seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
                   error_type_string(ent->etype));
        return 0;
}

static const struct seq_operations ei_seq_ops = {
        .start = ei_seq_start,
        .next  = ei_seq_next,
        .stop  = ei_seq_stop,
        .show  = ei_seq_show,
};

static int ei_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &ei_seq_ops);
}

static const struct file_operations debugfs_ei_ops = {
        .open    = ei_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

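/* Expose the whitelist at <debugfs>/error_injection/list. */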
static int __init ei_debugfs_init(void)
{
        struct dentry *dir, *file;

        dir = debugfs_create_dir("error_injection", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}

static int __init init_error_injection(void)
{
        populate_kernel_ei_list();

        if (!module_ei_init())
                ei_debugfs_init();

        return 0;
}
late_initcall(init_error_injection);