
mm/memory_failure: Remove unused trapno from memory_failure

Today 4 architectures set ARCH_SUPPORTS_MEMORY_FAILURE (arm64, parisc,
powerpc, and x86), while 4 other architectures set __ARCH_SI_TRAPNO
(alpha, metag, sparc, and tile).  These two sets of architectures do
not intersect, so remove the trapno parameter to avoid confusion.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Author: Eric W. Biederman <ebiederm@xmission.com>
Date:   2017-07-09 18:14:01 -05:00
Commit: 83b57531c5
Parent: f71dd7dc2d

10 changed files with 25 additions and 32 deletions
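
For illustration only (not part of the patch): a minimal userspace sketch of the interface change. The stub functions and the pfn value below are hypothetical; only the argument lists mirror the prototypes changed in include/linux/mm.h, where the middle trapno argument is dropped.

/* Sketch only: userspace stand-ins for the kernel entry points, buildable
 * with a plain C compiler.  Nothing below is kernel code. */
#include <stdio.h>

#define MF_SOFT_OFFLINE (1 << 3)  /* matches the enum mf_flags value in the mm.h hunk below */

/* New two-argument form introduced by this patch (stubbed out here). */
static int memory_failure(unsigned long pfn, int flags)
{
        printf("memory_failure(pfn=%#lx, flags=%d)\n", pfn, flags);
        return 0;
}

static void memory_failure_queue(unsigned long pfn, int flags)
{
        printf("memory_failure_queue(pfn=%#lx, flags=%d)\n", pfn, flags);
}

int main(void)
{
        unsigned long pfn = 0x1234;     /* hypothetical page frame number */

        /*
         * Before this patch each call carried a trap number as the middle
         * argument, e.g. memory_failure(pfn, 0, 0) or
         * memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE); the generic code
         * never used it, so the argument simply goes away:
         */
        memory_failure(pfn, 0);
        memory_failure_queue(pfn, MF_SOFT_OFFLINE);
        return 0;
}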

arch/parisc/kernel/pdt.c

@@ -325,7 +325,7 @@ static int pdt_mainloop(void *unused)
 #ifdef CONFIG_MEMORY_FAILURE
        if ((pde & PDT_ADDR_PERM_ERR) ||
            ((pde & PDT_ADDR_SINGLE_ERR) == 0))
-               memory_failure(pde >> PAGE_SHIFT, 0, 0);
+               memory_failure(pde >> PAGE_SHIFT, 0);
        else
                soft_offline_page(
                        pfn_to_page(pde >> PAGE_SHIFT), 0);

arch/powerpc/platforms/powernv/opal-memory-errors.c

@@ -60,7 +60,7 @@ static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
        }
        for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
-               memory_failure(paddr_start >> PAGE_SHIFT, 0, 0);
+               memory_failure(paddr_start >> PAGE_SHIFT, 0);
        }
 }

arch/x86/kernel/cpu/mcheck/mce.c

@@ -582,7 +582,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
        if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
                pfn = mce->addr >> PAGE_SHIFT;
-               memory_failure(pfn, MCE_VECTOR, 0);
+               memory_failure(pfn, 0);
        }
        return NOTIFY_OK;
@@ -1046,7 +1046,7 @@ static int do_memory_failure(struct mce *m)
        pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
        if (!(m->mcgstatus & MCG_STATUS_RIPV))
                flags |= MF_MUST_KILL;
-       ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
+       ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
        if (ret)
                pr_err("Memory error not recovered");
        return ret;
@@ -1325,7 +1325,7 @@ out_ist:
 EXPORT_SYMBOL_GPL(do_machine_check);
 #ifndef CONFIG_MEMORY_FAILURE
-int memory_failure(unsigned long pfn, int vector, int flags)
+int memory_failure(unsigned long pfn, int flags)
 {
        /* mce_severity() should not hand us an ACTION_REQUIRED error */
        BUG_ON(flags & MF_ACTION_REQUIRED);

drivers/acpi/apei/ghes.c

@@ -410,7 +410,7 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
                flags = 0;
        if (flags != -1)
-               memory_failure_queue(pfn, 0, flags);
+               memory_failure_queue(pfn, flags);
 #endif
 }

drivers/base/memory.c

@@ -569,7 +569,7 @@ store_hard_offline_page(struct device *dev,
        if (kstrtoull(buf, 0, &pfn) < 0)
                return -EINVAL;
        pfn >>= PAGE_SHIFT;
-       ret = memory_failure(pfn, 0, 0);
+       ret = memory_failure(pfn, 0);
        return ret ? ret : count;
 }

drivers/ras/cec.c

@@ -327,7 +327,7 @@ int cec_add_elem(u64 pfn)
        } else {
                /* We have reached max count for this page, soft-offline it. */
                pr_err("Soft-offlining pfn: 0x%llx\n", pfn);
-               memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
+               memory_failure_queue(pfn, MF_SOFT_OFFLINE);
                ca->pfns_poisoned++;
        }

include/linux/mm.h

@@ -2570,8 +2570,8 @@ enum mf_flags {
        MF_MUST_KILL = 1 << 2,
        MF_SOFT_OFFLINE = 1 << 3,
 };
-extern int memory_failure(unsigned long pfn, int trapno, int flags);
-extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
+extern int memory_failure(unsigned long pfn, int flags);
+extern void memory_failure_queue(unsigned long pfn, int flags);
 extern int unpoison_memory(unsigned long pfn);
 extern int get_hwpoison_page(struct page *page);
 #define put_hwpoison_page(page) put_page(page)

mm/hwpoison-inject.c

@@ -52,7 +52,7 @@ static int hwpoison_inject(void *data, u64 val)
 inject:
        pr_info("Injecting memory failure at pfn %#lx\n", pfn);
-       return memory_failure(pfn, 18, MF_COUNT_INCREASED);
+       return memory_failure(pfn, MF_COUNT_INCREASED);
 put_out:
        put_hwpoison_page(p);
        return 0;

mm/madvise.c

@@ -661,7 +661,7 @@ static int madvise_inject_error(int behavior,
                pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
                                page_to_pfn(page), start);
-               ret = memory_failure(page_to_pfn(page), 0, MF_COUNT_INCREASED);
+               ret = memory_failure(page_to_pfn(page), MF_COUNT_INCREASED);
                if (ret)
                        return ret;
        }

mm/memory-failure.c

@@ -178,7 +178,7 @@ EXPORT_SYMBOL_GPL(hwpoison_filter);
  * ``action optional'' if they are not immediately affected by the error
  * ``action required'' if error happened in current execution context
  */
-static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
+static int kill_proc(struct task_struct *t, unsigned long addr,
                        unsigned long pfn, struct page *page, int flags)
 {
        struct siginfo si;
@@ -189,9 +189,6 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
        si.si_signo = SIGBUS;
        si.si_errno = 0;
        si.si_addr = (void *)addr;
-#ifdef __ARCH_SI_TRAPNO
-       si.si_trapno = trapno;
-#endif
        si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
        if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
@@ -323,7 +320,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * Also when FAIL is set do a force kill because something went
  * wrong earlier.
  */
-static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill,
                        bool fail, struct page *page, unsigned long pfn,
                        int flags)
 {
@@ -348,7 +345,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
                         * check for that, but we need to tell the
                         * process anyways.
                         */
-                       else if (kill_proc(tk->tsk, tk->addr, trapno,
+                       else if (kill_proc(tk->tsk, tk->addr,
                                        pfn, page, flags) < 0)
                                pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
                                        pfn, tk->tsk->comm, tk->tsk->pid);
@@ -927,7 +924,7 @@ EXPORT_SYMBOL_GPL(get_hwpoison_page);
  * the pages and send SIGBUS to the processes if the data was dirty.
  */
 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
-                                 int trapno, int flags, struct page **hpagep)
+                                 int flags, struct page **hpagep)
 {
        enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
        struct address_space *mapping;
@@ -1017,7 +1014,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * any accesses to the poisoned memory.
         */
        forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
-       kill_procs(&tokill, forcekill, trapno, !unmap_success, p, pfn, flags);
+       kill_procs(&tokill, forcekill, !unmap_success, p, pfn, flags);
        return unmap_success;
 }
@@ -1045,7 +1042,7 @@ static int identify_page_state(unsigned long pfn, struct page *p,
        return page_action(ps, p, pfn);
 }
-static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags)
+static int memory_failure_hugetlb(unsigned long pfn, int flags)
 {
        struct page *p = pfn_to_page(pfn);
        struct page *head = compound_head(p);
@@ -1090,7 +1087,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags)
                return 0;
        }
-       if (!hwpoison_user_mappings(p, pfn, trapno, flags, &head)) {
+       if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
                action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
                res = -EBUSY;
                goto out;
@@ -1105,7 +1102,6 @@ out:
 /**
  * memory_failure - Handle memory failure of a page.
  * @pfn: Page Number of the corrupted page
- * @trapno: Trap number reported in the signal to user space.
  * @flags: fine tune action taken
  *
  * This function is called by the low level machine check code
@@ -1120,7 +1116,7 @@ out:
  * Must run in process context (e.g. a work queue) with interrupts
  * enabled and no spinlocks hold.
  */
-int memory_failure(unsigned long pfn, int trapno, int flags)
+int memory_failure(unsigned long pfn, int flags)
 {
        struct page *p;
        struct page *hpage;
@@ -1129,7 +1125,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        unsigned long page_flags;
        if (!sysctl_memory_failure_recovery)
-               panic("Memory failure from trap %d on page %lx", trapno, pfn);
+               panic("Memory failure on page %lx", pfn);
        if (!pfn_valid(pfn)) {
                pr_err("Memory failure: %#lx: memory outside kernel control\n",
@@ -1139,7 +1135,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        p = pfn_to_page(pfn);
        if (PageHuge(p))
-               return memory_failure_hugetlb(pfn, trapno, flags);
+               return memory_failure_hugetlb(pfn, flags);
        if (TestSetPageHWPoison(p)) {
                pr_err("Memory failure: %#lx: already hardware poisoned\n",
                        pfn);
@@ -1268,7 +1264,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * When the raw error page is thp tail page, hpage points to the raw
         * page after thp split.
         */
-       if (!hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)) {
+       if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
                action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
                res = -EBUSY;
                goto out;
@@ -1296,7 +1292,6 @@ EXPORT_SYMBOL_GPL(memory_failure);
 struct memory_failure_entry {
        unsigned long pfn;
-       int trapno;
        int flags;
 };
@@ -1312,7 +1307,6 @@ static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
 /**
  * memory_failure_queue - Schedule handling memory failure of a page.
  * @pfn: Page Number of the corrupted page
- * @trapno: Trap number reported in the signal to user space.
  * @flags: Flags for memory failure handling
  *
  * This function is called by the low level hardware error handler
@@ -1326,13 +1320,12 @@ static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
  *
  * Can run in IRQ context.
  */
-void memory_failure_queue(unsigned long pfn, int trapno, int flags)
+void memory_failure_queue(unsigned long pfn, int flags)
 {
        struct memory_failure_cpu *mf_cpu;
        unsigned long proc_flags;
        struct memory_failure_entry entry = {
                .pfn = pfn,
-               .trapno = trapno,
                .flags = flags,
        };
@@ -1365,7 +1358,7 @@ static void memory_failure_work_func(struct work_struct *work)
                if (entry.flags & MF_SOFT_OFFLINE)
                        soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
                else
-                       memory_failure(entry.pfn, entry.trapno, entry.flags);
+                       memory_failure(entry.pfn, entry.flags);
        }
 }