mirror of https://github.com/edk2-porting/linux-next.git
commit 2b74030354
Use the new return type vm_fault_t for fault handlers. For now, this just
documents that the function returns a VM_FAULT value rather than an
errno. Once all instances are converted, vm_fault_t will become a
distinct type.
Ref: commit 1c8f422059 ("mm: change return type to vm_fault_t")

The aim is to change the return type of finish_fault() and
handle_mm_fault() to vm_fault_t. As part of that cleanup, the return
types of all other recursively called functions have been changed to
vm_fault_t as well.

The places from which handle_mm_fault() is invoked will be changed to
vm_fault_t in a separate patch.

vmf_error() is a newly introduced inline function in 4.17-rc6 (see the
sketch after the sign-off trailers below).
[akpm@linux-foundation.org: don't shadow outer local `ret' in __do_huge_pmd_anonymous_page()]
Link: http://lkml.kernel.org/r/20180604171727.GA20279@jordon-HP-15-Notebook-PC
Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Matthew Wilcox <mawilcox@microsoft.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
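As a hedged illustration of the conversion pattern this patch applies, here is a minimal sketch of a fault handler that returns vm_fault_t instead of int; foo_fault() and foo_get_page() are hypothetical names, not code from this patch:

#include <linux/mm.h>

/* Hypothetical helper: fills *pagep or returns a negative errno. */
static int foo_get_page(struct vm_fault *vmf, struct page **pagep);

/*
 * Sketch of a converted handler: the return type is vm_fault_t, and an
 * errno from the helper is translated with vmf_error() instead of being
 * returned directly.
 */
static vm_fault_t foo_fault(struct vm_fault *vmf)
{
	struct page *page;
	int err;

	err = foo_get_page(vmf, &page);
	if (err)
		return vmf_error(err);	/* e.g. -ENOMEM -> VM_FAULT_OOM */

	vmf->page = page;
	return 0;
}

vmf_error() maps -ENOMEM to VM_FAULT_OOM and everything else to VM_FAULT_SIGBUS, so a converted handler never leaks a raw errno into the VM_FAULT_* space.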
121 lines · 3.1 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H


#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer that are used to
 * determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq, otherwise only
	 * for display purposes.
	 */
	const int order;

	/* Used by oom implementation, do not set */
	unsigned long totalpages;
	struct task_struct *chosen;
	unsigned long chosen_points;
};

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}

static inline bool tsk_is_oom_victim(struct task_struct * tsk)
{
	return tsk->signal->oom_mm;
}

/*
 * Use this helper if tsk->mm != mm and the victim mm needs a special
 * handling. This is guaranteed to stay true after once set.
 */
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
	return test_bit(MMF_OOM_VICTIM, &mm->flags);
}

/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer true if the oom reaper started to reap the
 * address space which is reflected by MMF_UNSTABLE flag set in
 * the mm. At that moment any !shared mapping would lose the content
 * and could cause a memory corruption (zero pages instead of the
 * original content).
 *
 * User should call this before establishing a page table entry for
 * a !shared mapping and under the proper page table lock.
 *
 * Return 0 when the PF is safe VM_FAULT_SIGBUS otherwise.
 */
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
		return VM_FAULT_SIGBUS;
	return 0;
}

bool __oom_reap_task_mm(struct mm_struct *mm);

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);

extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* _INCLUDE_LINUX_OOM_H */
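To make the calling convention described in the comment above check_stable_address_space() concrete, here is a minimal, hypothetical sketch (not taken from the kernel source; it assumes this header and <linux/mm.h>): the caller checks the mm under the page table lock before installing a PTE for a !shared mapping, and propagates VM_FAULT_SIGBUS instead of mapping a page whose contents the oom reaper may already have discarded.

/*
 * Hypothetical sketch only: how a fault path might use
 * check_stable_address_space() for a !shared mapping. The caller is
 * assumed to have looked up the PTE's page table lock already.
 */
static vm_fault_t example_install_private_pte(struct mm_struct *mm,
					      spinlock_t *ptl)
{
	vm_fault_t ret;

	spin_lock(ptl);				/* the proper page table lock */
	ret = check_stable_address_space(mm);
	if (ret) {
		/* MMF_UNSTABLE is set: this address space is no longer reliable */
		spin_unlock(ptl);
		return ret;			/* VM_FAULT_SIGBUS */
	}
	/* ... the page table entry would be installed here ... */
	spin_unlock(ptl);
	return 0;
}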