asm-generic: mmu-context cleanup
This is a cleanup series from Nicholas Piggin, preparing for later changes.
The asm/mmu_context.h headers are generalized and common code is moved to
asm-generic/mmu_context.h. This saves a bit of code and makes it easier to
change in the future.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEo6/YBQwIrVS28WGKmmx57+YAGNkFAl/Y1LsACgkQmmx57+YA
GNm6kBAAq4/n6nuNnh6b9LhjXaZRG75gEyW7JvHl8KE5wmZHwDHqbwiQgU1b3lUs
JJGbfKqi5ASKxNg6MpfYodmCOqeTUUYG0FUCb6lMhcxxMdfLTLYBvkNd6Y143M+T
boi5b/iz+OUQdNPzlVeSsUEVsD59FIXmP/GhscWZN9VAyf/aLV2MDBIOhrDSJlPo
ObexnP0Iw1E1NRQYDQ6L2dKTHa6XmHyUtw40ABPmd/6MSd1S+D+j3FGg+CYmvnzG
k9g8FbNby8xtUfc0pZV4W/322WN8cDFF9bc04eTDZiAv1bk9lmfvWJ2bWjs3s2qt
RO/suiZEOAta/WUX9vVLgYn2td00ef+AyjNUgffiUfvQfl++fiCDFTGl+MoCLjbh
xQUPcRuRdED7bMKNrC0CcDOSwWEBWVXvkU/szBLDeE1sPjXzGQ80q1Y72k9y961I
mqg7FrHqjZsxT9luXMAzClHNhXAtvehkJZBIdHlFok83EFoTQp48Da4jaDuOOhlq
p/lkPJWOHegIQMWtGwRyGmG1qzil7b/QBNAPLgu9pF4TA+ySRBEB2BOr2jRSkj6N
mNTHQbSYxBoktdt+VhtrSsxR+i8lwlegx+RNRFmKK3VH5da2nfiBaOY7zBQQHxCK
yxQvXvsljSVpfkFKLc/S2nLQL1zTkRfFKV1Xmd3+3owR+EoqM60=
=NpMX
-----END PGP SIGNATURE-----

Merge tag 'asm-generic-mmu-context-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic mmu-context cleanup from Arnd Bergmann:
 "This is a cleanup series from Nicholas Piggin, preparing for later
  changes. The asm/mmu_context.h headers are generalized and common code
  moved to asm-generic/mmu_context.h. This saves a bit of code and makes
  it easier to change in the future"

* tag 'asm-generic-mmu-context-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic: (25 commits)
  h8300: Fix generic mmu_context build
  m68k: mmu_context: Fix Sun-3 build
  xtensa: use asm-generic/mmu_context.h for no-op implementations
  x86: use asm-generic/mmu_context.h for no-op implementations
  um: use asm-generic/mmu_context.h for no-op implementations
  sparc: use asm-generic/mmu_context.h for no-op implementations
  sh: use asm-generic/mmu_context.h for no-op implementations
  s390: use asm-generic/mmu_context.h for no-op implementations
  riscv: use asm-generic/mmu_context.h for no-op implementations
  powerpc: use asm-generic/mmu_context.h for no-op implementations
  parisc: use asm-generic/mmu_context.h for no-op implementations
  openrisc: use asm-generic/mmu_context.h for no-op implementations
  nios2: use asm-generic/mmu_context.h for no-op implementations
  nds32: use asm-generic/mmu_context.h for no-op implementations
  mips: use asm-generic/mmu_context.h for no-op implementations
  microblaze: use asm-generic/mmu_context.h for no-op implementations
  m68k: use asm-generic/mmu_context.h for no-op implementations
  ia64: use asm-generic/mmu_context.h for no-op implementations
  hexagon: use asm-generic/mmu_context.h for no-op implementations
  csky: use asm-generic/mmu_context.h for no-op implementations
  ...
commit 157807123c
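The pattern the series converts every architecture to is visible throughout the diff below: an arch header keeps only the hooks it genuinely implements, marks each one with a self-referencing #define, and ends with #include <asm-generic/mmu_context.h>, whose #ifndef guards fill in no-op defaults for everything left undefined. A minimal sketch of such a header for a hypothetical MMU architecture (the "foo" name, the context.id field and the init_new_context body are illustrative, not taken from the series):

/*
 * Hypothetical arch/foo/include/asm/mmu_context.h, sketching the
 * post-series layout. Only init_new_context is arch-specific here.
 */
#ifndef _ASM_FOO_MMU_CONTEXT_H
#define _ASM_FOO_MMU_CONTEXT_H

#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm-generic/mm_hooks.h>

/* Arch-specific hook: defining the macro suppresses the generic stub. */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				    struct mm_struct *mm)
{
	mm->context.id = 0;	/* illustrative: reset the per-mm ASID */
	return 0;
}

/* switch_mm stays arch-specific for MMU architectures. */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk);

/*
 * Everything not defined above (enter_lazy_tlb, destroy_context,
 * activate_mm, deactivate_mm) falls back to the no-op stubs in the
 * generic header, each guarded by #ifndef.
 */
#include <asm-generic/mmu_context.h>

#endif /* _ASM_FOO_MMU_CONTEXT_H */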
@@ -214,8 +214,6 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
	tbiap();
}

#define deactivate_mm(tsk,mm) do { } while (0)

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
@@ -229,6 +227,7 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
# endif
#endif

#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -242,12 +241,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
	return 0;
}

extern inline void
destroy_context(struct mm_struct *mm)
{
	/* Nothing to do. */
}

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -255,6 +249,8 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
		= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#include <asm-generic/mmu_context.h>

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
@@ -102,6 +102,7 @@ set_hw:
 * Initialize the context related info for a new mm_struct
 * instance.
 */
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -113,6 +114,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
	return 0;
}

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
@@ -153,13 +155,13 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}

/*
 * Called at the time of execve() to get a new ASID
 * Note the subtlety here: get_new_mmu_context() behaves differently here
 * vs. in switch_mm(). Here it always returns a new ASID, because mm has
 * an unallocated "initial" value, while in latter, it moves to a new ASID,
 * only if it was unallocated
 * activate_mm defaults (in asm-generic) to switch_mm and is called at the
 * time of execve() to get a new ASID Note the subtlety here:
 * get_new_mmu_context() behaves differently here vs. in switch_mm(). Here
 * it always returns a new ASID, because mm has an unallocated "initial"
 * value, while in latter, it moves to a new ASID, only if it was
 * unallocated
 */
#define activate_mm(prev, next) switch_mm(prev, next, NULL)

/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
 * for retiring-mm. However destroy_context( ) still needs to do that because
@@ -168,8 +170,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 * there is a good chance that task gets sched-out/in, making it's ASID valid
 * again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm) do { } while (0)

#define enter_lazy_tlb(mm, tsk)
#include <asm-generic/mmu_context.h>

#endif /* __ASM_ARC_MMU_CONTEXT_H */
@@ -26,6 +26,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -92,32 +94,10 @@ static inline void finish_arch_post_lock_switch(void)

#endif /* CONFIG_MMU */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}


#endif /* CONFIG_CPU_HAS_ASID */

#define destroy_context(mm) do { } while(0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm: describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned. No registers are touched. We avoid
@@ -149,6 +129,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}

#define deactivate_mm(tsk,mm) do { } while (0)
#include <asm-generic/mmu_context.h>

#endif
@@ -174,9 +174,9 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm) do { } while(0)
void check_and_switch_context(struct mm_struct *mm);

#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -208,6 +208,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -248,15 +249,14 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
	update_saved_ttbr0(tsk, next);
}

#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, current)

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */
arch/c6x/include/asm/mmu_context.h (new file, 6 lines)
@@ -0,0 +1,6 @@
#ifndef _ASM_C6X_MMU_CONTEXT_H
#define _ASM_C6X_MMU_CONTEXT_H

#include <asm-generic/nommu_context.h>

#endif /* _ASM_C6X_MMU_CONTEXT_H */
@@ -24,11 +24,6 @@
#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)

#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
#define activate_mm(prev,next) switch_mm(prev, next, current)

#define destroy_context(mm) do {} while (0)
#define enter_lazy_tlb(mm, tsk) do {} while (0)
#define deactivate_mm(tsk, mm) do {} while (0)

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

@@ -46,4 +41,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,

	flush_icache_deferred(next);
}

#include <asm-generic/mmu_context.h>

#endif /* __ASM_CSKY_MMU_CONTEXT_H */
arch/h8300/include/asm/mmu_context.h (new file, 6 lines)
@@ -0,0 +1,6 @@
#ifndef _ASM_H8300_MMU_CONTEXT_H
#define _ASM_H8300_MMU_CONTEXT_H

#include <asm-generic/nommu_context.h>

#endif /* _ASM_H8300_MMU_CONTEXT_H */
@@ -15,39 +15,13 @@
#include <asm/pgalloc.h>
#include <asm/mem-layout.h>

static inline void destroy_context(struct mm_struct *mm)
{
}

/*
 * VM port hides all TLB management, so "lazy TLB" isn't very
 * meaningful. Even for ports to architectures with visble TLBs,
 * this is almost invariably a null function.
 *
 * mm->context is set up by pgd_alloc, so no init_new_context required.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *tsk)
{
}

/*
 * Architecture-specific actions, if any, for memory map deactivation.
 */
static inline void deactivate_mm(struct task_struct *tsk,
	struct mm_struct *mm)
{
}

/**
 * init_new_context - initialize context related info for new mm_struct instance
 * @tsk: pointer to a task struct
 * @mm: pointer to a new mm struct
 */
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
	/* mm->context is set up by pgd_alloc */
	return 0;
}

/*
 * Switch active mm context
@@ -74,6 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/*
 * Activate new memory map for task
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
@@ -86,4 +61,6 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
#include <asm-generic/mm_hooks.h>

#include <asm-generic/mmu_context.h>

#endif
@@ -49,11 +49,6 @@ DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused. This is signalled by the
@@ -116,6 +111,7 @@ out:
 * Initialize context number to some sane value. MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
#define init_new_context init_new_context
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
@@ -123,12 +119,6 @@ init_new_context (struct task_struct *p, struct mm_struct *mm)
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
reload_context (nv_mm_context_t context)
{
@@ -178,11 +168,10 @@ activate_context (struct mm_struct *mm)
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm) do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
#define activate_mm activate_mm
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
@@ -196,5 +185,7 @@ activate_mm (struct mm_struct *prev, struct mm_struct *next)

#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)

#include <asm-generic/mmu_context.h>

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */
@@ -5,10 +5,6 @@
#include <asm-generic/mm_hooks.h>
#include <linux/mm_types.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#ifdef CONFIG_MMU

#if defined(CONFIG_COLDFIRE)
@@ -58,6 +54,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
/*
 * We're finished using the context for an address space.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
@@ -83,6 +80,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *active_mm,
	struct mm_struct *mm)
{
@@ -90,8 +88,6 @@ static inline void activate_mm(struct mm_struct *active_mm,
	set_context(mm->context, mm->pgd);
}

#define deactivate_mm(tsk, mm) do { } while (0)

#define prepare_arch_switch(next) load_ksp_mmu(next)

static inline void load_ksp_mmu(struct task_struct *task)
@@ -176,6 +172,7 @@ extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);

/* set the context for a new task to unmapped */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
@@ -192,6 +189,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
}

/* flush context if allocated... */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != SUN3_INVALID_CONTEXT)
@@ -210,8 +208,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	activate_context(tsk->mm);
}

#define deactivate_mm(tsk, mm) do { } while (0)

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
	struct mm_struct *next_mm)
{
@@ -224,6 +221,7 @@ static inline void activate_mm(struct mm_struct *prev_mm,
#include <asm/page.h>
#include <asm/cacheflush.h>

#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
@@ -231,8 +229,6 @@ static inline int init_new_context(struct task_struct *tsk,
	return 0;
}

#define destroy_context(mm) do { } while(0)

static inline void switch_mm_0230(struct mm_struct *mm)
{
	unsigned long crp[2] = {
@@ -300,8 +296,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
	}
}

#define deactivate_mm(tsk,mm) do { } while (0)

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
	struct mm_struct *next_mm)
{
@@ -315,24 +310,11 @@ static inline void activate_mm(struct mm_struct *prev_mm,

#endif

#include <asm-generic/mmu_context.h>

#else /* !CONFIG_MMU */

static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}


static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
}

#define destroy_context(mm) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)

static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
}
#include <asm-generic/nommu_context.h>

#endif /* CONFIG_MMU */
#endif /* __M68K_MMU_CONTEXT_H */
@@ -2,5 +2,5 @@
#ifdef CONFIG_MMU
# include <asm/mmu_context_mm.h>
#else
# include <asm-generic/mmu_context.h>
# include <asm-generic/nommu_context.h>
#endif
@@ -33,10 +33,6 @@
   to represent all kernel pages as shared among all contexts.
 */

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

# define NO_CONTEXT 256
# define LAST_CONTEXT 255
# define FIRST_CONTEXT 1
@@ -105,6 +101,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
/*
 * We're finished using the context for an address space.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
@@ -126,6 +123,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *active_mm,
	struct mm_struct *mm)
{
@@ -136,5 +134,7 @@ static inline void activate_mm(struct mm_struct *active_mm,

extern void mmu_context_init(void);

#include <asm-generic/mmu_context.h>

# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
@@ -122,9 +122,6 @@ unsigned long get_wchan(struct task_struct *p);
# define KSTK_EIP(task) (task_pc(task))
# define KSTK_ESP(task) (task_sp(task))

/* FIXME */
# define deactivate_mm(tsk, mm) do { } while (0)

# define STACK_TOP TASK_SIZE
# define STACK_TOP_MAX STACK_TOP
@@ -124,10 +124,6 @@ static inline void set_cpu_context(unsigned int cpu,
#define cpu_asid(cpu, mm) \
	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern void get_new_mmu_context(struct mm_struct *mm);
extern void check_mmu_context(struct mm_struct *mm);
extern void check_switch_mmu_context(struct mm_struct *mm);
@@ -136,6 +132,7 @@ extern void check_switch_mmu_context(struct mm_struct *mm);
 * Initialize the context related info for a new mm_struct
 * instance.
 */
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -180,14 +177,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	dsemul_mm_cleanup(mm);
}

#define activate_mm(prev, next) switch_mm(prev, next, current)
#define deactivate_mm(tsk, mm) do { } while (0)

static inline void
drop_mmu_context(struct mm_struct *mm)
{
@@ -237,4 +232,6 @@ drop_mmu_context(struct mm_struct *mm)
	local_irq_restore(flags);
}

#include <asm-generic/mmu_context.h>

#endif /* _ASM_MMU_CONTEXT_H */
@@ -9,6 +9,7 @@
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>

#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -16,8 +17,6 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
	return 0;
}

#define destroy_context(mm) do { } while(0)

#define CID_BITS 9
extern spinlock_t cid_lock;
extern unsigned int cpu_last_cid;
@@ -47,10 +46,6 @@ static inline void check_context(struct mm_struct *mm)
		__new_context(mm);
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk)
{
@@ -62,7 +57,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	}
}

#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
#include <asm-generic/mmu_context.h>

#endif
@@ -26,16 +26,13 @@ extern unsigned long get_pid_from_context(mm_context_t *ctx);
 */
extern pgd_t *pgd_current;

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * Initialize the context related info for a new mm_struct instance.
 *
 * Set all new contexts to 0, that way the generation will never match
 * the currently running generation when this context is switched in.
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
@@ -43,26 +40,16 @@ static inline int init_new_context(struct task_struct *tsk,
	return 0;
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk);

static inline void deactivate_mm(struct task_struct *tsk,
	struct mm_struct *mm)
{
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
void activate_mm(struct mm_struct *prev, struct mm_struct *next);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_NIOS2_MMU_CONTEXT_H */
@@ -17,13 +17,13 @@

#include <asm-generic/mm_hooks.h>

#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk);

#define deactivate_mm(tsk, mm) do { } while (0)

#define activate_mm(prev, next) switch_mm((prev), (next), NULL)

/* current active pgd - this is similar to other processors pgd
@@ -32,8 +32,6 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,

extern volatile pgd_t *current_pgd[]; /* defined in arch/openrisc/mm/fault.c */

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
#include <asm-generic/mmu_context.h>

#endif
@@ -7,16 +7,13 @@
#include <linux/atomic.h>
#include <asm-generic/mm_hooks.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them. prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);

#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -26,6 +23,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
	return 0;
}

#define destroy_context destroy_context
static inline void
destroy_context(struct mm_struct *mm)
{
@@ -71,8 +69,7 @@ static inline void switch_mm(struct mm_struct *prev,
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm) do { } while (0)

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
@@ -90,4 +87,7 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)

	switch_mm(prev,next,current);
}

#include <asm-generic/mmu_context.h>

#endif
@@ -14,7 +14,9 @@
/*
 * Most if the context management is out of line
 */
#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;
@@ -235,27 +237,26 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
#define switch_mm_irqs_off switch_mm_irqs_off


#define deactivate_mm(tsk,mm) do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
#ifdef CONFIG_PPC_BOOK3E_64
#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}
#endif

extern void arch_exit_mmap(struct mm_struct *mm);

@@ -298,5 +299,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
	return 0;
}

#include <asm-generic/mmu_context.h>

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
@@ -13,34 +13,16 @@
#include <linux/mm.h>
#include <linux/sched.h>

static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *task)
{
}

/* Initialize context-related info for a new mm_struct */
static inline int init_new_context(struct task_struct *task,
	struct mm_struct *mm)
{
	return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task);

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
	struct mm_struct *next)
{
	switch_mm(prev, next, NULL);
}

static inline void deactivate_mm(struct task_struct *task,
	struct mm_struct *mm)
{
}
#include <asm-generic/mmu_context.h>

#endif /* _ASM_RISCV_MMU_CONTEXT_H */
@@ -15,6 +15,7 @@
#include <asm/ctl_reg.h>
#include <asm-generic/mm_hooks.h>

#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
@@ -69,8 +70,6 @@ static inline int init_new_context(struct task_struct *tsk,
	return 0;
}

#define destroy_context(mm) do { } while (0)

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk)
{
@@ -104,9 +103,7 @@ static inline void finish_arch_post_lock_switch(void)
		__ctl_load(S390_lowcore.user_asce, 7, 7);
}

#define enter_lazy_tlb(mm,tsk) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
	struct mm_struct *next)
{
@@ -115,4 +112,6 @@ static inline void activate_mm(struct mm_struct *prev,
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

#include <asm-generic/mmu_context.h>

#endif /* __S390_MMU_CONTEXT_H */
@@ -84,6 +84,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 * Initialize the context related info for a new mm_struct
 * instance.
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
@@ -120,9 +121,7 @@ static inline void switch_mm(struct mm_struct *prev,
		activate_context(next, cpu);
}

#define activate_mm(prev, next) switch_mm((prev),(next),NULL)
#define deactivate_mm(tsk,mm) do { } while (0)
#define enter_lazy_tlb(mm,tsk) do { } while (0)
#include <asm-generic/mmu_context.h>

#else

@@ -133,7 +132,7 @@ static inline void switch_mm(struct mm_struct *prev,
#define set_TTB(pgd) do { } while (0)
#define get_TTB() (0)

#include <asm-generic/mmu_context.h>
#include <asm-generic/nommu_context.h>

#endif /* CONFIG_MMU */
@@ -2,15 +2,6 @@
#ifndef __ASM_SH_MMU_CONTEXT_32_H
#define __ASM_SH_MMU_CONTEXT_32_H

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Do nothing */
}

#ifdef CONFIG_CPU_HAS_PTEAEX
static inline void set_asid(unsigned long asid)
{
@@ -6,13 +6,10 @@

#include <asm-generic/mm_hooks.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Initialize a new mmu context. This is invoked when a new
 * address space instance (unique or shared) is instantiated.
 */
#define init_new_context init_new_context
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);

/* Destroy a dead context. This occurs when mmput drops the
@@ -20,17 +17,18 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 * all the page tables have been flushed. Our job is to destroy
 * any remaining processor-specific state.
 */
#define destroy_context destroy_context
void destroy_context(struct mm_struct *mm);

/* Switch the current MM context. */
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	struct task_struct *tsk);

#define deactivate_mm(tsk,mm) do { } while (0)

/* Activate a new MM instance for the current task. */
#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)

#include <asm-generic/mmu_context.h>

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC_MMU_CONTEXT_H) */
@@ -16,17 +16,16 @@
#include <asm-generic/mm_hooks.h>
#include <asm/percpu.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);

#define init_new_context init_new_context
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
void destroy_context(struct mm_struct *mm);

void __tsb_context_switch(unsigned long pgd_pa,
@@ -136,7 +135,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)

#define __HAVE_ARCH_START_CONTEXT_SWITCH
@@ -187,6 +185,8 @@ static inline void finish_arch_post_lock_switch(void)
	}
}

#include <asm-generic/mmu_context.h>

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */
@@ -37,10 +37,9 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 * end asm-generic/mm_hooks.h functions
 */

#define deactivate_mm(tsk,mm) do { } while (0)

extern void force_flush_all(void);

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
@@ -66,13 +65,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	}
}

static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *tsk)
{
}

#define init_new_context init_new_context
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif
@@ -91,12 +91,14 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm. Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
@@ -116,6 +118,8 @@ static inline int init_new_context(struct task_struct *tsk,
	init_new_context_ldt(mm);
	return 0;
}

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
@@ -214,4 +218,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,

unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */
@@ -111,6 +111,7 @@ static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
 * to -1 says the process has never run on any core.
 */

#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
@@ -136,24 +137,18 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		activate_context(next, cpu);
}

#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm) do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */

}
#include <asm-generic/mmu_context.h>

#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */
@@ -7,28 +7,4 @@ static inline void init_kio(void)
{
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
{
	return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
}

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk)
{
}

static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
{
}
#include <asm-generic/nommu_context.h>
@@ -3,44 +3,74 @@
#define __ASM_GENERIC_MMU_CONTEXT_H

/*
 * Generic hooks for NOMMU architectures, which do not need to do
 * anything special here.
 * Generic hooks to implement no-op functionality.
 */

#include <asm-generic/mm_hooks.h>

struct task_struct;
struct mm_struct;

/*
 * enter_lazy_tlb - Called when "tsk" is about to enter lazy TLB mode.
 *
 * @mm: the currently active mm context which is becoming lazy
 * @tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *tsk)
{
}
#endif

/**
 * init_new_context - Initialize context of a new mm_struct.
 * @tsk: task struct for the mm
 * @mm: the new mm struct
 * @return: 0 on success, -errno on failure
 */
#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
	struct mm_struct *mm)
{
	return 0;
}
#endif

/**
 * destroy_context - Undo init_new_context when the mm is going away
 * @mm: old mm struct
 */
#ifndef destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
}
#endif

static inline void deactivate_mm(struct task_struct *task,
	struct mm_struct *mm)
{
}

static inline void switch_mm(struct mm_struct *prev,
	struct mm_struct *next,
	struct task_struct *tsk)
{
}

/**
 * activate_mm - called after exec switches the current task to a new mm, to switch to it
 * @prev_mm: previous mm of this task
 * @next_mm: new mm
 */
#ifndef activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
	struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, current);
}
#endif

/**
 * dectivate_mm - called when an mm is released after exit or exec switches away from it
 * @tsk: the task
 * @mm: the old mm
 */
#ifndef deactivate_mm
static inline void deactivate_mm(struct task_struct *tsk,
	struct mm_struct *mm)
{
}
#endif

#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
include/asm-generic/nommu_context.h (new file, 19 lines)
@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_NOMMU_H
#define __ASM_GENERIC_NOMMU_H

/*
 * Generic hooks for NOMMU architectures, which do not need to do
 * anything special here.
 */
#include <asm-generic/mm_hooks.h>

static inline void switch_mm(struct mm_struct *prev,
	struct mm_struct *next,
	struct task_struct *tsk)
{
}

#include <asm-generic/mmu_context.h>

#endif /* __ASM_GENERIC_NOMMU_H */
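For the two new no-MMU arch headers above (c6x and h8300) the whole chain collapses to empty hooks: asm-generic/nommu_context.h supplies a no-op switch_mm and then pulls in asm-generic/mmu_context.h, where every #ifndef guard is taken because the arch defined nothing itself. Roughly what the compiler ends up seeing on such an arch (a simplified sketch, not literal preprocessor output):

/* Effective no-MMU hooks after preprocessing (simplified sketch). */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk) { }
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { }
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context(struct mm_struct *mm) { }
static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, current);	/* generic fallback */
}
static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm) { }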