f56141e3e2
If an attacker can cause a controlled kernel stack overflow, overwriting the restart block is a very juicy exploit target. This is because the restart_block is held in the same memory allocation as the kernel stack.

Moving the restart block to struct task_struct prevents this exploit by making the restart_block harder to locate.

Note that there are other fields in thread_info that are also easy targets, at least on some architectures.

It's also a decent simplification, since the restart code is more or less identical on all architectures.

[james.hogan@imgtec.com: metag: align thread_info::supervisor_stack]
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: David Miller <davem@davemloft.net>
Acked-by: Richard Weinberger <richard@nod.at>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Tested-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Chen Liqin <liqin.linux@gmail.com>
Cc: Lennox Wu <lennox.wu@gmail.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Chris Zankel <chris@zankel.net>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
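To make the layout change concrete, here is a hedged, heavily trimmed sketch of what the commit message describes: struct restart_block leaves the per-architecture struct thread_info, whose allocation it shares with the kernel stack, and becomes a member of struct task_struct. The "_before"/"_after" struct names are invented for this illustration and the field lists are not the real kernel definitions.

/*
 * Illustrative sketch only -- both layouts are heavily trimmed and the
 * "_before"/"_after" names are invented; see the patch itself for the
 * real definitions.
 */
struct task_struct;

struct restart_block {
	long (*fn)(struct restart_block *);	/* syscall restart handler */
	/* ... per-syscall restart state ... */
};

/* Before: restart_block lives in thread_info, which sits in the same
 * allocation as the kernel stack, so a controlled stack overflow can
 * reach and overwrite it. */
struct thread_info_before {
	struct task_struct	*task;
	unsigned long		flags;
	struct restart_block	restart_block;
	/* ... architecture-specific fields ... */
};

/* After: restart_block is carried in task_struct instead, a separate
 * allocation that a stack overflow cannot trivially locate. */
struct task_struct_after {
	/* ... scheduling, mm and signal state ... */
	struct restart_block	restart_block;
};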
91 lines
3.0 KiB
C
#ifndef _ASM_PARISC_THREAD_INFO_H
#define _ASM_PARISC_THREAD_INFO_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/special_insns.h>

struct thread_info {
	struct task_struct *task;	/* main task structure */
	struct exec_domain *exec_domain;/* execution domain */
	unsigned long flags;		/* thread_info flags (see TIF_*) */
	mm_segment_t addr_limit;	/* user-level address space limit */
	__u32 cpu;			/* current CPU */
	int preempt_count;		/* 0=preemptable, <0=BUG; will also serve as bh-counter */
};

#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.addr_limit	= KERNEL_DS,		\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/* how to get the thread information struct from C */
#define current_thread_info()	((struct thread_info *)mfctl(30))

#endif /* !__ASSEMBLY */

/* thread information allocation */

#define THREAD_SIZE_ORDER	2 /* PA-RISC requires at least 16k stack */
/* Be sure to hunt all references to this down when you change the size of
 * the kernel stack */
#define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_SHIFT		(PAGE_SHIFT + THREAD_SIZE_ORDER)

/*
 * thread information flags
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT		4	/* 32 bit binary */
#define TIF_MEMDIE		5	/* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK	6	/* restore saved signal mask */
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
#define TIF_SINGLESTEP		9	/* single stepping? */
#define TIF_BLOCKSTEP		10	/* branch stepping? */
#define TIF_SECCOMP		11	/* secure computing */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_32BIT		(1 << TIF_32BIT)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)

#define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
				 _TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
				 _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
				 _TIF_SECCOMP)

#ifdef CONFIG_64BIT
# ifdef CONFIG_COMPAT
# define is_32bit_task()	(test_thread_flag(TIF_32BIT))
# else
# define is_32bit_task()	(0)
# endif
#else
# define is_32bit_task()	(1)
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_PARISC_THREAD_INFO_H */
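As a usage note (not part of the header above), here is a hedged sketch of how these definitions typically come together on the exit-to-user path: current_thread_info() locates the thread_info through PA-RISC control register 30 (mfctl(30)), and the pending-work bits are tested against _TIF_USER_WORK_MASK. The function name exit_work_pending() is invented for this illustration; the real checks live in the parisc entry and signal code, not in this file.

/*
 * Hypothetical sketch built on the definitions above; exit_work_pending()
 * is an invented name, not a kernel function.
 */
#include <asm/thread_info.h>

static inline int exit_work_pending(void)
{
	struct thread_info *ti = current_thread_info();	/* from %cr30 via mfctl(30) */

	/* pending signal, resched request, or notify-resume callback? */
	return (ti->flags & _TIF_USER_WORK_MASK) != 0;
}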