kernel-hardening updates for v5.19-rc1
- usercopy hardening expanded to check other allocation types (Matthew Wilcox, Yuanzheng Song)
- arm64 stackleak behavioral improvements (Mark Rutland)
- arm64 CFI code gen improvement (Sami Tolvanen)
- LoadPin LSM block dev API adjustment (Christoph Hellwig)
- Clang randstruct support (Bill Wendling, Kees Cook)

-----BEGIN PGP SIGNATURE-----
iQJKBAABCgA0FiEEpcP2jyKd1g9yPm4TiXL039xtwCYFAmKL1kMWHGtlZXNjb29r
QGNocm9taXVtLm9yZwAKCRCJcvTf3G3AJlz6D/9lYEwDQYwKVK6fsXdgcs/eUkqc
P06KGm7jDiYiua34LMpgu35wkRcxVDzB92kzQmt7yaVqhlIGjO9wnP+uZrq8q/LS
X9FSb457fREg0XLPX5XC60abHYyikvgJMf06dSLaBcRq1Wzqwp5JZPpLZJUAM2ab
rM1Vq0brfF1+lPAPECx1sYYNksP9XTw0dtzUu8D9tlTQDFAhKYhV6Io5yRFkA4JH
ELSHjJHlNgLYeZE5IfWHRQBb+yofjnt61IwoVkqa5lSfoyvKpBPF5G+3gOgtdkyv
A8So2aG/bMNUUY80Th5ojiZ6V7z5SYjUmHRil6I/swAdkc825n2wM+AQqsxv6U4I
VvGz3cxaKklERw5N+EJw4amivcgm1jEppZ7qCx9ysLwVg/LI050qhv/T10TYPmOX
0sQEpZvbKuqGb6nzWo6DME8OpZ27yIa/oRzBHdkIkfkEefYlKWS+dfvWb/73cltj
jx066Znk1hHZWGT48EsRmxdGAHn4kfIMcMgIs1ki1OO2II6LoXyaFJ0wSAYItxpz
5gCmDMjkGFRrtXXPEhi6kfKKpOuQux+BmpbVfEzox7Gnrf45sp92cYLncmpAsFB3
91nPa4/utqb/9ijFCIinazLdcUBPO8I1C8FOHDWSFCnNt4d3j2ozpLbrKWyQsm7+
RCGdcy+NU/FH1FwZlg==
=nxsC
-----END PGP SIGNATURE-----

Merge tag 'kernel-hardening-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull kernel hardening updates from Kees Cook:

 - usercopy hardening expanded to check other allocation types (Matthew
   Wilcox, Yuanzheng Song)

 - arm64 stackleak behavioral improvements (Mark Rutland)

 - arm64 CFI code gen improvement (Sami Tolvanen)

 - LoadPin LSM block dev API adjustment (Christoph Hellwig)

 - Clang randstruct support (Bill Wendling, Kees Cook)

* tag 'kernel-hardening-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (34 commits)
  loadpin: stop using bdevname
  mm: usercopy: move the virt_addr_valid() below the is_vmalloc_addr()
  gcc-plugins: randstruct: Remove cast exception handling
  af_unix: Silence randstruct GCC plugin warning
  niu: Silence randstruct warnings
  big_keys: Use struct for internal payload
  gcc-plugins: Change all version strings match kernel
  randomize_kstack: Improve docs on requirements/rationale
  lkdtm/stackleak: fix CONFIG_GCC_PLUGIN_STACKLEAK=n
  arm64: entry: use stackleak_erase_on_task_stack()
  stackleak: add on/off stack variants
  lkdtm/stackleak: check stack boundaries
  lkdtm/stackleak: prevent unexpected stack usage
  lkdtm/stackleak: rework boundary management
  lkdtm/stackleak: avoid spurious failure
  stackleak: rework poison scanning
  stackleak: rework stack high bound handling
  stackleak: clarify variable names
  stackleak: rework stack low bound handling
  stackleak: remove redundant check
  ...
This commit is contained in:
commit 0bf13a8436
@@ -211,6 +211,7 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randstruct.seed
 randomize_layout_hash.h
 randomize_layout_seed.h
 recordmcount
@@ -99,10 +99,10 @@ unreproducible parts can be treated as sources:
 Structure randomisation
 -----------------------
 
-If you enable ``CONFIG_GCC_PLUGIN_RANDSTRUCT``, you will need to
-pre-generate the random seed in
-``scripts/gcc-plugins/randomize_layout_seed.h`` so the same value
-is used in rebuilds.
+If you enable ``CONFIG_RANDSTRUCT``, you will need to pre-generate
+the random seed in ``scripts/basic/randstruct.seed`` so the same
+value is used by each build. See ``scripts/gen-randstruct-seed.sh``
+for details.
 
 Debug info conflicts
 --------------------
Makefile (1 changed line)
@@ -1011,6 +1011,7 @@ include-$(CONFIG_KASAN) += scripts/Makefile.kasan
 include-$(CONFIG_KCSAN) += scripts/Makefile.kcsan
 include-$(CONFIG_UBSAN) += scripts/Makefile.ubsan
 include-$(CONFIG_KCOV) += scripts/Makefile.kcov
+include-$(CONFIG_RANDSTRUCT) += scripts/Makefile.randstruct
 include-$(CONFIG_GCC_PLUGINS) += scripts/Makefile.gcc-plugins
 
 include $(addprefix $(srctree)/, $(include-y))
@@ -732,10 +732,7 @@ config ARCH_SUPPORTS_CFI_CLANG
 config CFI_CLANG
 	bool "Use Clang's Control Flow Integrity (CFI)"
 	depends on LTO_CLANG && ARCH_SUPPORTS_CFI_CLANG
-	# Clang >= 12:
-	# - https://bugs.llvm.org/show_bug.cgi?id=46258
-	# - https://bugs.llvm.org/show_bug.cgi?id=47479
-	depends on CLANG_VERSION >= 120000
+	depends on CLANG_VERSION >= 140000
 	select KALLSYMS
 	help
 	  This option enables Clang’s forward-edge Control Flow Integrity
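For readers unfamiliar with the feature this config gates: forward-edge CFI verifies, at every indirect call, that the target's function type matches the call site's function-pointer type, trapping calls through corrupted pointers. A minimal userspace sketch of the property (a hypothetical demo using Clang's standalone CFI flags, not this kernel option):

/*
 * Illustrative only. Build with:
 *   clang -flto -fvisibility=hidden -fsanitize=cfi-icall cfi_demo.c
 */
#include <stdio.h>

static int add_one(int x)
{
	return x + 1;
}

static void greet(void)
{
	puts("hello");
}

int main(void)
{
	int (*fp)(int) = add_one;

	/* Well-typed indirect call: permitted by CFI. */
	printf("%d\n", fp(41));

	/*
	 * Re-pointing fp at a function with a different prototype would
	 * make the next indirect call trap under CFI, because the target
	 * is not in the set of valid "int (*)(int)" destinations:
	 *
	 *   fp = (int (*)(int))greet;   // CFI violation at call time
	 *   fp(0);
	 */
	(void)greet;
	return 0;
}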
@@ -28,7 +28,7 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 CFLAGS_REMOVE_vdso.o = -pg
 
 # Force -O2 to avoid libgcc dependencies
-CFLAGS_REMOVE_vgettimeofday.o = -pg -Os $(GCC_PLUGINS_CFLAGS)
+CFLAGS_REMOVE_vgettimeofday.o = -pg -Os $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS)
 ifeq ($(c-gettimeofday-y),)
 CFLAGS_vgettimeofday.o = -O2
 else
@@ -23,20 +23,4 @@
 #define __builtin_return_address(val)					\
 	(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
 
-#ifdef CONFIG_CFI_CLANG
-/*
- * With CONFIG_CFI_CLANG, the compiler replaces function address
- * references with the address of the function's CFI jump table
- * entry. The function_nocfi macro always returns the address of the
- * actual function instead.
- */
-#define function_nocfi(x) ({						\
-	void *addr;							\
-	asm("adrp %0, " __stringify(x) "\n\t"				\
-	    "add  %0, %0, :lo12:" __stringify(x)			\
-	    : "=r" (addr));						\
-	addr;								\
-})
-#endif
-
 #endif /* __ASM_COMPILER_H */
@@ -405,12 +405,10 @@ long get_tagged_addr_ctrl(struct task_struct *task);
  * of header definitions for the use of task_stack_page.
  */
 
-#define current_top_of_stack()						\
-({									\
-	struct stack_info _info;					\
-	BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \
-	_info.high;							\
-})
+/*
+ * The top of the current task's task stack
+ */
+#define current_top_of_stack()	((unsigned long)current->stack + THREAD_SIZE)
 #define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1, NULL))
 
 #endif /* __ASSEMBLY__ */
@@ -596,7 +596,7 @@ SYM_CODE_START_LOCAL(ret_to_user)
 	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-	bl	stackleak_erase
+	bl	stackleak_erase_on_task_stack
 #endif
 	kernel_exit 0
 SYM_CODE_END(ret_to_user)
@@ -32,7 +32,8 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
 # -Wmissing-prototypes and -Wmissing-declarations are removed from
 # the CFLAGS of vgettimeofday.c to make possible to build the
 # kernel with CONFIG_WERROR enabled.
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \
+				$(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \
 				$(CC_FLAGS_LTO) -Wmissing-prototypes -Wmissing-declarations
 KASAN_SANITIZE := n
 KCSAN_SANITIZE := n
@@ -468,7 +468,7 @@ config CC_HAVE_STACKPROTECTOR_TLS
 
 config STACKPROTECTOR_PER_TASK
 	def_bool y
-	depends on !GCC_PLUGIN_RANDSTRUCT
+	depends on !RANDSTRUCT
 	depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
 
 config PHYS_RAM_BASE_FIXED
@@ -58,7 +58,7 @@ CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64
 
 SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 -fcall-used-g5 -fcall-used-g7
 
-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -88,6 +88,7 @@ $(obj)/vdso32.so.dbg: asflags-$(CONFIG_SPARC64) += -m32
 KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(RANDSTRUCT_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic
@@ -91,7 +91,7 @@ ifneq ($(RETPOLINE_VDSO_CFLAGS),)
 endif
 endif
 
-$(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -148,6 +148,7 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(RANDSTRUCT_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS_32))
@@ -26,6 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/fixmap.h>
+#include <asm/pgtable_areas.h>
 
 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;
@@ -540,7 +540,7 @@ static inline bool pti_kernel_image_global_ok(void)
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
-	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
+	if (IS_ENABLED(CONFIG_RANDSTRUCT))
		return false;
 
	return true;
@@ -11,72 +11,125 @@
 #include "lkdtm.h"
 #include <linux/stackleak.h>
 
-void lkdtm_STACKLEAK_ERASING(void)
+#if defined(CONFIG_GCC_PLUGIN_STACKLEAK)
+/*
+ * Check that stackleak tracks the lowest stack pointer and erases the stack
+ * below this as expected.
+ *
+ * To prevent the lowest stack pointer changing during the test, IRQs are
+ * masked and instrumentation of this function is disabled. We assume that the
+ * compiler will create a fixed-size stack frame for this function.
+ *
+ * Any non-inlined function may make further use of the stack, altering the
+ * lowest stack pointer and/or clobbering poison values. To avoid spurious
+ * failures we must avoid printing until the end of the test or have already
+ * encountered a failure condition.
+ */
+static void noinstr check_stackleak_irqoff(void)
 {
-	unsigned long *sp, left, found, i;
-	const unsigned long check_depth =
-			STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+	const unsigned long task_stack_base = (unsigned long)task_stack_page(current);
+	const unsigned long task_stack_low = stackleak_task_low_bound(current);
+	const unsigned long task_stack_high = stackleak_task_high_bound(current);
+	const unsigned long current_sp = current_stack_pointer;
+	const unsigned long lowest_sp = current->lowest_stack;
+	unsigned long untracked_high;
+	unsigned long poison_high, poison_low;
 	bool test_failed = false;
 
 	/*
-	 * For the details about the alignment of the poison values, see
-	 * the comment in stackleak_track_stack().
+	 * Check that the current and lowest recorded stack pointer values fall
+	 * within the expected task stack boundaries. These tests should never
+	 * fail unless the boundaries are incorrect or we're clobbering the
+	 * STACK_END_MAGIC, and in either case something is seriously wrong.
 	 */
-	sp = PTR_ALIGN(&i, sizeof(unsigned long));
-
-	left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long);
-	sp--;
+	if (current_sp < task_stack_low || current_sp >= task_stack_high) {
+		pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
+		       current_sp, task_stack_low, task_stack_high - 1);
+		test_failed = true;
+		goto out;
+	}
+	if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) {
+		pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
+		       lowest_sp, task_stack_low, task_stack_high - 1);
+		test_failed = true;
+		goto out;
+	}
 
 	/*
-	 * One 'long int' at the bottom of the thread stack is reserved
-	 * and not poisoned.
+	 * Depending on what has run prior to this test, the lowest recorded
+	 * stack pointer could be above or below the current stack pointer.
+	 * Start from the lowest of the two.
+	 *
+	 * Poison values are naturally-aligned unsigned longs. As the current
+	 * stack pointer might not be sufficiently aligned, we must align
+	 * downwards to find the lowest known stack pointer value. This is the
+	 * high boundary for a portion of the stack which may have been used
+	 * without being tracked, and has to be scanned for poison.
 	 */
-	if (left > 1) {
-		left--;
-	} else {
-		pr_err("FAIL: not enough stack space for the test\n");
-		test_failed = true;
-		goto end;
-	}
-
-	pr_info("checking unused part of the thread stack (%lu bytes)...\n",
-					left * sizeof(unsigned long));
+	untracked_high = min(current_sp, lowest_sp);
+	untracked_high = ALIGN_DOWN(untracked_high, sizeof(unsigned long));
 
 	/*
-	 * Search for 'check_depth' poison values in a row (just like
-	 * stackleak_erase() does).
+	 * Find the top of the poison in the same way as the erasing code.
 	 */
-	for (i = 0, found = 0; i < left && found <= check_depth; i++) {
-		if (*(sp - i) == STACKLEAK_POISON)
-			found++;
-		else
-			found = 0;
-	}
-
-	if (found <= check_depth) {
-		pr_err("FAIL: the erased part is not found (checked %lu bytes)\n",
-		       i * sizeof(unsigned long));
-		test_failed = true;
-		goto end;
-	}
+	poison_high = stackleak_find_top_of_poison(task_stack_low, untracked_high);
 
-	pr_info("the erased part begins after %lu not poisoned bytes\n",
-		(i - found) * sizeof(unsigned long));
-
-	/* The rest of thread stack should be erased */
-	for (; i < left; i++) {
-		if (*(sp - i) != STACKLEAK_POISON) {
-			pr_err("FAIL: bad value number %lu in the erased part: 0x%lx\n",
-			       i, *(sp - i));
-			test_failed = true;
-		}
-	}
+	/*
+	 * Check whether the poisoned portion of the stack (if any) consists
+	 * entirely of poison. This verifies the entries that
+	 * stackleak_find_top_of_poison() should have checked.
+	 */
+	poison_low = poison_high;
+	while (poison_low > task_stack_low) {
+		poison_low -= sizeof(unsigned long);
+
+		if (*(unsigned long *)poison_low == STACKLEAK_POISON)
+			continue;
+
+		pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n",
+		       poison_high - poison_low, *(unsigned long *)poison_low);
+		test_failed = true;
+	}
+
+	pr_info("stackleak stack usage:\n"
+		"  high offset: %lu bytes\n"
+		"  current:     %lu bytes\n"
+		"  lowest:      %lu bytes\n"
+		"  tracked:     %lu bytes\n"
+		"  untracked:   %lu bytes\n"
+		"  poisoned:    %lu bytes\n"
+		"  low offset:  %lu bytes\n",
+		task_stack_base + THREAD_SIZE - task_stack_high,
+		task_stack_high - current_sp,
+		task_stack_high - lowest_sp,
+		task_stack_high - untracked_high,
+		untracked_high - poison_high,
+		poison_high - task_stack_low,
+		task_stack_low - task_stack_base);
 
-end:
+out:
 	if (test_failed) {
 		pr_err("FAIL: the thread stack is NOT properly erased!\n");
-		pr_expected_config(CONFIG_GCC_PLUGIN_STACKLEAK);
 	} else {
 		pr_info("OK: the rest of the thread stack is properly erased\n");
 	}
 }
+
+void lkdtm_STACKLEAK_ERASING(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	check_stackleak_irqoff();
+	local_irq_restore(flags);
+}
+#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
+void lkdtm_STACKLEAK_ERASING(void)
+{
+	if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
+		pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
+	} else {
+		pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_STACKLEAK=n)\n");
+	}
+}
+#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
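A hedged note on exercising this test: lkdtm crash types are normally triggered by writing the test name to lkdtm's debugfs interface, with the verdict landing in the kernel log. Assuming CONFIG_LKDTM=y and debugfs mounted at its usual location, a small driver program might look like this:

/* Trigger the STACKLEAK_ERASING check via lkdtm's debugfs interface.
 * Results appear in the kernel log (dmesg). Run as root; the path below
 * is the conventional lkdtm DIRECT trigger, adjust if your setup differs.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/provoke-crash/DIRECT";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("STACKLEAK_ERASING", f);
	fclose(f);
	puts("check dmesg for OK/FAIL/XFAIL output");
	return 0;
}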
@@ -35,6 +35,25 @@
 
 #include "niu.h"
 
+/* This driver wants to store a link to a "next page" within the
+ * page struct itself by overloading the content of the "mapping"
+ * member. This is not expected by the page API, but does currently
+ * work. However, the randstruct plugin gets very bothered by this
+ * case because "mapping" (struct address_space) is randomized, so
+ * casts to/from it trigger warnings. Hide this by way of a union,
+ * to create a typed alias of "mapping", since that's how it is
+ * actually being used here.
+ */
+union niu_page {
+	struct page page;
+	struct {
+		unsigned long __flags;	/* unused alias of "flags" */
+		struct list_head __lru;	/* unused alias of "lru" */
+		struct page *next;	/* alias of "mapping" */
+	};
+};
+#define niu_next_page(p)	container_of(p, union niu_page, page)->next
+
 #define DRV_MODULE_NAME		"niu"
 #define DRV_MODULE_VERSION	"1.1"
 #define DRV_MODULE_RELDATE	"Apr 22, 2010"
@@ -3283,7 +3302,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
 
 	addr &= PAGE_MASK;
 	pp = &rp->rxhash[h];
-	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
+	for (; (p = *pp) != NULL; pp = &niu_next_page(p)) {
 		if (p->index == addr) {
 			*link = pp;
 			goto found;
|
|||||||
unsigned int h = niu_hash_rxaddr(rp, base);
|
unsigned int h = niu_hash_rxaddr(rp, base);
|
||||||
|
|
||||||
page->index = base;
|
page->index = base;
|
||||||
page->mapping = (struct address_space *) rp->rxhash[h];
|
niu_next_page(page) = rp->rxhash[h];
|
||||||
rp->rxhash[h] = page;
|
rp->rxhash[h] = page;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3382,11 +3401,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
 		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
-			*link = (struct page *) page->mapping;
+			*link = niu_next_page(page);
 			np->ops->unmap_page(np->device, page->index,
 					    PAGE_SIZE, DMA_FROM_DEVICE);
 			page->index = 0;
-			page->mapping = NULL;
+			niu_next_page(page) = NULL;
 			__free_page(page);
 			rp->rbr_refill_pending++;
 		}
@@ -3451,11 +3470,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 
 		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
-			*link = (struct page *) page->mapping;
+			*link = niu_next_page(page);
 			np->ops->unmap_page(np->device, page->index,
 					    PAGE_SIZE, DMA_FROM_DEVICE);
 			page->index = 0;
-			page->mapping = NULL;
+			niu_next_page(page) = NULL;
 			rp->rbr_refill_pending++;
 		} else
 			get_page(page);
@@ -3518,13 +3537,13 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
 
 		page = rp->rxhash[i];
 		while (page) {
-			struct page *next = (struct page *) page->mapping;
+			struct page *next = niu_next_page(page);
 			u64 base = page->index;
 
 			np->ops->unmap_page(np->device, base, PAGE_SIZE,
 					    DMA_FROM_DEVICE);
 			page->index = 0;
-			page->mapping = NULL;
+			niu_next_page(page) = NULL;
 
 			__free_page(page);
 
@@ -6440,8 +6459,7 @@ static void niu_reset_buffers(struct niu *np)
 
 			page = rp->rxhash[j];
 			while (page) {
-				struct page *next =
-					(struct page *) page->mapping;
+				struct page *next = niu_next_page(page);
 				u64 base = page->index;
 				base = base >> RBR_DESCR_ADDR_SHIFT;
 				rp->rbr[k++] = cpu_to_le32(base);
@@ -10176,6 +10194,9 @@ static int __init niu_init(void)
 
 	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
 
+	BUILD_BUG_ON(offsetof(struct page, mapping) !=
+		     offsetof(union niu_page, next));
+
 	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
 
 #ifdef CONFIG_SPARC64
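The union-aliasing pattern above generalizes: to reuse a struct member under a different type without casts that randstruct would flag, overlay the struct with an anonymous struct whose padding members mirror the preceding fields, and assert the aliased offsets match at build time. A standalone sketch with made-up types (struct record stands in for struct page; the static_assert plays the role of the BUILD_BUG_ON() above):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for a struct whose "mapping"-like member we want to alias. */
struct record {
	unsigned long flags;
	void *owner;		/* member being overloaded */
};

union record_alias {
	struct record record;
	struct {
		unsigned long __flags;	/* unused alias of "flags" */
		struct record *next;	/* typed alias of "owner" */
	};
};

#define record_next(r) (((union record_alias *)(r))->next)

int main(void)
{
	struct record a = {0}, b = {0};

	/* Equivalent of the driver's BUILD_BUG_ON() offset check. */
	static_assert(offsetof(struct record, owner) ==
		      offsetof(union record_alias, next),
		      "alias layout mismatch");

	record_next(&a) = &b;	/* no cast through an unrelated struct type */
	printf("linked: %d\n", record_next(&a) == &b);
	return 0;
}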
@@ -69,6 +69,16 @@
 #define __nocfi		__attribute__((__no_sanitize__("cfi")))
 #define __cficanonical	__attribute__((__cfi_canonical_jump_table__))
 
+#if defined(CONFIG_CFI_CLANG)
+/*
+ * With CONFIG_CFI_CLANG, the compiler replaces function address
+ * references with the address of the function's CFI jump table
+ * entry. The function_nocfi macro always returns the address of the
+ * actual function instead.
+ */
+#define function_nocfi(x)	__builtin_function_start(x)
+#endif
+
 /*
  * Turn individual warnings and errors on and off locally, depending
  * on version.
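__builtin_function_start() is a Clang 14 builtin that yields the actual start address of a function body, which is why the Kconfig bump to CLANG_VERSION >= 140000 and the removal of the arm64 adrp/add asm version go together. A hedged userspace sketch of the distinction it papers over (without CFI enabled, both addresses are simply the same):

#include <stdio.h>

void target(void) { }

int main(void)
{
	/*
	 * Under Clang CFI, "&target" may point at a CFI jump-table entry
	 * rather than the function body. Code handing a raw entry point
	 * to hardware (e.g. a secondary CPU start address) needs the real
	 * body, which the builtin returns.
	 */
#if defined(__clang__) && __clang_major__ >= 14
	void *body = __builtin_function_start(target);
#else
	void *body = (void *)target;	/* fallback for this demo */
#endif
	printf("taken address: %p, function start: %p\n",
	       (void *)target, body);
	return 0;
}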
@@ -66,14 +66,6 @@
 		__builtin_unreachable();	\
 	} while (0)
 
-#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
-#define __randomize_layout __attribute__((randomize_layout))
-#define __no_randomize_layout __attribute__((no_randomize_layout))
-/* This anon struct can add padding, so only enable it under randstruct. */
-#define randomized_struct_fields_start	struct {
-#define randomized_struct_fields_end	} __randomize_layout;
-#endif
-
 /*
  * GCC 'asm goto' miscompiles certain code sequences:
  *
@@ -242,15 +242,15 @@ struct ftrace_likely_data {
 # define __latent_entropy
 #endif
 
-#ifndef __randomize_layout
+#if defined(RANDSTRUCT) && !defined(__CHECKER__)
+# define __randomize_layout __designated_init __attribute__((randomize_layout))
+# define __no_randomize_layout __attribute__((no_randomize_layout))
+/* This anon struct can add padding, so only enable it under randstruct. */
+# define randomized_struct_fields_start	struct {
+# define randomized_struct_fields_end	} __randomize_layout;
+#else
 # define __randomize_layout __designated_init
-#endif
-
-#ifndef __no_randomize_layout
 # define __no_randomize_layout
-#endif
-
-#ifndef randomized_struct_fields_start
 # define randomized_struct_fields_start
 # define randomized_struct_fields_end
 #endif
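In practice the annotations consolidated here are used as follows: structures made up entirely of function pointers are randomized implicitly, anything else opts in with __randomize_layout, and layout-sensitive structures opt out with __no_randomize_layout. A hedged sketch of the usage pattern (standalone; outside a RANDSTRUCT build the annotations compile away):

/* Standalone sketch: outside the kernel these annotations are no-ops. */
#ifndef __randomize_layout
#define __randomize_layout
#define __no_randomize_layout
#endif

/* Implicitly randomized under RANDSTRUCT: all members are function
 * pointers and the struct is not marked __no_randomize_layout.
 */
struct file_ops_like {
	int  (*open)(void *priv);
	long (*ioctl)(void *priv, unsigned int cmd);
	void (*release)(void *priv);
};

/* Explicitly opt a mixed structure in; initializers must then be
 * designated, which the __designated_init half of the macro enforces.
 */
struct session {
	unsigned long id;
	void *cookie;
	struct file_ops_like *ops;
} __randomize_layout;

/* Layout-sensitive structure (e.g. parsed from hardware or an ABI):
 * opt out so member order stays exactly as written.
 */
struct wire_header {
	unsigned int magic;
	unsigned int len;
} __no_randomize_layout;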
@@ -149,6 +149,11 @@ static inline void totalhigh_pages_add(long count)
 	atomic_long_add(count, &_totalhigh_pages);
 }
 
+static inline bool is_kmap_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
+}
 #else /* CONFIG_HIGHMEM */
 
 static inline struct page *kmap_to_page(void *addr)
@@ -234,6 +239,11 @@ static inline void __kunmap_atomic(void *addr)
 static inline unsigned int nr_free_highpages(void) { return 0; }
 static inline unsigned long totalhigh_pages(void) { return 0UL; }
 
+static inline bool is_kmap_addr(const void *x)
+{
+	return false;
+}
+
 #endif /* CONFIG_HIGHMEM */
 
 /*
@@ -295,7 +295,7 @@ extern void netfs_stats_show(struct seq_file *);
  */
 static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
 {
-	return (struct netfs_i_context *)(inode + 1);
+	return (void *)inode + sizeof(*inode);
 }
 
 /**
|
|||||||
*/
|
*/
|
||||||
static inline struct inode *netfs_inode(struct netfs_i_context *ctx)
|
static inline struct inode *netfs_inode(struct netfs_i_context *ctx)
|
||||||
{
|
{
|
||||||
return ((struct inode *)ctx) - 1;
|
return (void *)ctx - sizeof(struct inode);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
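Both forms compute the same address; the void * arithmetic just sidesteps pointer casts between the two struct types, which randstruct builds flag because struct inode is randomized. A sketch of the equivalence (illustrative stand-in types; void * arithmetic is a GNU extension, used here as the kernel uses it):

#include <stdio.h>

struct inode_like { long a; char b[24]; };	/* stand-in for struct inode */
struct ctx_like { int state; };			/* stand-in for netfs_i_context */

int main(void)
{
	static union {
		char bytes[sizeof(struct inode_like) + sizeof(struct ctx_like)];
		struct inode_like align;	/* guarantee suitable alignment */
	} storage;
	struct inode_like *inode = &storage.align;

	/* Old style: struct pointer arithmetic, then a struct-to-struct cast. */
	struct ctx_like *via_cast = (struct ctx_like *)(inode + 1);

	/* New style: byte arithmetic through void *, no struct cast. */
	struct ctx_like *via_bytes = (void *)inode + sizeof(*inode);

	printf("same address: %d\n", via_cast == via_bytes);
	return 0;
}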
@@ -40,10 +40,14 @@ DECLARE_PER_CPU(u32, kstack_offset);
  */
 #define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)
 
-/*
- * These macros must be used during syscall entry when interrupts and
+/**
+ * add_random_kstack_offset - Increase stack utilization by previously
+ * chosen random offset
+ *
+ * This should be used in the syscall entry path when interrupts and
  * preempt are disabled, and after user registers have been stored to
- * the stack.
+ * the stack. For testing the resulting entropy, please see:
+ * tools/testing/selftests/lkdtm/stack-entropy.sh
  */
 #define add_random_kstack_offset() do {					\
 	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
|
|||||||
} \
|
} \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* choose_random_kstack_offset - Choose the random offset for the next
|
||||||
|
* add_random_kstack_offset()
|
||||||
|
*
|
||||||
|
* This should only be used during syscall exit when interrupts and
|
||||||
|
* preempt are disabled. This position in the syscall flow is done to
|
||||||
|
* frustrate attacks from userspace attempting to learn the next offset:
|
||||||
|
* - Maximize the timing uncertainty visible from userspace: if the
|
||||||
|
* offset is chosen at syscall entry, userspace has much more control
|
||||||
|
* over the timing between choosing offsets. "How long will we be in
|
||||||
|
* kernel mode?" tends to be more difficult to predict than "how long
|
||||||
|
* will we be in user mode?"
|
||||||
|
* - Reduce the lifetime of the new offset sitting in memory during
|
||||||
|
* kernel mode execution. Exposure of "thread-local" memory content
|
||||||
|
* (e.g. current, percpu, etc) tends to be easier than arbitrary
|
||||||
|
* location memory exposure.
|
||||||
|
*/
|
||||||
#define choose_random_kstack_offset(rand) do { \
|
#define choose_random_kstack_offset(rand) do { \
|
||||||
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
|
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
|
||||||
&randomize_kstack_offset)) { \
|
&randomize_kstack_offset)) { \
|
||||||
|
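The mechanics behind this pair of macros: a per-CPU value is masked down to at most 0x3FF (10 bits, so up to 1023 bytes) and consumed via a compiler alloca so each syscall's stack frame starts at a slightly different depth. A minimal hedged userspace analog of that flow (rand() stands in for the kernel's entropy source, memset for the kernel's trick of keeping the allocation alive):

#include <alloca.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)	/* same mask as the kernel */

static uint32_t next_offset;	/* stand-in for the per-CPU kstack_offset */

static void handle_syscall(void)
{
	/* add_random_kstack_offset(): burn a random amount of stack. */
	char *pad = alloca(KSTACK_OFFSET_MAX(next_offset));
	memset(pad, 0, KSTACK_OFFSET_MAX(next_offset));

	int probe;
	printf("frame at %p (offset %u bytes)\n",
	       (void *)&probe, KSTACK_OFFSET_MAX(next_offset));

	/* choose_random_kstack_offset(): pick the *next* offset on exit. */
	next_offset = (uint32_t)rand();
}

int main(void)
{
	srand(12345);
	for (int i = 0; i < 4; i++)
		handle_syscall();
	return 0;
}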
@@ -15,9 +15,62 @@
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 #include <asm/stacktrace.h>
 
+/*
+ * The lowest address on tsk's stack which we can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_low_bound(const struct task_struct *tsk)
+{
+	/*
+	 * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
+	 * which we must not corrupt.
+	 */
+	return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
+}
+
+/*
+ * The address immediately after the highest address on tsk's stack which we
+ * can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_high_bound(const struct task_struct *tsk)
+{
+	/*
+	 * The task's pt_regs lives at the top of the task stack and will be
+	 * overwritten by exception entry, so there's no need to erase them.
+	 */
+	return (unsigned long)task_pt_regs(tsk);
+}
+
+/*
+ * Find the address immediately above the poisoned region of the stack, where
+ * that region falls between 'low' (inclusive) and 'high' (exclusive).
+ */
+static __always_inline unsigned long
+stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
+{
+	const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+	unsigned int poison_count = 0;
+	unsigned long poison_high = high;
+	unsigned long sp = high;
+
+	while (sp > low && poison_count < depth) {
+		sp -= sizeof(unsigned long);
+
+		if (*(unsigned long *)sp == STACKLEAK_POISON) {
+			poison_count++;
+		} else {
+			poison_count = 0;
+			poison_high = sp;
+		}
+	}
+
+	return poison_high;
+}
+
 static inline void stackleak_task_init(struct task_struct *t)
 {
-	t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
+	t->lowest_stack = stackleak_task_low_bound(t);
 # ifdef CONFIG_STACKLEAK_METRICS
 	t->prev_lowest_stack = t->lowest_stack;
 # endif
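The scanning logic is self-contained enough to exercise outside the kernel. A small harness mirroring stackleak_find_top_of_poison() over a fake stack, with addresses replaced by array indices (poison value and 128-byte search depth match the kernel's defaults on a 64-bit build):

#include <stdio.h>

#define STACKLEAK_POISON	-0xBEEF	/* kernel's poison value */
#define SEARCH_WORDS		(128 / sizeof(unsigned long))

/* Scan down from 'high'; remember the last index above which only short
 * runs of poison were seen. A run of SEARCH_WORDS poison words ends the
 * scan, exactly as in the kernel helper above.
 */
static unsigned long find_top_of_poison(const unsigned long *stack,
					unsigned long low, unsigned long high)
{
	unsigned int poison_count = 0;
	unsigned long poison_high = high;
	unsigned long i = high;

	while (i > low && poison_count < SEARCH_WORDS) {
		i--;
		if (stack[i] == (unsigned long)STACKLEAK_POISON) {
			poison_count++;
		} else {
			poison_count = 0;
			poison_high = i;
		}
	}
	return poison_high;
}

int main(void)
{
	unsigned long stack[64];

	/* Low half erased (poison), upper half "used" by recent calls. */
	for (int i = 0; i < 64; i++)
		stack[i] = (i < 32) ? (unsigned long)STACKLEAK_POISON : 0xAA;

	printf("top of poison at word %lu (expect 32)\n",
	       find_top_of_poison(stack, 0, 64));
	return 0;
}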
@@ -32,11 +32,11 @@
 #else
 #define MODULE_VERMAGIC_MODVERSIONS ""
 #endif
-#ifdef RANDSTRUCT_PLUGIN
-#include <generated/randomize_layout_hash.h>
-#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
+#ifdef RANDSTRUCT
+#include <generated/randstruct_hash.h>
+#define MODULE_RANDSTRUCT "RANDSTRUCT_" RANDSTRUCT_HASHED_SEED
 #else
-#define MODULE_RANDSTRUCT_PLUGIN
+#define MODULE_RANDSTRUCT
 #endif
 
 #define VERMAGIC_STRING \
@@ -44,6 +44,6 @@
 	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT			\
 	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS	\
 	MODULE_ARCH_VERMAGIC						\
-	MODULE_RANDSTRUCT_PLUGIN
+	MODULE_RANDSTRUCT
 
 #endif /* _LINUX_VERMAGIC_H */
@@ -48,7 +48,7 @@ unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
 
 int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
 static unsigned long tainted_mask =
-	IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
+	IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
 static int pause_on_oops;
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
@@ -70,59 +70,81 @@ late_initcall(stackleak_sysctls_init);
 #define skip_erasing()	false
 #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
 
-asmlinkage void noinstr stackleak_erase(void)
+static __always_inline void __stackleak_erase(bool on_task_stack)
 {
-	/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
-	unsigned long kstack_ptr = current->lowest_stack;
-	unsigned long boundary = (unsigned long)end_of_stack(current);
-	unsigned int poison_count = 0;
-	const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
-
-	if (skip_erasing())
-		return;
-
-	/* Check that 'lowest_stack' value is sane */
-	if (unlikely(kstack_ptr - boundary >= THREAD_SIZE))
-		kstack_ptr = boundary;
-
-	/* Search for the poison value in the kernel stack */
-	while (kstack_ptr > boundary && poison_count <= depth) {
-		if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON)
-			poison_count++;
-		else
-			poison_count = 0;
-
-		kstack_ptr -= sizeof(unsigned long);
-	}
-
-	/*
-	 * One 'long int' at the bottom of the thread stack is reserved and
-	 * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
-	 */
-	if (kstack_ptr == boundary)
-		kstack_ptr += sizeof(unsigned long);
+	const unsigned long task_stack_low = stackleak_task_low_bound(current);
+	const unsigned long task_stack_high = stackleak_task_high_bound(current);
+	unsigned long erase_low, erase_high;
+
+	erase_low = stackleak_find_top_of_poison(task_stack_low,
+						 current->lowest_stack);
 
 #ifdef CONFIG_STACKLEAK_METRICS
-	current->prev_lowest_stack = kstack_ptr;
+	current->prev_lowest_stack = erase_low;
 #endif
 
 	/*
-	 * Now write the poison value to the kernel stack. Start from
-	 * 'kstack_ptr' and move up till the new 'boundary'. We assume that
-	 * the stack pointer doesn't change when we write poison.
+	 * Write poison to the task's stack between 'erase_low' and
+	 * 'erase_high'.
+	 *
+	 * If we're running on a different stack (e.g. an entry trampoline
+	 * stack) we can erase everything below the pt_regs at the top of the
+	 * task stack.
+	 *
+	 * If we're running on the task stack itself, we must not clobber any
+	 * stack used by this function and its caller. We assume that this
+	 * function has a fixed-size stack frame, and the current stack pointer
+	 * doesn't change while we write poison.
 	 */
-	if (on_thread_stack())
-		boundary = current_stack_pointer;
+	if (on_task_stack)
+		erase_high = current_stack_pointer;
 	else
-		boundary = current_top_of_stack();
+		erase_high = task_stack_high;
 
-	while (kstack_ptr < boundary) {
-		*(unsigned long *)kstack_ptr = STACKLEAK_POISON;
-		kstack_ptr += sizeof(unsigned long);
+	while (erase_low < erase_high) {
+		*(unsigned long *)erase_low = STACKLEAK_POISON;
+		erase_low += sizeof(unsigned long);
 	}
 
 	/* Reset the 'lowest_stack' value for the next syscall */
-	current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
+	current->lowest_stack = task_stack_high;
+}
+
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can be called from the task stack or an entry stack when the task stack is
+ * no longer in use.
+ */
+asmlinkage void noinstr stackleak_erase(void)
+{
+	if (skip_erasing())
+		return;
+
+	__stackleak_erase(on_thread_stack());
+}
+
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can only be called from the task stack.
+ */
+asmlinkage void noinstr stackleak_erase_on_task_stack(void)
+{
+	if (skip_erasing())
+		return;
+
+	__stackleak_erase(true);
+}
+
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can only be called from a stack other than the task stack.
+ */
+asmlinkage void noinstr stackleak_erase_off_task_stack(void)
+{
+	if (skip_erasing())
+		return;
+
+	__stackleak_erase(false);
 }
 
 void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
@@ -139,8 +161,7 @@ void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
 	/* 'lowest_stack' should be aligned on the register width boundary */
 	sp = ALIGN(sp, sizeof(unsigned long));
 	if (sp < current->lowest_stack &&
-	    sp >= (unsigned long)task_stack_page(current) +
-						sizeof(unsigned long)) {
+	    sp >= stackleak_task_low_bound(current)) {
 		current->lowest_stack = sp;
 	}
 }
mm/usercopy.c (103 changed lines)
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/thread_info.h>
+#include <linux/vmalloc.h>
 #include <linux/atomic.h>
 #include <linux/jump_label.h>
 #include <asm/sections.h>
@@ -157,91 +158,47 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 		usercopy_abort("null address", NULL, to_user, ptr, n);
 }
 
-/* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline void check_page_span(const void *ptr, unsigned long n,
-				   struct page *page, bool to_user)
-{
-#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
-	const void *end = ptr + n - 1;
-	struct page *endpage;
-	bool is_reserved, is_cma;
-
-	/*
-	 * Sometimes the kernel data regions are not marked Reserved (see
-	 * check below). And sometimes [_sdata,_edata) does not cover
-	 * rodata and/or bss, so check each range explicitly.
-	 */
-
-	/* Allow reads of kernel rodata region (if not marked as Reserved). */
-	if (ptr >= (const void *)__start_rodata &&
-	    end <= (const void *)__end_rodata) {
-		if (!to_user)
-			usercopy_abort("rodata", NULL, to_user, 0, n);
-		return;
-	}
-
-	/* Allow kernel data region (if not marked as Reserved). */
-	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
-		return;
-
-	/* Allow kernel bss region (if not marked as Reserved). */
-	if (ptr >= (const void *)__bss_start &&
-	    end <= (const void *)__bss_stop)
-		return;
-
-	/* Is the object wholly within one base page? */
-	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
-		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
-		return;
-
-	/* Allow if fully inside the same compound (__GFP_COMP) page. */
-	endpage = virt_to_head_page(end);
-	if (likely(endpage == page))
-		return;
-
-	/*
-	 * Reject if range is entirely either Reserved (i.e. special or
-	 * device memory), or CMA. Otherwise, reject since the object spans
-	 * several independently allocated pages.
-	 */
-	is_reserved = PageReserved(page);
-	is_cma = is_migrate_cma_page(page);
-	if (!is_reserved && !is_cma)
-		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
-
-	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
-		page = virt_to_head_page(ptr);
-		if (is_reserved && !PageReserved(page))
-			usercopy_abort("spans Reserved and non-Reserved pages",
-				       NULL, to_user, 0, n);
-		if (is_cma && !is_migrate_cma_page(page))
-			usercopy_abort("spans CMA and non-CMA pages", NULL,
-				       to_user, 0, n);
-	}
-#endif
-}
-
 static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
 	struct folio *folio;
 
+	if (is_kmap_addr(ptr)) {
+		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
+
+		if ((unsigned long)ptr + n - 1 > page_end)
+			usercopy_abort("kmap", NULL, to_user,
+				       offset_in_page(ptr), n);
+		return;
+	}
+
+	if (is_vmalloc_addr(ptr)) {
+		struct vm_struct *area = find_vm_area(ptr);
+		unsigned long offset;
+
+		if (!area) {
+			usercopy_abort("vmalloc", "no area", to_user, 0, n);
+			return;
+		}
+
+		offset = ptr - area->addr;
+		if (offset + n > get_vm_area_size(area))
+			usercopy_abort("vmalloc", NULL, to_user, offset, n);
+		return;
+	}
+
 	if (!virt_addr_valid(ptr))
 		return;
 
-	/*
-	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
-	 * highmem page or fallback to virt_to_page(). The following
-	 * is effectively a highmem-aware virt_to_slab().
-	 */
-	folio = page_folio(kmap_to_page((void *)ptr));
+	folio = virt_to_folio(ptr);
 
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
-	} else {
-		/* Verify object does not incorrectly span multiple pages. */
-		check_page_span(ptr, n, folio_page(folio, 0), to_user);
+	} else if (folio_test_large(folio)) {
+		unsigned long offset = ptr - folio_address(folio);
+		if (offset + n > folio_size(folio))
+			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	}
 }
 
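The new kmap, vmalloc, and large-folio branches share one idea: derive the start of the backing allocation, compute the copy's offset within it, and abort when offset + n walks off the end. A hedged, allocator-agnostic sketch of that bounds check (made-up struct and function names; the kernel would call usercopy_abort() where this returns -1):

#include <stdio.h>
#include <stdlib.h>

/* Abstract stand-in for folio_size()/get_vm_area_size(): the total
 * usable size of whatever allocation backs 'base'.
 */
struct alloc_info {
	char *base;
	size_t size;
};

/* Mirrors the pattern used for the vmalloc and large-folio cases:
 * reject any copy that spans past the end of the backing object.
 */
static int check_copy_object(const struct alloc_info *a,
			     const char *ptr, size_t n)
{
	size_t offset = (size_t)(ptr - a->base);

	if (offset + n > a->size) {
		fprintf(stderr, "usercopy: offset %zu + %zu > size %zu\n",
			offset, n, a->size);
		return -1;	/* kernel would usercopy_abort() here */
	}
	return 0;
}

int main(void)
{
	struct alloc_info a = { .size = 4096 };

	a.base = malloc(a.size);
	printf("in bounds:  %d\n", check_copy_object(&a, a.base + 100, 200));
	printf("overflows:  %d\n", check_copy_object(&a, a.base + 4000, 200));
	free(a.base);
	return 0;
}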
@@ -1808,11 +1808,9 @@ static int maybe_init_creds(struct scm_cookie *scm,
 static bool unix_skb_scm_eq(struct sk_buff *skb,
 			    struct scm_cookie *scm)
 {
-	const struct unix_skb_parms *u = &UNIXCB(skb);
-
-	return u->pid == scm->pid &&
-	       uid_eq(u->uid, scm->creds.uid) &&
-	       gid_eq(u->gid, scm->creds.gid) &&
+	return UNIXCB(skb).pid == scm->pid &&
+	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
+	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
 	       unix_secdata_eq(scm, skb);
 }
 
@@ -8,8 +8,6 @@ ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 endif
 export DISABLE_LATENT_ENTROPY_PLUGIN
 
-gcc-plugin-$(CONFIG_GCC_PLUGIN_SANCOV)	+= sancov_plugin.so
-
 gcc-plugin-$(CONFIG_GCC_PLUGIN_STRUCTLEAK)	+= structleak_plugin.so
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE)	\
 		+= -fplugin-arg-structleak_plugin-verbose
@@ -24,12 +22,6 @@ export DISABLE_STRUCTLEAK_PLUGIN
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK)	\
 		+= -DSTRUCTLEAK_PLUGIN
 
-gcc-plugin-$(CONFIG_GCC_PLUGIN_RANDSTRUCT)	+= randomize_layout_plugin.so
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT)	\
-		+= -DRANDSTRUCT_PLUGIN
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE)	\
-		+= -fplugin-arg-randomize_layout_plugin-performance-mode
-
 gcc-plugin-$(CONFIG_GCC_PLUGIN_STACKLEAK)	+= stackleak_plugin.so
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK)	\
 		+= -DSTACKLEAK_PLUGIN
@@ -53,13 +45,19 @@ export DISABLE_ARM_SSP_PER_TASK_PLUGIN
 # All the plugin CFLAGS are collected here in case a build target needs to
 # filter them out of the KBUILD_CFLAGS.
 GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
-# The sancov_plugin.so is included via CFLAGS_KCOV, so it is removed here.
-GCC_PLUGINS_CFLAGS := $(filter-out %/sancov_plugin.so, $(GCC_PLUGINS_CFLAGS))
 export GCC_PLUGINS_CFLAGS
 
 # Add the flags to the build!
 KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
 
-# All enabled GCC plugins are collected here for building below.
-GCC_PLUGIN := $(gcc-plugin-y)
+# Some plugins are enabled outside of this Makefile, but they still need to
+# be included in GCC_PLUGIN so they can get built.
+gcc-plugin-external-$(CONFIG_GCC_PLUGIN_SANCOV)			\
+	+= sancov_plugin.so
+gcc-plugin-external-$(CONFIG_GCC_PLUGIN_RANDSTRUCT)		\
+	+= randomize_layout_plugin.so
+
+# All enabled GCC plugins are collected here for building in
+# scripts/gcc-plugins/Makefile.
+GCC_PLUGIN := $(gcc-plugin-y) $(gcc-plugin-external-y)
 export GCC_PLUGIN
scripts/Makefile.randstruct (new file, 17 lines)
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+randstruct-cflags-y += -DRANDSTRUCT
+
+ifdef CONFIG_GCC_PLUGIN_RANDSTRUCT
+randstruct-cflags-y	\
+	+= -fplugin=$(objtree)/scripts/gcc-plugins/randomize_layout_plugin.so
+randstruct-cflags-$(CONFIG_RANDSTRUCT_PERFORMANCE)		\
+	+= -fplugin-arg-randomize_layout_plugin-performance-mode
+else
+randstruct-cflags-y	\
+	+= -frandomize-layout-seed-file=$(objtree)/scripts/basic/randstruct.seed
+endif
+
+export RANDSTRUCT_CFLAGS := $(randstruct-cflags-y)
+
+KBUILD_CFLAGS	+= $(RANDSTRUCT_CFLAGS)
scripts/basic/.gitignore (1 changed line)
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 /fixdep
+/randstruct.seed
@@ -3,3 +3,14 @@
 # fixdep: used to generate dependency information during build process
 
 hostprogs-always-y	+= fixdep
+
+# randstruct: the seed is needed before building the gcc-plugin or
+# before running a Clang kernel build.
+gen-randstruct-seed	:= $(srctree)/scripts/gen-randstruct-seed.sh
+quiet_cmd_create_randstruct_seed = GENSEED $@
+cmd_create_randstruct_seed = \
+	$(CONFIG_SHELL) $(gen-randstruct-seed) \
+		$@ $(objtree)/include/generated/randstruct_hash.h
+$(obj)/randstruct.seed: $(gen-randstruct-seed) FORCE
+	$(call if_changed,create_randstruct_seed)
+always-$(CONFIG_RANDSTRUCT) += randstruct.seed
@@ -46,44 +46,6 @@ config GCC_PLUGIN_LATENT_ENTROPY
 	  * https://grsecurity.net/
 	  * https://pax.grsecurity.net/
 
-config GCC_PLUGIN_RANDSTRUCT
-	bool "Randomize layout of sensitive kernel structures"
-	select MODVERSIONS if MODULES
-	help
-	  If you say Y here, the layouts of structures that are entirely
-	  function pointers (and have not been manually annotated with
-	  __no_randomize_layout), or structures that have been explicitly
-	  marked with __randomize_layout, will be randomized at compile-time.
-	  This can introduce the requirement of an additional information
-	  exposure vulnerability for exploits targeting these structure
-	  types.
-
-	  Enabling this feature will introduce some performance impact,
-	  slightly increase memory usage, and prevent the use of forensic
-	  tools like Volatility against the system (unless the kernel
-	  source tree isn't cleaned after kernel installation).
-
-	  The seed used for compilation is located at
-	  scripts/gcc-plugins/randomize_layout_seed.h. It remains after
-	  a make clean to allow for external modules to be compiled with
-	  the existing seed and will be removed by a make mrproper or
-	  make distclean.
-
-	  This plugin was ported from grsecurity/PaX. More information at:
-	  * https://grsecurity.net/
-	  * https://pax.grsecurity.net/
-
-config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
-	bool "Use cacheline-aware structure randomization"
-	depends on GCC_PLUGIN_RANDSTRUCT
-	depends on !COMPILE_TEST	# do not reduce test coverage
-	help
-	  If you say Y here, the RANDSTRUCT randomization will make a
-	  best effort at restricting randomization to cacheline-sized
-	  groups of elements. It will further not randomize bitfields
-	  in structures. This reduces the performance hit of RANDSTRUCT
-	  at the cost of weakened randomization.
-
 config GCC_PLUGIN_ARM_SSP_PER_TASK
 	bool
 	depends on GCC_PLUGINS && ARM
scripts/gcc-plugins/Makefile:
@@ -1,12 +1,17 @@
 # SPDX-License-Identifier: GPL-2.0

-$(obj)/randomize_layout_plugin.so: $(objtree)/$(obj)/randomize_layout_seed.h
-quiet_cmd_create_randomize_layout_seed = GENSEED $@
+$(obj)/randomize_layout_plugin.so: $(obj)/randomize_layout_seed.h
+quiet_cmd_create_randomize_layout_seed = SEEDHDR $@
 cmd_create_randomize_layout_seed = \
-	$(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h
-$(objtree)/$(obj)/randomize_layout_seed.h: FORCE
+	SEED=$$(cat $(filter-out FORCE,$^) </dev/null); \
+	echo '/*' > $@; \
+	echo ' * This file is automatically generated. Keep it private.' >> $@; \
+	echo ' * Exposing this value will expose the layout of randomized structures.' >> $@; \
+	echo ' */' >> $@; \
+	echo "const char *randstruct_seed = \"$$SEED\";" >> $@
+$(obj)/randomize_layout_seed.h: $(objtree)/scripts/basic/randstruct.seed FORCE
 	$(call if_changed,create_randomize_layout_seed)
-targets += randomize_layout_seed.h randomize_layout_hash.h
+targets += randomize_layout_seed.h

 # Build rules for plugins
 #

@@ -23,10 +28,11 @@ GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)

 plugin_cxxflags	= -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
 		  -include $(srctree)/include/linux/compiler-version.h \
+		  -include $(objtree)/include/generated/utsrelease.h \
 		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
 		  -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
 		  -ggdb -Wno-narrowing -Wno-unused-variable \
 		  -Wno-format-diag

 plugin_ldflags = -shared
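For reference, the rewritten cmd_create_randomize_layout_seed now emits a header of the following shape (a sketch; the seed string is whatever 64 hex digits were read from scripts/basic/randstruct.seed, shown here as a placeholder):

/*
 * This file is automatically generated. Keep it private.
 * Exposing this value will expose the layout of randomized structures.
 */
const char *randstruct_seed = "0123abcd...";	/* placeholder for the real seed */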
scripts/gcc-plugins/gen-random-seed.sh (deleted, 9 lines):
@@ -1,9 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-if [ ! -f "$1" ]; then
-	SEED=`od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n'`
-	echo "const char *randstruct_seed = \"$SEED\";" > "$1"
-	HASH=`echo -n "$SEED" | sha256sum | cut -d" " -f1 | tr -d ' \n'`
-	echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2"
-fi
scripts/gcc-plugins/latent_entropy_plugin.c:
@@ -82,7 +82,7 @@ __visible int plugin_is_GPL_compatible;
 static GTY(()) tree latent_entropy_decl;

 static struct plugin_info latent_entropy_plugin_info = {
-	.version	= "201606141920vanilla",
+	.version	= UTS_RELEASE,
 	.help		= "disable\tturn off latent entropy instrumentation\n",
 };
scripts/gcc-plugins/randomize_layout_plugin.c:
@@ -34,29 +34,11 @@ __visible int plugin_is_GPL_compatible;
 static int performance_mode;

 static struct plugin_info randomize_layout_plugin_info = {
-	.version	= "201402201816vanilla",
+	.version	= UTS_RELEASE,
 	.help		= "disable\t\t\tdo not activate plugin\n"
 			  "performance-mode\tenable cacheline-aware layout randomization\n"
 };

-struct whitelist_entry {
-	const char *pathname;
-	const char *lhs;
-	const char *rhs;
-};
-
-static const struct whitelist_entry whitelist[] = {
-	/* NIU overloads mapping with page struct */
-	{ "drivers/net/ethernet/sun/niu.c", "page", "address_space" },
-	/* unix_skb_parms via UNIXCB() buffer */
-	{ "net/unix/af_unix.c", "unix_skb_parms", "char" },
-	/* big_key payload.data struct splashing */
-	{ "security/keys/big_key.c", "path", "void *" },
-	/* walk struct security_hook_heads as an array of struct hlist_head */
-	{ "security/security.c", "hlist_head", "security_hook_heads" },
-	{ }
-};
-
 /* from old Linux dcache.h */
 static inline unsigned long
 partial_name_hash(unsigned long c, unsigned long prevhash)
@@ -742,60 +724,6 @@ static void handle_local_var_initializers(void)
 	}
 }

-static bool type_name_eq(gimple stmt, const_tree type_tree, const char *wanted_name)
-{
-	const char *type_name;
-
-	if (type_tree == NULL_TREE)
-		return false;
-
-	switch (TREE_CODE(type_tree)) {
-	case RECORD_TYPE:
-		type_name = TYPE_NAME_POINTER(type_tree);
-		break;
-	case INTEGER_TYPE:
-		if (TYPE_PRECISION(type_tree) == CHAR_TYPE_SIZE)
-			type_name = "char";
-		else {
-			INFORM(gimple_location(stmt), "found non-char INTEGER_TYPE cast comparison: %qT\n", type_tree);
-			debug_tree(type_tree);
-			return false;
-		}
-		break;
-	case POINTER_TYPE:
-		if (TREE_CODE(TREE_TYPE(type_tree)) == VOID_TYPE) {
-			type_name = "void *";
-			break;
-		} else {
-			INFORM(gimple_location(stmt), "found non-void POINTER_TYPE cast comparison %qT\n", type_tree);
-			debug_tree(type_tree);
-			return false;
-		}
-	default:
-		INFORM(gimple_location(stmt), "unhandled cast comparison: %qT\n", type_tree);
-		debug_tree(type_tree);
-		return false;
-	}
-
-	return strcmp(type_name, wanted_name) == 0;
-}
-
-static bool whitelisted_cast(gimple stmt, const_tree lhs_tree, const_tree rhs_tree)
-{
-	const struct whitelist_entry *entry;
-	expanded_location xloc = expand_location(gimple_location(stmt));
-
-	for (entry = whitelist; entry->pathname; entry++) {
-		if (!strstr(xloc.file, entry->pathname))
-			continue;
-
-		if (type_name_eq(stmt, lhs_tree, entry->lhs) && type_name_eq(stmt, rhs_tree, entry->rhs))
-			return true;
-	}
-
-	return false;
-}
-
 /*
  * iterate over all statements to find "bad" casts:
  * those where the address of the start of a structure is cast
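The deleted helpers existed only to suppress reports for a handful of known-bad casts, which this series instead fixes at their source (see the niu, af_unix, big_key, and security.c changes). As a hedged illustration of the cast pattern find_bad_casts_execute() reports (struct and function names here are hypothetical, and the fallback define is only so the fragment compiles standalone):

#ifndef __randomize_layout
#define __randomize_layout	/* provided by kernel headers when RANDSTRUCT is on */
#endif

/* All-function-pointer structs like this are randomized automatically. */
struct msg_ops {
	int (*send)(const void *buf, unsigned long len);
	int (*recv)(void *buf, unsigned long len);
} __randomize_layout;

int peek_first_slot(struct msg_ops *ops)
{
	/*
	 * BAD: casting the struct's address to an unrelated pointer type
	 * assumes 'send' is still the first slot; with a randomized layout
	 * that assumption is false, so the plugin flags casts like this.
	 */
	void **slot = (void **)ops;

	return slot[0] != 0;
}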
@@ -872,10 +800,7 @@ static unsigned int find_bad_casts_execute(void)
 #ifndef __DEBUG_PLUGIN
 		if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_lhs_type)))
 #endif
-		{
-			if (!whitelisted_cast(stmt, ptr_lhs_type, ptr_rhs_type))
-				MISMATCH(gimple_location(stmt), "rhs", ptr_lhs_type, ptr_rhs_type);
-		}
+			MISMATCH(gimple_location(stmt), "rhs", ptr_lhs_type, ptr_rhs_type);
 		continue;
 	}

@@ -898,10 +823,7 @@ static unsigned int find_bad_casts_execute(void)
 #ifndef __DEBUG_PLUGIN
 		if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(op0_type)))
 #endif
-		{
-			if (!whitelisted_cast(stmt, ptr_lhs_type, op0_type))
-				MISMATCH(gimple_location(stmt), "op0", ptr_lhs_type, op0_type);
-		}
+			MISMATCH(gimple_location(stmt), "op0", ptr_lhs_type, op0_type);
 	} else {
 		const_tree ssa_name_var = SSA_NAME_VAR(rhs1);
 		/* skip bogus type casts introduced by container_of */

@@ -911,10 +833,7 @@ static unsigned int find_bad_casts_execute(void)
 #ifndef __DEBUG_PLUGIN
 		if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_rhs_type)))
 #endif
-		{
-			if (!whitelisted_cast(stmt, ptr_lhs_type, ptr_rhs_type))
-				MISMATCH(gimple_location(stmt), "ssa", ptr_lhs_type, ptr_rhs_type);
-		}
+			MISMATCH(gimple_location(stmt), "ssa", ptr_lhs_type, ptr_rhs_type);
 	}
 }

 }
scripts/gcc-plugins/sancov_plugin.c:
@@ -26,7 +26,7 @@ __visible int plugin_is_GPL_compatible;
 tree sancov_fndecl;

 static struct plugin_info sancov_plugin_info = {
-	.version	= "20160402",
+	.version	= UTS_RELEASE,
 	.help		= "sancov plugin\n",
 };
scripts/gcc-plugins/stackleak_plugin.c:
@@ -44,7 +44,7 @@ static bool verbose = false;
 static GTY(()) tree track_function_decl;

 static struct plugin_info stackleak_plugin_info = {
-	.version	= "201707101337",
+	.version	= UTS_RELEASE,
 	.help		= "track-min-size=nn\ttrack stack for functions with a stack frame size >= nn bytes\n"
 			  "arch=target_arch\tspecify target build arch\n"
 			  "disable\t\tdo not activate the plugin\n"
scripts/gcc-plugins/structleak_plugin.c:
@@ -37,7 +37,7 @@
 __visible int plugin_is_GPL_compatible;

 static struct plugin_info structleak_plugin_info = {
-	.version	= "20190125vanilla",
+	.version	= UTS_RELEASE,
 	.help		= "disable\tdo not activate plugin\n"
 			  "byref\tinit structs passed by reference\n"
 			  "byref-all\tinit anything passed by reference\n"
scripts/gen-randstruct-seed.sh (new executable file, 7 lines):
@@ -0,0 +1,7 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+SEED=$(od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n')
+echo "$SEED" > "$1"
+HASH=$(echo -n "$SEED" | sha256sum | cut -d" " -f1)
+echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2"
security/Kconfig:
@@ -160,20 +160,9 @@ config HARDENED_USERCOPY
 	  copy_from_user() functions) by rejecting memory ranges that
 	  are larger than the specified heap object, span multiple
 	  separately allocated pages, are not on the process stack,
-	  or are part of the kernel text. This kills entire classes
+	  or are part of the kernel text. This prevents entire classes
 	  of heap overflow exploits and similar kernel memory exposures.

-config HARDENED_USERCOPY_PAGESPAN
-	bool "Refuse to copy allocations that span multiple pages"
-	depends on HARDENED_USERCOPY
-	depends on BROKEN
-	help
-	  When a multi-page allocation is done without __GFP_COMP,
-	  hardened usercopy will reject attempts to copy it. There are,
-	  however, several cases of this in the kernel that have not all
-	  been removed. This config is intended to be used only while
-	  trying to find such users.
-
 config FORTIFY_SOURCE
 	bool "Harden common str/mem functions against buffer overflows"
 	depends on ARCH_HAS_FORTIFY_SOURCE
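To illustrate the class of bug this option stops (a sketch with made-up names and sizes, not code from this series): copying more bytes to userspace than the backing slab object holds is refused at runtime instead of leaking adjacent heap memory.

#include <linux/slab.h>
#include <linux/uaccess.h>

struct record {
	char name[16];
	u32 flags;
};

static long show_record(struct record *r, void __user *ubuf)
{
	/*
	 * With CONFIG_HARDENED_USERCOPY, copy_to_user() validates the
	 * source range against the slab object's size: copying 256 bytes
	 * out of a ~24-byte kmalloc object triggers usercopy_abort()
	 * rather than exposing whatever follows it on the heap.
	 */
	return copy_to_user(ubuf, r, 256) ? -EFAULT : 0;
}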
security/Kconfig.hardening:
@@ -266,4 +266,77 @@ config ZERO_CALL_USED_REGS

 endmenu

+config CC_HAS_RANDSTRUCT
+	def_bool $(cc-option,-frandomize-layout-seed-file=/dev/null)
+
+choice
+	prompt "Randomize layout of sensitive kernel structures"
+	default RANDSTRUCT_FULL if COMPILE_TEST && (GCC_PLUGINS || CC_HAS_RANDSTRUCT)
+	default RANDSTRUCT_NONE
+	help
+	  If you enable this, the layouts of structures that are entirely
+	  function pointers (and have not been manually annotated with
+	  __no_randomize_layout), or structures that have been explicitly
+	  marked with __randomize_layout, will be randomized at compile-time.
+	  This can introduce the requirement of an additional information
+	  exposure vulnerability for exploits targeting these structure
+	  types.
+
+	  Enabling this feature will introduce some performance impact,
+	  slightly increase memory usage, and prevent the use of forensic
+	  tools like Volatility against the system (unless the kernel
+	  source tree isn't cleaned after kernel installation).
+
+	  The seed used for compilation is in scripts/basic/randstruct.seed.
+	  It remains after a "make clean" to allow for external modules to
+	  be compiled with the existing seed and will be removed by a
+	  "make mrproper" or "make distclean". This file should not be made
+	  public, or the structure layout can be determined.
+
+config RANDSTRUCT_NONE
+	bool "Disable structure layout randomization"
+	help
+	  Build normally: no structure layout randomization.
+
+config RANDSTRUCT_FULL
+	bool "Fully randomize structure layout"
+	depends on CC_HAS_RANDSTRUCT || GCC_PLUGINS
+	select MODVERSIONS if MODULES
+	help
+	  Fully randomize the member layout of sensitive
+	  structures as much as possible, which may have both a
+	  memory size and performance impact.
+
+	  One difference between the Clang and GCC plugin
+	  implementations is the handling of bitfields. The GCC
+	  plugin treats them as fully separate variables,
+	  introducing sometimes significant padding. Clang tries
+	  to keep adjacent bitfields together, but with their bit
+	  ordering randomized.
+
+config RANDSTRUCT_PERFORMANCE
+	bool "Limit randomization of structure layout to cache-lines"
+	depends on GCC_PLUGINS
+	select MODVERSIONS if MODULES
+	help
+	  Randomization of sensitive kernel structures will make a
+	  best effort at restricting randomization to cacheline-sized
+	  groups of members. It will further not randomize bitfields
+	  in structures. This reduces the performance hit of RANDSTRUCT
+	  at the cost of weakened randomization.
+endchoice
+
+config RANDSTRUCT
+	def_bool !RANDSTRUCT_NONE
+
+config GCC_PLUGIN_RANDSTRUCT
+	def_bool GCC_PLUGINS && RANDSTRUCT
+	help
+	  Use GCC plugin to randomize structure layout.
+
+	  This plugin was ported from grsecurity/PaX. More
+	  information at:
+	   * https://grsecurity.net/
+	   * https://pax.grsecurity.net/
+
 endmenu
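To make the opt-in/opt-out mechanics in the help text concrete, a minimal sketch (hypothetical structs; the annotations are the kernel's own __randomize_layout/__no_randomize_layout attributes from its compiler headers):

/* Randomized automatically: composed entirely of function pointers. */
struct proto_ops_example {
	int (*connect)(void *sock);
	int (*sendmsg)(void *sock, void *msg);
};

/* Mixed members are left alone unless explicitly opted in. */
struct session_example {
	unsigned long id;
	void *private_data;
} __randomize_layout;

/* Opted out despite being all function pointers (e.g. a fixed ABI). */
struct firmware_abi_example {
	int (*start)(void);
	int (*stop)(void);
} __no_randomize_layout;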
security/keys/big_key.c:
@@ -20,12 +20,13 @@
 /*
  * Layout of key payload words.
  */
-enum {
-	big_key_data,
-	big_key_path,
-	big_key_path_2nd_part,
-	big_key_len,
+struct big_key_payload {
+	u8 *data;
+	struct path path;
+	size_t length;
 };
+#define to_big_key_payload(payload)			\
+	(struct big_key_payload *)((payload).data)

 /*
  * If the data is under this limit, there's no point creating a shm file to
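The conversion relies on struct big_key_payload fitting inside the generic payload words it overlays; a standalone sketch of that overlay pattern with simplified stand-ins (not kernel code; the kernel's BUILD_BUG_ON() checks the sizes match exactly):

#include <stddef.h>

/* Stand-in for union key_payload: four pointer-sized words. */
union key_payload_example {
	void *data[4];
};

/* Stand-in for the typed view big_key now overlays on those words. */
struct big_key_payload_example {
	unsigned char *data;
	struct { void *mnt, *dentry; } path;	/* models struct path */
	size_t length;
};

/* Compile-time guard: the typed view must not outgrow the storage. */
_Static_assert(sizeof(struct big_key_payload_example) <=
	       sizeof(union key_payload_example),
	       "typed payload must fit the generic payload words");

static struct big_key_payload_example *
to_payload(union key_payload_example *payload)
{
	return (struct big_key_payload_example *)payload->data;
}

int main(void)
{
	union key_payload_example raw = { { 0 } };

	to_payload(&raw)->length = 42;	/* typed access, no enum indexing */
	return to_payload(&raw)->length == 42 ? 0 : 1;
}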
@@ -55,7 +56,7 @@ struct key_type key_type_big_key = {
  */
 int big_key_preparse(struct key_preparsed_payload *prep)
 {
-	struct path *path = (struct path *)&prep->payload.data[big_key_path];
+	struct big_key_payload *payload = to_big_key_payload(prep->payload);
 	struct file *file;
 	u8 *buf, *enckey;
 	ssize_t written;

@@ -63,13 +64,15 @@ int big_key_preparse(struct key_preparsed_payload *prep)
 	size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
 	int ret;

+	BUILD_BUG_ON(sizeof(*payload) != sizeof(prep->payload.data));
+
 	if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
 		return -EINVAL;

 	/* Set an arbitrary quota */
 	prep->quotalen = 16;

-	prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;
+	payload->length = datalen;

 	if (datalen > BIG_KEY_FILE_THRESHOLD) {
 		/* Create a shmem file to store the data in. This will permit the data

@@ -117,9 +120,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
 		/* Pin the mount and dentry to the key so that we can open it again
 		 * later
 		 */
-		prep->payload.data[big_key_data] = enckey;
-		*path = file->f_path;
-		path_get(path);
+		payload->data = enckey;
+		payload->path = file->f_path;
+		path_get(&payload->path);
 		fput(file);
 		kvfree_sensitive(buf, enclen);
 	} else {

@@ -129,7 +132,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
 		if (!data)
 			return -ENOMEM;

-		prep->payload.data[big_key_data] = data;
+		payload->data = data;
 		memcpy(data, prep->data, prep->datalen);
 	}
 	return 0;
@@ -148,12 +151,11 @@ error:
  */
 void big_key_free_preparse(struct key_preparsed_payload *prep)
 {
-	if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
-		struct path *path = (struct path *)&prep->payload.data[big_key_path];
+	struct big_key_payload *payload = to_big_key_payload(prep->payload);

-		path_put(path);
-	}
-	kfree_sensitive(prep->payload.data[big_key_data]);
+	if (prep->datalen > BIG_KEY_FILE_THRESHOLD)
+		path_put(&payload->path);
+	kfree_sensitive(payload->data);
 }

 /*
@@ -162,13 +164,12 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
  */
 void big_key_revoke(struct key *key)
 {
-	struct path *path = (struct path *)&key->payload.data[big_key_path];
+	struct big_key_payload *payload = to_big_key_payload(key->payload);

 	/* clear the quota */
 	key_payload_reserve(key, 0);
-	if (key_is_positive(key) &&
-	    (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
-		vfs_truncate(path, 0);
+	if (key_is_positive(key) && payload->length > BIG_KEY_FILE_THRESHOLD)
+		vfs_truncate(&payload->path, 0);
 }

 /*
@@ -176,17 +177,15 @@ void big_key_revoke(struct key *key)
  */
 void big_key_destroy(struct key *key)
 {
-	size_t datalen = (size_t)key->payload.data[big_key_len];
+	struct big_key_payload *payload = to_big_key_payload(key->payload);

-	if (datalen > BIG_KEY_FILE_THRESHOLD) {
-		struct path *path = (struct path *)&key->payload.data[big_key_path];
-
-		path_put(path);
-		path->mnt = NULL;
-		path->dentry = NULL;
+	if (payload->length > BIG_KEY_FILE_THRESHOLD) {
+		path_put(&payload->path);
+		payload->path.mnt = NULL;
+		payload->path.dentry = NULL;
 	}
-	kfree_sensitive(key->payload.data[big_key_data]);
-	key->payload.data[big_key_data] = NULL;
+	kfree_sensitive(payload->data);
+	payload->data = NULL;
 }

 /*
@@ -211,14 +210,14 @@ int big_key_update(struct key *key, struct key_preparsed_payload *prep)
  */
 void big_key_describe(const struct key *key, struct seq_file *m)
 {
-	size_t datalen = (size_t)key->payload.data[big_key_len];
+	struct big_key_payload *payload = to_big_key_payload(key->payload);

 	seq_puts(m, key->description);

 	if (key_is_positive(key))
 		seq_printf(m, ": %zu [%s]",
-			   datalen,
-			   datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+			   payload->length,
+			   payload->length > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
 }

 /*
@@ -227,16 +226,16 @@ void big_key_describe(const struct key *key, struct seq_file *m)
  */
 long big_key_read(const struct key *key, char *buffer, size_t buflen)
 {
-	size_t datalen = (size_t)key->payload.data[big_key_len];
+	struct big_key_payload *payload = to_big_key_payload(key->payload);
+	size_t datalen = payload->length;
 	long ret;

 	if (!buffer || buflen < datalen)
 		return datalen;

 	if (datalen > BIG_KEY_FILE_THRESHOLD) {
-		struct path *path = (struct path *)&key->payload.data[big_key_path];
 		struct file *file;
-		u8 *buf, *enckey = (u8 *)key->payload.data[big_key_data];
+		u8 *buf, *enckey = payload->data;
 		size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
 		loff_t pos = 0;

@@ -244,7 +243,7 @@ long big_key_read(const struct key *key, char *buffer, size_t buflen)
 		if (!buf)
 			return -ENOMEM;

-		file = dentry_open(path, O_RDONLY, current_cred());
+		file = dentry_open(&payload->path, O_RDONLY, current_cred());
 		if (IS_ERR(file)) {
 			ret = PTR_ERR(file);
 			goto error;

@@ -274,7 +273,7 @@ error:
 		kvfree_sensitive(buf, enclen);
 	} else {
 		ret = datalen;
-		memcpy(buffer, key->payload.data[big_key_data], datalen);
+		memcpy(buffer, payload->data, datalen);
 	}

 	return ret;
security/loadpin/loadpin.c:
@@ -78,11 +78,8 @@ static void check_pinning_enforcement(struct super_block *mnt_sb)
 	 * device, allow sysctl to change modes for testing.
 	 */
 	if (mnt_sb->s_bdev) {
-		char bdev[BDEVNAME_SIZE];
-
 		ro = bdev_read_only(mnt_sb->s_bdev);
-		bdevname(mnt_sb->s_bdev, bdev);
-		pr_info("%s (%u:%u): %s\n", bdev,
+		pr_info("%pg (%u:%u): %s\n", mnt_sb->s_bdev,
 			MAJOR(mnt_sb->s_bdev->bd_dev),
 			MINOR(mnt_sb->s_bdev->bd_dev),
 			ro ? "read-only" : "writable");
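The %pg printk specifier formats a struct block_device pointer as its device name (for example "sda1"), which is why the on-stack bdevname() buffer can go away entirely. A minimal kernel-context sketch of the idiom (hypothetical function name):

static void report_backing_device(struct super_block *sb)
{
	/* %pg resolves the block device name; no BDEVNAME_SIZE buffer needed. */
	if (sb->s_bdev)
		pr_info("%pg (%u:%u)\n", sb->s_bdev,
			MAJOR(sb->s_bdev->bd_dev),
			MINOR(sb->s_bdev->bd_dev));
}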
security/security.c:
@@ -367,13 +367,12 @@ static void __init ordered_lsm_init(void)

 int __init early_security_init(void)
 {
-	int i;
-	struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
 	struct lsm_info *lsm;

-	for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
-	     i++)
-		INIT_HLIST_HEAD(&list[i]);
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+	INIT_HLIST_HEAD(&security_hook_heads.NAME);
+#include "linux/lsm_hook_defs.h"
+#undef LSM_HOOK

 	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
 		if (!lsm->enabled)
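The replacement expands the LSM_HOOK() list from lsm_hook_defs.h once per hook, so every list head is initialized by member name rather than by walking the struct as an array, an assumption that structure layout randomization would break. A standalone sketch of the same X-macro pattern (made-up hook names, with the list inverted into a macro for brevity):

#include <stdio.h>

/* One central list of names, expanded to declare and to initialize. */
#define HOOK_LIST(X) \
	X(file_open)  \
	X(task_alloc) \
	X(bprm_check)

struct list_head { struct list_head *next, *prev; };

static struct hook_heads {
#define DECLARE_HEAD(NAME) struct list_head NAME;
	HOOK_LIST(DECLARE_HEAD)
#undef DECLARE_HEAD
} heads;

static void init_head(struct list_head *h) { h->next = h->prev = h; }

int main(void)
{
	/* Initialize each head by name; no layout assumptions. */
#define INIT_HEAD(NAME) init_head(&heads.NAME);
	HOOK_LIST(INIT_HEAD)
#undef INIT_HEAD
	printf("file_open head %s\n",
	       heads.file_open.next == &heads.file_open ? "ok" : "broken");
	return 0;
}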