- force check_object_size() to be inline too
- move page-spanning check behind a CONFIG since it's triggering false positives

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1
Comment: Kees Cook <kees@outflux.net>

iQIcBAABCgAGBQJX0F3qAAoJEIly9N/cbcAmaxEP/R737i+XhP4VOv+rYW050NHB
K+FDeLv5/Gx56JrHRRWwj80K5/9F09wS929AWcaTDjEb2NKp/o/6W/gN1eVdGlJF
K3UQk+Kmncb44poVoHkjMRAl6+sfKm6mTWZBTjBECKwQuCFyDoDoqDXhn5IXTlw9
Ig+TTOSgNw9gRke3ECtFynbVnDWx/Ry/axfT9vGXhFOkWclMUFy2UOdDSTtFAB6x
yw5hdrfGakk2BPscHLO1xNqRuVLRUSXZVUiJGIQ6AiUupm34Yqmm69mrMuxaOtPC
Ai3zhNGDuYClcGJAiPJYX+7nRjgPCWAdlyzQqLp5hwx63TJ+gxvhmxoFOJxEmHE/
99i2Ak073Es6WII532Eknk3vV+UJzQNT/HO+0LcrJFkOEp9EHfVUb19CngQTaX7Q
UbfYdyFgp3y24cRp7v0tP8gE2LCrsRe0UEhUq2NGmrerw3caNqZGHS9Od5OYEM8D
uIhaotWoOv9Z0r+DZMGkUjfqeLb6RWNcUoWc5wZ3VYG27BM/pfhRxKf/2aw6O9u0
2Jk1QJxBr+/8DQ500xu/IBOP9V7aGAc4nxKyqUlwA05/JEFGiAzCwqfZW5CKTxgD
5Ht994WbTEH3/VaAskKnwggeHvttiEpehBCdVA4bXuhBhJhmPjFiKHX7uRrcM2GV
/yH3UTkPnr/VD/I2ndiX
=r8BE
-----END PGP SIGNATURE-----

Merge tag 'usercopy-v4.8-rc6-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull more hardened usercopy fixes from Kees Cook:

 - force check_object_size() to be inline too

 - move page-spanning check behind a CONFIG since it's triggering false
   positives

[ Changed the page-spanning config option to depend on EXPERT in the
  merge. That way it still gets build testing, and you can enable it if
  you want to, but is never enabled for "normal" configurations ]

* tag 'usercopy-v4.8-rc6-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  usercopy: remove page-spanning test for now
  usercopy: force check_object_size() inline
commit 80a77045da
include/linux/thread_info.h

@@ -118,8 +118,8 @@ static inline int arch_within_stack_frames(const void * const stack,
 extern void __check_object_size(const void *ptr, unsigned long n,
 				bool to_user);
 
-static inline void check_object_size(const void *ptr, unsigned long n,
-				     bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+					      bool to_user)
 {
 	if (!__builtin_constant_p(n))
 		__check_object_size(ptr, n, to_user);
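The hunk above only forces the existing wrapper inline; its logic is unchanged. As a rough illustration of why the inlining matters (my own user-space sketch, not kernel code, assuming typical gcc behaviour at -O2): __builtin_constant_p(n) can only fold to true when the check is expanded at the call site where n is a literal, so __always_inline keeps the constant-size fast path from depending on the compiler's inlining heuristics.

/*
 * Illustration only: compile with gcc -O2. Names are made up; the kernel's
 * check_object_size() follows the same shape: skip the runtime check when
 * the copy size is a compile-time constant.
 */
#include <stdio.h>

static void runtime_check(unsigned long n)
{
	printf("runtime check for %lu bytes\n", n);
}

static inline __attribute__((always_inline))
void check_size(unsigned long n)
{
	/*
	 * After inlining, __builtin_constant_p(16) folds to 1 and the branch
	 * disappears; if the wrapper were emitted out of line, n would never
	 * be a compile-time constant here.
	 */
	if (!__builtin_constant_p(n))
		runtime_check(n);
}

int main(void)
{
	unsigned long n;

	check_size(16);			/* constant size: check folded away */

	if (scanf("%lu", &n) == 1)
		check_size(n);		/* runtime size: runtime_check() runs */
	return 0;
}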
mm/usercopy.c

@@ -134,30 +134,15 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
-	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
 	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
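Taken together with the #endif added in the final mm/usercopy.c hunk further below, the #ifdef introduced here means that on configurations without CONFIG_HARDENED_USERCOPY_PAGESPAN the new helper reduces to a stub, roughly:

/* Effective body when CONFIG_HARDENED_USERCOPY_PAGESPAN is not set. */
static inline const char *check_page_span(const void *ptr, unsigned long n,
					  struct page *page, bool to_user)
{
	return NULL;	/* page-span checking compiled out; check_heap_object()
			 * still performs the vmalloc, virt_addr_valid() and
			 * slab checks */
}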
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	    ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
-
-reject:
-	return "<spans multiple pages>";
 }
 
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
+
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
+}
+
 /*
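For context on the test that check_page_span() keeps behind the new config option: it relies on compound pages. A brief kernel-style sketch (hypothetical helper, not part of the patch, assuming the v4.8-era mm API used in the diff) of the core comparison: for an allocation made with __GFP_COMP, virt_to_head_page() on the first and last byte returns the same head page, which is the "fully inside the same compound page" case the code allows.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper: the "same compound (__GFP_COMP) page" test boils
 * down to this comparison of head pages for the first and last byte.
 */
static bool same_compound_page(const void *ptr, unsigned long n)
{
	struct page *start = virt_to_head_page(ptr);
	struct page *end   = virt_to_head_page(ptr + n - 1);

	return start == end;
}

An order-2 allocation from alloc_pages(GFP_KERNEL | __GFP_COMP, 2) satisfies this for any object inside it; the same allocation without __GFP_COMP does not once the object crosses the first page boundary, which is what the Reserved/CMA checks above then try to classify before rejecting.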
security/Kconfig

@@ -147,6 +147,17 @@ config HARDENED_USERCOPY
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+	bool "Refuse to copy allocations that span multiple pages"
+	depends on HARDENED_USERCOPY
+	depends on EXPERT
+	help
+	  When a multi-page allocation is done without __GFP_COMP,
+	  hardened usercopy will reject attempts to copy it. There are,
+	  however, several cases of this in the kernel that have not all
+	  been removed. This config is intended to be used only while
+	  trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
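The help text above describes the pattern behind the false positives. A made-up example (not taken from the kernel tree; function name and scenario are hypothetical) of the kind of code that would trip the check when CONFIG_HARDENED_USERCOPY_PAGESPAN is enabled:

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical read path: a two-page buffer allocated without __GFP_COMP. */
static long example_read(char __user *buf, size_t len)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);	/* order 1: 2 pages */
	long ret = 0;

	if (!addr)
		return -ENOMEM;

	/*
	 * If len crosses the first page boundary, check_page_span() cannot
	 * tell that the two pages belong to one allocation (there is no
	 * compound head) and rejects the copy as "<spans multiple pages>",
	 * even though the access is valid.
	 */
	if (copy_to_user(buf, (void *)addr, len))
		ret = -EFAULT;

	free_pages(addr, 1);
	return ret;
}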