Merge tag 'mm-hotfixes-stable-2023-03-24-17-09' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "21 hotfixes, 8 of which are cc:stable. 11 are for MM, the remainder
  are for other subsystems"

* tag 'mm-hotfixes-stable-2023-03-24-17-09' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (21 commits)
  mm: mmap: remove newline at the end of the trace
  mailmap: add entries for Richard Leitner
  kcsan: avoid passing -g for test
  kfence: avoid passing -g for test
  mm: kfence: fix using kfence_metadata without initialization in show_object()
  lib: dhry: fix unstable smp_processor_id(_) usage
  mailmap: add entry for Enric Balletbo i Serra
  mailmap: map Sai Prakash Ranjan's old address to his current one
  mailmap: map Rajendra Nayak's old address to his current one
  Revert "kasan: drop skip_kasan_poison variable in free_pages_prepare"
  mailmap: add entry for Tobias Klauser
  kasan, powerpc: don't rename memintrinsics if compiler adds prefixes
  mm/ksm: fix race with VMA iteration and mm_struct teardown
  kselftest: vm: fix unused variable warning
  mm: fix error handling for map_deny_write_exec
  mm: deduplicate error handling for map_deny_write_exec
  checksyscalls: ignore fstat to silence build warning on LoongArch
  nilfs2: fix kernel-infoleak in nilfs_ioctl_wrap_copy()
  test_maple_tree: add more testing for mas_empty_area()
  maple_tree: fix mas_skip_node() end slot detection
  ...
commit 65aca32efd
.mailmap (11 changes)
@@ -133,6 +133,8 @@ Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
+Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
+Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
 Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
 Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
@@ -379,6 +381,7 @@ Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
+Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>
 Rajesh Shah <rajesh.shah@intel.com>
 Ralf Baechle <ralf@linux-mips.org>
 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
@@ -387,6 +390,9 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Richard Leitner <richard.leitner@linux.dev> <dev@g0hl1n.net>
+Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
+Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
 Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
@@ -397,6 +403,7 @@ Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
+Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
 Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
 Sam Ravnborg <sam@mars.ravnborg.org>
 Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
@@ -437,6 +444,10 @@ Thomas Graf <tgraf@suug.ch>
 Thomas Körper <socketcan@esd.eu> <thomas.koerper@esd.eu>
 Thomas Pedersen <twp@codeaurora.org>
 Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
+Tobias Klauser <tklauser@distanz.ch> <tobias.klauser@gmail.com>
+Tobias Klauser <tklauser@distanz.ch> <klto@zhaw.ch>
+Tobias Klauser <tklauser@distanz.ch> <tklauser@nuerscht.ch>
+Tobias Klauser <tklauser@distanz.ch> <tklauser@xenon.tklauser.home>
 Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org>
 Tony Luck <tony.luck@intel.com>
 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
arch/powerpc/include/asm/kasan.h
@@ -2,7 +2,7 @@
 #ifndef __ASM_KASAN_H
 #define __ASM_KASAN_H
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)
 #define _GLOBAL_KASAN(fn)	_GLOBAL(__##fn)
 #define _GLOBAL_TOC_KASAN(fn)	_GLOBAL_TOC(__##fn)
 #define EXPORT_SYMBOL_KASAN(fn)	EXPORT_SYMBOL(__##fn)
arch/powerpc/include/asm/string.h
@@ -30,11 +30,17 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
 extern void * memchr(const void *,int,__kernel_size_t);
 void memcpy_flushcache(void *dest, const void *src, size_t size);
 
+#ifdef CONFIG_KASAN
+/* __mem variants are used by KASAN to implement instrumented meminstrinsics. */
+#ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+#define __memset memset
+#define __memcpy memcpy
+#define __memmove memmove
+#else /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */
 void *__memset(void *s, int c, __kernel_size_t count);
 void *__memcpy(void *to, const void *from, __kernel_size_t n);
 void *__memmove(void *to, const void *from, __kernel_size_t n);
-
-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#ifndef __SANITIZE_ADDRESS__
 /*
  * For files that are not instrumented (e.g. mm/slub.c) we
  * should use not instrumented version of mem* functions.
@@ -46,8 +52,9 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
 #ifndef __NO_FORTIFY
 #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
 #endif
 
-#endif
+#endif /* !__SANITIZE_ADDRESS__ */
+#endif /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */
+#endif /* CONFIG_KASAN */
 
 #ifdef CONFIG_PPC64
 #ifndef CONFIG_KASAN
arch/powerpc/kernel/prom_init_check.sh
@@ -13,8 +13,13 @@
 # If you really need to reference something from prom_init.o add
 # it to the list below:
 
-grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
-if [ $? -eq 0 ]
+has_renamed_memintrinsics()
+{
+	grep -q "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} && \
+		! grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" ${KCONFIG_CONFIG}
+}
+
+if has_renamed_memintrinsics
 then
 	MEM_FUNCS="__memcpy __memset"
 else
fs/nilfs2/ioctl.c
@@ -71,7 +71,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
 	if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
 		return -EINVAL;
 
-	buf = (void *)__get_free_pages(GFP_NOFS, 0);
+	buf = (void *)get_zeroed_page(GFP_NOFS);
 	if (unlikely(!buf))
 		return -ENOMEM;
 	maxmembs = PAGE_SIZE / argv->v_size;
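The infoleak here is subtle: the temporary page is copied back to userspace even when a batch operation fills it only partially, so any stale contents beyond the filled region escape to the caller unless the page starts out zeroed. A minimal userspace analogy, with malloc()/calloc() standing in for __get_free_pages()/get_zeroed_page() (illustrative only, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t page = 4096, filled = 100;

	char *stale = malloc(page);      /* like __get_free_pages(): contents undefined */
	char *zeroed = calloc(1, page);  /* like get_zeroed_page(): all zeroes */

	memset(stale, 'A', filled);
	memset(zeroed, 'A', filled);

	/* Copying the whole buffer out (as the ioctl does) would expose
	 * whatever follows 'filled' in the stale buffer; the zeroed one
	 * can only ever leak zeroes. */
	printf("stale tail byte:  0x%02x (indeterminate)\n", (unsigned char)stale[filled]);
	printf("zeroed tail byte: 0x%02x (always 0)\n", (unsigned char)zeroed[filled]);

	free(stale);
	free(zeroed);
	return 0;
}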
include/trace/events/mmap.h
@@ -35,7 +35,7 @@ TRACE_EVENT(vm_unmapped_area,
 		__entry->align_offset = info->align_offset;
 	),
 
-	TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
+	TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx",
 		IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
 		IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
 		__entry->total_vm, __entry->flags, __entry->length,
@@ -110,7 +110,7 @@ TRACE_EVENT(exit_mmap,
 		__entry->mt = &mm->mm_mt;
 	),
 
-	TP_printk("mt_mod %p, DESTROY\n",
+	TP_printk("mt_mod %p, DESTROY",
 		__entry->mt
 	)
 );
kernel/kcsan/Makefile
@@ -16,6 +16,6 @@ obj-y := core.o debugfs.o report.o
 KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
 obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
 
-CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
+CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -fno-omit-frame-pointer
 CFLAGS_kcsan_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
 obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
lib/dhry_run.c
@@ -31,6 +31,7 @@ MODULE_PARM_DESC(iterations,
 
 static void dhry_benchmark(void)
 {
+	unsigned int cpu = get_cpu();
 	int i, n;
 
 	if (iterations > 0) {
@@ -45,9 +46,10 @@ static void dhry_benchmark(void)
 	}
 
 report:
+	put_cpu();
 	if (n >= 0)
-		pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n",
-			smp_processor_id(), n, n / DHRY_VAX);
+		pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,
+			n, n / DHRY_VAX);
 	else if (n == -EAGAIN)
 		pr_err("Please increase the number of iterations\n");
 	else
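The bug being fixed: smp_processor_id() was evaluated in preemptible context, where the task may migrate between CPUs mid-benchmark, so the printed CPU number was unstable. The fix pins the task with get_cpu() (which also disables preemption), caches the id, and reuses it for the report after put_cpu(). A rough userspace analogy using the Linux-specific sched_getcpu() (illustrative only):

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>

int main(void)
{
	/* Sample the CPU once, up front, the way the fix caches get_cpu()'s
	 * return value. Calling sched_getcpu() again later could report a
	 * different CPU if the scheduler migrated this thread in between. */
	int cpu = sched_getcpu();

	/* ... long-running benchmark body would go here ... */

	printf("benchmark started on CPU%d\n", cpu);
	return 0;
}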
lib/maple_tree.c
@@ -5099,35 +5099,21 @@ static inline bool mas_rewind_node(struct ma_state *mas)
  */
 static inline bool mas_skip_node(struct ma_state *mas)
 {
-	unsigned char slot, slot_count;
-	unsigned long *pivots;
-	enum maple_type mt;
-
 	if (mas_is_err(mas))
 		return false;
 
-	mt = mte_node_type(mas->node);
-	slot_count = mt_slots[mt] - 1;
 	do {
 		if (mte_is_root(mas->node)) {
-			slot = mas->offset;
-			if (slot > slot_count) {
+			if (mas->offset >= mas_data_end(mas)) {
 				mas_set_err(mas, -EBUSY);
 				return false;
 			}
 		} else {
 			mas_ascend(mas);
-			slot = mas->offset;
-			mt = mte_node_type(mas->node);
-			slot_count = mt_slots[mt] - 1;
 		}
-	} while (slot > slot_count);
+	} while (mas->offset >= mas_data_end(mas));
 
-	mas->offset = ++slot;
-	pivots = ma_pivots(mas_mn(mas), mt);
-	if (slot > 0)
-		mas->min = pivots[slot - 1] + 1;
-
-	if (slot <= slot_count)
-		mas->max = pivots[slot];
-
+	mas->offset++;
 	return true;
 }
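The old code bounded mas->offset by the node type's slot capacity (mt_slots[mt] - 1) rather than by the last slot that actually holds data, so a sparsely filled node could be scanned past its live entries before -EBUSY was raised; mas_data_end() closes that gap. A toy model of the capacity-versus-occupancy distinction (struct and function names are hypothetical, purely illustrative):

#include <stdio.h>

#define SLOT_CAPACITY 16	/* what a mt_slots[mt] - 1 style bound checks against */

struct toy_node {
	int data_end;		/* last slot that actually holds data */
};

/* Old-style check: compare the offset against the node's capacity. */
static int past_end_old(const struct toy_node *n, int offset)
{
	(void)n;
	return offset > SLOT_CAPACITY - 1;
}

/* Fixed-style check: compare against the real end of data, as
 * mas_data_end() does. */
static int past_end_new(const struct toy_node *n, int offset)
{
	return offset >= n->data_end;
}

int main(void)
{
	struct toy_node node = { .data_end = 4 };	/* node far from full */
	int offset = 7;					/* already past the live slots */

	printf("capacity check says past end: %d (keeps scanning empty slots)\n",
	       past_end_old(&node, offset));
	printf("data-end check says past end: %d (stops at the last entry)\n",
	       past_end_new(&node, offset));
	return 0;
}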
lib/test_maple_tree.c
@@ -2670,6 +2670,49 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
 	rcu_read_unlock();
 }
 
+static noinline void check_empty_area_fill(struct maple_tree *mt)
+{
+	const unsigned long max = 0x25D78000;
+	unsigned long size;
+	int loop, shift;
+	MA_STATE(mas, mt, 0, 0);
+
+	mt_set_non_kernel(99999);
+	for (shift = 12; shift <= 16; shift++) {
+		loop = 5000;
+		size = 1 << shift;
+		while (loop--) {
+			mas_set(&mas, 0);
+			mas_lock(&mas);
+			MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
+			MT_BUG_ON(mt, mas.last != mas.index + size - 1);
+			mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
+			mas_unlock(&mas);
+			mas_reset(&mas);
+		}
+	}
+
+	/* No space left. */
+	size = 0x1000;
+	rcu_read_lock();
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
+	rcu_read_unlock();
+
+	/* Fill a depth 3 node to the maximum */
+	for (unsigned long i = 629440511; i <= 629440800; i += 6)
+		mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
+	/* Make space in the second-last depth 4 node */
+	mtree_erase(mt, 631668735);
+	/* Make space in the last depth 4 node */
+	mtree_erase(mt, 629506047);
+	mas_reset(&mas);
+	/* Search from just after the gap in the second-last depth 4 */
+	rcu_read_lock();
+	MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
+	rcu_read_unlock();
+	mt_set_non_kernel(0);
+}
+
 static DEFINE_MTREE(tree);
 static int maple_tree_seed(void)
 {
@@ -2926,6 +2969,11 @@ static int maple_tree_seed(void)
 	check_empty_area_window(&tree);
 	mtree_destroy(&tree);
 
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	check_empty_area_fill(&tree);
+	mtree_destroy(&tree);
+
+
 #if defined(BENCH)
 skip:
 #endif
mm/kfence/Makefile
@@ -2,5 +2,5 @@
 
 obj-y := core.o report.o
 
-CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls
+CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
 obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o
mm/kfence/core.c
@@ -726,10 +726,14 @@ static const struct seq_operations objects_sops = {
 };
 DEFINE_SEQ_ATTRIBUTE(objects);
 
-static int __init kfence_debugfs_init(void)
+static int kfence_debugfs_init(void)
 {
-	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);
+	struct dentry *kfence_dir;
 
+	if (!READ_ONCE(kfence_enabled))
+		return 0;
+
+	kfence_dir = debugfs_create_dir("kfence", NULL);
 	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
 	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
 	return 0;
@@ -883,6 +887,8 @@ static int kfence_init_late(void)
 	}
 
 	kfence_init_enable();
+	kfence_debugfs_init();
+
 	return 0;
 }
mm/ksm.c (11 changes)
@@ -988,9 +988,15 @@ static int unmerge_and_remove_all_rmap_items(void)
 
 		mm = mm_slot->slot.mm;
 		mmap_read_lock(mm);
+		/*
+		 * Exit right away if mm is exiting to avoid lockdep issue in
+		 * the maple tree
+		 */
+		if (ksm_test_exit(mm))
+			goto mm_exiting;
 		for_each_vma(vmi, vma) {
 			if (ksm_test_exit(mm))
 				break;
 			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
 				continue;
 			err = unmerge_ksm_pages(vma,
@@ -999,6 +1005,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 			goto error;
 		}
 
+mm_exiting:
 		remove_trailing_rmap_items(&mm_slot->rmap_list);
 		mmap_read_unlock(mm);
 
mm/mmap.c
@@ -2621,12 +2621,7 @@ cannot_expand:
 
 	if (map_deny_write_exec(vma, vma->vm_flags)) {
 		error = -EACCES;
-		if (file)
-			goto close_and_free_vma;
-		else if (vma->vm_file)
-			goto unmap_and_free_vma;
-		else
-			goto free_vma;
+		goto close_and_free_vma;
 	}
 
 	/* Allow architectures to sanity-check the vm_flags */
mm/mprotect.c
@@ -805,7 +805,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
 
 		if (map_deny_write_exec(vma, newflags)) {
 			error = -EACCES;
-			goto out;
+			break;
 		}
 
 		/* Allow architectures to sanity-check the new flags */
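Both hunks concern the memory-deny-write-execute (MDWE) prctl: mm/mmap.c now routes the failure through the common close_and_free_vma teardown, and mm/mprotect.c breaks out of the VMA loop so that changes already committed for earlier VMAs take the normal exit path instead of being skipped. A small userspace sketch of what MDWE enforces, assuming a kernel with this support; the prctl constants are defined inline in case the libc headers predate them:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_MDWE
#define PR_SET_MDWE 65
#define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
#endif

int main(void)
{
	void *p;

	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0)) {
		perror("PR_SET_MDWE (kernel without MDWE?)");
		return 1;
	}

	/* A direct W+X mapping should now fail with EACCES. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap(W|X) rejected: %s\n", strerror(errno));

	/* Gaining exec on an existing writable mapping is refused too. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p != MAP_FAILED && mprotect(p, 4096, PROT_READ | PROT_EXEC))
		printf("mprotect(+X) rejected: %s\n", strerror(errno));
	return 0;
}

Once set, the flag cannot be cleared for the life of the process, which is why both error paths above must unwind cleanly rather than leave half-updated VMAs behind.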
mm/page_alloc.c
@@ -1398,6 +1398,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			unsigned int order, bool check_free, fpi_t fpi_flags)
 {
 	int bad = 0;
+	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
 	bool init = want_init_on_free();
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1470,7 +1471,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	 * With hardware tag-based KASAN, memory tags must be set before the
 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
 	 */
-	if (!should_skip_kasan_poison(page, fpi_flags)) {
+	if (!skip_kasan_poison) {
 		kasan_poison_pages(page, order, init);
 
 		/* Memory is already initialized if KASAN did it internally. */
mm/vmalloc.c (28 changes)
@@ -2883,6 +2883,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 		unsigned int order, unsigned int nr_pages, struct page **pages)
 {
 	unsigned int nr_allocated = 0;
+	gfp_t alloc_gfp = gfp;
+	bool nofail = false;
 	struct page *page;
 	int i;
 
@@ -2893,6 +2895,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 	 * more permissive.
 	 */
 	if (!order) {
+		/* bulk allocator doesn't support nofail req. officially */
 		gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
 
 		while (nr_allocated < nr_pages) {
@@ -2931,20 +2934,35 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 			if (nr != nr_pages_request)
 				break;
 		}
+	} else if (gfp & __GFP_NOFAIL) {
+		/*
+		 * Higher order nofail allocations are really expensive and
+		 * potentially dangerous (pre-mature OOM, disruptive reclaim
+		 * and compaction etc.
+		 */
+		alloc_gfp &= ~__GFP_NOFAIL;
+		nofail = true;
 	}
 
 	/* High-order pages or fallback path if "bulk" fails. */
 	while (nr_allocated < nr_pages) {
 		if (fatal_signal_pending(current))
 			break;
 
 		if (nid == NUMA_NO_NODE)
-			page = alloc_pages(gfp, order);
+			page = alloc_pages(alloc_gfp, order);
 		else
-			page = alloc_pages_node(nid, gfp, order);
-		if (unlikely(!page))
-			break;
+			page = alloc_pages_node(nid, alloc_gfp, order);
+		if (unlikely(!page)) {
+			if (!nofail)
+				break;
+
+			/* fall back to the zero order allocations */
+			alloc_gfp |= __GFP_NOFAIL;
+			order = 0;
+			continue;
+		}
 
 		/*
 		 * Higher order allocations must be able to be treated as
 		 * indepdenent small pages by callers (as they can with
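The control flow added here strips __GFP_NOFAIL from the risky high-order attempt and, only if that attempt fails, retries at order 0 with the flag restored, since order-0 pages are the only size the page allocator can reasonably promise not to fail. A toy userspace model of just that fallback logic (fake allocator and stand-in flag bit, purely illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TOY_NOFAIL 0x1u			/* stand-in for __GFP_NOFAIL */

/* Toy allocator: pretend only order-0 requests can succeed. */
static bool toy_alloc(unsigned int gfp, unsigned int order)
{
	(void)gfp;
	return order == 0;
}

int main(void)
{
	unsigned int gfp = TOY_NOFAIL;	/* caller demanded a nofail allocation */
	unsigned int alloc_gfp = gfp;
	unsigned int order = 2;		/* high-order request */
	bool nofail = false;

	/* Mirror the patch: don't let a high-order attempt loop forever. */
	if (order && (gfp & TOY_NOFAIL)) {
		alloc_gfp &= ~TOY_NOFAIL;
		nofail = true;
	}

	while (!toy_alloc(alloc_gfp, order)) {
		if (!nofail)
			break;
		/* Fall back to order 0 with NOFAIL restored, as the patch does. */
		alloc_gfp |= TOY_NOFAIL;
		order = 0;
	}

	printf("satisfied at order %u, gfp %#x\n", order, alloc_gfp);
	return 0;
}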
scripts/checksyscalls.sh
@@ -114,7 +114,6 @@ cat << EOF
 #define __IGNORE_truncate
 #define __IGNORE_stat
 #define __IGNORE_lstat
-#define __IGNORE_fstat
 #define __IGNORE_fcntl
 #define __IGNORE_fadvise64
 #define __IGNORE_newfstatat
@@ -255,6 +254,9 @@ cat << EOF
 /* 64-bit ports never needed these, and new 32-bit ports can use statx */
 #define __IGNORE_fstat64
 #define __IGNORE_fstatat64
+
+/* Newer ports are not required to provide fstat in favor of statx */
+#define __IGNORE_fstat
 EOF
 }
 
tools/testing/selftests/mm/mdwe_test.c
@@ -163,9 +163,8 @@ TEST_F(mdwe, mprotect_WRITE_EXEC)
 
 TEST_F(mdwe, mmap_FIXED)
 {
-	void *p, *p2;
+	void *p;
 
-	p2 = mmap(NULL, self->size, PROT_READ | PROT_EXEC, self->flags, 0, 0);
 	self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
 	ASSERT_NE(self->p, MAP_FAILED);
 