Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "12 patches.

  Subsystems affected by this patch series: mm (memcg, zsmalloc, swap,
  mailmap, selftests, pagecache, hugetlb, pagemap), lib, and coredump"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/mmap.c: fix mmap return value when vma is merged after call_mmap()
  hugetlb_cgroup: fix offline of hugetlb cgroup with reservations
  mm/filemap: add static for function __add_to_page_cache_locked
  userfaultfd: selftests: fix SIGSEGV if huge mmap fails
  tools/testing/selftests/vm: fix build error
  mailmap: add two more addresses of Uwe Kleine-König
  mm/swapfile: do not sleep with a spin lock held
  mm/zsmalloc.c: drop ZSMALLOC_PGTABLE_MAPPING
  mm: list_lru: set shrinker map bit when child nr_items is not zero
  mm: memcg/slab: fix obj_cgroup_charge() return value handling
  coredump: fix core_pattern parse error
  zlib: export S390 symbols for zlib modules
Linus Torvalds 2020-12-06 10:20:59 -08:00
commit 12c0ab6658
15 changed files with 75 additions and 121 deletions


@@ -322,6 +322,8 @@ TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Uwe Kleine-König <ukleinek@strlen.de>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
Valdis Kletnieks <Valdis.Kletnieks@vt.edu>


@@ -81,7 +81,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=y
CONFIG_CMA=y
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_PGTABLE_MAPPING=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y


@@ -229,7 +229,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
*/
if (ispipe) {
if (isspace(*pat_ptr)) {
was_space = true;
if (cn->used != 0)
was_space = true;
pat_ptr++;
continue;
} else if (was_space) {
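
The parsing fix above only lets whitespace act as an argument delimiter once something has already been collected into the core name, so stray spaces after the '|' in a piped core_pattern no longer produce an empty argv[0] for the helper. A minimal, standalone userspace sketch of that splitting rule (a hypothetical helper, not the kernel's format_corename()):

    #include <ctype.h>
    #include <stdio.h>

    /*
     * Sketch of the corrected rule: whitespace only terminates an argument
     * once at least one character has been collected, so leading spaces
     * no longer create an empty first argument.
     */
    static void split_pattern(const char *pat, char *out, size_t outsz)
    {
            size_t used = 0;
            int was_space = 0;

            while (*pat && used + 2 < outsz) {
                    if (isspace((unsigned char)*pat)) {
                            if (used != 0)          /* the actual fix */
                                    was_space = 1;
                            pat++;
                            continue;
                    } else if (was_space) {
                            was_space = 0;
                            out[used++] = '\0';     /* terminate previous argument */
                    }
                    out[used++] = *pat++;
            }
            out[used] = '\0';
    }

    int main(void)
    {
            char argv_buf[64];

            split_pattern("  /usr/bin/dump-helper %p", argv_buf, sizeof(argv_buf));
            printf("argv[0] = \"%s\"\n", argv_buf);  /* "/usr/bin/dump-helper" */
            return 0;
    }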


@@ -20,7 +20,6 @@
* zsmalloc mapping modes
*
* NOTE: These only make a difference when a mapped object spans pages.
* They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected.
*/
enum zs_mapmode {
ZS_MM_RW, /* normal read-write mapping */


@@ -4,6 +4,7 @@
#include "dfltcc_util.h"
#include "dfltcc.h"
#include <asm/setup.h>
#include <linux/export.h>
#include <linux/zutil.h>
/*
@@ -29,6 +30,7 @@ int dfltcc_can_inflate(
return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
}
EXPORT_SYMBOL(dfltcc_can_inflate);
static int dfltcc_was_inflate_used(
z_streamp strm
@@ -147,3 +149,4 @@ dfltcc_inflate_action dfltcc_inflate(
return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
}
EXPORT_SYMBOL(dfltcc_inflate);
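
The two EXPORT_SYMBOL() additions are needed because the zlib inflate code calling dfltcc_can_inflate() and dfltcc_inflate() can be built as a module on s390, and module code can only resolve symbols that have been explicitly exported. A minimal illustration of the pattern, using a hypothetical helper name:

    #include <linux/export.h>

    /* Hypothetical built-in helper: without the EXPORT_SYMBOL() below, a
     * loadable module calling my_helper() would fail to load with
     * "Unknown symbol my_helper". */
    int my_helper(int x)
    {
            return x * 2;
    }
    EXPORT_SYMBOL(my_helper);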


@@ -707,19 +707,6 @@ config ZSMALLOC
returned by an alloc(). This handle must be mapped in order to
access the allocated space.
config ZSMALLOC_PGTABLE_MAPPING
bool "Use page table mapping to access object in zsmalloc"
depends on ZSMALLOC=y
help
By default, zsmalloc uses a copy-based object mapping method to
access allocations that span two pages. However, if a particular
architecture (ex, ARM) performs VM mapping faster than copying,
then you should select this. This causes zsmalloc to use page table
mapping rather than copying for object mapping.
You can check speed with zsmalloc benchmark:
https://github.com/spartacus06/zsmapbench
config ZSMALLOC_STAT
bool "Export zsmalloc statistics"
depends on ZSMALLOC


@@ -827,7 +827,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
noinline int __add_to_page_cache_locked(struct page *page,
static noinline int __add_to_page_cache_locked(struct page *page,
struct address_space *mapping,
pgoff_t offset, gfp_t gfp,
void **shadowp)


@@ -82,11 +82,8 @@ static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
for (idx = 0; idx < hugetlb_max_hstate; idx++) {
if (page_counter_read(
hugetlb_cgroup_counter_from_cgroup(h_cg, idx)) ||
page_counter_read(hugetlb_cgroup_counter_from_cgroup_rsvd(
h_cg, idx))) {
hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
return true;
}
}
return false;
}
@@ -202,9 +199,10 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
struct hstate *h;
struct page *page;
int idx = 0;
int idx;
do {
idx = 0;
for_each_hstate(h) {
spin_lock(&hugetlb_lock);
list_for_each_entry(page, &h->hugepage_activelist, lru)
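
Reassembled from the hunk above, the usage check now consults only the page counters; reservation counters can legitimately stay non-zero when a cgroup goes offline (they are uncharged later), so waiting for them would leave the offline path stuck. The companion change in hugetlb_cgroup_css_offline() re-initialises idx on every pass of the retry loop so each hstate is matched with the correct counter index. Abridged sketch of the resulting check:

    static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
    {
            int idx;

            for (idx = 0; idx < hugetlb_max_hstate; idx++) {
                    /* only actual usage, not outstanding reservations */
                    if (page_counter_read(
                                    hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
                            return true;
            }
            return false;
    }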


@@ -534,7 +534,6 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
struct list_lru_node *nlru = &lru->node[nid];
int dst_idx = dst_memcg->kmemcg_id;
struct list_lru_one *src, *dst;
bool set;
/*
* Since list_lru_{add,del} may be called under an IRQ-safe lock,
@@ -546,11 +545,12 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
dst = list_lru_from_memcg_idx(nlru, dst_idx);
list_splice_init(&src->list, &dst->list);
set = (!dst->nr_items && src->nr_items);
dst->nr_items += src->nr_items;
if (set)
if (src->nr_items) {
dst->nr_items += src->nr_items;
memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
src->nr_items = 0;
src->nr_items = 0;
}
spin_unlock_irq(&nlru->lock);
}
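
Reassembled from the hunk above, the drain path now sets the destination memcg's shrinker-map bit whenever the child actually had items to transfer, not only when the destination list happened to be empty; otherwise reparented objects could be missed by the shrinker and pinned indefinitely. Abridged:

    /* inside memcg_drain_list_lru_node(), under nlru->lock */
    list_splice_init(&src->list, &dst->list);

    if (src->nr_items) {
            dst->nr_items += src->nr_items;
            memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
            src->nr_items = 0;
    }

    spin_unlock_irq(&nlru->lock);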


@@ -1808,6 +1808,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
if (error)
goto unmap_and_free_vma;
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
* Bug: If addr is changed, prev, rb_link, rb_parent should
* be updated for vma_link()
*/
WARN_ON_ONCE(addr != vma->vm_start);
addr = vma->vm_start;
/* If vm_flags changed after call_mmap(), we should try merge vma again
* as we may succeed this time.
*/
@@ -1822,25 +1833,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
fput(vma->vm_file);
vm_area_free(vma);
vma = merge;
/* Update vm_flags and possible addr to pick up the change. We don't
* warn here if addr changed as the vma is not linked by vma_link().
*/
addr = vma->vm_start;
/* Update vm_flags to pick up the change. */
vm_flags = vma->vm_flags;
goto unmap_writable;
}
}
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
* Bug: If addr is changed, prev, rb_link, rb_parent should
* be updated for vma_link()
*/
WARN_ON_ONCE(addr != vma->vm_start);
addr = vma->vm_start;
vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
error = shmem_zero_setup(vma);
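
The two hunks above are easiest to read together: the same "Can addr have changed??" block is added at its new, earlier position and removed from its old position after the merge attempt. The resulting flow in mmap_region() is roughly the sketch below (heavily abridged; the surrounding condition and the vma_merge() call are context lines not shown in the hunks). The address is refreshed from the vma immediately after call_mmap(), before any merge, so the value returned to userspace stays correct even when the original vma is merged away.

    error = call_mmap(file, vma);
    if (error)
            goto unmap_and_free_vma;

    /* a driver's ->mmap may have moved the mapping */
    WARN_ON_ONCE(addr != vma->vm_start);
    addr = vma->vm_start;

    /* retry the merge if ->mmap changed vm_flags */
    if (unlikely(vm_flags != vma->vm_flags && prev)) {
            merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
                              vma->vm_flags, NULL, vma->vm_file,
                              vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX);
            if (merge) {
                    fput(vma->vm_file);
                    vm_area_free(vma);
                    vma = merge;
                    /* only the flags still need refreshing */
                    vm_flags = vma->vm_flags;
                    goto unmap_writable;
            }
    }

    vm_flags = vma->vm_flags;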


@@ -274,22 +274,32 @@ static inline size_t obj_full_size(struct kmem_cache *s)
return s->size + sizeof(struct obj_cgroup *);
}
static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
size_t objects,
gfp_t flags)
/*
* Returns false if the allocation should fail.
*/
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
struct obj_cgroup **objcgp,
size_t objects, gfp_t flags)
{
struct obj_cgroup *objcg;
if (!memcg_kmem_enabled())
return true;
if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
return true;
objcg = get_obj_cgroup_from_current();
if (!objcg)
return NULL;
return true;
if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
obj_cgroup_put(objcg);
return NULL;
return false;
}
return objcg;
*objcgp = objcg;
return true;
}
static inline void mod_objcg_state(struct obj_cgroup *objcg,
@@ -315,7 +325,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
unsigned long off;
size_t i;
if (!objcg)
if (!memcg_kmem_enabled() || !objcg)
return;
flags &= ~__GFP_ACCOUNT;
@@ -400,11 +410,11 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}
static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
size_t objects,
gfp_t flags)
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
struct obj_cgroup **objcgp,
size_t objects, gfp_t flags)
{
return NULL;
return true;
}
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
@@ -508,9 +518,8 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
if (should_failslab(s, flags))
return NULL;
if (memcg_kmem_enabled() &&
((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);
if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
return NULL;
return s;
}
@@ -529,8 +538,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
s->flags, flags);
}
if (memcg_kmem_enabled())
memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
#ifndef CONFIG_SLOB
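
Reassembled from the hunks above, the pre-allocation hook now reports success or failure explicitly and hands the obj_cgroup back through an out-parameter, so a failed obj_cgroup_charge() can no longer be confused with "no accounting needed":

    /*
     * Returns false if the allocation should fail.
     */
    static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                                 struct obj_cgroup **objcgp,
                                                 size_t objects, gfp_t flags)
    {
            struct obj_cgroup *objcg;

            if (!memcg_kmem_enabled())
                    return true;            /* accounting disabled */

            if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
                    return true;            /* cache is not accounted */

            objcg = get_obj_cgroup_from_current();
            if (!objcg)
                    return true;            /* no cgroup: proceed unaccounted */

            if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
                    obj_cgroup_put(objcg);
                    return false;           /* charge failed: fail the allocation */
            }

            *objcgp = objcg;
            return true;
    }

slab_pre_alloc_hook() then simply returns NULL when this hook returns false, and memcg_slab_post_alloc_hook() performs its own memcg_kmem_enabled() check.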


@@ -2867,6 +2867,7 @@ late_initcall(max_swapfiles_check);
static struct swap_info_struct *alloc_swap_info(void)
{
struct swap_info_struct *p;
struct swap_info_struct *defer = NULL;
unsigned int type;
int i;
@@ -2895,7 +2896,7 @@ static struct swap_info_struct *alloc_swap_info(void)
smp_wmb();
WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
} else {
kvfree(p);
defer = p;
p = swap_info[type];
/*
* Do not memset this entry: a racing procfs swap_next()
@@ -2908,6 +2909,7 @@ static struct swap_info_struct *alloc_swap_info(void)
plist_node_init(&p->avail_lists[i], 0);
p->flags = SWP_USED;
spin_unlock(&swap_lock);
kvfree(defer);
spin_lock_init(&p->lock);
spin_lock_init(&p->cont_lock);
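
The swapfile fix is the usual defer-free pattern: kvfree() may end up in vfree(), which can sleep, so it must not run while swap_lock is held; the pointer is only remembered under the lock and freed after the unlock. Abridged sketch of alloc_swap_info() after the change (allocation line as in the existing code, error handling and slot search trimmed):

    struct swap_info_struct *p, *defer = NULL;

    p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
    if (!p)
            return ERR_PTR(-ENOMEM);

    spin_lock(&swap_lock);
    if (type < nr_swapfiles) {
            defer = p;              /* existing slot is reused; free p later */
            p = swap_info[type];
    }
    /* ... reinitialise p while still holding the lock ... */
    spin_unlock(&swap_lock);
    kvfree(defer);                  /* may sleep; kvfree(NULL) is a no-op */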


@@ -293,11 +293,7 @@ struct zspage {
};
struct mapping_area {
#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
char *vm_buf; /* copy buffer for objects that span pages */
#endif
char *vm_addr; /* address of kmap_atomic()'ed pages */
enum zs_mapmode vm_mm; /* mapping mode */
};
@@ -1113,54 +1109,6 @@ static struct zspage *find_get_zspage(struct size_class *class)
return zspage;
}
#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
/*
* Make sure we don't leak memory if a cpu UP notification
* and zs_init() race and both call zs_cpu_up() on the same cpu
*/
if (area->vm)
return 0;
area->vm = get_vm_area(PAGE_SIZE * 2, 0);
if (!area->vm)
return -ENOMEM;
/*
* Populate ptes in advance to avoid pte allocation with GFP_KERNEL
* in non-preemtible context of zs_map_object.
*/
return apply_to_page_range(&init_mm, (unsigned long)area->vm->addr,
PAGE_SIZE * 2, NULL, NULL);
}
static inline void __zs_cpu_down(struct mapping_area *area)
{
if (area->vm)
free_vm_area(area->vm);
area->vm = NULL;
}
static inline void *__zs_map_object(struct mapping_area *area,
struct page *pages[2], int off, int size)
{
unsigned long addr = (unsigned long)area->vm->addr;
BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0);
area->vm_addr = area->vm->addr;
return area->vm_addr + off;
}
static inline void __zs_unmap_object(struct mapping_area *area,
struct page *pages[2], int off, int size)
{
unsigned long addr = (unsigned long)area->vm_addr;
unmap_kernel_range(addr, PAGE_SIZE * 2);
}
#else /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
static inline int __zs_cpu_up(struct mapping_area *area)
{
/*
@@ -1241,8 +1189,6 @@ out:
pagefault_enable();
}
#endif /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
static int zs_cpu_prepare(unsigned int cpu)
{
struct mapping_area *area;


@@ -60,9 +60,13 @@ ifeq ($(CAN_BUILD_X86_64),1)
TEST_GEN_FILES += $(BINARIES_64)
endif
else
ifneq (,$(findstring $(ARCH),powerpc))
TEST_GEN_FILES += protection_keys
endif
endif
ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64))
TEST_GEN_FILES += va_128TBswitch
TEST_GEN_FILES += virtual_address_range


@@ -206,19 +206,19 @@ static int hugetlb_release_pages(char *rel_area)
return ret;
}
static void hugetlb_allocate_area(void **alloc_area)
{
void *area_alias = NULL;
char **alloc_area_alias;
*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
(map_shared ? MAP_SHARED : MAP_PRIVATE) |
MAP_HUGETLB,
huge_fd, *alloc_area == area_src ? 0 :
nr_pages * page_size);
if (*alloc_area == MAP_FAILED) {
fprintf(stderr, "mmap of hugetlbfs file failed\n");
*alloc_area = NULL;
perror("mmap of hugetlbfs file failed");
goto fail;
}
if (map_shared) {
@@ -227,14 +227,11 @@ static void hugetlb_allocate_area(void **alloc_area)
huge_fd, *alloc_area == area_src ? 0 :
nr_pages * page_size);
if (area_alias == MAP_FAILED) {
if (munmap(*alloc_area, nr_pages * page_size) < 0) {
perror("hugetlb munmap");
exit(1);
}
*alloc_area = NULL;
return;
perror("mmap of hugetlb file alias failed");
goto fail_munmap;
}
}
if (*alloc_area == area_src) {
huge_fd_off0 = *alloc_area;
alloc_area_alias = &area_src_alias;
@@ -243,6 +240,16 @@ static void hugetlb_allocate_area(void **alloc_area)
}
if (area_alias)
*alloc_area_alias = area_alias;
return;
fail_munmap:
if (munmap(*alloc_area, nr_pages * page_size) < 0) {
perror("hugetlb munmap");
exit(1);
}
fail:
*alloc_area = NULL;
}
static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset)