Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "9 patches.

  Subsystems affected by this patch series: mm (migration, highmem,
  sparsemem, mremap, mempolicy, and memcg), lz4, mailmap, and
  MAINTAINERS"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  MAINTAINERS: add Tom as clang reviewer
  mm/list_lru.c: revert "mm/list_lru: optimize memcg_reparent_list_lru_node()"
  mailmap: update Vasily Averin's email address
  mm/mempolicy: fix mpol_new leak in shared_policy_replace
  mmmremap.c: avoid pointless invalidate_range_start/end on mremap(old_size=0)
  mm/sparsemem: fix 'mem_section' will never be NULL gcc 12 warning
  lz4: fix LZ4_decompress_safe_partial read out of bound
  highmem: fix checks in __kmap_local_sched_{in,out}
  mm: migrate: use thp_order instead of HPAGE_PMD_ORDER for new page allocation.
This commit is contained in:
Linus Torvalds 2022-04-08 14:31:41 -10:00
commit 911b2b9516
8 changed files with 24 additions and 14 deletions

View File

@@ -391,6 +391,10 @@ Uwe Kleine-König <ukleinek@strlen.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Vasily Averin <vasily.averin@linux.dev> <vvs@virtuozzo.com>
+Vasily Averin <vasily.averin@linux.dev> <vvs@openvz.org>
+Vasily Averin <vasily.averin@linux.dev> <vvs@parallels.com>
+Vasily Averin <vasily.averin@linux.dev> <vvs@sw.ru>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
 Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>

View File

@@ -4791,6 +4791,7 @@ F:	.clang-format
 CLANG/LLVM BUILD SUPPORT
 M:	Nathan Chancellor <nathan@kernel.org>
 M:	Nick Desaulniers <ndesaulniers@google.com>
+R:	Tom Rix <trix@redhat.com>
 L:	llvm@lists.linux.dev
 S:	Supported
 W:	https://clangbuiltlinux.github.io/

View File

@@ -1397,13 +1397,16 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
+	unsigned long root = SECTION_NR_TO_ROOT(nr);
+
+	if (unlikely(root >= NR_SECTION_ROOTS))
+		return NULL;
+
 #ifdef CONFIG_SPARSEMEM_EXTREME
-	if (!mem_section)
+	if (!mem_section || !mem_section[root])
 		return NULL;
 #endif
-	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
-		return NULL;
-	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+	return &mem_section[root][nr & SECTION_ROOT_MASK];
 }

 extern size_t mem_section_usage_size(void);

View File

@@ -271,8 +271,12 @@ static FORCE_INLINE int LZ4_decompress_generic(
 			ip += length;
 			op += length;

-			/* Necessarily EOF, due to parsing restrictions */
-			if (!partialDecoding || (cpy == oend))
+			/* Necessarily EOF when !partialDecoding.
+			 * When partialDecoding, it is EOF if we've either
+			 * filled the output buffer or
+			 * can't proceed with reading an offset for following match.
+			 */
+			if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
 				break;
 		} else {
 			/* may overwrite up to WILDCOPYLENGTH beyond cpy */

View File

@@ -624,7 +624,7 @@ void __kmap_local_sched_out(void)
 		/* With debug all even slots are unmapped and act as guard */
 		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-			WARN_ON_ONCE(!pte_none(pteval));
+			WARN_ON_ONCE(pte_val(pteval) != 0);
 			continue;
 		}
 		if (WARN_ON_ONCE(pte_none(pteval)))
@@ -661,7 +661,7 @@ void __kmap_local_sched_in(void)
 		/* With debug all even slots are unmapped and act as guard */
 		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-			WARN_ON_ONCE(!pte_none(pteval));
+			WARN_ON_ONCE(pte_val(pteval) != 0);
 			continue;
 		}
 		if (WARN_ON_ONCE(pte_none(pteval)))

View File

@@ -394,12 +394,6 @@ static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
 	int dst_idx = dst_memcg->kmemcg_id;
 	struct list_lru_one *src, *dst;

-	/*
-	 * If there is no lru entry in this nlru, we can skip it immediately.
-	 */
-	if (!READ_ONCE(nlru->nr_items))
-		return;
-
 	/*
 	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
 	 * we have to use IRQ-safe primitives here to avoid deadlock.

View File

@@ -2743,6 +2743,7 @@ alloc_new:
 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 	if (!mpol_new)
 		goto err_out;
+	atomic_set(&mpol_new->refcnt, 1);
 	goto restart;
 }

View File

@@ -486,6 +486,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 	pmd_t *old_pmd, *new_pmd;
 	pud_t *old_pud, *new_pud;

+	if (!len)
+		return 0;
+
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);