17 hotfixes, mainly for MM. 5 are cc:stable and the remainder address
post-6.0 issues.
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCY1IgYgAKCRDdBJ7gKXxA
jpyRAQDkfa1LDkfbA4dQBZShkUhBX1k3AyRO1NWMjwwTxP3H8wD9HUz1BB3ynoKc
ipzQs7q5jbBvndczEksHiG2AC7SvQAI=
=wD9I
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2022-10-20' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Seventeen hotfixes, mainly for MM. Five are cc:stable and the
  remainder address post-6.0 issues"

* tag 'mm-hotfixes-stable-2022-10-20' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nouveau: fix migrate_to_ram() for faulting page
  mm/huge_memory: do not clobber swp_entry_t during THP split
  hugetlb: fix memory leak associated with vma_lock structure
  mm/page_alloc: reduce potential fragmentation in make_alloc_exact()
  mm: /proc/pid/smaps_rollup: fix maple tree search
  mm,hugetlb: take hugetlb_lock before decrementing h->resv_huge_pages
  mm/mmap: fix MAP_FIXED address return on VMA merge
  mm/mmap.c: __vma_adjust(): suppress uninitialized var warning
  mm/mmap: undo ->mmap() when mas_preallocate() fails
  init: Kconfig: fix spelling mistake "satify" -> "satisfy"
  ocfs2: clear dinode links count in case of error
  ocfs2: fix BUG when iput after ocfs2_mknod fails
  gcov: support GCC 12.1 and newer compilers
  zsmalloc: zs_destroy_pool: add size_class NULL check
  mm/mempolicy: fix mbind_range() arguments to vma_merge()
  mailmap: update email for Qais Yousef
  mailmap: update Dan Carpenter's email address
This commit is contained in:
commit 440b7895c9

 .mailmap | 4 +++-
--- a/.mailmap
+++ b/.mailmap
@@ -104,6 +104,7 @@ Christoph Hellwig <hch@lst.de>
 Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
 Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
@@ -353,7 +354,8 @@ Peter Oruba <peter@oruba.de>
 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
-Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
+Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
 Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
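
Both hunks use gitmailmap's two-address form: the first address is the canonical identity, the second is the address as recorded in existing commits. The mapping can be verified after applying with git check-mailmap; the expected output below is what the new entries imply:

$ git check-mailmap "Qais Yousef <qais.yousef@arm.com>"
Qais Yousef <qyousef@layalina.io>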
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -176,6 +176,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 		.src = &src,
 		.dst = &dst,
 		.pgmap_owner = drm->dev,
+		.fault_page = vmf->page,
 		.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
 	};
 
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -232,6 +232,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
 	handle_t *handle = NULL;
 	struct ocfs2_super *osb;
 	struct ocfs2_dinode *dirfe;
+	struct ocfs2_dinode *fe = NULL;
 	struct buffer_head *new_fe_bh = NULL;
 	struct inode *inode = NULL;
 	struct ocfs2_alloc_context *inode_ac = NULL;
@@ -382,6 +383,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
 		goto leave;
 	}
 
+	fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
 	if (S_ISDIR(mode)) {
 		status = ocfs2_fill_new_dir(osb, handle, dir, inode,
 					    new_fe_bh, data_ac, meta_ac);
@@ -454,8 +456,11 @@ roll_back:
 leave:
 	if (status < 0 && did_quota_inode)
 		dquot_free_inode(inode);
-	if (handle)
+	if (handle) {
+		if (status < 0 && fe)
+			ocfs2_set_links_count(fe, 0);
 		ocfs2_commit_trans(osb, handle);
+	}
 
 	ocfs2_inode_unlock(dir, 1);
 	if (did_block_signals)
@@ -632,18 +637,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
 		return status;
 	}
 
-	status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+	return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
 				    parent_fe_bh, handle, inode_ac,
 				    fe_blkno, suballoc_loc, suballoc_bit);
-	if (status < 0) {
-		u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit);
-		int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode,
-				inode_ac->ac_bh, suballoc_bit, bg_blkno, 1);
-		if (tmp)
-			mlog_errno(tmp);
-	}
-
-	return status;
 }
 
 static int ocfs2_mkdir(struct user_namespace *mnt_userns,
@@ -2028,8 +2024,11 @@ bail:
 					ocfs2_clusters_to_bytes(osb->sb, 1));
 	if (status < 0 && did_quota_inode)
 		dquot_free_inode(inode);
-	if (handle)
+	if (handle) {
+		if (status < 0 && fe)
+			ocfs2_set_links_count(fe, 0);
 		ocfs2_commit_trans(osb, handle);
+	}
 
 	ocfs2_inode_unlock(dir, 1);
 	if (did_block_signals)
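
The ocfs2 hunks share one idea: if an error is hit while the transaction is still open, zero the new dinode's link count before committing, so the later iput() treats the inode as unlinked and reclaims it instead of leaving an orphan or tripping a BUG on a half-built inode. A rough user-space sketch of the cleanup-label idiom (all names hypothetical, not ocfs2 API):

#include <stdio.h>
#include <stdlib.h>

struct txn { int open; };
struct dinode { unsigned int i_links_count; };

static int mknod_like(int fail_midway)
{
	int status = 0;
	struct txn *handle = NULL;
	struct dinode *fe = NULL;

	handle = malloc(sizeof(*handle));
	fe = malloc(sizeof(*fe));
	if (!handle || !fe) {
		status = -1;
		goto leave;
	}
	fe->i_links_count = 1;			/* inode provisionally linked */
	if (fail_midway)
		status = -5;			/* error after fe was set up */

leave:
	if (handle) {
		if (status < 0 && fe)
			fe->i_links_count = 0;	/* mark unlinked before "commit" */
		free(handle);			/* stands in for ocfs2_commit_trans() */
	}
	free(fe);				/* eviction then frees the unlinked inode */
	return status;
}

int main(void)
{
	printf("status = %d\n", mknod_like(1));
	return 0;
}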
@ -902,7 +902,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
|
||||
goto out_put_mm;
|
||||
|
||||
hold_task_mempolicy(priv);
|
||||
vma = mas_find(&mas, 0);
|
||||
vma = mas_find(&mas, ULONG_MAX);
|
||||
|
||||
if (unlikely(!vma))
|
||||
goto empty_set;
|
||||
|
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -66,7 +66,7 @@ config RUST_IS_AVAILABLE
 	  This shows whether a suitable Rust toolchain is available (found).
 
 	  Please see Documentation/rust/quick-start.rst for instructions on how
-	  to satify the build requirements of Rust support.
+	  to satisfy the build requirements of Rust support.
 
 	  In particular, the Makefile target 'rustavailable' is useful to check
 	  why the Rust toolchain is not being detected.
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -30,6 +30,13 @@
 
 #define GCOV_TAG_FUNCTION_LENGTH 3
 
+/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). */
+#if (__GNUC__ >= 12)
+#define GCOV_UNIT_SIZE 4
+#else
+#define GCOV_UNIT_SIZE 1
+#endif
+
 static struct gcov_info *gcov_info_head;
 
 /**
@@ -383,12 +390,18 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 	pos += store_gcov_u32(buffer, pos, info->version);
 	pos += store_gcov_u32(buffer, pos, info->stamp);
 
+#if (__GNUC__ >= 12)
+	/* Use zero as checksum of the compilation unit. */
+	pos += store_gcov_u32(buffer, pos, 0);
+#endif
+
 	for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
 		fi_ptr = info->functions[fi_idx];
 
 		/* Function record. */
 		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
-		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH);
+		pos += store_gcov_u32(buffer, pos,
+			GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
@@ -402,7 +415,8 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 			/* Counter record. */
 			pos += store_gcov_u32(buffer, pos,
 					      GCOV_TAG_FOR_COUNTER(ct_idx));
-			pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2);
+			pos += store_gcov_u32(buffer, pos,
+					      ci_ptr->num * 2 * GCOV_UNIT_SIZE);
 
 			for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) {
 				pos += store_gcov_u64(buffer, pos,
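
The whole gcov change is a unit conversion: GCC 12.1 began recording .gcda record lengths in bytes where older compilers used 4-byte words, so every length field is scaled by GCOV_UNIT_SIZE. A stand-alone sketch of the arithmetic (user-space, illustration only; num is a made-up counter count):

#include <stdio.h>

#define GCOV_TAG_FUNCTION_LENGTH 3

#if (__GNUC__ >= 12)
#define GCOV_UNIT_SIZE 4	/* lengths are in bytes */
#else
#define GCOV_UNIT_SIZE 1	/* lengths are in 4-byte words */
#endif

int main(void)
{
	unsigned int num = 8;	/* hypothetical number of 64-bit counters */

	/* function record: ident + lineno_checksum + cfg_checksum */
	printf("function length field: %d\n",
	       GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE);
	/* each 64-bit counter occupies two 32-bit words */
	printf("counter length field:  %u\n", num * 2 * GCOV_UNIT_SIZE);
	return 0;
}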
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2455,7 +2455,16 @@ static void __split_huge_page_tail(struct page *head, int tail,
 			page_tail);
 	page_tail->mapping = head->mapping;
 	page_tail->index = head->index + tail;
-	page_tail->private = 0;
+
+	/*
+	 * page->private should not be set in tail pages with the exception
+	 * of swap cache pages that store the swp_entry_t in tail pages.
+	 * Fix up and warn once if private is unexpectedly set.
+	 */
+	if (!folio_test_swapcache(page_folio(head))) {
+		VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, head);
+		page_tail->private = 0;
+	}
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
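
The hunk replaces an unconditional clear with a guarded one: for swap cache pages a tail page's ->private holds the swp_entry_t and must survive the split, so the clear (plus a warn-once for unexpected values) now runs only in the non-swapcache case. A user-space sketch of that warn-once-and-fix-up shape (names hypothetical):

#include <stdio.h>

/* clear 'private' only when it cannot be holding a swap entry */
static void fixup_tail_private(unsigned long *private, int in_swapcache)
{
	static int warned;

	if (in_swapcache)
		return;			/* ->private carries a swp_entry_t: keep it */

	if (*private != 0 && !warned) {	/* analogous to VM_WARN_ON_ONCE_PAGE */
		fprintf(stderr, "tail page private unexpectedly set\n");
		warned = 1;
	}
	*private = 0;
}

int main(void)
{
	unsigned long swap_priv = 0x1234, stray_priv = 0xdead;

	fixup_tail_private(&swap_priv, 1);	/* preserved */
	fixup_tail_private(&stray_priv, 0);	/* warned and cleared */
	printf("%lx %lx\n", swap_priv, stray_priv);
	return 0;
}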
 mm/hugetlb.c | 37 ++++++++++++++++++++++++++++---------
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1014,15 +1014,23 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 	/*
 	 * Clear vm_private_data
+	 * - For shared mappings this is a per-vma semaphore that may be
+	 *   allocated in a subsequent call to hugetlb_vm_op_open.
+	 *   Before clearing, make sure pointer is not associated with vma
+	 *   as this will leak the structure.  This is the case when called
+	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
+	 *   been called to allocate a new structure.
 	 * - For MAP_PRIVATE mappings, this is the reserve map which does
 	 *   not apply to children.  Faults generated by the children are
 	 *   not guaranteed to succeed, even if read-only.
-	 * - For shared mappings this is a per-vma semaphore that may be
-	 *   allocated in a subsequent call to hugetlb_vm_op_open.
 	 */
-	vma->vm_private_data = (void *)0;
-	if (!(vma->vm_flags & VM_MAYSHARE))
-		return;
+	if (vma->vm_flags & VM_MAYSHARE) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		if (vma_lock && vma_lock->vma != vma)
+			vma->vm_private_data = NULL;
+	} else
+		vma->vm_private_data = NULL;
 }
 
 /*
@@ -2924,11 +2932,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
 	if (!page)
 		goto out_uncharge_cgroup;
+	spin_lock_irq(&hugetlb_lock);
 	if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
 		SetHPageRestoreReserve(page);
 		h->resv_huge_pages--;
 	}
-	spin_lock_irq(&hugetlb_lock);
 	list_add(&page->lru, &h->hugepage_activelist);
 	set_page_refcounted(page);
 	/* Fall through */
@@ -4601,6 +4609,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	struct resv_map *resv = vma_resv_map(vma);
 
 	/*
+	 * HPAGE_RESV_OWNER indicates a private mapping.
 	 * This new VMA should share its siblings reservation map if present.
 	 * The VMA will only ever have a valid reservation map pointer where
 	 * it is being copied for another still existing VMA.  As that VMA
@@ -4615,11 +4624,21 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 
 	/*
 	 * vma_lock structure for sharable mappings is vma specific.
-	 * Clear old pointer (if copied via vm_area_dup) and create new.
+	 * Clear old pointer (if copied via vm_area_dup) and allocate
+	 * new structure.  Before clearing, make sure vma_lock is not
+	 * for this vma.
 	 */
 	if (vma->vm_flags & VM_MAYSHARE) {
-		vma->vm_private_data = NULL;
-		hugetlb_vma_lock_alloc(vma);
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		if (vma_lock) {
+			if (vma_lock->vma != vma) {
+				vma->vm_private_data = NULL;
+				hugetlb_vma_lock_alloc(vma);
+			} else
+				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
+		} else
+			hugetlb_vma_lock_alloc(vma);
 	}
 }
 
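
The vma_lock hunks revolve around one ownership test: after vm_area_dup() the child's vm_private_data can still point at the parent's vma_lock, and because the structure carries a back-pointer to its owner, vma_lock->vma != vma distinguishes a stale copied pointer (safe to drop and replace) from a lock this vma really owns (clearing that would leak it). A rough user-space sketch of the check (types and names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct vma;
struct vma_lock { struct vma *vma; };		/* back-pointer to the owner */
struct vma { struct vma_lock *private; };

static void vm_op_open(struct vma *v)
{
	struct vma_lock *lock = v->private;	/* may be copied from the parent */

	if (lock && lock->vma != v)
		v->private = NULL;		/* stale copy: drop, don't free */

	if (!v->private) {			/* allocate a lock this vma owns */
		v->private = malloc(sizeof(*v->private));
		if (v->private)
			v->private->vma = v;
	}
}

int main(void)
{
	struct vma parent = { 0 }, child;

	vm_op_open(&parent);
	child = parent;				/* mimics the vm_area_dup() copy */
	vm_op_open(&child);
	printf("distinct locks: %d\n", parent.private != child.private);
	return 0;
}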
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -787,17 +787,22 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 static int mbind_range(struct mm_struct *mm, unsigned long start,
 		       unsigned long end, struct mempolicy *new_pol)
 {
-	MA_STATE(mas, &mm->mm_mt, start - 1, start - 1);
+	MA_STATE(mas, &mm->mm_mt, start, start);
 	struct vm_area_struct *prev;
 	struct vm_area_struct *vma;
 	int err = 0;
 	pgoff_t pgoff;
 
-	prev = mas_find_rev(&mas, 0);
-	if (prev && (start < prev->vm_end))
-		vma = prev;
-	else
-		vma = mas_next(&mas, end - 1);
+	prev = mas_prev(&mas, 0);
+	if (unlikely(!prev))
+		mas_set(&mas, start);
+
+	vma = mas_find(&mas, end - 1);
+	if (WARN_ON(!vma))
+		return 0;
+
+	if (start > vma->vm_start)
+		prev = vma;
 
 	for (; vma; vma = mas_next(&mas, end - 1)) {
 		unsigned long vmstart = max(start, vma->vm_start);
 mm/mmap.c | 20 ++++++++++----------
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -618,7 +618,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	struct vm_area_struct *expand)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end);
+	struct vm_area_struct *next_next = NULL;	/* uninit var warning */
+	struct vm_area_struct *next = find_vma(mm, vma->vm_end);
 	struct vm_area_struct *orig_vma = vma;
 	struct address_space *mapping = NULL;
 	struct rb_root_cached *root = NULL;
@@ -2625,14 +2626,14 @@ cannot_expand:
 	if (error)
 		goto unmap_and_free_vma;
 
-	/* Can addr have changed??
-	 *
-	 * Answer: Yes, several device drivers can do it in their
-	 *         f_op->mmap method. -DaveM
+	/*
+	 * Expansion is handled above, merging is handled below.
+	 * Drivers should not alter the address of the VMA.
 	 */
-	WARN_ON_ONCE(addr != vma->vm_start);
-
-	addr = vma->vm_start;
+	if (WARN_ON((addr != vma->vm_start))) {
+		error = -EINVAL;
+		goto close_and_free_vma;
+	}
 	mas_reset(&mas);
 
 	/*
@@ -2654,7 +2655,6 @@ cannot_expand:
 		vm_area_free(vma);
 		vma = merge;
 		/* Update vm_flags to pick up the change. */
-		addr = vma->vm_start;
 		vm_flags = vma->vm_flags;
 		goto unmap_writable;
 	}
@@ -2681,7 +2681,7 @@ cannot_expand:
 	if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
 		error = -ENOMEM;
 		if (file)
-			goto unmap_and_free_vma;
+			goto close_and_free_vma;
 		else
 			goto free_vma;
 	}
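
Two of the mmap hunks combine into one user-visible guarantee: the merge path no longer rewrites addr with vma->vm_start, and a mismatch is now treated as a hard error, so mmap(MAP_FIXED) returns exactly the requested address even when the new mapping merges with a neighbour. A small user-space check of that contract (a sketch, not part of the commit):

#include <assert.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* pick an address the kernel considers valid for this process */
	void *hint = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(hint != MAP_FAILED);

	/* MAP_FIXED must map at exactly the requested address */
	void *fixed = mmap(hint, page, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(fixed == hint);
	printf("MAP_FIXED honoured at %p\n", fixed);

	munmap(hint, 2 * page);
	return 0;
}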
|
@ -5784,14 +5784,18 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
|
||||
size_t size)
|
||||
{
|
||||
if (addr) {
|
||||
unsigned long alloc_end = addr + (PAGE_SIZE << order);
|
||||
unsigned long used = addr + PAGE_ALIGN(size);
|
||||
unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
|
||||
struct page *page = virt_to_page((void *)addr);
|
||||
struct page *last = page + nr;
|
||||
|
||||
split_page(virt_to_page((void *)addr), order);
|
||||
while (used < alloc_end) {
|
||||
free_page(used);
|
||||
used += PAGE_SIZE;
|
||||
}
|
||||
split_page_owner(page, 1 << order);
|
||||
split_page_memcg(page, 1 << order);
|
||||
while (page < --last)
|
||||
set_page_refcounted(last);
|
||||
|
||||
last = page + (1UL << order);
|
||||
for (page += nr; page < last; page++)
|
||||
__free_pages_ok(page, 0, FPI_TO_TAIL);
|
||||
}
|
||||
return (void *)addr;
|
||||
}
|
||||
|
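
The make_alloc_exact() rewrite keeps the same arithmetic: a request of size bytes carved from a 2^order block keeps nr = DIV_ROUND_UP(size, PAGE_SIZE) pages and frees the remainder, but the surplus now goes through __free_pages_ok(..., FPI_TO_TAIL) so it lands at the freelist tail instead of being handed straight back out of the middle of the block. The page accounting as a stand-alone sketch (values hypothetical):

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long size = 10000;	/* requested bytes */
	unsigned int order = 2;		/* allocation rounds up to 4 pages */
	unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);

	printf("pages in block: %lu\n", 1UL << order);		/* 4 */
	printf("pages kept:     %lu\n", nr);			/* 3 */
	printf("pages freed:    %lu\n", (1UL << order) - nr);	/* 1, to the tail */
	return 0;
}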
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -2311,6 +2311,9 @@ void zs_destroy_pool(struct zs_pool *pool)
 		int fg;
 		struct size_class *class = pool->size_class[i];
 
+		if (!class)
+			continue;
+
 		if (class->index != i)
 			continue;
 