mm: replace vma->vm_flags direct modifications with modifier calls
Replace direct modifications to vma->vm_flags with calls to modifier functions to be able to track flag changes and to keep vma locking correctness.

[akpm@linux-foundation.org: fix drivers/misc/open-dice.c, per Hyeonggon Yoo]
Link: https://lkml.kernel.org/r/20230126193752.297968-5-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: Sebastian Reichel <sebastian.reichel@collabora.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Minchan Kim <minchan@google.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Oskolkov <posk@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 1c71222e5f
parent e430a95a04
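All of the conversions below funnel through the small helper family introduced earlier in this series (vm_flags_init/reset/set/clear/mod). As a reading aid, here is a minimal C sketch of their intended semantics. This is not the kernel's literal code: the authoritative definitions live in include/linux/mm.h, and the exact locking assertion shown here is an assumption on this page.

/*
 * Sketch of the vm_flags modifier family. The real helpers wrap the
 * now-private flags word so that every update funnels through one
 * place where it can be tracked and checked against VMA locking rules.
 */

/* VMA not yet in the VMA tree (just allocated): no locking needed. */
static inline void vm_flags_init(struct vm_area_struct *vma, vm_flags_t flags)
{
	ACCESS_PRIVATE(vma, __vm_flags) = flags;
}

/* Replace the whole flags word of a VMA that is already in the tree. */
static inline void vm_flags_reset(struct vm_area_struct *vma, vm_flags_t flags)
{
	mmap_assert_write_locked(vma->vm_mm);	/* assumed assertion */
	vm_flags_init(vma, flags);
}

/* vma->vm_flags |= flags  becomes  vm_flags_set(vma, flags) */
static inline void vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	ACCESS_PRIVATE(vma, __vm_flags) |= flags;
}

/* vma->vm_flags &= ~flags  becomes  vm_flags_clear(vma, flags) */
static inline void vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
}

/* Combined set + clear, replacing back-to-back |= and &= ~ pairs. */
static inline void vm_flags_mod(struct vm_area_struct *vma,
				vm_flags_t set, vm_flags_t clear)
{
	mmap_assert_write_locked(vma->vm_mm);
	ACCESS_PRIVATE(vma, __vm_flags) = (vma->vm_flags | set) & ~clear;
}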
@@ -316,7 +316,7 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
 	gate_vma.vm_start = 0xffff0000;
 	gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
-	gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+	vm_flags_init(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC);
 	return 0;
 }
 arch_initcall(gate_vma_init);
@@ -109,7 +109,7 @@ ia64_init_addr_space (void)
 	vma_set_anonymous(vma);
 	vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 	vma->vm_end = vma->vm_start + PAGE_SIZE;
-	vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+	vm_flags_init(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT);
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	mmap_write_lock(current->mm);
 	if (insert_vm_struct(current->mm, vma)) {
@@ -127,8 +127,8 @@ ia64_init_addr_space (void)
 	vma_set_anonymous(vma);
 	vma->vm_end = PAGE_SIZE;
 	vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
-	vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
-			VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_init(vma, VM_READ | VM_MAYREAD | VM_IO |
+		      VM_DONTEXPAND | VM_DONTDUMP);
 	mmap_write_lock(current->mm);
 	if (insert_vm_struct(current->mm, vma)) {
 		mmap_write_unlock(current->mm);
@@ -272,7 +272,7 @@ static int __init gate_vma_init(void)
 	vma_init(&gate_vma, NULL);
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
-	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+	vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
 	gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);

 	return 0;
@@ -149,7 +149,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 	struct vm_area_struct vma;

 	vma.vm_mm = tlb->mm;
-	vma.vm_flags = 0;
+	vm_flags_init(&vma, 0);
 	if (tlb->fullmm) {
 		flush_tlb_mm(tlb->mm);
 		return;
@@ -324,7 +324,7 @@ static int kvmppc_xive_native_mmap(struct kvm_device *dev,
 		return -EINVAL;
 	}

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

 	/*
@@ -156,7 +156,7 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
 	 * VM_NOHUGEPAGE and split them.
 	 */
 	for_each_vma_range(vmi, vma, addr + len) {
-		vma->vm_flags |= VM_NOHUGEPAGE;
+		vm_flags_set(vma, VM_NOHUGEPAGE);
 		walk_page_vma(vma, &subpage_walk_ops, NULL);
 	}
 }
@@ -525,7 +525,7 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
 	pfn = paste_addr >> PAGE_SHIFT;

 	/* flags, page_prot from cxl_mmap(), except we want cachable */
-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);

 	prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
@@ -291,7 +291,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

 	vma->vm_ops = &spufs_mem_mmap_vmops;
@@ -381,7 +381,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

 	vma->vm_ops = &spufs_cntl_mmap_vmops;
@@ -1043,7 +1043,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

 	vma->vm_ops = &spufs_signal1_mmap_vmops;
@@ -1179,7 +1179,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

 	vma->vm_ops = &spufs_signal2_mmap_vmops;
@@ -1302,7 +1302,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

 	vma->vm_ops = &spufs_mss_mmap_vmops;
@@ -1364,7 +1364,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

 	vma->vm_ops = &spufs_psmap_mmap_vmops;
@@ -1424,7 +1424,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

 	vma->vm_ops = &spufs_mfc_mmap_vmops;
@@ -2522,8 +2522,7 @@ static inline void thp_split_mm(struct mm_struct *mm)
 	VMA_ITERATOR(vmi, mm, 0);

 	for_each_vma(vmi, vma) {
-		vma->vm_flags &= ~VM_HUGEPAGE;
-		vma->vm_flags |= VM_NOHUGEPAGE;
+		vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
 		walk_page_vma(vma, &thp_split_walk_ops, NULL);
 	}
 	mm->def_flags |= VM_NOHUGEPAGE;
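One detail worth noting in the s390 hunk above: a back-to-back clear/set pair collapses into a single vm_flags_mod() call, whose argument order is (vma, bits to set, bits to clear). A hedged before/after:

/* Before: two separate read-modify-write updates of vma->vm_flags. */
vma->vm_flags &= ~VM_HUGEPAGE;
vma->vm_flags |= VM_NOHUGEPAGE;

/* After: one tracked update; note the order — set first, then clear. */
vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);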
@@ -391,7 +391,7 @@ void __init map_vsyscall(void)
 	}

 	if (vsyscall_mode == XONLY)
-		gate_vma.vm_flags = VM_EXEC;
+		vm_flags_init(&gate_vma, VM_EXEC);

 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
 		     (unsigned long)VSYSCALL_ADDR);
@@ -95,7 +95,7 @@ static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
 		return ret;

 	vma->vm_ops = &sgx_vm_ops;
-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
 	vma->vm_private_data = encl;

 	return 0;
@@ -105,7 +105,7 @@ static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)

 	vma->vm_ops = &sgx_vepc_vm_ops;
 	/* Don't copy VMA in fork() */
-	vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
+	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY);
 	vma->vm_private_data = vepc;

 	return 0;
@@ -1000,7 +1000,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,

 	ret = reserve_pfn_range(paddr, size, prot, 0);
 	if (ret == 0 && vma)
-		vma->vm_flags |= VM_PAT;
+		vm_flags_set(vma, VM_PAT);
 	return ret;
 }

@@ -1066,7 +1066,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 	}
 	free_pfn_range(paddr, size);
 	if (vma)
-		vma->vm_flags &= ~VM_PAT;
+		vm_flags_clear(vma, VM_PAT);
 }

 /*
@@ -1076,7 +1076,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 */
 void untrack_pfn_moved(struct vm_area_struct *vma)
 {
-	vma->vm_flags &= ~VM_PAT;
+	vm_flags_clear(vma, VM_PAT);
 }

 pgprot_t pgprot_writecombine(pgprot_t prot)
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
 	vma_init(&gate_vma, NULL);
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
-	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+	vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
 	gate_vma.vm_page_prot = PAGE_READONLY;

 	return 0;
@@ -310,7 +310,7 @@ pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
 		return -EROFS;

 	/* changing from read to write with mprotect is not allowed */
-	vma->vm_flags &= ~VM_MAYWRITE;
+	vm_flags_clear(vma, VM_MAYWRITE);

 	pfrt_log_dev = to_pfrt_log_dev(file);

@@ -5572,8 +5572,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
 		return -EPERM;
 	}
-	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
-	vma->vm_flags &= ~VM_MAYWRITE;
+	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
@@ -206,7 +206,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
 	refcount_set(&vdata->refcnt, 1);
 	vma->vm_private_data = vdata;

-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 	if (vdata->type == MSPEC_UNCACHED)
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vma->vm_ops = &mspec_vm_ops;
@@ -2363,7 +2363,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
 		return -EINVAL;
 	}

-	vma->vm_flags |= VM_IO;
+	vm_flags_set(vma, VM_IO);

 	return remap_pfn_range(vma, vma->vm_start,
 			       phys_base >> PAGE_SHIFT,
@@ -308,7 +308,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
 		return rc;

 	vma->vm_ops = &dax_vm_ops;
-	vma->vm_flags |= VM_HUGEPAGE;
+	vm_flags_set(vma, VM_HUGEPAGE);
 	return 0;
 }

@@ -201,7 +201,7 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (rc < 0)
 		return rc;

-	vma->vm_flags |= VM_DONTCOPY;
+	vm_flags_set(vma, VM_DONTCOPY);
 	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
 				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -257,7 +257,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	 */
 	if (is_cow_mapping(vma->vm_flags) &&
 	    !(vma->vm_flags & VM_ACCESS_FLAGS))
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);

 	return drm_gem_ttm_mmap(obj, vma);
 }
@@ -2879,8 +2879,8 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,

 	address = dev->adev->rmmio_remap.bus_addr;

-	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
-			 VM_DONTDUMP | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+		     VM_DONTDUMP | VM_PFNMAP);

 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

@@ -159,8 +159,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
 	address = kfd_get_process_doorbells(pdd);
 	if (!address)
 		return -ENOMEM;
-	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
-			 VM_DONTDUMP | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+		     VM_DONTDUMP | VM_PFNMAP);

 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

@@ -1052,8 +1052,8 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
 	pfn = __pa(page->kernel_address);
 	pfn >>= PAGE_SHIFT;

-	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
-		       | VM_DONTDUMP | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
+		     | VM_DONTDUMP | VM_PFNMAP);

 	pr_debug("Mapping signal page\n");
 	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
@@ -1978,8 +1978,8 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
 		return -ENOMEM;
 	}

-	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
-		       | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+		     | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
 	/* Mapping pages to user process */
 	return remap_pfn_range(vma, vma->vm_start,
 			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
@@ -1047,7 +1047,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 			goto err_drm_gem_object_put;
 	}

-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 }
@@ -530,8 +530,7 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
 	 * the whole buffer.
 	 */
 	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_DONTEXPAND;
+	vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);

 	if (dma_obj->map_noncoherent) {
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
@@ -633,7 +633,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
 	if (ret)
 		return ret;

-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	if (shmem->map_wc)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -476,7 +476,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)

 	if (!capable(CAP_SYS_ADMIN) &&
 	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
-		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+		vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
 #if defined(__i386__) || defined(__x86_64__)
 		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
@@ -492,7 +492,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)

 	vma->vm_ops = &drm_vm_dma_ops;

-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

 	drm_vm_open_locked(dev, vma);
 	return 0;
@@ -560,7 +560,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		return -EINVAL;

 	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
-		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+		vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
 #if defined(__i386__) || defined(__x86_64__)
 		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
@@ -628,7 +628,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 	default:
 		return -EINVAL;	/* This should never happen. */
 	}
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

 	drm_vm_open_locked(dev, vma);
 	return 0;
@@ -130,7 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
 {
 	pgprot_t vm_page_prot;

-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

 	vm_page_prot = vm_get_page_prot(vma->vm_flags);

@@ -274,7 +274,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
 	unsigned long vm_size;
 	int ret;

-	vma->vm_flags &= ~VM_PFNMAP;
+	vm_flags_clear(vma, VM_PFNMAP);
 	vma->vm_pgoff = 0;

 	vm_size = vma->vm_end - vma->vm_start;
@@ -368,7 +368,7 @@ static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	if (obj->import_attach)
 		return dma_buf_mmap(obj->dma_buf, vma, 0);

-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

 	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
 			  exynos_gem->flags);
@@ -139,7 +139,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	 */
 	vma->vm_ops = &psbfb_vm_ops;
 	vma->vm_private_data = (void *)fb;
-	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
 	return 0;
 }

@@ -102,7 +102,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 	buf = dev_priv->mmap_buffer;
 	buf_priv = buf->dev_private;

-	vma->vm_flags |= VM_DONTCOPY;
+	vm_flags_set(vma, VM_DONTCOPY);

 	buf_priv->currently_mapped = I810_BUF_MAPPED;

@@ -979,7 +979,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 			i915_gem_object_put(obj);
 			return -EINVAL;
 		}
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);
 	}

 	anon = mmap_singleton(to_i915(dev));
@@ -988,7 +988,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		return PTR_ERR(anon);
 	}

-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);

 	/*
 	 * We keep the ref on mmo->obj, not vm_file, but we require
@@ -158,7 +158,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
 	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

@@ -1012,7 +1012,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);

-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

 	return 0;
@@ -543,8 +543,7 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);

-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
+	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

 	if (omap_obj->flags & OMAP_BO_WC) {
 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
@@ -251,8 +251,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
 	 * We allocated a struct page table for rk_obj, so clear
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_flags &= ~VM_PFNMAP;
+	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);

 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
@@ -574,7 +574,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
 		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
 		 * to 0 as we want to map the whole buffer.
 		 */
-		vma->vm_flags &= ~VM_PFNMAP;
+		vm_flags_clear(vma, VM_PFNMAP);
 		vma->vm_pgoff = 0;

 		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
@@ -588,8 +588,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
 	} else {
 		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

-		vma->vm_flags |= VM_MIXEDMAP;
-		vma->vm_flags &= ~VM_PFNMAP;
+		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

 		vma->vm_page_prot = pgprot_writecombine(prot);
 	}
@@ -468,8 +468,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)

 	vma->vm_private_data = bo;

-	vma->vm_flags |= VM_PFNMAP;
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_mmap_obj);
@@ -46,7 +46,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
 		return -EINVAL;

 	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+	vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 	vma->vm_ops = &virtio_gpu_vram_vm_ops;
@@ -97,7 +97,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)

 	/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
 	if (!is_cow_mapping(vma->vm_flags))
-		vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+		vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);

 	ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */

@@ -69,8 +69,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
 	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
 	 * the whole buffer.
 	 */
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+	vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
 	vma->vm_pgoff = 0;

 	/*
@@ -1264,7 +1264,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma_pages(vma) != 1)
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
+	vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
 	vma->vm_ops = &cs_char_vm_ops;
 	vma->vm_private_data = file->private_data;

@@ -1659,7 +1659,7 @@ out:
 	atomic_dec(&msc->user_count);

 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
 	vma->vm_ops = &msc_mmap_ops;
 	return ret;
 }
@@ -715,7 +715,7 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
 	pm_runtime_get_sync(&stm->dev);

 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_ops = &stm_mmap_vmops;
 	vm_iomap_memory(vma, phys, size);

@@ -403,7 +403,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
 			ret = -EPERM;
 			goto done;
 		}
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);
 		addr = vma->vm_start;
 		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
 			memlen = uctxt->egrbufs.buffers[i].len;
@@ -528,7 +528,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
 		goto done;
 	}

-	vma->vm_flags = flags;
+	vm_flags_reset(vma, flags);
 	hfi1_cdbg(PROC,
 		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
 		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
@@ -2087,7 +2087,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,

 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 		return -EPERM;
-	vma->vm_flags &= ~VM_MAYWRITE;
+	vm_flags_clear(vma, VM_MAYWRITE);

 	if (!dev->mdev->clock_info)
 		return -EOPNOTSUPP;
@@ -2311,7 +2311,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)

 		if (vma->vm_flags & VM_WRITE)
 			return -EPERM;
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);

 		/* Don't expose to user-space information it shouldn't have */
 		if (PAGE_SIZE > 4096)
@@ -733,7 +733,7 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
 		}

 		/* don't allow them to later change with mprotect */
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);
 	}

 	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
@@ -769,7 +769,7 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
 		phys = dd->physaddr + ureg;
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

-		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+		vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
 		ret = io_remap_pfn_range(vma, vma->vm_start,
 					 phys >> PAGE_SHIFT,
 					 vma->vm_end - vma->vm_start,
@@ -810,8 +810,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
 	 * don't allow them to later change to readable with mprotect (for when
 	 * not initially mapped readable, as is normally the case)
 	 */
-	vma->vm_flags &= ~VM_MAYREAD;
-	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+	vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);

 	/* We used PAT if wc_cookie == 0 */
 	if (!dd->wc_cookie)
@@ -852,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
 		goto bail;
 	}
 	/* don't allow them to later change to writable with mprotect */
-	vma->vm_flags &= ~VM_MAYWRITE;
+	vm_flags_clear(vma, VM_MAYWRITE);

 	start = vma->vm_start;

@@ -944,7 +943,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 		 * Don't allow permission to later change to writable
 		 * with mprotect.
 		 */
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);
 	} else
 		goto bail;
 	len = vma->vm_end - vma->vm_start;
@@ -955,7 +954,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,

 	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 	vma->vm_ops = &qib_file_vm_ops;
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 	ret = 1;

 bail:
@@ -672,7 +672,7 @@ int usnic_ib_mmap(struct ib_ucontext *context,
 	usnic_dbg("\n");

 	us_ibdev = to_usdev(context->device);
-	vma->vm_flags |= VM_IO;
+	vm_flags_set(vma, VM_IO);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vfid = vma->vm_pgoff;
 	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
@@ -408,7 +408,7 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 	}

 	/* Map UAR to kernel space, VM_LOCKED? */
-	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
 			       vma->vm_page_prot))
@@ -293,7 +293,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 		return ret;
 	}

-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_private_data = &buf->handler;
 	vma->vm_ops = &vb2_common_vm_ops;

@@ -185,7 +185,7 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
 	/*
 	 * Make sure that vm_areas for 2 buffers won't be merged together
 	 */
-	vma->vm_flags |= VM_DONTEXPAND;
+	vm_flags_set(vma, VM_DONTEXPAND);

 	/*
 	 * Use common vm_area operations to track buffer refcount.
@@ -314,7 +314,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 	}

 	vma->vm_ops = &videobuf_vm_ops;
-	vma->vm_flags |= VM_DONTEXPAND;
+	vm_flags_set(vma, VM_DONTEXPAND);
 	vma->vm_private_data = map;

 	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
@@ -630,8 +630,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 	map->count    = 1;
 	map->q        = q;
 	vma->vm_ops   = &videobuf_vm_ops;
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+	/* using shared anonymous pages */
+	vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
 	vma->vm_private_data = map;
 	dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
 		map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
@@ -247,7 +247,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 	}

 	vma->vm_ops          = &videobuf_vm_ops;
-	vma->vm_flags       |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_private_data = map;

 	dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
@@ -220,7 +220,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
 		 ctx->psn_phys, ctx->pe , ctx->master);

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vma->vm_ops = &cxl_mmap_vmops;
 	return 0;
@@ -2082,7 +2082,7 @@ static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
 {
 	struct hl_ts_buff *ts_buff = buf->private;

-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
 	return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
 }

@@ -4236,8 +4236,8 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
 {
 	int rc;

-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-			 VM_DONTCOPY | VM_NORESERVE;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+		     VM_DONTCOPY | VM_NORESERVE);

 	rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
 			       (dma_addr - HOST_PHYS_BASE), size);
@@ -5538,8 +5538,8 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
 {
 	int rc;

-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-			 VM_DONTCOPY | VM_NORESERVE;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+		     VM_DONTCOPY | VM_NORESERVE);

 #ifdef _HAS_DMA_MMAP_COHERENT

@@ -10116,8 +10116,8 @@ static int gaudi2_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,

 	address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;

-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-			 VM_DONTCOPY | VM_NORESERVE;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+		     VM_DONTCOPY | VM_NORESERVE);

 	rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
 			     block_size, vma->vm_page_prot);
@@ -2880,8 +2880,8 @@ static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
 {
 	int rc;

-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
-			 VM_DONTCOPY | VM_NORESERVE;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+		     VM_DONTCOPY | VM_NORESERVE);

 	rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
 			       (dma_addr - HOST_PHYS_BASE), size);
@@ -180,7 +180,7 @@ static int check_mmap_afu_irq(struct ocxl_context *ctx,
 	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
 	    !(vma->vm_flags & VM_WRITE))
 		return -EINVAL;
-	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
+	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
 	return 0;
 }

@@ -204,7 +204,7 @@ int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
 	if (rc)
 		return rc;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vma->vm_ops = &ocxl_vmops;
 	return 0;
@@ -134,7 +134,7 @@ static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
 	    (afu->config.global_mmio_size >> PAGE_SHIFT))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vma->vm_ops = &global_mmio_vmops;
 	vma->vm_private_data = afu;
@@ -95,12 +95,12 @@ static int open_dice_mmap(struct file *filp, struct vm_area_struct *vma)
 		if (vma->vm_flags & VM_WRITE)
 			return -EPERM;
 		/* Ensure userspace cannot acquire VM_WRITE later. */
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);
 	}

 	/* Create write-combine mapping so all clients observe a wipe. */
 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTCOPY | VM_DONTDUMP);
 	return vm_iomap_memory(vma, drvdata->rmem->base, drvdata->rmem->size);
 }

@@ -101,8 +101,8 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
 	    vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
-			 VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_LOCKED |
+		     VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_page_prot = PAGE_SHARED;
 	vma->vm_ops = &gru_vm_ops;

@@ -229,7 +229,7 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
 	if (!qfr)
 		return -ENOMEM;

-	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
 	vma->vm_ops = &uacce_vm_ops;
 	vma->vm_private_data = q;
 	qfr->type = type;
@@ -389,7 +389,7 @@ static int dax_devmap(struct file *f, struct vm_area_struct *vma)
 	/* completion area is mapped read-only for user */
 	if (vma->vm_flags & VM_WRITE)
 		return -EPERM;
-	vma->vm_flags &= ~VM_MAYWRITE;
+	vm_flags_clear(vma, VM_MAYWRITE);

 	if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
 			    len, vma->vm_page_prot))
@@ -1167,7 +1167,7 @@ static int afu_mmap(struct file *file, struct vm_area_struct *vma)
 	    (ctx->psn_size >> PAGE_SHIFT))
 		return -EINVAL;

-	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vma->vm_ops = &ocxlflash_vmops;
 	return 0;
@@ -1288,7 +1288,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	}

 	sfp->mmap_called = 1;
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
 out:
@@ -1072,7 +1072,7 @@ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
 	vma->vm_private_data = bo;

 	vma->vm_ops = &hmm_bo_vm_ops;
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

 	/*
 	 * call hmm_bo_vm_open explicitly.
@@ -1476,8 +1476,8 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
 	}

 	vma->vm_ops = &meye_vm_ops;
-	vma->vm_flags &= ~VM_IO;	/* not I/O memory */
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	/* not I/O memory */
+	vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
 	vma->vm_private_data = (void *) (offset / gbufsize);
 	meye_vm_open(vma);

@@ -779,7 +779,7 @@ static int v4l_stk_mmap(struct file *fp, struct vm_area_struct *vma)
 	ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
 	if (ret)
 		return ret;
-	vma->vm_flags |= VM_DONTEXPAND;
+	vm_flags_set(vma, VM_DONTEXPAND);
 	vma->vm_private_data = sbuf;
 	vma->vm_ops = &stk_v4l_vm_ops;
 	sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
@@ -1928,7 +1928,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
 {
 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_ops = &tcmu_vm_ops;

 	vma->vm_private_data = udev;
@@ -713,7 +713,7 @@ static const struct vm_operations_struct uio_logical_vm_ops = {

 static int uio_mmap_logical(struct vm_area_struct *vma)
 {
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_ops = &uio_logical_vm_ops;
 	return 0;
 }
@@ -279,8 +279,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
 		}
 	}

-	vma->vm_flags |= VM_IO;
-	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_ops = &usbdev_vm_ops;
 	vma->vm_private_data = usbm;

@@ -1272,8 +1272,7 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (vma->vm_flags & VM_WRITE)
 		return -EPERM;

-	vma->vm_flags &= ~VM_MAYWRITE;
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
 	vma->vm_private_data = filp->private_data;
 	mon_bin_vma_open(vma);
 	return 0;
@@ -512,7 +512,7 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct vduse_iova_domain *domain = file->private_data;

-	vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+	vm_flags_set(vma, VM_DONTDUMP | VM_DONTEXPAND);
 	vma->vm_private_data = domain;
 	vma->vm_ops = &vduse_domain_mmap_ops;

@@ -1799,7 +1799,7 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
 	 * See remap_pfn_range(), called from vfio_pci_fault() but we can't
 	 * change vm_flags within the fault handler.  Set them now.
 	 */
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_ops = &vfio_pci_mmap_ops;

 	return 0;
@@ -1315,7 +1315,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_end - vma->vm_start != notify.size)
 		return -ENOTSUPP;

-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_ops = &vhost_vdpa_vm_ops;
 	return 0;
 }
@@ -391,7 +391,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 #ifndef MMU
 	/* this is uClinux (no MMU) specific code */

-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_start = videomemory;

 	return 0;
@@ -232,9 +232,9 @@ static const struct address_space_operations fb_deferred_io_aops = {
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	vma->vm_ops = &fb_deferred_io_vm_ops;
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 	if (!(info->flags & FBINFO_VIRTFB))
-		vma->vm_flags |= VM_IO;
+		vm_flags_set(vma, VM_IO);
 	vma->vm_private_data = info;
 	return 0;
 }
@@ -525,7 +525,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)

 	vma->vm_private_data = vm_priv;

-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

 	vma->vm_ops = &gntalloc_vmops;

@@ -1055,10 +1055,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)

 	vma->vm_ops = &gntdev_vmops;

-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);

 	if (use_ptemod)
-		vma->vm_flags |= VM_DONTCOPY;
+		vm_flags_set(vma, VM_DONTCOPY);

 	vma->vm_private_data = map;
 	if (map->flags) {
@@ -156,7 +156,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
 	vma_priv->file_priv = file_priv;
 	vma_priv->users = 1;

-	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+	vm_flags_set(vma, VM_IO | VM_DONTEXPAND);
 	vma->vm_ops = &privcmd_buf_vm_ops;
 	vma->vm_private_data = vma_priv;

@@ -934,8 +934,8 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
-			 VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
+		     VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_ops = &privcmd_vm_ops;
 	vma->vm_private_data = NULL;

fs/aio.c (2 changed lines)
@@ -390,7 +390,7 @@ static const struct vm_operations_struct aio_ring_vm_ops = {

 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	vma->vm_flags |= VM_DONTEXPAND;
+	vm_flags_set(vma, VM_DONTEXPAND);
 	vma->vm_ops = &aio_ring_vm_ops;
 	return 0;
 }
@@ -408,7 +408,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
 		 * unpopulated ptes via cramfs_read_folio().
 		 */
 		int i;
-		vma->vm_flags |= VM_MIXEDMAP;
+		vm_flags_set(vma, VM_MIXEDMAP);
 		for (i = 0; i < pages && !ret; i++) {
 			vm_fault_t vmf;
 			unsigned long off = i * PAGE_SIZE;
@@ -429,7 +429,7 @@ static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
 		return -EINVAL;

 	vma->vm_ops = &erofs_dax_vm_ops;
-	vma->vm_flags |= VM_HUGEPAGE;
+	vm_flags_set(vma, VM_HUGEPAGE);
 	return 0;
 }
 #else
@@ -270,7 +270,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
 	vma->vm_end = STACK_TOP_MAX;
 	vma->vm_start = vma->vm_end - PAGE_SIZE;
-	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+	vm_flags_init(vma, VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP);
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

 	err = insert_vm_struct(mm, vma);
@@ -834,7 +834,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
 	}

 	/* mprotect_fixup is overkill to remove the temporary stack flags */
-	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+	vm_flags_clear(vma, VM_STACK_INCOMPLETE_SETUP);

 	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
 	stack_size = vma->vm_end - vma->vm_start;
@@ -801,7 +801,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 	file_accessed(file);
 	if (IS_DAX(file_inode(file))) {
 		vma->vm_ops = &ext4_dax_vm_ops;
-		vma->vm_flags |= VM_HUGEPAGE;
+		vm_flags_set(vma, VM_HUGEPAGE);
 	} else {
 		vma->vm_ops = &ext4_file_vm_ops;
 	}
@@ -860,7 +860,7 @@ int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	file_accessed(file);
 	vma->vm_ops = &fuse_dax_vm_ops;
-	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+	vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
 	return 0;
 }

@@ -132,7 +132,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
-	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
 	vma->vm_ops = &hugetlb_vm_ops;

 	ret = seal_check_future_write(info->seals, vma);
@@ -811,7 +811,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 	 * as input to create an allocation policy.
	 */
 	vma_init(&pseudo_vma, mm);
-	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
+	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pseudo_vma.vm_file = file;

 	for (index = start; index < end; index++) {
@@ -389,8 +389,7 @@ static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
 		     "orangefs_file_mmap: called on %pD\n", file);

 	/* set the sequential readahead hint */
-	vma->vm_flags |= VM_SEQ_READ;
-	vma->vm_flags &= ~VM_RAND_READ;
+	vm_flags_mod(vma, VM_SEQ_READ, VM_RAND_READ);

 	file_accessed(file);
 	vma->vm_ops = &orangefs_file_vm_ops;
@@ -1299,7 +1299,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 			for_each_vma(vmi, vma) {
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
 					continue;
-				vma->vm_flags &= ~VM_SOFTDIRTY;
+				vm_flags_clear(vma, VM_SOFTDIRTY);
 				vma_set_page_prot(vma);
 			}

@@ -582,8 +582,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 		return -EPERM;

-	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
-	vma->vm_flags |= VM_MIXEDMAP;
+	vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
 	vma->vm_ops = &vmcore_mmap_ops;

 	len = 0;
@@ -113,7 +113,7 @@ static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
 {
 	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

-	vma->vm_flags = flags;
+	vm_flags_reset(vma, flags);
 	/*
 	 * For shared mappings, we want to enable writenotify while
 	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
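The two wholesale assignments converted in this series (the hfi1 hunk earlier and the userfaultfd hunk above) become vm_flags_reset() rather than vm_flags_init(): these VMAs are already linked into the tree, so the whole-word replacement must happen under the proper lock (per the sketch near the top of this page). A small hedged illustration, where mm and new_flags are hypothetical:

/* Fresh VMA, not yet visible to anyone else: plain init is enough. */
vm_flags_init(vma, VM_READ | VM_MAYREAD);

/* VMA already in the tree: reset checks the caller's locking
 * before replacing the whole flags word. */
mmap_write_lock(mm);
vm_flags_reset(vma, new_flags);
mmap_write_unlock(mm);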
@@ -1429,7 +1429,7 @@ xfs_file_mmap(
 	file_accessed(file);
 	vma->vm_ops = &xfs_file_vm_ops;
 	if (IS_DAX(inode))
-		vma->vm_flags |= VM_HUGEPAGE;
+		vm_flags_set(vma, VM_HUGEPAGE);
 	return 0;
 }

@@ -3653,7 +3653,7 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
 		 * VM_MAYWRITE as we still want them to be COW-writable.
		 */
 		if (vma->vm_flags & VM_SHARED)
-			vma->vm_flags &= ~(VM_MAYWRITE);
+			vm_flags_clear(vma, VM_MAYWRITE);
 	}

 	return 0;
@@ -269,7 +269,7 @@ static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
 		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
 			return -EPERM;
 	} else {
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);
 	}
 	/* remap_vmalloc_range() checks size and offset constraints */
 	return remap_vmalloc_range(vma, rb_map->rb,
@@ -290,7 +290,7 @@ static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)
 		 */
 		return -EPERM;
 	} else {
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);
 	}
 	/* remap_vmalloc_range() checks size and offset constraints */
 	return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
@@ -882,10 +882,10 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 	/* set default open/close callbacks */
 	vma->vm_ops = &bpf_map_default_vmops;
 	vma->vm_private_data = map;
-	vma->vm_flags &= ~VM_MAYEXEC;
+	vm_flags_clear(vma, VM_MAYEXEC);
 	if (!(vma->vm_flags & VM_WRITE))
 		/* disallow re-mapping with PROT_WRITE */
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);

 	err = map->ops->map_mmap(map, vma);
 	if (err)
@@ -6573,7 +6573,7 @@ aux_unlock:
 	 * Since pinned accounting is per vm we cannot allow fork() to copy our
	 * vma.
	 */
-	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_ops = &perf_mmap_vmops;

 	if (event->pmu->event_mapped)
Some files were not shown because too many files have changed in this diff.