From f0953a1bbaca71e1ebbcb9864eb1b273156157ed Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 May 2021 18:06:47 -0700 Subject: [PATCH] mm: fix typos in comments Fix ~94 single-word typos in locking code comments, plus a few very obvious grammar mistakes. Link: https://lkml.kernel.org/r/20210322212624.GA1963421@gmail.com Link: https://lore.kernel.org/r/20210322205203.GB1959563@gmail.com Signed-off-by: Ingo Molnar Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Randy Dunlap Cc: Bhaskar Chowdhury Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 +- include/linux/vmalloc.h | 4 ++-- mm/balloon_compaction.c | 4 ++-- mm/compaction.c | 4 ++-- mm/filemap.c | 2 +- mm/gup.c | 2 +- mm/highmem.c | 2 +- mm/huge_memory.c | 6 +++--- mm/hugetlb.c | 6 +++--- mm/internal.h | 2 +- mm/kasan/kasan.h | 8 ++++---- mm/kasan/quarantine.c | 4 ++-- mm/kasan/shadow.c | 4 ++-- mm/kfence/report.c | 2 +- mm/khugepaged.c | 2 +- mm/ksm.c | 4 ++-- mm/madvise.c | 4 ++-- mm/memcontrol.c | 18 +++++++++--------- mm/memory-failure.c | 2 +- mm/memory.c | 10 +++++----- mm/mempolicy.c | 4 ++-- mm/migrate.c | 8 ++++---- mm/mmap.c | 4 ++-- mm/mprotect.c | 2 +- mm/mremap.c | 2 +- mm/oom_kill.c | 2 +- mm/page-writeback.c | 4 ++-- mm/page_alloc.c | 14 +++++++------- mm/page_owner.c | 2 +- mm/percpu-internal.h | 2 +- mm/percpu.c | 2 +- mm/pgalloc-track.h | 6 +++--- mm/slab.c | 6 +++--- mm/slub.c | 2 +- mm/swap_slots.c | 2 +- mm/vmalloc.c | 6 +++--- mm/vmstat.c | 2 +- mm/zpool.c | 2 +- mm/zsmalloc.c | 2 +- 39 files changed, 83 insertions(+), 83 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 76e27ebb28a3..322ec61d0da7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -106,7 +106,7 @@ extern int mmap_rnd_compat_bits __read_mostly; * embedding these tags into addresses that point to these memory regions, and * checking that the memory and the pointer tags match on memory accesses) * redefine this macro to strip tags from pointers. - * It's defined as noop for arcitectures that don't support memory tagging. + * It's defined as noop for architectures that don't support memory tagging. */ #ifndef untagged_addr #define untagged_addr(addr) (addr) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index b6ff16393bf6..4d668abb6391 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -33,7 +33,7 @@ struct notifier_block; /* in notifier.h */ * * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after * shadow memory has been mapped. It's used to handle allocation errors so that - * we don't try to poision shadow on free if it was never allocated. + * we don't try to poison shadow on free if it was never allocated. * * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to * determine which allocations need the module shadow freed. @@ -43,7 +43,7 @@ struct notifier_block; /* in notifier.h */ /* * Maximum alignment for ioremap() regions. - * Can be overriden by arch-specific value. + * Can be overridden by arch-specific value. */ #ifndef IOREMAP_MAX_ORDER #define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */ diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 26de020aae7b..907fefde2572 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue); /** * balloon_page_list_dequeue() - removes pages from balloon's page list and * returns a list of the pages. 
- * @b_dev_info: balloon device decriptor where we will grab a page from. + * @b_dev_info: balloon device descriptor where we will grab a page from. * @pages: pointer to the list of pages that would be returned to the caller. * @n_req_pages: number of requested pages. * @@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue); /* * balloon_page_dequeue - removes a page from balloon's page list and returns * its address to allow the driver to release the page. - * @b_dev_info: balloon device decriptor where we will grab a page from. + * @b_dev_info: balloon device descriptor where we will grab a page from. * * Driver must call this function to properly dequeue a previously enqueued page * before definitively releasing it back to the guest system. diff --git a/mm/compaction.c b/mm/compaction.c index 3a6c6b821f80..84fde270ae74 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2012,8 +2012,8 @@ static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) unsigned int wmark_low; /* - * Cap the low watermak to avoid excessive compaction - * activity in case a user sets the proactivess tunable + * Cap the low watermark to avoid excessive compaction + * activity in case a user sets the proactiveness tunable * close to 100 (maximum). */ wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); diff --git a/mm/filemap.c b/mm/filemap.c index 7fadf211643c..66f7e9fdfbc4 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2755,7 +2755,7 @@ unsigned int seek_page_size(struct xa_state *xas, struct page *page) * entirely memory-based such as tmpfs, and filesystems which support * unwritten extents. * - * Return: The requested offset on successs, or -ENXIO if @whence specifies + * Return: The requested offset on success, or -ENXIO if @whence specifies * SEEK_DATA and there is no data after @start. There is an implicit hole * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start * and @end contain data. diff --git a/mm/gup.c b/mm/gup.c index aa09535cf4d4..0697134b6a12 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1575,7 +1575,7 @@ finish_or_fault: * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - - * allowing a hole to be left in the corefile to save diskspace. + * allowing a hole to be left in the corefile to save disk space. * * Called without mmap_lock (takes and releases the mmap_lock by itself). */ diff --git a/mm/highmem.c b/mm/highmem.c index e389337e00b4..4fb51d735aa6 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -519,7 +519,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) /* * Disable migration so resulting virtual address is stable - * accross preemption. + * across preemption. 
*/ migrate_disable(); preempt_disable(); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 98456017744d..63ed6b25deaa 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1792,8 +1792,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, /* * Returns * - 0 if PMD could not be locked - * - 1 if PMD was locked but protections unchange and TLB flush unnecessary - * - HPAGE_PMD_NR is protections changed and TLB flush necessary + * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary + * - HPAGE_PMD_NR if protections changed and TLB flush necessary */ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, unsigned long cp_flags) @@ -2469,7 +2469,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, xa_lock(&swap_cache->i_pages); } - /* lock lru list/PageCompound, ref freezed by page_ref_freeze */ + /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ lruvec = lock_page_lruvec(head); for (i = nr - 1; i >= 1; i--) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 629aa4c2259c..3db405dea3dc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -466,7 +466,7 @@ static int allocate_file_region_entries(struct resv_map *resv, resv->region_cache_count; /* At this point, we should have enough entries in the cache - * for all the existings adds_in_progress. We should only be + * for all the existing adds_in_progress. We should only be * needing to allocate for regions_needed. */ VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); @@ -5536,8 +5536,8 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); /* - * vma need span at least one aligned PUD size and the start,end range - * must at least partialy within it. + * vma needs to span at least one aligned PUD size, and the range + * must be at least partially within it. */ if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || (*end <= v_start) || (*start >= v_end)) diff --git a/mm/internal.h b/mm/internal.h index feeaaf06705d..54bd0dc2c23c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -334,7 +334,7 @@ static inline bool is_exec_mapping(vm_flags_t flags) } /* - * Stack area - atomatically grows in one direction + * Stack area - automatically grows in one direction * * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: * do_mmap() forbids all other combinations.
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 3820ca54743b..8f450bc28045 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -55,9 +55,9 @@ extern bool kasan_flag_async __ro_after_init; #define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ #ifdef CONFIG_KASAN_HW_TAGS -#define KASAN_TAG_MIN 0xF0 /* mimimum value for random tags */ +#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */ #else -#define KASAN_TAG_MIN 0x00 /* mimimum value for random tags */ +#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */ #endif #ifdef CONFIG_KASAN_GENERIC @@ -403,7 +403,7 @@ static inline bool kasan_byte_accessible(const void *addr) #else /* CONFIG_KASAN_HW_TAGS */ /** - * kasan_poison - mark the memory range as unaccessible + * kasan_poison - mark the memory range as inaccessible * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE * @size - range size, must be aligned to KASAN_GRANULE_SIZE * @value - value that's written to metadata for the range @@ -434,7 +434,7 @@ bool kasan_byte_accessible(const void *addr); /** * kasan_poison_last_granule - mark the last granule of the memory range as - * unaccessible + * inaccessible * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE * @size - range size * diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 728fb24c5683..d8ccff4c1275 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -27,7 +27,7 @@ /* Data structure and operations for quarantine queues. */ /* - * Each queue is a signle-linked list, which also stores the total size of + * Each queue is a single-linked list, which also stores the total size of * objects inside of it. */ struct qlist_head { @@ -138,7 +138,7 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) local_irq_save(flags); /* - * As the object now gets freed from the quaratine, assume that its + * As the object now gets freed from the quarantine, assume that its * free track is no longer valid. */ *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE; diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 727ad4629173..082ee5b6d9a1 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -316,7 +316,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size) * // rest of vmalloc process * STORE p, a LOAD shadow(x+99) * - * If there is no barrier between the end of unpoisioning the shadow + * If there is no barrier between the end of unpoisoning the shadow * and the store of the result to p, the stores could be committed * in a different order by CPU#0, and CPU#1 could erroneously observe * poison in the shadow. @@ -384,7 +384,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, * How does this work? * ------------------- * - * We have a region that is page aligned, labelled as A. + * We have a region that is page aligned, labeled as A. * That might not map onto the shadow in a way that is page-aligned: * * start end diff --git a/mm/kfence/report.c b/mm/kfence/report.c index e3f71451ad9e..2a319c21c939 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -263,6 +263,6 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r if (panic_on_warn) panic("panic_on_warn set ...\n"); - /* We encountered a memory unsafety error, taint the kernel! */ + /* We encountered a memory safety error, taint the kernel! 
*/ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index ea74da3232ab..6c0185fdd815 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -667,7 +667,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * * The page table that maps the page has been already unlinked * from the page table tree and this process cannot get - * an additinal pin on the page. + * an additional pin on the page. * * New pins can come later if the page is shared across fork, * but not from this process. The other process cannot write to diff --git a/mm/ksm.c b/mm/ksm.c index b7cbcc7d4977..6bbe314c5260 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1065,7 +1065,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, /* * Ok this is tricky, when get_user_pages_fast() run it doesn't * take any lock, therefore the check that we are going to make - * with the pagecount against the mapcount is racey and + * with the pagecount against the mapcount is racy and * O_DIRECT can happen right after the check. * So we clear the pte and flush the tlb before the check * this assure us that no O_DIRECT can happen after the check @@ -1435,7 +1435,7 @@ static struct page *stable_node_dup(struct stable_node **_stable_node_dup, */ *_stable_node = found; /* - * Just for robustneess as stable_node is + * Just for robustness, as stable_node is * otherwise left as a stable pointer, the * compiler shall optimize it away at build * time. diff --git a/mm/madvise.c b/mm/madvise.c index 01fef79ac761..63e489e5bfdb 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -799,7 +799,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma, if (end > vma->vm_end) { /* * Don't fail if end > vma->vm_end. If the old - * vma was splitted while the mmap_lock was + * vma was split while the mmap_lock was * released the effect of the concurrent * operation may not cause madvise() to * have an undefined result. There may be an @@ -1039,7 +1039,7 @@ process_madvise_behavior_valid(int behavior) * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump. * MADV_COLD - the application is not expected to use this memory soon, * deactivate pages in this range so that they can be reclaimed - * easily if memory pressure hanppens. + * easily if memory pressure happens. * MADV_PAGEOUT - the application is not expected to use this memory soon, * page out the pages in this range immediately. * diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3004afb6d090..64ada9e650a5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -215,7 +215,7 @@ enum res_type { #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) #define MEMFILE_ATTR(val) ((val) & 0xffff) -/* Used for OOM nofiier */ +/* Used for OOM notifier */ #define OOM_CONTROL (0) /* @@ -786,7 +786,7 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) * __count_memcg_events - account VM events in a cgroup * @memcg: the memory cgroup * @idx: the event item - * @count: the number of events that occured + * @count: the number of events that occurred */ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count) @@ -904,7 +904,7 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) rcu_read_lock(); do { /* - * Page cache insertions can happen withou an + * Page cache insertions can happen without an * actual mm context, e.g. during disk probing * on boot, loopback IO, acct() writes etc. 
*/ @@ -1712,7 +1712,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) struct mem_cgroup *iter; /* - * Be careful about under_oom underflows becase a child memcg + * Be careful about under_oom underflows because a child memcg * could have been added after mem_cgroup_mark_under_oom. */ spin_lock(&memcg_oom_lock); @@ -1884,7 +1884,7 @@ bool mem_cgroup_oom_synchronize(bool handle) /* * There is no guarantee that an OOM-lock contender * sees the wakeups triggered by the OOM kill - * uncharges. Wake any sleepers explicitely. + * uncharges. Wake any sleepers explicitly. */ memcg_oom_recover(memcg); } @@ -4364,7 +4364,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, * Foreign dirty flushing * * There's an inherent mismatch between memcg and writeback. The former - * trackes ownership per-page while the latter per-inode. This was a + * tracks ownership per-page while the latter per-inode. This was a * deliberate design decision because honoring per-page ownership in the * writeback path is complicated, may lead to higher CPU and IO overheads * and deemed unnecessary given that write-sharing an inode across @@ -4379,9 +4379,9 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, * triggering background writeback. A will be slowed down without a way to * make writeback of the dirty pages happen. * - * Conditions like the above can lead to a cgroup getting repatedly and + * Conditions like the above can lead to a cgroup getting repeatedly and * severely throttled after making some progress after each - * dirty_expire_interval while the underyling IO device is almost + * dirty_expire_interval while the underlying IO device is almost * completely idle. * * Solving this problem completely requires matching the ownership tracking @@ -5774,7 +5774,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset) return 0; /* - * We are now commited to this value whatever it is. Changes in this + * We are now committed to this value whatever it is. Changes in this * tunable will only affect upcoming migrations, not the current one. * So we need to save it, and keep it going. */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index bd3945446d47..85ad98c00fd9 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -75,7 +75,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo if (dissolve_free_huge_page(page) || !take_page_off_buddy(page)) /* * We could fail to take off the target page from buddy - * for example due to racy page allocaiton, but that's + * for example due to racy page allocation, but that's * acceptable because soft-offlined page is not broken * and if someone really want to use it, they should * take it. diff --git a/mm/memory.c b/mm/memory.c index 8c491f813687..730daa00952b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3727,7 +3727,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) return ret; /* - * Archs like ppc64 need additonal space to store information + * Archs like ppc64 need additional space to store information * related to pte entry. Use the preallocated table for that. */ if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { @@ -4503,7 +4503,7 @@ retry_pud: } /** - * mm_account_fault - Do page fault accountings + * mm_account_fault - Do page fault accounting * * @regs: the pt_regs struct pointer. 
When set to NULL, will skip accounting * of perf event counters, but we'll still do the per-task accounting to @@ -4512,9 +4512,9 @@ retry_pud: * @flags: the fault flags. * @ret: the fault retcode. * - * This will take care of most of the page fault accountings. Meanwhile, it + * This will take care of most of the page fault accounting. Meanwhile, it * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter - * updates. However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should + * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should * still be in per-arch page fault handlers at the entry of page fault. */ static inline void mm_account_fault(struct pt_regs *regs, @@ -4848,7 +4848,7 @@ out: /** * generic_access_phys - generic implementation for iomem mmap access * @vma: the vma to access - * @addr: userspace addres, not relative offset within @vma + * @addr: userspace address, not relative offset within @vma * @buf: buffer to read/write * @len: length of transfer * @write: set to FOLL_WRITE when writing, otherwise reading diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3ebe2cfc64af..5690513c5668 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1867,7 +1867,7 @@ static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. * * policy->v.nodes is intersect with node_states[N_MEMORY]. - * so if the following test faile, it implies + * so if the following test fails, it implies * policy->v.nodes has movable memory only. */ if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) @@ -2098,7 +2098,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask) * * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default * policy. Otherwise, check for intersection between mask and the policy - * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local' + * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' * policy, always return true since it may allocate elsewhere on fallback. * * Takes task_lock(tsk) to prevent freeing of its mempolicy. diff --git a/mm/migrate.c b/mm/migrate.c index 6b37d00890ca..b234c3f3acb7 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2779,11 +2779,11 @@ restore: * * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus - * allowing the caller to allocate device memory for those unback virtual - * address. For this the caller simply has to allocate device memory and + * allowing the caller to allocate device memory for those unbacked virtual + * addresses. For this the caller simply has to allocate device memory and * properly set the destination entry like for regular migration. Note that - * this can still fails and thus inside the device driver must check if the - * migration was successful for those entries after calling migrate_vma_pages() + * this can still fail, and thus inside the device driver you must check if the + * migration was successful for those entries after calling migrate_vma_pages(), * just like for regular migration. 
* * After that, the callers must call migrate_vma_pages() to go over each entry diff --git a/mm/mmap.c b/mm/mmap.c index c1b848fa7da6..0584e540246e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -612,7 +612,7 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long nr_pages = 0; struct vm_area_struct *vma; - /* Find first overlaping mapping */ + /* Find first overlapping mapping */ vma = find_vma_intersection(mm, addr, end); if (!vma) return 0; @@ -2875,7 +2875,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas - * will remain splitted, but userland will get a + * will remain split, but userland will get a * highly unexpected error anyway. This is no * different than the case where the first of the two * __split_vma fails, but we don't undo the first diff --git a/mm/mprotect.c b/mm/mprotect.c index 94188df1ee55..e7a443157988 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -699,7 +699,7 @@ SYSCALL_DEFINE1(pkey_free, int, pkey) mmap_write_unlock(current->mm); /* - * We could provie warnings or errors if any VMA still + * We could provide warnings or errors if any VMA still * has the pkey set here. */ return ret; diff --git a/mm/mremap.c b/mm/mremap.c index d22629ff8f3c..47c255b60150 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -730,7 +730,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, * So, to avoid such scenario we can pre-compute if the whole * operation has high chances to success map-wise. * Worst-scenario case is when both vma's (new_addr and old_addr) get - * split in 3 before unmaping it. + * split in 3 before unmapping it. * That means 2 more maps (1 for each) to the ones we already hold. * Check whether current map count plus 2 still leads us to 4 maps below * the threshold, otherwise return -ENOMEM here to be more safe. diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3df2ac6b8686..eefd3f5fde46 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -74,7 +74,7 @@ static inline bool is_memcg_oom(struct oom_control *oc) #ifdef CONFIG_NUMA /** - * oom_cpuset_eligible() - check task eligiblity for kill + * oom_cpuset_eligible() - check task eligibility for kill * @start: task struct of which task to consider * @oc: pointer to struct oom_control * diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5e761fb62800..0062d5c57d41 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1806,7 +1806,7 @@ pause: break; /* - * In the case of an unresponding NFS server and the NFS dirty + * In the case of an unresponsive NFS server and the NFS dirty * pages exceeds dirty_thresh, give the other good wb's a pipe * to go through, so that tasks on them still remain responsive. * @@ -2216,7 +2216,7 @@ int write_cache_pages(struct address_space *mapping, * Page truncated or invalidated. We can freely skip it * then, even for data integrity operations: the page * has disappeared concurrently, so there could be no - * real expectation of this data interity operation + * real expectation of this data integrity operation * even if there is now a new, dirty page at the same * pagecache address. */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index bcdc0c6f21f1..0582c85da08c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -893,7 +893,7 @@ compaction_capture(struct capture_control *capc, struct page *page, return false; /* - * Do not let lower order allocations polluate a movable pageblock. 
+ * Do not let lower order allocations pollute a movable pageblock. * This might let an unmovable request use a reclaimable pageblock * and vice-versa but no more than normal fallback logic which can * have trouble finding a high-order free page. @@ -2776,7 +2776,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, /* * In page freeing path, migratetype change is racy so * we can counter several free pages in a pageblock - * in this loop althoug we changed the pageblock type + * in this loop although we changed the pageblock type * from highatomic to ac->migratetype. So we should * adjust the count once. */ @@ -3080,7 +3080,7 @@ static void drain_local_pages_wq(struct work_struct *work) * drain_all_pages doesn't use proper cpu hotplug protection so * we can race with cpu offline when the WQ can move this from * a cpu pinned worker to an unbound one. We can operate on a different - * cpu which is allright but we also have to make sure to not move to + * cpu which is alright but we also have to make sure to not move to * a different one. */ preempt_disable(); @@ -5929,7 +5929,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) static int __parse_numa_zonelist_order(char *s) { /* - * We used to support different zonlists modes but they turned + * We used to support different zonelists modes but they turned * out to be just not useful. Let's keep the warning in place * if somebody still use the cmd line parameter so that we do * not fail it silently @@ -7670,7 +7670,7 @@ static void check_for_memory(pg_data_t *pgdat, int nid) } /* - * Some architecturs, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For + * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For * such cases we allow max_zone_pfn sorted in the descending order */ bool __weak arch_has_descending_max_zone_pfns(void) @@ -8728,7 +8728,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, * alloc_contig_range() -- tries to allocate given range of pages * @start: start PFN to allocate * @end: one-past-the-last PFN to allocate - * @migratetype: migratetype of the underlaying pageblocks (either + * @migratetype: migratetype of the underlying pageblocks (either * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks * in range must have the same migratetype and it must * be either of the two. @@ -8988,7 +8988,7 @@ EXPORT_SYMBOL(free_contig_range); /* * The zone indicated has a new number of managed_pages; batch sizes and percpu - * page high values need to be recalulated. + * page high values need to be recalculated. */ void __meminit zone_pcp_update(struct zone *zone) { diff --git a/mm/page_owner.c b/mm/page_owner.c index 9661d5320a07..adfabb560eb9 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -233,7 +233,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) /* * We don't clear the bit on the oldpage as it's going to be freed * after migration. Until then, the info can be useful in case of - * a bug, and the overal stats will be off a bit only temporarily. + * a bug, and the overall stats will be off a bit only temporarily. * Also, migrate_misplaced_transhuge_page() can still fail the * migration and then we want the oldpage to retain the info. 
But * in that case we also don't need to explicitly clear the info from diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h index 095d7eaa0db4..ae26b118e246 100644 --- a/mm/percpu-internal.h +++ b/mm/percpu-internal.h @@ -170,7 +170,7 @@ struct percpu_stats { u64 nr_max_alloc; /* max # of live allocations */ u32 nr_chunks; /* current # of live chunks */ u32 nr_max_chunks; /* max # of live chunks */ - size_t min_alloc_size; /* min allocaiton size */ + size_t min_alloc_size; /* min allocation size */ size_t max_alloc_size; /* max allocation size */ }; diff --git a/mm/percpu.c b/mm/percpu.c index 23308113a5ff..f99e9306b939 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1862,7 +1862,7 @@ fail: pr_info("limit reached, disable warning\n"); } if (is_atomic) { - /* see the flag handling in pcpu_blance_workfn() */ + /* see the flag handling in pcpu_balance_workfn() */ pcpu_atomic_alloc_failed = true; pcpu_schedule_balance_work(); } else { diff --git a/mm/pgalloc-track.h b/mm/pgalloc-track.h index 1dcc865029a2..e9e879de8649 100644 --- a/mm/pgalloc-track.h +++ b/mm/pgalloc-track.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_PGALLLC_TRACK_H -#define _LINUX_PGALLLC_TRACK_H +#ifndef _LINUX_PGALLOC_TRACK_H +#define _LINUX_PGALLOC_TRACK_H #if defined(CONFIG_MMU) static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd, @@ -48,4 +48,4 @@ static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud, (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\ NULL: pte_offset_kernel(pmd, address)) -#endif /* _LINUX_PGALLLC_TRACK_H */ +#endif /* _LINUX_PGALLOC_TRACK_H */ diff --git a/mm/slab.c b/mm/slab.c index d56607a80fa6..d0f725637663 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -259,7 +259,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) #define BATCHREFILL_LIMIT 16 /* - * Optimization question: fewer reaps means less probability for unnessary + * Optimization question: fewer reaps means less probability for unnecessary * cpucache drain/refill cycles. * * OTOH the cpuarrays can contain lots of objects, @@ -2381,8 +2381,8 @@ union freelist_init_state { }; /* - * Initialize the state based on the randomization methode available. - * return true if the pre-computed list is available, false otherwize. + * Initialize the state based on the randomization method available. + * return true if the pre-computed list is available, false otherwise. */ static bool freelist_state_initialize(union freelist_init_state *state, struct kmem_cache *cachep, diff --git a/mm/slub.c b/mm/slub.c index 68123b21e65f..feda53ae62ba 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3391,7 +3391,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk); */ /* - * Mininum / Maximum order of slab pages. This influences locking overhead + * Minimum / Maximum order of slab pages. This influences locking overhead * and slab fragmentation. A higher order reduces the number of partial slabs * and increases the number of allocations possible without having to * take the list_lock. diff --git a/mm/swap_slots.c b/mm/swap_slots.c index be9de6d5b516..6248d1030a9b 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -16,7 +16,7 @@ * to local caches without needing to acquire swap_info * lock. We do not reuse the returned slots directly but * move them back to the global pool in a batch. This - * allows the slots to coaellesce and reduce fragmentation. + * allows the slots to coalesce and reduce fragmentation. 
* * The swap entry allocated is marked with SWAP_HAS_CACHE * flag in map_count that prevents it from being allocated diff --git a/mm/vmalloc.c b/mm/vmalloc.c index a7f318c9e426..a13ac524f6ff 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1583,7 +1583,7 @@ static unsigned long lazy_max_pages(void) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); /* - * Serialize vmap purging. There is no actual criticial section protected + * Serialize vmap purging. There is no actual critical section protected * by this look, but we want to avoid concurrent calls for performance * reasons and to make the pcpu_get_vm_areas more deterministic. */ @@ -2628,7 +2628,7 @@ static void __vfree(const void *addr) * May sleep if called *not* from interrupt context. * Must not be called in NMI context (strictly speaking, it could be * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling - * conventions for vfree() arch-depenedent would be a really bad idea). + * conventions for vfree() arch-dependent would be a really bad idea). */ void vfree(const void *addr) { @@ -3141,7 +3141,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count) /* * To do safe access to this _mapped_ area, we need * lock. But adding lock here means that we need to add - * overhead of vmalloc()/vfree() calles for this _debug_ + * overhead of vmalloc()/vfree() calls for this _debug_ * interface, rarely used. Instead of that, we'll use * kmap() and get small overhead in this access function. */ diff --git a/mm/vmstat.c b/mm/vmstat.c index 5ba118521ded..cccee36b289c 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -934,7 +934,7 @@ void cpu_vm_stats_fold(int cpu) /* * this is only called if !populated_zone(zone), which implies no other users of - * pset->vm_stat_diff[] exsist. + * pset->vm_stat_diff[] exist. */ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset) { diff --git a/mm/zpool.c b/mm/zpool.c index 5ed71207ced7..6d9ed48141e5 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -336,7 +336,7 @@ int zpool_shrink(struct zpool *zpool, unsigned int pages, * This may hold locks, disable interrupts, and/or preemption, * and the zpool_unmap_handle() must be called to undo those * actions. The code that uses the mapped handle should complete - * its operatons on the mapped handle memory quickly and unmap + * its operations on the mapped handle memory quickly and unmap * as soon as possible. As the implementation may use per-cpu * data, multiple handles should not be mapped concurrently on * any cpu. diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 5004c176b045..19b563bc6c48 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1227,7 +1227,7 @@ EXPORT_SYMBOL_GPL(zs_get_total_pages); * zs_map_object - get address of allocated object from handle. * @pool: pool from which the object was allocated * @handle: handle returned from zs_malloc - * @mm: maping mode to use + * @mm: mapping mode to use * * Before using an object allocated from zs_malloc, it must be mapped using * this function. When done with the object, it must be unmapped using