Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-01 05:54:58 +08:00
mm: fix multiple typos in multiple files
Link: https://lkml.kernel.org/r/20231023124405.36981-1-m.muzzammilashraf@gmail.com
Signed-off-by: Muhammad Muzammil <m.muzzammilashraf@gmail.com>
Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muhammad Muzammil <m.muzzammilashraf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 98b32d296d
commit be16dd764a

@@ -1322,8 +1322,8 @@ static int __init debug_vm_pgtable(void)
 	 * true irrespective of the starting protection value for a
 	 * given page table entry.
 	 *
-	 * Protection based vm_flags combinatins are always linear
-	 * and increasing i.e starting from VM_NONE and going upto
+	 * Protection based vm_flags combinations are always linear
+	 * and increasing i.e starting from VM_NONE and going up to
 	 * (VM_SHARED | READ | WRITE | EXEC).
 	 */
 #define VM_FLAGS_START	(VM_NONE)
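
This hunk appears to come from mm/debug_vm_pgtable.c, where the comment justifies walking every protection-based vm_flags combination with a plain integer loop: the combinations form a contiguous range from VM_NONE up to (VM_SHARED | READ | WRITE | EXEC). Below is a minimal user-space C sketch of that idea only; the flag values and the VM_FLAGS_END name are illustrative assumptions, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's vm_flags bits (values assumed). */
#define VM_NONE   0x0UL
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

#define VM_FLAGS_START (VM_NONE)                                  /* lowest combination  */
#define VM_FLAGS_END   (VM_SHARED | VM_EXEC | VM_WRITE | VM_READ) /* highest combination */

int main(void)
{
	/*
	 * Because protection-based combinations are linear and increasing,
	 * a simple integer loop visits every one of them exactly once.
	 */
	for (unsigned long idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++)
		printf("testing vm_flags combination 0x%lx\n", idx);

	return 0;
}
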
@@ -592,7 +592,7 @@ extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
  * range.
  * "fully mapped" means all the pages of folio is associated with the page
  * table of range while this function just check whether the folio range is
- * within the range [start, end). Funcation caller nees to do page table
+ * within the range [start, end). Function caller needs to do page table
  * check if it cares about the page table association.
  *
  * Typical usage (like mlock or madvise) is:
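
This hunk appears to come from mm/internal.h, in the kernel-doc of a folio range helper. The comment separates "fully mapped" (every page of the folio actually present in the page tables of the range) from the cheaper test this helper performs, which is pure address-range containment against [start, end). A minimal user-space C sketch of such a containment check follows; the struct and function names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a folio's virtual placement inside a VMA. */
struct folio_span {
	unsigned long start;	/* first byte mapped by the folio */
	unsigned long end;	/* one past the last byte (exclusive) */
};

/*
 * Pure range check: is the folio's span contained in [start, end)?
 * It says nothing about whether the pages are actually present in the
 * page tables; a caller that cares must check the page tables itself.
 */
static bool span_within_range(const struct folio_span *f,
			      unsigned long start, unsigned long end)
{
	return f->start >= start && f->end <= end;
}

int main(void)
{
	struct folio_span f = { .start = 0x201000, .end = 0x205000 };

	printf("%d\n", span_within_range(&f, 0x200000, 0x300000)); /* 1: contained */
	printf("%d\n", span_within_range(&f, 0x203000, 0x300000)); /* 0: starts before */
	return 0;
}
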
@@ -819,7 +819,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	memcg = pn->memcg;
 
 	/*
-	 * The caller from rmap relay on disabled preemption becase they never
+	 * The caller from rmap relies on disabled preemption because they never
 	 * update their counter from in-interrupt context. For these two
 	 * counters we check that the update is never performed from an
 	 * interrupt context while other caller need to have disabled interrupt.
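
This hunk appears to come from __mod_memcg_lruvec_state() in mm/memcontrol.c. The comment records a context rule: rmap callers update these counters with preemption disabled and never from interrupt context, while other callers must run with interrupts disabled. A debug build can assert such a rule cheaply. The sketch below is a user-space analogue under assumed helper names (in_interrupt_ctx(), irqs_off()), not the memcg code itself.

#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-ins for "are we in interrupt context" / "are IRQs off". */
static bool in_interrupt_ctx(void) { return false; }
static bool irqs_off(void)         { return true;  }

enum counter_kind { COUNTER_RMAP, COUNTER_OTHER };

/*
 * Enforce the documented rule: rmap-style counters are never touched from
 * interrupt context (their callers rely on disabled preemption instead),
 * while every other counter update must run with interrupts disabled.
 */
static void assert_update_context(enum counter_kind kind)
{
	if (kind == COUNTER_RMAP)
		assert(!in_interrupt_ctx());
	else
		assert(irqs_off());
}

int main(void)
{
	assert_update_context(COUNTER_RMAP);	/* ok: not in interrupt context */
	assert_update_context(COUNTER_OTHER);	/* ok: interrupts are off */
	return 0;
}
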
@@ -8044,7 +8044,7 @@ static struct cftype memsw_files[] = {
  *
  * This doesn't check for specific headroom, and it is not atomic
  * either. But with zswap, the size of the allocation is only known
- * once compression has occured, and this optimistic pre-check avoids
+ * once compression has occurred, and this optimistic pre-check avoids
  * spending cycles on compression when there is already no room left
  * or zswap is disabled altogether somewhere in the hierarchy.
  */
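
This hunk appears to be a zswap charge pre-check in mm/memcontrol.c. The comment describes an optimistic shortcut: since the compressed size is only known after compression has run, a cheap, non-atomic "is there plausibly room and is zswap even enabled" test lets the code skip compression entirely when the answer is clearly no, deferring the authoritative check until the real size exists. A minimal sketch of that shape follows, with invented names (zswap_enabled, approx_usage, limit) standing in for the real accounting.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool   zswap_enabled = true;
static size_t approx_usage  = 1000;	/* rough, possibly stale usage */
static size_t limit         = 16384;	/* configured zswap limit      */

/* Cheap, non-atomic pre-check: no specific headroom, may be stale. */
static bool may_store(void)
{
	return zswap_enabled && approx_usage < limit;
}

/* Placeholder "compression": the real size is only known once it has run. */
static size_t compress(const void *page, size_t len)
{
	(void)page;
	return len / 3;
}

static bool store_page(const void *page, size_t len)
{
	if (!may_store())		/* skip the expensive work early */
		return false;

	size_t compressed = compress(page, len);

	/* The authoritative check happens only now, with the real size. */
	if (approx_usage + compressed > limit)
		return false;

	approx_usage += compressed;
	return true;
}

int main(void)
{
	char page[4096] = { 0 };

	printf("stored: %d\n", store_page(page, sizeof(page)));
	return 0;
}
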
@@ -1223,7 +1223,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	 * Does the application expect PROT_READ to imply PROT_EXEC?
 	 *
 	 * (the exception is when the underlying filesystem is noexec
-	 * mounted, in which case we dont add PROT_EXEC.)
+	 * mounted, in which case we don't add PROT_EXEC.)
 	 */
 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
 		if (!(file && path_noexec(&file->f_path)))
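
This hunk appears to come from do_mmap() in mm/mmap.c. The two context lines show the shape of the decision: when the task's personality has READ_IMPLIES_EXEC set and the mapping is readable, PROT_EXEC is added unless the backing file lives on a noexec mount. A small user-space sketch of that decision as a pure function; the helper name and the noexec parameter are illustrative, not the kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

/*
 * Decide the effective protection bits: if the caller's personality implies
 * exec-on-read, readable mappings also become executable, unless the file
 * being mapped sits on a filesystem mounted noexec.
 */
static int effective_prot(int prot, bool read_implies_exec, bool file_on_noexec_mount)
{
	if ((prot & PROT_READ) && read_implies_exec)
		if (!file_on_noexec_mount)
			prot |= PROT_EXEC;
	return prot;
}

int main(void)
{
	printf("0x%x\n", effective_prot(PROT_READ, true, false));	/* PROT_READ|PROT_EXEC */
	printf("0x%x\n", effective_prot(PROT_READ, true, true));	/* PROT_READ only      */
	return 0;
}
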