kernel/: fix repeated words in comments
Fix multiple occurrences of duplicated words in kernel/.

Fix one typo/spello on the same line as a duplicate word. Change one
instance of "the the" to "that the". Otherwise just drop one of the
repeated words.

Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/98202fa6-8919-ef63-9efe-c0fad5ca7af1@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 15ec0fcff6
commit 7b7b8a2c95
@@ -25,7 +25,7 @@
  * Now we silently close acct_file on attempt to reopen. Cleaned sys_acct().
  * XTerms and EMACS are manifestations of pure evil. 21/10/98, AV.
  *
- * Fixed a nasty interaction with with sys_umount(). If the accointing
+ * Fixed a nasty interaction with sys_umount(). If the accounting
  * was suspeneded we failed to stop it on umount(). Messy.
  * Another one: remount to readonly didn't stop accounting.
  * Question: what should we do if we have CAP_SYS_ADMIN but not
@@ -390,7 +390,7 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
  * The top cpuset doesn't have any online cpu as a
  * consequence of a race between cpuset_hotplug_work
  * and cpu hotplug notifier. But we know the top
- * cpuset's effective_cpus is on its way to to be
+ * cpuset's effective_cpus is on its way to be
  * identical to cpu_online_mask.
  */
 cpumask_copy(pmask, cpu_online_mask);
@@ -16,7 +16,7 @@
 #include "direct.h"
 
 /*
- * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use it
+ * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
  * it for entirely different regions. In that case the arch code needs to
  * override the variable below for dma-direct to work properly.
  */
@@ -2189,7 +2189,7 @@ static __latent_entropy struct task_struct *copy_process(
 
 /*
  * Ensure that the cgroup subsystem policies allow the new process to be
- * forked. It should be noted the the new process's css_set can be changed
+ * forked. It should be noted that the new process's css_set can be changed
  * between here and cgroup_post_fork() if an organisation operation is in
  * progress.
  */
@@ -916,7 +916,7 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
  * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
  *
  * [1] Indicates that the kernel can acquire the futex atomically. We
- *     came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
+ *     came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
  *
  * [2] Valid, if TID does not belong to a kernel thread. If no matching
  *     thread is found then it indicates that the owner TID has died.
@@ -604,7 +604,7 @@ int irq_timings_alloc(int irq)
 
 /*
  * Some platforms can have the same private interrupt per cpu,
- * so this function may be be called several times with the
+ * so this function may be called several times with the
  * same interrupt number. Just bail out in case the per cpu
  * stat structure is already allocated.
  */
@@ -19,7 +19,7 @@
 #include <linux/cpu.h>
 #include <asm/sections.h>
 
-/* mutex to protect coming/going of the the jump_label table */
+/* mutex to protect coming/going of the jump_label table */
 static DEFINE_MUTEX(jump_label_mutex);
 
 void jump_label_lock(void)
@@ -32,7 +32,7 @@
  * 1. different addresses but with the same encoded address race;
  * 2. and both map onto the same watchpoint slots;
  *
- * Both these are assumed to be very unlikely. However, in case it still happens
+ * Both these are assumed to be very unlikely. However, in case it still
  * happens, the report logic will filter out the false positive (see report.c).
  */
 #define WATCHPOINT_ADDR_BITS (BITS_PER_LONG-1 - WATCHPOINT_SIZE_BITS)
@@ -109,7 +109,7 @@ EXPORT_SYMBOL_GPL(kexec_crash_loaded);
  * defined more restrictively in <asm/kexec.h>.
  *
  * The code for the transition from the current kernel to the
- * the new kernel is placed in the control_code_buffer, whose size
+ * new kernel is placed in the control_code_buffer, whose size
  * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
  * page of memory is necessary, but some architectures require more.
  * Because this memory must be identity mapped in the transition from
@@ -775,7 +775,7 @@ EXPORT_SYMBOL(kthread_create_worker);
 
 /**
  * kthread_create_worker_on_cpu - create a kthread worker and bind it
- *     it to a given CPU and the associated NUMA node.
+ *     to a given CPU and the associated NUMA node.
  * @cpu: CPU number
  * @flags: flags modifying the default behavior of the worker
  * @namefmt: printf-style name for the kthread worker (task).
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(klp_get_state);
  *
  * The function can be called only during transition when a new
  * livepatch is being enabled or when such a transition is reverted.
- * It is typically called only from from pre/post (un)patch
+ * It is typically called only from pre/post (un)patch
  * callbacks.
  *
  * Return: pointer to the latest struct klp_state from already
@@ -233,7 +233,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
  * to pid_ns->child_reaper. Thus pidns->child_reaper needs to
  * stay valid until they all go away.
  *
- * The code relies on the the pid_ns->child_reaper ignoring
+ * The code relies on the pid_ns->child_reaper ignoring
  * SIGCHILD to cause those EXIT_ZOMBIE processes to be
  * autoreaped if reparented.
  *
@@ -735,7 +735,7 @@ zone_found:
  */
 
 /*
- * If the zone we wish to scan is the the current zone and the
+ * If the zone we wish to scan is the current zone and the
  * pfn falls into the current node then we do not need to walk
  * the tree.
  */
@@ -741,7 +741,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * for all the required CPUs to finish. This may include the local
  * processor.
  * @cond_func: A callback function that is passed a cpu id and
- *             the the info parameter. The function is called
+ *             the info parameter. The function is called
  *             with preemption disabled. The function should
  *             return a blooean value indicating whether to IPI
  *             the specified CPU.
@@ -515,7 +515,7 @@ EXPORT_SYMBOL(from_kgid_munged);
  *
  * When there is no mapping defined for the user-namespace projid
  * pair INVALID_PROJID is returned. Callers are expected to test
- * for and handle handle INVALID_PROJID being returned. INVALID_PROJID
+ * for and handle INVALID_PROJID being returned. INVALID_PROJID
  * may be tested for using projid_valid().
  */
 kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)