arc: Fix typos/spellos
s/commiting/committing/
s/defintion/definition/
s/gaurantees/guarantees/
s/interrpted/interrupted/
s/interrutps/interrupts/
s/succeded/succeeded/
s/unconditonally/unconditionally/

Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
Acked-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Bhaskar Chowdhury <unixbhaskar@gmail.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
commit f79f7a2d96
parent 6efb943b86
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -31,7 +31,7 @@ endif
 
 
 ifdef CONFIG_ARC_CURR_IN_REG
-# For a global register defintion, make sure it gets passed to every file
+# For a global register definition, make sure it gets passed to every file
 # We had a customer reported bug where some code built in kernel was NOT using
 # any kernel headers, and missing the r25 global register
 # Can't do unconditionally because of recursive include issues
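The Makefile comment above is about GCC's global register variables: reserving r25 for the current task pointer only works if every translation unit in the build sees the reservation, otherwise one stray file lets the compiler allocate r25 for its own temporaries and clobber the cached pointer. A minimal sketch of what such a declaration looks like, with an illustrative name and assuming a toolchain targeting ARC that keeps r25 out of the register allocator (this is not the ARC header itself):

/*
 * GCC global register variable, illustrative only. Every file must be
 * compiled with this declaration in scope (and with r25 reserved),
 * or code that never saw it may silently corrupt the register.
 */
struct task_struct;

register struct task_struct *demo_curr __asm__("r25");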
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -116,7 +116,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
  *
  * Technically the lock is also needed for UP (boils down to irq save/restore)
  * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
- * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
  * Other way around, xchg is one instruction anyways, so can't be interrupted
  * as such
  */
@@ -143,7 +143,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 /*
  * "atomic" variant of xchg()
  * REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
+ * Since xchg() doesn't always do that, it would seem that following definition
  * is incorrect. But here's the rationale:
  * SMP : Even xchg() takes the atomic_ops_lock, so OK.
  * LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
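The comments touched above describe the usual fallback for CPUs without a native atomic exchange: every read-modify-write helper is serialized behind one lock, so they cannot interleave with each other. A minimal userspace sketch of that idea, with a pthread mutex standing in for the kernel's irq-disabling atomic_ops_lock() (names below are illustrative, not the ARC API):

#include <pthread.h>

static pthread_mutex_t demo_ops_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lock-based exchange: returns the old value, stores the new one. */
static unsigned long demo_locked_xchg(volatile unsigned long *ptr,
				      unsigned long val)
{
	unsigned long old;

	pthread_mutex_lock(&demo_ops_lock);	/* kernel: irqs off + spinlock */
	old = *ptr;				/* read the current value      */
	*ptr = val;				/* commit the new value        */
	pthread_mutex_unlock(&demo_ops_lock);

	return old;
}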
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	int ret;
 
 	/*
-	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
+	 * This is only for old cores lacking LLOCK/SCOND, which by definition
 	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
 	 * And this also helps reduce the overhead for serializing in
 	 * the UP case
 	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
 
-	/* Z indicates to userspace if operation succeded */
+	/* Z indicates to userspace if operation succeeded */
 	regs->status32 &= ~STATUS_Z_MASK;
 
 	ret = access_ok(uaddr, sizeof(*uaddr));
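The hunk above documents the slow-path contract of arc_usr_cmpxchg: on old cores without LLOCK/SCOND the kernel does the compare-and-exchange on the user word itself and reports success to userspace through the Z flag in status32. A minimal sketch of that compare-and-exchange protocol in plain C, with a hypothetical z_flag out-parameter standing in for the Z bit (an illustration of the protocol, not the kernel implementation):

/* Compare-and-exchange: commit 'new' only if *uaddr still equals 'expected'. */
static int demo_cmpxchg(int *uaddr, int expected, int new, int *z_flag)
{
	int old = *uaddr;		/* value currently visible */

	*z_flag = (old == expected);	/* "Z" tells the caller if it matched */
	if (*z_flag)
		*uaddr = new;		/* only store on a match */

	return old;			/* old value goes back to the caller */
}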
@@ -107,7 +107,7 @@ fail:
 
 void arch_cpu_idle(void)
 {
-	/* Re-enable interrupts <= default irq priority before commiting SLEEP */
+	/* Re-enable interrupts <= default irq priority before committing SLEEP */
 	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
 
 	__asm__ __volatile__(
@@ -120,7 +120,7 @@ void arch_cpu_idle(void)
 
 void arch_cpu_idle(void)
 {
-	/* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
+	/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
 	__asm__ __volatile__("sleep 0x3 \n");
 }
 
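Both idle variants follow the same pattern: re-enable the relevant class of interrupts and enter SLEEP in a single step, so a wakeup cannot slip in between the two. As a rough illustration of how the ARCv2 operand above is composed, with the bit layout treated as an assumption (the ARCv2 ISA manual is authoritative):

/*
 * Sketch of the SLEEP operand arithmetic only: an assumed enable bit
 * ORed with an assumed default priority threshold. Not the real
 * encoding definitions from the ARC headers.
 */
#define DEMO_SLEEP_IRQ_EN	0x10	/* assumed "re-enable irqs" bit */
#define DEMO_IRQ_DEF_PRIO	1	/* assumed default priority     */

static const unsigned int demo_sleep_arg = DEMO_SLEEP_IRQ_EN | DEMO_IRQ_DEF_PRIO;	/* 0x11 */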
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -259,7 +259,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	regs->r2 = (unsigned long)&sf->uc;
 
 	/*
-	 * small optim to avoid unconditonally calling do_sigaltstack
+	 * small optim to avoid unconditionally calling do_sigaltstack
 	 * in sigreturn path, now that we only have rt_sigreturn
 	 */
 	magic = MAGIC_SIGALTSTK;
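The comment fixed here describes a small tagging trick: the signal frame is stamped with a magic value at delivery time so the sigreturn path only does the alternate-stack bookkeeping when the stamp says it is needed. A bare-bones sketch of that idea, using invented names and an arbitrary constant rather than the ARC signal-frame layout:

/* Invented names for illustration only; not the ARC signal code. */
#define DEMO_MAGIC_SIGALTSTK	0x5a5aa5a5u	/* arbitrary tag value */

struct demo_sigframe {
	unsigned int sigret_magic;	/* 0, or the tag set at setup time */
	/* saved user context would live here */
};

/* Signal delivery: record whether an alternate stack was involved. */
static void demo_setup_frame(struct demo_sigframe *sf, int on_altstack)
{
	sf->sigret_magic = on_altstack ? DEMO_MAGIC_SIGALTSTK : 0;
}

/* sigreturn: only pay for the altstack restore when the tag is set. */
static void demo_sigreturn(struct demo_sigframe *sf)
{
	if (sf->sigret_magic == DEMO_MAGIC_SIGALTSTK) {
		/* restore alternate-stack state here */
	}
}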
@@ -391,7 +391,7 @@ void do_signal(struct pt_regs *regs)
 void do_notify_resume(struct pt_regs *regs)
 {
 	/*
-	 * ASM glue gaurantees that this is only called when returning to
+	 * ASM glue guarantees that this is only called when returning to
 	 * user mode
 	 */
 	if (test_thread_flag(TIF_NOTIFY_RESUME))