arc: Fix typos/spellos
s/commiting/committing/
s/defintion/definition/
s/gaurantees/guarantees/
s/interrpted/interrupted/
s/interrutps/interrupts/
s/succeded/succeeded/
s/unconditonally/unconditionally/

Reviewed-by: Christian Brauner <[email protected]>
Acked-by: Randy Dunlap <[email protected]>
Signed-off-by: Bhaskar Chowdhury <[email protected]>
Signed-off-by: Vineet Gupta <[email protected]>
unixbhaskar authored and vineetgarc committed May 10, 2021
1 parent 6efb943 commit f79f7a2
Showing 4 changed files with 9 additions and 9 deletions.
2 changes: 1 addition & 1 deletion arch/arc/Makefile
@@ -31,7 +31,7 @@ endif
 
 
 ifdef CONFIG_ARC_CURR_IN_REG
-# For a global register defintion, make sure it gets passed to every file
+# For a global register definition, make sure it gets passed to every file
 # We had a customer reported bug where some code built in kernel was NOT using
 # any kernel headers, and missing the r25 global register
 # Can't do unconditionally because of recursive include issues
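The comment above concerns a global register variable: if even one translation unit is compiled without the declaration, the compiler is free to allocate r25 as a scratch register and clobber the task pointer, which is the customer-reported bug mentioned. A sketch of the kind of declaration the Makefile has to push into every file; the names follow ARC's current-task convention but are an assumption, not part of this hunk:

/* Assumed shape of the declaration that must reach every compilation unit */
register struct task_struct *curr_arc asm("r25");	/* reserve r25 globally */
#define current (curr_arc)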
4 changes: 2 additions & 2 deletions arch/arc/include/asm/cmpxchg.h
@@ -116,7 +116,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
  *
  * Technically the lock is also needed for UP (boils down to irq save/restore)
  * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
- * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
  * Other way around, xchg is one instruction anyways, so can't be interrupted
  * as such
  */
@@ -143,7 +143,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 /*
  * "atomic" variant of xchg()
  * REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
+ * Since xchg() doesn't always do that, it would seem that following definition
  * is incorrect. But here's the rationale:
  *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
  *   LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
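A minimal sketch of the rationale spelled out in the comments above, for a UP build without LLOCK/SCOND; the lock name mirrors the comment, but the helper itself is illustrative rather than the kernel's implementation:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(atomic_ops_lock);	/* name taken from the comment */

/*
 * cmpxchg() must make load-compare-store look atomic, so it runs under
 * the lock with irqs disabled; xchg() is a single instruction (ARC's EX)
 * and so cannot be interrupted mid-sequence, hence needs no lock on UP.
 */
static inline int cmpxchg_up_sketch(int *p, int expected, int new)
{
	unsigned long flags;
	int old;

	spin_lock_irqsave(&atomic_ops_lock, flags);	/* irqs off: nothing interleaves */
	old = *p;
	if (old == expected)
		*p = new;
	spin_unlock_irqrestore(&atomic_ops_lock, flags);

	return old;
}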
8 changes: 4 additions & 4 deletions arch/arc/kernel/process.c
@@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	int ret;
 
 	/*
-	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
+	 * This is only for old cores lacking LLOCK/SCOND, which by definition
	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
 	 * And this also helps reduce the overhead for serializing in
 	 * the UP case
 	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
 
-	/* Z indicates to userspace if operation succeded */
+	/* Z indicates to userspace if operation succeeded */
 	regs->status32 &= ~STATUS_Z_MASK;
 
 	ret = access_ok(uaddr, sizeof(*uaddr));
@@ -107,7 +107,7 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 
 void arch_cpu_idle(void)
 {
-	/* Re-enable interrupts <= default irq priority before commiting SLEEP */
+	/* Re-enable interrupts <= default irq priority before committing SLEEP */
 	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
 
 	__asm__ __volatile__(
@@ -120,7 +120,7 @@ void arch_cpu_idle(void)
 
 void arch_cpu_idle(void)
 {
-	/* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
+	/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
 	__asm__ __volatile__("sleep 0x3 \n");
 }
 
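The two comments fixed in the first hunk describe a success protocol worth spelling out: the syscall reports the outcome to userspace through the Z flag in status32, not through its return value. A hedged sketch of that core; fault recovery and the preemption-disable window of the real syscall are omitted:

/* Illustrative core of the emulated userspace cmpxchg, not the real syscall */
static int usr_cmpxchg_sketch(struct pt_regs *regs, int __user *uaddr,
			      int expected, int new)
{
	int uval;

	regs->status32 &= ~STATUS_Z_MASK;	/* assume failure */

	if (get_user(uval, uaddr))
		return -EFAULT;

	if (uval == expected) {
		if (put_user(new, uaddr))
			return -EFAULT;
		regs->status32 |= STATUS_Z_MASK;	/* Z set: the store happened */
	}

	return uval;	/* userspace branches on Z, not on this value */
}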
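The two arch_cpu_idle() hunks differ only in how they commit the SLEEP. In the ARCv2 variant the operand encodes both "re-enable interrupts" (0x10) and the priority threshold, and has to be an instruction-embedded immediate. A sketch of the likely asm; the "I" constraint is an assumption, since the hunk truncates before the asm body:

const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

__asm__ __volatile__("sleep %0\n" : : "I"(arg));	/* "I": embedded const, not a register */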
4 changes: 2 additions & 2 deletions arch/arc/kernel/signal.c
@@ -259,7 +259,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	regs->r2 = (unsigned long)&sf->uc;
 
 	/*
-	 * small optim to avoid unconditonally calling do_sigaltstack
+	 * small optim to avoid unconditionally calling do_sigaltstack
 	 * in sigreturn path, now that we only have rt_sigreturn
 	 */
 	magic = MAGIC_SIGALTSTK;
@@ -391,7 +391,7 @@ void do_signal(struct pt_regs *regs)
 void do_notify_resume(struct pt_regs *regs)
 {
 	/*
-	 * ASM glue gaurantees that this is only called when returning to
+	 * ASM glue guarantees that this is only called when returning to
 	 * user mode
 	 */
 	if (test_thread_flag(TIF_NOTIFY_RESUME))
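The "small optim" comment has a counterpart on the sigreturn side: a magic value stored in the frame at setup time lets rt_sigreturn skip the altstack restore when no altstack was armed. A sketch under assumed names; only MAGIC_SIGALTSTK appears in the hunk, while sf->sigret_magic and the helper shape are illustrative (restore_altstack() is the generic kernel API):

static int sigret_altstack_sketch(struct rt_sigframe __user *sf)
{
	unsigned int magic;

	if (__get_user(magic, &sf->sigret_magic))	/* assumed field name */
		return -EFAULT;

	/* Only pay for the altstack restore when frame setup recorded one */
	if (magic == MAGIC_SIGALTSTK && restore_altstack(&sf->uc.uc_stack))
		return -EFAULT;

	return 0;
}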
