x86: Fix common misspellings
They were generated by 'codespell' and then manually reviewed.

Signed-off-by: Lucas De Marchi <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
lucasdemarchi authored and Ingo Molnar committed Mar 18, 2011
1 parent a6c3270 commit 0d2eb44
Showing 50 changed files with 67 additions and 67 deletions.
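
The commit message notes that these fixes were generated with codespell and then reviewed by hand. As a rough sketch of that workflow (assuming a codespell version that supports in-place fixing via -w/--write-changes; the exact invocation used for this commit is not recorded):

    # report suspected misspellings under arch/x86 without modifying anything
    codespell arch/x86

    # apply the suggested fixes in place, then review them before committing
    codespell -w arch/x86
    git diff arch/x86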
2 changes: 1 addition & 1 deletion arch/x86/Kconfig.cpu
@@ -326,7 +326,7 @@ config X86_PPRO_FENCE
Old PentiumPro multiprocessor systems had errata that could cause
memory operations to violate the x86 ordering standard in rare cases.
Enabling this option will attempt to work around some (but not all)
-occurances of this problem, at the cost of much heavier spinlock and
+occurrences of this problem, at the cost of much heavier spinlock and
memory barrier operations.

If unsure, say n here. Even distro kernels should think twice before
6 changes: 3 additions & 3 deletions arch/x86/crypto/aesni-intel_asm.S
@@ -1346,7 +1346,7 @@ _zero_cipher_left_decrypt:
and $15, %r13 # %r13 = arg4 (mod 16)
je _multiple_of_16_bytes_decrypt

-# Handle the last <16 byte block seperately
+# Handle the last <16 byte block separately

paddd ONE(%rip), %xmm0 # increment CNT to get Yn
movdqa SHUF_MASK(%rip), %xmm10
@@ -1355,7 +1355,7 @@ _zero_cipher_left_decrypt:
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
sub $16, %r11
add %r13, %r11
-movdqu (%arg3,%r11,1), %xmm1 # recieve the last <16 byte block
+movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
lea SHIFT_MASK+16(%rip), %r12
sub %r13, %r12
# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
@@ -1607,7 +1607,7 @@ _zero_cipher_left_encrypt:
and $15, %r13 # %r13 = arg4 (mod 16)
je _multiple_of_16_bytes_encrypt

-# Handle the last <16 Byte block seperately
+# Handle the last <16 Byte block separately
paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0
2 changes: 1 addition & 1 deletion arch/x86/include/asm/cacheflush.h
@@ -71,7 +71,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
*
-* Within a catagory, the attributes are mutually exclusive.
+* Within a category, the attributes are mutually exclusive.
*
* The implementation of this API will take care of various aspects that
* are associated with changing such attributes, such as:
4 changes: 2 additions & 2 deletions arch/x86/include/asm/nmi.h
@@ -29,8 +29,8 @@ void arch_trigger_all_cpu_backtrace(void);
* external nmis, because the local ones are more frequent.
*
* Also setup some default high/normal/low settings for
-* subsystems to registers with. Using 4 bits to seperate
-* the priorities. This can go alot higher if needed be.
+* subsystems to registers with. Using 4 bits to separate
+* the priorities. This can go a lot higher if needed be.
*/

#define NMI_LOCAL_SHIFT 16 /* randomly picked */
2 changes: 1 addition & 1 deletion arch/x86/include/asm/nops.h
@@ -38,7 +38,7 @@
#define K8_NOP8 K8_NOP4 K8_NOP4

/* K7 nops
-uses eax dependencies (arbitary choice)
+uses eax dependencies (arbitrary choice)
1: nop
2: movl %eax,%eax
3: leal (,%eax,1),%eax
2 changes: 1 addition & 1 deletion arch/x86/include/asm/olpc.h
@@ -20,7 +20,7 @@ extern struct olpc_platform_t olpc_platform_info;

/*
* OLPC board IDs contain the major build number within the mask 0x0ff0,
-* and the minor build number withing 0x000f. Pre-builds have a minor
+* and the minor build number within 0x000f. Pre-builds have a minor
* number less than 8, and normal builds start at 8. For example, 0x0B10
* is a PreB1, and 0x0C18 is a C1.
*/
4 changes: 2 additions & 2 deletions arch/x86/include/asm/perf_event_p4.h
@@ -1,5 +1,5 @@
/*
-* Netburst Perfomance Events (P4, old Xeon)
+* Netburst Performance Events (P4, old Xeon)
*/

#ifndef PERF_EVENT_P4_H
@@ -9,7 +9,7 @@
#include <linux/bitops.h>

/*
-* NetBurst has perfomance MSRs shared between
+* NetBurst has performance MSRs shared between
* threads if HT is turned on, ie for both logical
* processors (mem: in turn in Atom with HT support
* perf-MSRs are not shared and every thread has its
2 changes: 1 addition & 1 deletion arch/x86/include/asm/processor-flags.h
@@ -7,7 +7,7 @@
*/
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
2 changes: 1 addition & 1 deletion arch/x86/include/asm/ptrace-abi.h
@@ -31,7 +31,7 @@
#define R12 24
#define RBP 32
#define RBX 40
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here*/
#define R11 48
#define R10 56
#define R9 64
4 changes: 2 additions & 2 deletions arch/x86/include/asm/ptrace.h
@@ -73,7 +73,7 @@ struct pt_regs {
unsigned long r12;
unsigned long rbp;
unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
@@ -103,7 +103,7 @@ struct pt_regs {
unsigned long r12;
unsigned long bp;
unsigned long bx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
2 changes: 1 addition & 1 deletion arch/x86/include/asm/tsc.h
@@ -35,7 +35,7 @@ static inline cycles_t get_cycles(void)
static __always_inline cycles_t vget_cycles(void)
{
/*
-* We only do VDSOs on TSC capable CPUs, so this shouldnt
+* We only do VDSOs on TSC capable CPUs, so this shouldn't
* access boot_cpu_data (which is not VDSO-safe):
*/
#ifndef CONFIG_X86_TSC
2 changes: 1 addition & 1 deletion arch/x86/include/asm/xen/interface.h
@@ -86,7 +86,7 @@ DEFINE_GUEST_HANDLE(void);
* The privilege level specifies which modes may enter a trap via a software
* interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
* privilege levels as follows:
-* Level == 0: Noone may enter
+* Level == 0: No one may enter
* Level == 1: Kernel may enter
* Level == 2: Kernel may enter
* Level == 3: Everyone may enter
2 changes: 1 addition & 1 deletion arch/x86/kernel/alternative.c
@@ -199,7 +199,7 @@ void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
-self modifying code. This implies that assymetric systems where
+self modifying code. This implies that asymmetric systems where
APs have less capabilities than the boot processor are not handled.
Tough. Make sure you disable such features by hand. */

2 changes: 1 addition & 1 deletion arch/x86/kernel/aperture_64.c
@@ -73,7 +73,7 @@ static u32 __init allocate_aperture(void)
/*
* using 512M as goal, in case kexec will load kernel_big
* that will do the on position decompress, and could overlap with
-* that positon with gart that is used.
+* that position with gart that is used.
* sequende:
* kernel_small
* ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
4 changes: 2 additions & 2 deletions arch/x86/kernel/apic/io_apic.c
@@ -1886,7 +1886,7 @@ void disable_IO_APIC(void)
*
* With interrupt-remapping, for now we will use virtual wire A mode,
* as virtual wire B is little complex (need to configure both
-* IOAPIC RTE aswell as interrupt-remapping table entry).
+* IOAPIC RTE as well as interrupt-remapping table entry).
* As this gets called during crash dump, keep this simple for now.
*/
if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
@@ -2905,7 +2905,7 @@ void __init setup_IO_APIC(void)
}

/*
-* Called after all the initialization is done. If we didnt find any
+* Called after all the initialization is done. If we didn't find any
* APIC bugs then we can allow the modify fast path
*/

2 changes: 1 addition & 1 deletion arch/x86/kernel/apm_32.c
@@ -66,7 +66,7 @@
* 1.5: Fix segment register reloading (in case of bad segments saved
* across BIOS call).
* Stephen Rothwell
-* 1.6: Cope with complier/assembler differences.
+* 1.6: Cope with compiler/assembler differences.
* Only try to turn off the first display device.
* Fix OOPS at power off with no APM BIOS by Jan Echternach
* <[email protected]>
4 changes: 2 additions & 2 deletions arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -444,7 +444,7 @@ static int __cpuinit longhaul_get_ranges(void)
return -EINVAL;
}
/* Get max multiplier - as we always did.
-* Longhaul MSR is usefull only when voltage scaling is enabled.
+* Longhaul MSR is useful only when voltage scaling is enabled.
* C3 is booting at max anyway. */
maxmult = mult;
/* Get min multiplier */
@@ -1011,7 +1011,7 @@ static void __exit longhaul_exit(void)
* trigger frequency transition in some cases. */
module_param(disable_acpi_c3, int, 0644);
MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
-/* Change CPU voltage with frequency. Very usefull to save
+/* Change CPU voltage with frequency. Very useful to save
* power, but most VIA C3 processors aren't supporting it. */
module_param(scale_voltage, int, 0644);
MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1276,7 +1276,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)

if (powernow_k8_cpu_init_acpi(data)) {
/*
-* Use the PSB BIOS structure. This is only availabe on
+* Use the PSB BIOS structure. This is only available on
* an UP version, and is deprecated by AMD.
*/
if (num_online_cpus() != 1) {
4 changes: 2 additions & 2 deletions arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
@@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)

result = speedstep_smi_ownership();
if (result) {
-dprintk("fails in aquiring ownership of a SMI interface.\n");
+dprintk("fails in acquiring ownership of a SMI interface.\n");
return -EINVAL;
}

@@ -360,7 +360,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
int result = speedstep_smi_ownership();

if (result)
-dprintk("fails in re-aquiring ownership of a SMI interface.\n");
+dprintk("fails in re-acquiring ownership of a SMI interface.\n");

return result;
}
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -32,7 +32,7 @@ static void inject_mce(struct mce *m)
{
struct mce *i = &per_cpu(injectm, m->extcpu);

-/* Make sure noone reads partially written injectm */
+/* Make sure no one reads partially written injectm */
i->finished = 0;
mb();
m->finished = 0;
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/mcheck/mce.c
@@ -881,7 +881,7 @@ static int mce_end(int order)
* Check if the address reported by the CPU is in a format we can parse.
* It would be possible to add code for most other cases, but all would
* be somewhat complicated (e.g. segment offset would require an instruction
-* parser). So only support physical addresses upto page granuality for now.
+* parser). So only support physical addresses up to page granuality for now.
*/
static int mce_usable_address(struct mce *m)
{
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/mtrr/generic.c
@@ -1,6 +1,6 @@
/*
* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
-* because MTRRs can span upto 40 bits (36bits on most modern x86)
+* because MTRRs can span up to 40 bits (36bits on most modern x86)
*/
#define DEBUG

2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/perf_event.c
@@ -1111,7 +1111,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)

/*
* If group events scheduling transaction was started,
-* skip the schedulability test here, it will be peformed
+* skip the schedulability test here, it will be performed
* at commit time (->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
8 changes: 4 additions & 4 deletions arch/x86/kernel/cpu/perf_event_p4.c
@@ -1,5 +1,5 @@
/*
-* Netburst Perfomance Events (P4, old Xeon)
+* Netburst Performance Events (P4, old Xeon)
*
* Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <[email protected]>
* Copyright (C) 2010 Intel Corporation, Lin Ming <[email protected]>
@@ -679,7 +679,7 @@ static int p4_validate_raw_event(struct perf_event *event)
*/

/*
-* if an event is shared accross the logical threads
+* if an event is shared across the logical threads
* the user needs special permissions to be able to use it
*/
if (p4_ht_active() && p4_event_bind_map[v].shared) {
@@ -790,13 +790,13 @@ static void p4_pmu_disable_pebs(void)
*
* It's still allowed that two threads setup same cache
* events so we can't simply clear metrics until we knew
-* noone is depending on us, so we need kind of counter
+* no one is depending on us, so we need kind of counter
* for "ReplayEvent" users.
*
* What is more complex -- RAW events, if user (for some
* reason) will pass some cache event metric with improper
* event opcode -- it's fine from hardware point of view
-* but completely nonsence from "meaning" of such action.
+* but completely nonsense from "meaning" of such action.
*
* So at moment let leave metrics turned on forever -- it's
* ok for now but need to be revisited!
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/vmware.c
@@ -86,7 +86,7 @@ static void __init vmware_platform_setup(void)
}

/*
-* While checking the dmi string infomation, just checking the product
+* While checking the dmi string information, just checking the product
* serial key should be enough, as this will always have a VMware
* specific string when running under VMware hypervisor.
*/
4 changes: 2 additions & 2 deletions arch/x86/kernel/entry_64.S
@@ -18,7 +18,7 @@
* A note on terminology:
* - top of stack: Architecture defined interrupt frame from SS to RIP
* at the top of the kernel process stack.
-* - partial stack frame: partially saved registers upto R11.
+* - partial stack frame: partially saved registers up to R11.
* - full stack frame: Like partial stack frame, but all register saved.
*
* Some macro usage:
@@ -422,7 +422,7 @@ ENTRY(ret_from_fork)
END(ret_from_fork)

/*
-* System call entry. Upto 6 arguments in registers are supported.
+* System call entry. Up to 6 arguments in registers are supported.
*
* SYSCALL does not save anything on the stack and does not change the
* stack pointer.
2 changes: 1 addition & 1 deletion arch/x86/kernel/i387.c
@@ -145,7 +145,7 @@ EXPORT_SYMBOL_GPL(fpu_finit);
* The _current_ task is using the FPU for the first time
* so initialize it and set the mxcsr to its default
* value at reset if we support XMM instructions and then
-* remeber the current task has used the FPU.
+* remember the current task has used the FPU.
*/
int init_fpu(struct task_struct *tsk)
{
2 changes: 1 addition & 1 deletion arch/x86/kernel/irq_32.c
@@ -172,7 +172,7 @@ asmlinkage void do_softirq(void)

call_on_stack(__do_softirq, isp);
/*
-* Shouldnt happen, we returned above if in_interrupt():
+* Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
2 changes: 1 addition & 1 deletion arch/x86/kernel/kgdb.c
@@ -278,7 +278,7 @@ static int hw_break_release_slot(int breakno)
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_release_bp_slot(*pevent))
/*
-* The debugger is responisble for handing the retry on
+* The debugger is responsible for handing the retry on
* remove failure.
*/
return -1;
2 changes: 1 addition & 1 deletion arch/x86/kernel/mca_32.c
@@ -259,7 +259,7 @@ static int __init mca_init(void)
/*
* WARNING: Be careful when making changes here. Putting an adapter
* and the motherboard simultaneously into setup mode may result in
-* damage to chips (according to The Indispensible PC Hardware Book
+* damage to chips (according to The Indispensable PC Hardware Book
* by Hans-Peter Messmer). Also, we disable system interrupts (so
* that we are not disturbed in the middle of this).
*/
4 changes: 2 additions & 2 deletions arch/x86/kernel/mpparse.c
@@ -883,7 +883,7 @@ static int __init update_mp_table(void)

if (!mpc_new_phys) {
unsigned char old, new;
-/* check if we can change the postion */
+/* check if we can change the position */
mpc->checksum = 0;
old = mpf_checksum((unsigned char *)mpc, mpc->length);
mpc->checksum = 0xff;
@@ -892,7 +892,7 @@ static int __init update_mp_table(void)
printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
return 0;
}
-printk(KERN_INFO "use in-positon replacing\n");
+printk(KERN_INFO "use in-position replacing\n");
} else {
mpf->physptr = mpc_new_phys;
mpc_new = phys_to_virt(mpc_new_phys);
4 changes: 2 additions & 2 deletions arch/x86/kernel/pci-calgary_64.c
@@ -1279,7 +1279,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)

if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
/*
-* FIXME: properly scan for devices accross the
+* FIXME: properly scan for devices across the
* PCI-to-PCI bridge on every CalIOC2 port.
*/
return 1;
@@ -1295,7 +1295,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)

/*
* calgary_init_bitmap_from_tce_table():
-* Funtion for kdump case. In the second/kdump kernel initialize
+* Function for kdump case. In the second/kdump kernel initialize
* the bitmap based on the tce table entries obtained from first kernel
*/
static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
(Diff truncated: the remaining 18 of the 50 changed files are not shown.)
