Commit 0d2eb44f authored by Lucas De Marchi, committed by Ingo Molnar

x86: Fix common misspellings

They were generated by 'codespell' and then manually reviewed.
Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
Cc: trivial@kernel.org
LKML-Reference: <1300389856-1099-3-git-send-email-lucas.demarchi@profusion.mobi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a6c3270b
@@ -326,7 +326,7 @@ config X86_PPRO_FENCE
 Old PentiumPro multiprocessor systems had errata that could cause
 memory operations to violate the x86 ordering standard in rare cases.
 Enabling this option will attempt to work around some (but not all)
-occurances of this problem, at the cost of much heavier spinlock and
+occurrences of this problem, at the cost of much heavier spinlock and
 memory barrier operations.
 If unsure, say n here. Even distro kernels should think twice before
......
@@ -1346,7 +1346,7 @@ _zero_cipher_left_decrypt:
 and $15, %r13 # %r13 = arg4 (mod 16)
 je _multiple_of_16_bytes_decrypt
-# Handle the last <16 byte block seperately
+# Handle the last <16 byte block separately
 paddd ONE(%rip), %xmm0 # increment CNT to get Yn
 movdqa SHUF_MASK(%rip), %xmm10
@@ -1355,7 +1355,7 @@ _zero_cipher_left_decrypt:
 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
 sub $16, %r11
 add %r13, %r11
-movdqu (%arg3,%r11,1), %xmm1 # recieve the last <16 byte block
+movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
 lea SHIFT_MASK+16(%rip), %r12
 sub %r13, %r12
 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
@@ -1607,7 +1607,7 @@ _zero_cipher_left_encrypt:
 and $15, %r13 # %r13 = arg4 (mod 16)
 je _multiple_of_16_bytes_encrypt
-# Handle the last <16 Byte block seperately
+# Handle the last <16 Byte block separately
 paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
 movdqa SHUF_MASK(%rip), %xmm10
 PSHUFB_XMM %xmm10, %xmm0
......
@@ -71,7 +71,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
 * Read/Write : ReadOnly, ReadWrite
 * Presence : NotPresent
 *
-* Within a catagory, the attributes are mutually exclusive.
+* Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
......
@@ -29,8 +29,8 @@ void arch_trigger_all_cpu_backtrace(void);
 * external nmis, because the local ones are more frequent.
 *
 * Also setup some default high/normal/low settings for
-* subsystems to registers with. Using 4 bits to seperate
-* the priorities. This can go alot higher if needed be.
+* subsystems to registers with. Using 4 bits to separate
+* the priorities. This can go a lot higher if needed be.
 */
 #define NMI_LOCAL_SHIFT 16 /* randomly picked */
......
@@ -38,7 +38,7 @@
 #define K8_NOP8 K8_NOP4 K8_NOP4
 /* K7 nops
-uses eax dependencies (arbitary choice)
+uses eax dependencies (arbitrary choice)
 1: nop
 2: movl %eax,%eax
 3: leal (,%eax,1),%eax
......
@@ -20,7 +20,7 @@ extern struct olpc_platform_t olpc_platform_info;
 /*
 * OLPC board IDs contain the major build number within the mask 0x0ff0,
-* and the minor build number withing 0x000f. Pre-builds have a minor
+* and the minor build number within 0x000f. Pre-builds have a minor
 * number less than 8, and normal builds start at 8. For example, 0x0B10
 * is a PreB1, and 0x0C18 is a C1.
 */
......
 /*
-* Netburst Perfomance Events (P4, old Xeon)
+* Netburst Performance Events (P4, old Xeon)
 */
 #ifndef PERF_EVENT_P4_H
@@ -9,7 +9,7 @@
 #include <linux/bitops.h>
 /*
-* NetBurst has perfomance MSRs shared between
+* NetBurst has performance MSRs shared between
 * threads if HT is turned on, ie for both logical
 * processors (mem: in turn in Atom with HT support
 * perf-MSRs are not shared and every thread has its
......
@@ -7,7 +7,7 @@
 */
 #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
 #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
 #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
 #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
 #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
......
@@ -31,7 +31,7 @@
 #define R12 24
 #define RBP 32
 #define RBX 40
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here*/
 #define R11 48
 #define R10 56
 #define R9 64
......
@@ -73,7 +73,7 @@ struct pt_regs {
 unsigned long r12;
 unsigned long rbp;
 unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
 unsigned long r11;
 unsigned long r10;
 unsigned long r9;
@@ -103,7 +103,7 @@ struct pt_regs {
 unsigned long r12;
 unsigned long bp;
 unsigned long bx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
 unsigned long r11;
 unsigned long r10;
 unsigned long r9;
......
@@ -35,7 +35,7 @@ static inline cycles_t get_cycles(void)
 static __always_inline cycles_t vget_cycles(void)
 {
 /*
-* We only do VDSOs on TSC capable CPUs, so this shouldnt
+* We only do VDSOs on TSC capable CPUs, so this shouldn't
 * access boot_cpu_data (which is not VDSO-safe):
 */
 #ifndef CONFIG_X86_TSC
......
@@ -86,7 +86,7 @@ DEFINE_GUEST_HANDLE(void);
 * The privilege level specifies which modes may enter a trap via a software
 * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
 * privilege levels as follows:
-* Level == 0: Noone may enter
+* Level == 0: No one may enter
 * Level == 1: Kernel may enter
 * Level == 2: Kernel may enter
 * Level == 3: Everyone may enter
......
@@ -199,7 +199,7 @@ void *text_poke_early(void *addr, const void *opcode, size_t len);
 /* Replace instructions with better alternatives for this CPU type.
 This runs before SMP is initialized to avoid SMP problems with
-self modifying code. This implies that assymetric systems where
+self modifying code. This implies that asymmetric systems where
 APs have less capabilities than the boot processor are not handled.
 Tough. Make sure you disable such features by hand. */
......
@@ -73,7 +73,7 @@ static u32 __init allocate_aperture(void)
 /*
 * using 512M as goal, in case kexec will load kernel_big
 * that will do the on position decompress, and could overlap with
-* that positon with gart that is used.
+* that position with gart that is used.
 * sequende:
 * kernel_small
 * ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
......
@@ -1886,7 +1886,7 @@ void disable_IO_APIC(void)
 *
 * With interrupt-remapping, for now we will use virtual wire A mode,
 * as virtual wire B is little complex (need to configure both
-* IOAPIC RTE aswell as interrupt-remapping table entry).
+* IOAPIC RTE as well as interrupt-remapping table entry).
 * As this gets called during crash dump, keep this simple for now.
 */
 if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
@@ -2905,7 +2905,7 @@ void __init setup_IO_APIC(void)
 }
 /*
-* Called after all the initialization is done. If we didnt find any
+* Called after all the initialization is done. If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */
......
@@ -66,7 +66,7 @@
 * 1.5: Fix segment register reloading (in case of bad segments saved
 * across BIOS call).
 * Stephen Rothwell
-* 1.6: Cope with complier/assembler differences.
+* 1.6: Cope with compiler/assembler differences.
 * Only try to turn off the first display device.
 * Fix OOPS at power off with no APM BIOS by Jan Echternach
 * <echter@informatik.uni-rostock.de>
......
@@ -444,7 +444,7 @@ static int __cpuinit longhaul_get_ranges(void)
 return -EINVAL;
 }
 /* Get max multiplier - as we always did.
-* Longhaul MSR is usefull only when voltage scaling is enabled.
+* Longhaul MSR is useful only when voltage scaling is enabled.
 * C3 is booting at max anyway. */
 maxmult = mult;
 /* Get min multiplier */
@@ -1011,7 +1011,7 @@ static void __exit longhaul_exit(void)
 * trigger frequency transition in some cases. */
 module_param(disable_acpi_c3, int, 0644);
 MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
-/* Change CPU voltage with frequency. Very usefull to save
+/* Change CPU voltage with frequency. Very useful to save
 * power, but most VIA C3 processors aren't supporting it. */
 module_param(scale_voltage, int, 0644);
 MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
......
@@ -1276,7 +1276,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 if (powernow_k8_cpu_init_acpi(data)) {
 /*
-* Use the PSB BIOS structure. This is only availabe on
+* Use the PSB BIOS structure. This is only available on
 * an UP version, and is deprecated by AMD.
 */
 if (num_online_cpus() != 1) {
......
@@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 result = speedstep_smi_ownership();
 if (result) {
-dprintk("fails in aquiring ownership of a SMI interface.\n");
+dprintk("fails in acquiring ownership of a SMI interface.\n");
 return -EINVAL;
 }
@@ -360,7 +360,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
 int result = speedstep_smi_ownership();
 if (result)
-dprintk("fails in re-aquiring ownership of a SMI interface.\n");
+dprintk("fails in re-acquiring ownership of a SMI interface.\n");
 return result;
 }
......
@@ -32,7 +32,7 @@ static void inject_mce(struct mce *m)
 {
 struct mce *i = &per_cpu(injectm, m->extcpu);
-/* Make sure noone reads partially written injectm */
+/* Make sure no one reads partially written injectm */
 i->finished = 0;
 mb();
 m->finished = 0;
......
@@ -881,7 +881,7 @@ static int mce_end(int order)
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
-* parser). So only support physical addresses upto page granuality for now.
+* parser). So only support physical addresses up to page granuality for now.
 */
 static int mce_usable_address(struct mce *m)
 {
......
 /*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
-* because MTRRs can span upto 40 bits (36bits on most modern x86)
+* because MTRRs can span up to 40 bits (36bits on most modern x86)
 */
 #define DEBUG
......
@@ -1111,7 +1111,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
 /*
 * If group events scheduling transaction was started,
-* skip the schedulability test here, it will be peformed
+* skip the schedulability test here, it will be performed
 * at commit time (->commit_txn) as a whole
 */
 if (cpuc->group_flag & PERF_EVENT_TXN)
......
 /*
-* Netburst Perfomance Events (P4, old Xeon)
+* Netburst Performance Events (P4, old Xeon)
 *
 * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
@@ -679,7 +679,7 @@ static int p4_validate_raw_event(struct perf_event *event)
 */
 /*
-* if an event is shared accross the logical threads
+* if an event is shared across the logical threads
 * the user needs special permissions to be able to use it
 */
 if (p4_ht_active() && p4_event_bind_map[v].shared) {
@@ -790,13 +790,13 @@ static void p4_pmu_disable_pebs(void)
 *
 * It's still allowed that two threads setup same cache
 * events so we can't simply clear metrics until we knew
-* noone is depending on us, so we need kind of counter
+* no one is depending on us, so we need kind of counter
 * for "ReplayEvent" users.
 *
 * What is more complex -- RAW events, if user (for some
 * reason) will pass some cache event metric with improper
 * event opcode -- it's fine from hardware point of view
-* but completely nonsence from "meaning" of such action.
+* but completely nonsense from "meaning" of such action.
 *
 * So at moment let leave metrics turned on forever -- it's
 * ok for now but need to be revisited!
......
@@ -86,7 +86,7 @@ static void __init vmware_platform_setup(void)
 }
 /*
-* While checking the dmi string infomation, just checking the product
+* While checking the dmi string information, just checking the product
 * serial key should be enough, as this will always have a VMware
 * specific string when running under VMware hypervisor.
 */
......
@@ -18,7 +18,7 @@
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
-* - partial stack frame: partially saved registers upto R11.
+* - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all register saved.
 *
 * Some macro usage:
@@ -422,7 +422,7 @@ ENTRY(ret_from_fork)
 END(ret_from_fork)
 /*
-* System call entry. Upto 6 arguments in registers are supported.
+* System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
......
@@ -145,7 +145,7 @@ EXPORT_SYMBOL_GPL(fpu_finit);
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default
 * value at reset if we support XMM instructions and then
-* remeber the current task has used the FPU.
+* remember the current task has used the FPU.
 */
 int init_fpu(struct task_struct *tsk)
 {
......
@@ -172,7 +172,7 @@ asmlinkage void do_softirq(void)
 call_on_stack(__do_softirq, isp);
 /*
-* Shouldnt happen, we returned above if in_interrupt():
+* Shouldn't happen, we returned above if in_interrupt():
 */
 WARN_ON_ONCE(softirq_count());
 }
......
@@ -278,7 +278,7 @@ static int hw_break_release_slot(int breakno)
 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
 if (dbg_release_bp_slot(*pevent))
 /*
-* The debugger is responisble for handing the retry on
+* The debugger is responsible for handing the retry on
 * remove failure.
 */
 return -1;
......
@@ -259,7 +259,7 @@ static int __init mca_init(void)
 /*
 * WARNING: Be careful when making changes here. Putting an adapter
 * and the motherboard simultaneously into setup mode may result in
-* damage to chips (according to The Indispensible PC Hardware Book
+* damage to chips (according to The Indispensable PC Hardware Book
 * by Hans-Peter Messmer). Also, we disable system interrupts (so
 * that we are not disturbed in the middle of this).
 */
......
@@ -883,7 +883,7 @@ static int __init update_mp_table(void)
 if (!mpc_new_phys) {
 unsigned char old, new;
-/* check if we can change the postion */
+/* check if we can change the position */
 mpc->checksum = 0;
 old = mpf_checksum((unsigned char *)mpc, mpc->length);
 mpc->checksum = 0xff;
@@ -892,7 +892,7 @@ static int __init update_mp_table(void)
 printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
 return 0;
 }
-printk(KERN_INFO "use in-positon replacing\n");
+printk(KERN_INFO "use in-position replacing\n");
 } else {
 mpf->physptr = mpc_new_phys;
 mpc_new = phys_to_virt(mpc_new_phys);
......
@@ -1279,7 +1279,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
 if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
 /*
-* FIXME: properly scan for devices accross the
+* FIXME: properly scan for devices across the
 * PCI-to-PCI bridge on every CalIOC2 port.
 */
 return 1;
@@ -1295,7 +1295,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
 /*
 * calgary_init_bitmap_from_tce_table():
-* Funtion for kdump case. In the second/kdump kernel initialize
+* Function for kdump case. In the second/kdump kernel initialize
 * the bitmap based on the tce table entries obtained from first kernel
 */
 static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
......
@@ -166,7 +166,7 @@ static void enable_step(struct task_struct *child, bool block)
 * Make sure block stepping (BTF) is not enabled unless it should be.
 * Note that we don't try to worry about any is_setting_trap_flag()
 * instructions after the first when using block stepping.
-* So noone should try to use debugger block stepping in a program
+* So no one should try to use debugger block stepping in a program
 * that uses user-mode single stepping itself.
 */
 if (enable_single_step(child) && block) {
......
@@ -39,7 +39,7 @@ int __ref arch_register_cpu(int num)
 /*
 * CPU0 cannot be offlined due to several
 * restrictions and assumptions in kernel. This basically
-* doesnt add a control file, one cannot attempt to offline
+* doesn't add a control file, one cannot attempt to offline
 * BSP.
 *
 * Also certain PCI quirks require not to enable hotplug control
......
@@ -427,7 +427,7 @@ unsigned long native_calibrate_tsc(void)
 * the delta to the previous read. We keep track of the min
 * and max values of that delta. The delta is mostly defined
 * by the IO time of the PIT access, so we can detect when a
-* SMI/SMM disturbance happend between the two reads. If the
+* SMI/SMM disturbance happened between the two reads. If the
 * maximum time is significantly larger than the minimum time,
 * then we discard the result and have another try.
 *
@@ -900,7 +900,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
-* If there are any calibration anomolies (too many SMIs, etc),
+* If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by 1% of the fast early
 * calibration, we throw out the new calibration and use the
 * early calibration.
......
@@ -18,7 +18,7 @@
 * This file is expected to run in 32bit code. Currently:
 *
 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
-* arch/x86/kernel/trampoline_64.S: secondary processor verfication
+* arch/x86/kernel/trampoline_64.S: secondary processor verification
 * arch/x86/kernel/head_32.S: processor startup
 *
 * verify_cpu, returns the status of longmode and SSE in register %eax.
......
@@ -53,7 +53,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
 /*
 * None of the feature bits are in init state. So nothing else
-* to do for us, as the memory layout is upto date.
+* to do for us, as the memory layout is up to date.
 */
 if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
 return;
......
@@ -348,7 +348,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 return;
 kvm_get_pfn(pfn);
 /*
-* we call mmu_set_spte() with host_writable = true beacuse that
+* we call mmu_set_spte() with host_writable = true because that
 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
 */
 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
......
@@ -25,7 +25,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
 /*
 * There is a race window between reading and incrementing, but we do
-* not care about potentially loosing timer events in the !reinject
+* not care about potentially losing timer events in the !reinject
 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
 * in vcpu_enter_guest.
 */
......
@@ -1028,7 +1028,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 /*
 * Special case: close write to TSC within 5 seconds of
 * another CPU is interpreted as an attempt to synchronize
-* The 5 seconds is to accomodate host load / swapping as
+* The 5 seconds is to accommodate host load / swapping as
 * well as any reset of TSC during the boot process.
 *
 * In that case, for a reliable TSC, we can match TSC offsets,
......
@@ -397,7 +397,7 @@ static void lguest_load_tr_desc(void)
 * instead we just use the real "cpuid" instruction. Then I pretty much turned
 * off feature bits until the Guest booted. (Don't say that: you'll damage
 * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
-* hardly future proof.) Noone's listening! They don't like you anyway,
+* hardly future proof.) No one's listening! They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing the cpuid so we can turn features off is great for the kernel, but
......
@@ -117,7 +117,7 @@ ENDPROC(bad_from_user)
 * rdx count
 *
 * Output:
-* eax uncopied bytes or 0 if successfull.
+* eax uncopied bytes or 0 if successful.
 */
 ENTRY(copy_user_generic_unrolled)
 CFI_STARTPROC
......
@@ -152,7 +152,7 @@ ENTRY(csum_partial_copy_generic)
 adcq %r9,%rax
-/* do last upto 56 bytes */
+/* do last up to 56 bytes */
 .Lhandle_tail:
 /* ecx: count */
 movl %ecx,%r10d
@@ -180,7 +180,7 @@ ENTRY(csum_partial_copy_generic)
 addl %ebx,%eax
 adcl %r9d,%eax
-/* do last upto 6 bytes */
+/* do last up to 6 bytes */
 .Lhandle_7:
 movl %r10d,%ecx
 andl $7,%ecx
......
@@ -84,7 +84,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len)
 count64--;
 }
-/* last upto 7 8byte blocks */
+/* last up to 7 8byte blocks */
 count %= 8;
 while (count) {
 asm("addq %1,%0\n\t"
......
@@ -326,7 +326,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 if (mm->free_area_cache < len)
 goto fail;
-/* either no address requested or cant fit in requested address hole */
+/* either no address requested or can't fit in requested address hole */
 addr = (mm->free_area_cache - len) & huge_page_mask(h);
 do {
 /*
......
@@ -917,7 +917,7 @@ static void mark_nxdata_nx(void)
 {
 /*
 * When this called, init has already been executed and released,
-* so everything past _etext sould be NX.
+* so everything past _etext should be NX.
 */
 unsigned long start = PFN_ALIGN(_etext);
 /*
......
@@ -446,7 +446,7 @@ static int __init numa_alloc_distance(void)
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance. If distance table
-* doesn't exist, one which is large enough to accomodate all the currently
+* doesn't exist, one which is large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such table cannot be allocated, a warning is printed and further
......
@@ -310,7 +310,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 * these shared mappings are made of small page mappings.
 * Thus this don't enforce !RW mapping for small page kernel
 * text mapping logic will help Linux Xen parvirt guest boot
-* aswell.
+* as well.
 */
 if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
 pgprot_val(forbidden) |= _PAGE_RW;
......
@@ -241,7 +241,7 @@ void __init pcibios_resource_survey(void)
 e820_reserve_resources_late();
 /*
 * Insert the IO APIC resources after PCI initialization has
-* occured to handle IO APICS that are mapped in on a BAR in
+* occurred to handle IO APICS that are mapped in on a BAR in
 * PCI space, but before trying to assign unassigned pci res.
 */
 ioapic_insert_resources();
@@ -304,7 +304,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 /*
 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
 * To avoid attribute conflicts, request UC MINUS here
-* aswell.
+* as well.
 */
 prot |= _PAGE_CACHE_UC_MINUS;
......
@@ -1745,7 +1745,7 @@ static void convert_pfn_mfn(void *v)
 }
 /*
-* Set up the inital kernel pagetable.
+* Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
......