Commit ad14c192 authored by Xiaoming Ni's avatar Xiaoming Ni Committed by Will Deacon

arm64: fix some spelling mistakes in the comments by codespell

arch/arm64/include/asm/cpu_ops.h:24: necesary ==> necessary
arch/arm64/include/asm/kvm_arm.h:69: maintainance ==> maintenance
arch/arm64/include/asm/cpufeature.h:361: capabilties ==> capabilities
arch/arm64/kernel/perf_regs.c:19: compatability ==> compatibility
arch/arm64/kernel/smp_spin_table.c:86: endianess ==> endianness
arch/arm64/kernel/smp_spin_table.c:88: endianess ==> endianness
arch/arm64/kvm/vgic/vgic-mmio-v3.c:1004: targetting ==> targeting
arch/arm64/kvm/vgic/vgic-mmio-v3.c:1005: targetting ==> targeting
Signed-off-by: Xiaoming Ni <nixiaoming@huawei.com>
Link: https://lore.kernel.org/r/20200828031822.35928-1-nixiaoming@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
parent f75aef39
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
* mechanism for doing so, tests whether it is possible to boot * mechanism for doing so, tests whether it is possible to boot
* the given CPU. * the given CPU.
* @cpu_boot: Boots a cpu into the kernel. * @cpu_boot: Boots a cpu into the kernel.
* @cpu_postboot: Optionally, perform any post-boot cleanup or necesary * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
* synchronisation. Called from the cpu being booted. * synchronisation. Called from the cpu being booted.
* @cpu_can_disable: Determines whether a CPU can be disabled based on * @cpu_can_disable: Determines whether a CPU can be disabled based on
* mechanism-specific information. * mechanism-specific information.
......
...@@ -358,7 +358,7 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap) ...@@ -358,7 +358,7 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
} }
/* /*
* Generic helper for handling capabilties with multiple (match,enable) pairs * Generic helper for handling capabilities with multiple (match,enable) pairs
* of call backs, sharing the same capability bit. * of call backs, sharing the same capability bit.
* Iterate over each entry to see if at least one matches. * Iterate over each entry to see if at least one matches.
*/ */
......
...@@ -66,7 +66,7 @@ ...@@ -66,7 +66,7 @@
* TWI: Trap WFI * TWI: Trap WFI
* TIDCP: Trap L2CTLR/L2ECTLR * TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain * BSU_IS: Upgrade barriers to the inner shareable domain
* FB: Force broadcast of all maintainance operations * FB: Force broadcast of all maintenance operations
* AMO: Override CPSR.A and enable signaling with VA * AMO: Override CPSR.A and enable signaling with VA
* IMO: Override CPSR.I and enable signaling with VI * IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF * FMO: Override CPSR.F and enable signaling with VF
......
...@@ -16,7 +16,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) ...@@ -16,7 +16,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
/* /*
* Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
* we're stuck with it for ABI compatability reasons. * we're stuck with it for ABI compatibility reasons.
* *
* For a 32-bit consumer inspecting a 32-bit task, then it will look at * For a 32-bit consumer inspecting a 32-bit task, then it will look at
* the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
......
...@@ -83,9 +83,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) ...@@ -83,9 +83,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
/* /*
* We write the release address as LE regardless of the native * We write the release address as LE regardless of the native
* endianess of the kernel. Therefore, any boot-loaders that * endianness of the kernel. Therefore, any boot-loaders that
* read this address need to convert this address to the * read this address need to convert this address to the
* boot-loader's endianess before jumping. This is mandated by * boot-loader's endianness before jumping. This is mandated by
* the boot protocol. * the boot protocol.
*/ */
writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr); writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
......
...@@ -1001,8 +1001,8 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) ...@@ -1001,8 +1001,8 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
raw_spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* /*
* An access targetting Group0 SGIs can only generate * An access targeting Group0 SGIs can only generate
* those, while an access targetting Group1 SGIs can * those, while an access targeting Group1 SGIs can
* generate interrupts of either group. * generate interrupts of either group.
*/ */
if (!irq->group || allow_group1) { if (!irq->group || allow_group1) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment