Commit 95534043 authored by Linus Torvalds


Merge tag 'loongarch-fixes-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Fix boot failure on machines with more than 8 nodes, and fix two build
  errors about KVM"

* tag 'loongarch-fixes-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Add returns to SIMD stubs
  LoongArch: KVM: Fix build due to API changes
  LoongArch/smp: Call rcutree_report_cpu_starting() at tlb_init()
parents cd2286fc 48ef9e87
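
The first two items fix a build error in the !CONFIG_CPU_HAS_LSX / !CONFIG_CPU_HAS_LASX configuration: the stubs are declared to return int but had empty bodies, which -Wreturn-type (typically promoted to an error in kernel builds via -Werror) rejects. A minimal userspace sketch of the broken and fixed patterns, using an opaque stand-in for struct kvm_vcpu rather than the real kernel header:

/* Sketch only: mirrors the stub fix in the hunks below. */
#include <errno.h>

struct kvm_vcpu;        /* opaque stand-in, not the kernel definition */

/* Broken: declared to return int, but the body returns nothing,
 * so -Wreturn-type fires and -Werror makes it fatal. */
/* static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { } */

/* Fixed, as in the diff: report "not supported" instead. */
static inline int kvm_own_lsx(struct kvm_vcpu *vcpu)  { return -EINVAL; }
static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }

Callers can then treat the stub as a failed attempt to enable the vector unit instead of relying on an undefined return value.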
@@ -60,7 +60,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu);
 void kvm_save_lsx(struct loongarch_fpu *fpu);
 void kvm_restore_lsx(struct loongarch_fpu *fpu);
 #else
-static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
 static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
 #endif
@@ -70,7 +70,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu);
 void kvm_save_lasx(struct loongarch_fpu *fpu);
 void kvm_restore_lasx(struct loongarch_fpu *fpu);
 #else
-static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
 static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
...
@@ -509,7 +509,6 @@ asmlinkage void start_secondary(void)
         sync_counter();
         cpu = raw_smp_processor_id();
         set_my_cpu_offset(per_cpu_offset(cpu));
-        rcutree_report_cpu_starting(cpu);
 
         cpu_probe();
         constant_clockevent_init();
...
@@ -675,7 +675,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
  *
  * There are several ways to safely use this helper:
  *
- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
  *   consuming it. In this case, mmu_lock doesn't need to be held during the
  *   lookup, but it does need to be held while checking the MMU notifier.
  *
@@ -855,7 +855,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 
         /* Check if an invalidation has taken place since we got pfn */
         spin_lock(&kvm->mmu_lock);
-        if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
+        if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                 /*
                  * This can happen when mappings are changed asynchronously, but
                  * also synchronously if a COW is triggered by
...
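
The mmu.c hunks above track a KVM API change: mmu_invalidate_retry_hva() was replaced by mmu_invalidate_retry_gfn(), which takes the guest frame number instead of the host virtual address. The comment spells out the usual sequence-count retry protocol: sample the invalidation counter, do the unlocked lookup, then re-check the counter under mmu_lock before committing the result. A reduced, self-contained C sketch of that protocol, with illustrative names (invalidate_seq, invalidate_retry, map_page) rather than the actual KVM API:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long invalidate_seq;    /* bumped by every invalidation event */

/* Has an invalidation happened since the caller sampled the counter? */
static bool invalidate_retry(unsigned long mmu_seq)
{
        return invalidate_seq != mmu_seq;
}

static int map_page(unsigned long gfn)
{
        unsigned long mmu_seq;

        (void)gfn;                      /* the real code would translate gfn below */
retry:
        mmu_seq = invalidate_seq;       /* 1. snapshot before the unlocked lookup */

        /* 2. do the translation/lookup without holding mmu_lock ... */

        pthread_mutex_lock(&mmu_lock);
        if (invalidate_retry(mmu_seq)) {        /* 3. re-check under the lock */
                pthread_mutex_unlock(&mmu_lock);
                goto retry;             /* stale result, start over */
        }
        /* ... install the mapping while still holding mmu_lock ... */
        pthread_mutex_unlock(&mmu_lock);
        return 0;
}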
@@ -284,12 +284,16 @@ static void setup_tlb_handler(int cpu)
                 set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
                 set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
                 set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
-        }
+        } else {
+                int vec_sz __maybe_unused;
+                void *addr __maybe_unused;
+                struct page *page __maybe_unused;
+
+                /* Avoid lockdep warning */
+                rcutree_report_cpu_starting(cpu);
+
 #ifdef CONFIG_NUMA
-        else {
-                void *addr;
-                struct page *page;
-                const int vec_sz = sizeof(exception_handlers);
+                vec_sz = sizeof(exception_handlers);
 
                 if (pcpu_handlers[cpu])
                         return;
@@ -305,8 +309,8 @@ static void setup_tlb_handler(int cpu)
                 csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
                 csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
                 csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
-        }
 #endif
+        }
 }
 
 void tlb_init(int cpu)
...
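
The smp.c and tlb.c hunks belong to the same fix: start_secondary() no longer reports the CPU to RCU itself, and the non-boot branch of setup_tlb_handler() now does it ("Avoid lockdep warning") before the per-CPU handler setup that the NUMA path performs; by elimination from the pull message, this is the change that fixes the boot failure on machines with more than 8 nodes. A tiny, self-contained sketch of the ordering constraint, with illustrative names (report_cpu_starting, do_rcu_protected_work) standing in for the kernel APIs:

#include <assert.h>
#include <stdbool.h>

static _Thread_local bool cpu_reported; /* stands in for RCU's view of this CPU */

static void report_cpu_starting(void)   /* analogue of rcutree_report_cpu_starting() */
{
        cpu_reported = true;
}

static void do_rcu_protected_work(void) /* e.g. allocations whose locks lockdep tracks */
{
        /* Tripping this assert is the userspace analogue of the lockdep splat. */
        assert(cpu_reported);
}

int main(void)
{
        /* Secondary-CPU bring-up: the report must precede any RCU-protected work,
         * which is the ordering tlb_init()/setup_tlb_handler() now establishes. */
        report_cpu_starting();
        do_rcu_protected_work();
        return 0;
}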