Commit 26708234 authored by Anup Patel, committed by Anup Patel

RISC-V: KVM: Use G-stage name for hypervisor page table

The two-stage address translation defined by the RISC-V privileged
specification consists of: VS-stage (guest virtual address to guest
physical address), programmed by the Guest OS, and G-stage (guest
physical address to host physical address), programmed by the
hypervisor.

To align with the above terminology, we replace the "stage2" name with
"gstage" and "Stage2" with "G-stage" everywhere in the KVM RISC-V sources.
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
parent dba90d6f
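Background note (not part of the commit): the commit message assumes familiarity with the two-stage translation scheme. The self-contained C sketch below models the two stages as plain functions purely for illustration; the names vs_stage_translate() and g_stage_translate() are made up and do not exist in KVM, where both walks are performed by the hardware MMU using the vsatp (VS-stage) and hgatp (G-stage) CSRs.

/*
 * Illustrative sketch only: how the two RISC-V translation stages compose.
 * The helper names and the offsets used are hypothetical stand-ins for
 * real page-table walks.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;  /* guest virtual address  */
typedef uint64_t gpa_t;  /* guest physical address */
typedef uint64_t hpa_t;  /* host physical address  */

/* VS-stage: GVA -> GPA, page tables programmed by the Guest OS (vsatp). */
static gpa_t vs_stage_translate(gva_t gva)
{
        /* Stand-in for a guest page-table walk. */
        return gva + 0x1000;
}

/* G-stage: GPA -> HPA, page tables programmed by the hypervisor (hgatp). */
static hpa_t g_stage_translate(gpa_t gpa)
{
        /* Stand-in for a hypervisor page-table walk. */
        return gpa + 0x80000000ULL;
}

int main(void)
{
        gva_t gva = 0x400000;
        gpa_t gpa = vs_stage_translate(gva);   /* first stage  */
        hpa_t hpa = g_stage_translate(gpa);    /* second stage */

        printf("GVA 0x%llx -> GPA 0x%llx -> HPA 0x%llx\n",
               (unsigned long long)gva, (unsigned long long)gpa,
               (unsigned long long)hpa);
        return 0;
}

Running the sketch prints the GVA -> GPA -> HPA chain, which mirrors the path a guest memory access takes before the hypervisor's G-stage page table (the subject of the rename below) resolves the final host physical address.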
@@ -54,10 +54,10 @@ struct kvm_vmid {
 };
 
 struct kvm_arch {
-        /* stage2 vmid */
+        /* G-stage vmid */
         struct kvm_vmid vmid;
 
-        /* stage2 page table */
+        /* G-stage page table */
         pgd_t *pgd;
         phys_addr_t pgd_phys;
@@ -207,21 +207,21 @@ void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
 void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
 void __kvm_riscv_hfence_gvma_all(void);
 
-int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
+int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                          struct kvm_memory_slot *memslot,
                          gpa_t gpa, unsigned long hva, bool is_write);
-int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
-void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
-void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
-void kvm_riscv_stage2_mode_detect(void);
-unsigned long kvm_riscv_stage2_mode(void);
-int kvm_riscv_stage2_gpa_bits(void);
+int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
+void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
+void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
+void kvm_riscv_gstage_mode_detect(void);
+unsigned long kvm_riscv_gstage_mode(void);
+int kvm_riscv_gstage_gpa_bits(void);
 
-void kvm_riscv_stage2_vmid_detect(void);
-unsigned long kvm_riscv_stage2_vmid_bits(void);
-int kvm_riscv_stage2_vmid_init(struct kvm *kvm);
-bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid);
-void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu);
+void kvm_riscv_gstage_vmid_detect(void);
+unsigned long kvm_riscv_gstage_vmid_bits(void);
+int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
+bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
+void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
 
 void __kvm_riscv_unpriv_trap(void);
...
@@ -89,13 +89,13 @@ int kvm_arch_init(void *opaque)
                 return -ENODEV;
         }
 
-        kvm_riscv_stage2_mode_detect();
+        kvm_riscv_gstage_mode_detect();
 
-        kvm_riscv_stage2_vmid_detect();
+        kvm_riscv_gstage_vmid_detect();
 
         kvm_info("hypervisor extension available\n");
 
-        switch (kvm_riscv_stage2_mode()) {
+        switch (kvm_riscv_gstage_mode()) {
         case HGATP_MODE_SV32X4:
                 str = "Sv32x4";
                 break;
@@ -110,7 +110,7 @@ int kvm_arch_init(void *opaque)
         }
         kvm_info("using %s G-stage page table format\n", str);
 
-        kvm_info("VMID %ld bits available\n", kvm_riscv_stage2_vmid_bits());
+        kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
 
         return 0;
 }
...
(This file's diff is collapsed and is not shown here.)
@@ -137,7 +137,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
         /* Cleanup VCPU timer */
         kvm_riscv_vcpu_timer_deinit(vcpu);
 
-        /* Free unused pages pre-allocated for Stage2 page table mappings */
+        /* Free unused pages pre-allocated for G-stage page table mappings */
         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
@@ -635,7 +635,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         csr_write(CSR_HVIP, csr->hvip);
         csr_write(CSR_VSATP, csr->vsatp);
 
-        kvm_riscv_stage2_update_hgatp(vcpu);
+        kvm_riscv_gstage_update_hgatp(vcpu);
 
         kvm_riscv_vcpu_timer_restore(vcpu);
@@ -690,7 +690,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
                         kvm_riscv_reset_vcpu(vcpu);
 
                 if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
-                        kvm_riscv_stage2_update_hgatp(vcpu);
+                        kvm_riscv_gstage_update_hgatp(vcpu);
 
                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                         __kvm_riscv_hfence_gvma_all();
@@ -762,7 +762,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 /* Check conditions before entering the guest */
                 cond_resched();
 
-                kvm_riscv_stage2_vmid_update(vcpu);
+                kvm_riscv_gstage_vmid_update(vcpu);
 
                 kvm_riscv_check_vcpu_requests(vcpu);
@@ -800,7 +800,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 kvm_riscv_update_hvip(vcpu);
 
                 if (ret <= 0 ||
-                    kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
+                    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
                     kvm_request_pending(vcpu)) {
                         vcpu->mode = OUTSIDE_GUEST_MODE;
                         local_irq_enable();
...
@@ -412,7 +412,7 @@ static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
         return 0;
 }
 
-static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
+static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
                              struct kvm_cpu_trap *trap)
 {
         struct kvm_memory_slot *memslot;
@@ -440,7 +440,7 @@ static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 };
         }
 
-        ret = kvm_riscv_stage2_map(vcpu, memslot, fault_addr, hva,
+        ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
                 (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
         if (ret < 0)
                 return ret;
@@ -686,7 +686,7 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
         case EXC_LOAD_GUEST_PAGE_FAULT:
         case EXC_STORE_GUEST_PAGE_FAULT:
                 if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
-                        ret = stage2_page_fault(vcpu, run, trap);
+                        ret = gstage_page_fault(vcpu, run, trap);
                 break;
         case EXC_SUPERVISOR_SYSCALL:
                 if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
...
@@ -31,13 +31,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
         int r;
 
-        r = kvm_riscv_stage2_alloc_pgd(kvm);
+        r = kvm_riscv_gstage_alloc_pgd(kvm);
         if (r)
                 return r;
 
-        r = kvm_riscv_stage2_vmid_init(kvm);
+        r = kvm_riscv_gstage_vmid_init(kvm);
         if (r) {
-                kvm_riscv_stage2_free_pgd(kvm);
+                kvm_riscv_gstage_free_pgd(kvm);
                 return r;
         }
@@ -75,7 +75,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                 r = KVM_USER_MEM_SLOTS;
                 break;
         case KVM_CAP_VM_GPA_BITS:
-                r = kvm_riscv_stage2_gpa_bits();
+                r = kvm_riscv_gstage_gpa_bits();
                 break;
         default:
                 r = 0;
...
@@ -20,7 +20,7 @@ static unsigned long vmid_next;
 static unsigned long vmid_bits;
 static DEFINE_SPINLOCK(vmid_lock);
 
-void kvm_riscv_stage2_vmid_detect(void)
+void kvm_riscv_gstage_vmid_detect(void)
 {
         unsigned long old;
@@ -40,12 +40,12 @@ void kvm_riscv_stage2_vmid_detect(void)
         vmid_bits = 0;
 }
 
-unsigned long kvm_riscv_stage2_vmid_bits(void)
+unsigned long kvm_riscv_gstage_vmid_bits(void)
 {
         return vmid_bits;
 }
 
-int kvm_riscv_stage2_vmid_init(struct kvm *kvm)
+int kvm_riscv_gstage_vmid_init(struct kvm *kvm)
 {
         /* Mark the initial VMID and VMID version invalid */
         kvm->arch.vmid.vmid_version = 0;
@@ -54,7 +54,7 @@ int kvm_riscv_stage2_vmid_init(struct kvm *kvm)
         return 0;
 }
 
-bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid)
+bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid)
 {
         if (!vmid_bits)
                 return false;
@@ -63,13 +63,13 @@ bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid)
                    READ_ONCE(vmid_version));
 }
 
-void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu)
+void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
 {
         unsigned long i;
         struct kvm_vcpu *v;
         struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;
 
-        if (!kvm_riscv_stage2_vmid_ver_changed(vmid))
+        if (!kvm_riscv_gstage_vmid_ver_changed(vmid))
                 return;
 
         spin_lock(&vmid_lock);
@@ -78,7 +78,7 @@ void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu)
          * We need to re-check the vmid_version here to ensure that if
          * another vcpu already allocated a valid vmid for this vm.
          */
-        if (!kvm_riscv_stage2_vmid_ver_changed(vmid)) {
+        if (!kvm_riscv_gstage_vmid_ver_changed(vmid)) {
                 spin_unlock(&vmid_lock);
                 return;
         }
@@ -96,7 +96,7 @@ void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu)
          * instances is invalid and we have force VMID re-assignement
          * for all Guest instances. The Guest instances that were not
          * running will automatically pick-up new VMIDs because will
-         * call kvm_riscv_stage2_vmid_update() whenever they enter
+         * call kvm_riscv_gstage_vmid_update() whenever they enter
          * in-kernel run loop. For Guest instances that are already
          * running, we force VM exits on all host CPUs using IPI and
          * flush all Guest TLBs.
@@ -112,7 +112,7 @@ void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu)
         spin_unlock(&vmid_lock);
 
-        /* Request stage2 page table update for all VCPUs */
+        /* Request G-stage page table update for all VCPUs */
         kvm_for_each_vcpu(i, v, vcpu->kvm)
                 kvm_make_request(KVM_REQ_UPDATE_HGATP, v);
 }