Commit 8b9cf98c authored by Rusty Russell, committed by Avi Kivity

KVM: VMX: pass vcpu_vmx internally

container_of is wonderful, but not casting at all is better.  This
patch changes vmx.c's internal functions to pass "struct vcpu_vmx"
directly, instead of passing "struct kvm_vcpu" and converting it
back with container_of.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 9bd01506
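
For readers unfamiliar with the pattern being removed, here is a minimal sketch of the relationship between the two structures (field list abbreviated; the real struct vcpu_vmx in vmx.c also carries the guest/host MSR arrays, host state, and more):

/* struct vcpu_vmx embeds the generic vcpu as its first member,
 * so the two pointers are interconvertible without casts. */
struct vcpu_vmx {
	struct kvm_vcpu  vcpu;      /* generic KVM state */
	struct vmcs     *vmcs;      /* VMX-specific state follows */
	int              launched;
	/* ... MSR arrays, host state, etc. ... */
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	/* Recover the containing vcpu_vmx from its embedded member. */
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

Before this patch, each internal helper received the embedded &vmx->vcpu pointer and immediately called to_vmx() to get back the structure it actually wanted. After it, only the functions exposed through the generic vcpu interface perform the conversion, exactly once, at the boundary; the diff below shows that change function by function.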
@@ -140,9 +140,8 @@ static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
 	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
 }
 
-static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int efer_offset = vmx->msr_offset_efer;
 
 	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
 		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
@@ -168,9 +167,8 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int i;
 
 	for (i = 0; i < vmx->nmsrs; ++i)
@@ -179,12 +177,11 @@ static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
 	return -1;
 }
 
-static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int i;
 
-	i = __find_msr_index(vcpu, msr);
+	i = __find_msr_index(vmx, msr);
 	if (i >= 0)
 		return &vmx->guest_msrs[i];
 	return NULL;
@@ -205,24 +202,24 @@ static void vmcs_clear(struct vmcs *vmcs)
 
 static void __vcpu_clear(void *arg)
 {
-	struct kvm_vcpu *vcpu = arg;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vcpu_vmx *vmx = arg;
 	int cpu = raw_smp_processor_id();
 
-	if (vcpu->cpu == cpu)
+	if (vmx->vcpu.cpu == cpu)
 		vmcs_clear(vmx->vmcs);
 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
-	rdtscll(vcpu->host_tsc);
+	rdtscll(vmx->vcpu.host_tsc);
 }
 
-static void vcpu_clear(struct kvm_vcpu *vcpu)
+static void vcpu_clear(struct vcpu_vmx *vmx)
 {
-	if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
-		smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
+	if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
+		smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
+					 vmx, 0, 1);
 	else
-		__vcpu_clear(vcpu);
-	to_vmx(vcpu)->launched = 0;
+		__vcpu_clear(vmx);
+	vmx->launched = 0;
 }
 
 static unsigned long vmcs_readl(unsigned long field)
@@ -332,23 +329,20 @@ static void reload_tss(void)
 #endif
 }
 
-static void load_transition_efer(struct kvm_vcpu *vcpu)
+static void load_transition_efer(struct vcpu_vmx *vmx)
 {
 	u64 trans_efer;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int efer_offset = vmx->msr_offset_efer;
 
 	trans_efer = vmx->host_msrs[efer_offset].data;
 	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
 	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
 	wrmsrl(MSR_EFER, trans_efer);
-	vcpu->stat.efer_reload++;
+	vmx->vcpu.stat.efer_reload++;
 }
 
-static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+static void vmx_save_host_state(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
 	if (vmx->host_state.loaded)
 		return;
@@ -383,19 +377,18 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
+	if (is_long_mode(&vmx->vcpu)) {
 		save_msrs(vmx->host_msrs +
 			  vmx->msr_offset_kernel_gs_base, 1);
 	}
 #endif
 	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-	if (msr_efer_need_save_restore(vcpu))
-		load_transition_efer(vcpu);
+	if (msr_efer_need_save_restore(vmx))
+		load_transition_efer(vmx);
 }
 
-static void vmx_load_host_state(struct kvm_vcpu *vcpu)
+static void vmx_load_host_state(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long flags;
 
 	if (!vmx->host_state.loaded)
@@ -420,7 +413,7 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 	}
 	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
-	if (msr_efer_need_save_restore(vcpu))
+	if (msr_efer_need_save_restore(vmx))
 		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
 }
 
@@ -435,7 +428,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	u64 tsc_this, delta;
 
 	if (vcpu->cpu != cpu)
-		vcpu_clear(vcpu);
+		vcpu_clear(vmx);
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
 		u8 error;
@@ -476,7 +469,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	vmx_load_host_state(vcpu);
+	vmx_load_host_state(to_vmx(vcpu));
 	kvm_put_guest_fpu(vcpu);
 }
 
@@ -502,7 +495,7 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
 {
-	vcpu_clear(vcpu);
+	vcpu_clear(to_vmx(vcpu));
 }
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -550,9 +543,8 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
-void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
+static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry tmp;
 
 	tmp = vmx->guest_msrs[to];
@@ -568,44 +560,43 @@ void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
  * mode, as fiddling with msrs is very expensive.
  */
-static void setup_msrs(struct kvm_vcpu *vcpu)
+static void setup_msrs(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int save_nmsrs;
 
 	save_nmsrs = 0;
 #ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
+	if (is_long_mode(&vmx->vcpu)) {
 		int index;
 
-		index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_LSTAR);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_LSTAR);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_CSTAR);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_CSTAR);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
+			move_msr_up(vmx, index, save_nmsrs++);
 		/*
 		 * MSR_K6_STAR is only needed on long mode guests, and only
 		 * if efer.sce is enabled.
 		 */
-		index = __find_msr_index(vcpu, MSR_K6_STAR);
-		if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
-			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_K6_STAR);
+		if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+			move_msr_up(vmx, index, save_nmsrs++);
 	}
 #endif
 	vmx->save_nmsrs = save_nmsrs;
 
 #ifdef CONFIG_X86_64
 	vmx->msr_offset_kernel_gs_base =
-		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
 #endif
-	vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
+	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
 }
 
 /*
@@ -672,7 +663,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 		data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	default:
-		msr = find_msr_entry(vcpu, msr_index);
+		msr = find_msr_entry(to_vmx(vcpu), msr_index);
 		if (msr) {
 			data = msr->data;
 			break;
@@ -700,7 +691,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_EFER:
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
 		if (vmx->host_state.loaded)
-			load_transition_efer(vcpu);
+			load_transition_efer(vmx);
 		break;
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
@@ -722,7 +713,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		guest_write_tsc(data);
 		break;
 	default:
-		msr = find_msr_entry(vcpu, msr_index);
+		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
 			msr->data = data;
 			if (vmx->host_state.loaded)
@@ -1116,7 +1107,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
 	vcpu->shadow_efer |= EFER_LMA;
 
-	find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
+	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
 	vmcs_write32(VM_ENTRY_CONTROLS,
 		     vmcs_read32(VM_ENTRY_CONTROLS)
 		     | VM_ENTRY_CONTROLS_IA32E_MASK);
@@ -1186,7 +1177,8 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
 	vcpu->shadow_efer = efer;
 	if (efer & EFER_LMA) {
@@ -1202,7 +1194,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		msr->data = efer & ~EFER_LME;
 	}
 
-	setup_msrs(vcpu);
+	setup_msrs(vmx);
 }
 
 #endif
@@ -1364,9 +1356,8 @@ static void seg_setup(int seg)
 /*
  * Sets up the vmcs for emulated real mode.
  */
-static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
+static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 host_sysenter_cs;
 	u32 junk;
 	unsigned long a;
@@ -1375,19 +1366,18 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	int ret = 0;
 	unsigned long kvm_vmx_return;
 
-	if (!init_rmode_tss(vcpu->kvm)) {
+	if (!init_rmode_tss(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	memset(vcpu->regs, 0, sizeof(vcpu->regs));
-	vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
-	vcpu->cr8 = 0;
-	vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
-	if (vcpu->vcpu_id == 0)
-		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+	vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+	vmx->vcpu.cr8 = 0;
+	vmx->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	if (vmx->vcpu.vcpu_id == 0)
+		vmx->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
 
-	fx_init(vcpu);
+	fx_init(&vmx->vcpu);
 
 	/*
 	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
@@ -1512,7 +1502,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 		++vmx->nmsrs;
 	}
 
-	setup_msrs(vcpu);
+	setup_msrs(vmx);
 
 	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
@@ -1529,14 +1519,14 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
-	vcpu->cr0 = 0x60000010;
-	vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
-	vmx_set_cr4(vcpu, 0);
+	vmx->vcpu.cr0 = 0x60000010;
+	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+	vmx_set_cr4(&vmx->vcpu, 0);
 #ifdef CONFIG_X86_64
-	vmx_set_efer(vcpu, 0);
+	vmx_set_efer(&vmx->vcpu, 0);
 #endif
-	vmx_fpu_activate(vcpu);
-	update_exception_bitmap(vcpu);
+	vmx_fpu_activate(&vmx->vcpu);
+	update_exception_bitmap(&vmx->vcpu);
 
 	return 0;
@@ -2129,7 +2119,7 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (!vcpu->mmio_read_completed)
 		do_interrupt_requests(vcpu, kvm_run);
 
-	vmx_save_host_state(vcpu);
+	vmx_save_host_state(vmx);
 	kvm_load_guest_fpu(vcpu);
 
 	/*
@@ -2352,7 +2342,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (vmx->vmcs) {
-		on_each_cpu(__vcpu_clear, vcpu, 0, 1);
+		on_each_cpu(__vcpu_clear, vmx, 0, 1);
 		free_vmcs(vmx->vmcs);
 		vmx->vmcs = NULL;
 	}
@@ -2400,7 +2390,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);
-	err = vmx_vcpu_setup(&vmx->vcpu);
+	err = vmx_vcpu_setup(vmx);
 	vmx_vcpu_put(&vmx->vcpu);
 	put_cpu();
 	if (err)
...
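
The convention the hunks converge on — convert once at the generic boundary, pass the derived type internally, and reach back through the embedded member with &vmx->vcpu when a generic helper is needed — can be demonstrated in miniature with a stand-alone program. All names below are hypothetical stand-ins for illustration, not code from vmx.c:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };		/* plays the role of kvm_vcpu */

struct derived {			/* plays the role of vcpu_vmx */
	struct base base;		/* embedded first, like vmx->vcpu */
	int extra;
};

/* Internal helper: takes the derived type directly, no conversion. */
static void helper(struct derived *d)
{
	printf("id=%d extra=%d\n", d->base.id, d->extra);
}

/* Boundary function: receives the embedded type, converts exactly once. */
static void boundary(struct base *b)
{
	helper(container_of(b, struct derived, base));
}

int main(void)
{
	struct derived d = { .base = { .id = 1 }, .extra = 2 };
	boundary(&d.base);	/* prints "id=1 extra=2" */
	return 0;
}

The payoff is the same as in the patch: the conversion happens at one well-defined point instead of being repeated at the top of every helper, and the helpers' signatures now state exactly which type they operate on.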