commit 4e71cad3
authored Feb 14, 2022 by Paolo Bonzini
Merge remote-tracking branch 'kvm/master' into HEAD

Merge bugfix patches from Linux 5.17-rc.

parents 48ebd0cf 710c4765
Showing 7 changed files with 76 additions and 61 deletions
arch/arm64/kvm/vgic/vgic-mmio.c        +2  -0
arch/x86/kvm/pmu.c                     +4  -3
arch/x86/kvm/svm/avic.c                +23 -48
arch/x86/kvm/svm/svm.c                 +41 -7
arch/x86/kvm/svm/svm.h                 +3  -1
arch/x86/kvm/x86.c                     +3  -1
tools/testing/selftests/kvm/Makefile   +0  -1
arch/arm64/kvm/vgic/vgic-mmio.c

@@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 						    IRQCHIP_STATE_PENDING,
 						    &val);
 			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+		} else if (vgic_irq_is_mapped_level(irq)) {
+			val = vgic_get_phys_line_level(irq);
 		} else {
 			val = irq_is_pending(irq);
 		}
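The new branch makes vgic_mmio_read_pending() report the pending state of a mapped level interrupt from the physical line rather than from KVM's software flag, which can be stale. A standalone sketch of the resulting three-way resolution, using hypothetical stub fields in place of the kernel helpers named in the hunk:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the vgic types and helpers; only the
	 * control flow mirrors the vgic_mmio_read_pending() hunk above. */
	struct virq {
		bool hw_sgi;        /* HW-backed SGI: state lives in the host irqchip */
		bool mapped_level;  /* level IRQ mapped to a physical line */
		bool phys_line;     /* what the hardware reports as pending */
		bool soft_pending;  /* KVM's software pending flag */
	};

	static bool read_pending(const struct virq *irq)
	{
		if (irq->hw_sgi)
			return irq->phys_line;    /* irq_get_irqchip_state(..., IRQCHIP_STATE_PENDING, ...) */
		else if (irq->mapped_level)
			return irq->phys_line;    /* new branch: vgic_get_phys_line_level() */
		else
			return irq->soft_pending; /* irq_is_pending() */
	}

	int main(void)
	{
		/* A mapped level IRQ whose physical line is high but whose
		 * software flag is stale: the new branch reports it pending. */
		struct virq irq = { .mapped_level = true, .phys_line = true };
		printf("pending = %d\n", read_pending(&irq));
		return 0;
	}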
arch/x86/kvm/pmu.c

@@ -95,7 +95,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-				  unsigned config, bool exclude_user,
+				  u64 config, bool exclude_user,
 				  bool exclude_kernel, bool intr,
 				  bool in_tx, bool in_tx_cp)
 {
@@ -181,7 +181,8 @@ static int cmp_u64(const void *a, const void *b)
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
-	unsigned config, type = PERF_TYPE_RAW;
+	u64 config;
+	u32 type = PERF_TYPE_RAW;
 	struct kvm *kvm = pmc->vcpu->kvm;
 	struct kvm_pmu_event_filter *filter;
 	bool allow_event = true;
@@ -220,7 +221,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	}
 
 	if (type == PERF_TYPE_RAW)
-		config = eventsel & X86_RAW_EVENT_MASK;
+		config = eventsel & AMD64_RAW_EVENT_MASK;
 
 	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
 		return;
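Both pmu.c changes fix the same class of bug: AMD's raw event encoding does not fit in 32 bits, because bits [11:8] of the event select live in bits [35:32] of PerfEvtSeln. Storing config in an unsigned, or masking with the Intel-oriented X86_RAW_EVENT_MASK, therefore silently truncates the event. A minimal demonstration with simplified stand-in masks (the kernel's real masks in arch/x86/include/asm/perf_event.h also carry umask, edge, inv, and cmask bits):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified masks for illustration only: the point is that AMD's
	 * event select spans bits [7:0] *and* [35:32] of PerfEvtSeln. */
	#define EVENTSEL_EVENT_LO  0x00000000000000ffULL
	#define EVENTSEL_EVENT_HI  0x0000000f00000000ULL
	#define X86_RAW_EVENT_MASK_DEMO   (EVENTSEL_EVENT_LO)
	#define AMD64_RAW_EVENT_MASK_DEMO (EVENTSEL_EVENT_LO | EVENTSEL_EVENT_HI)

	int main(void)
	{
		/* An AMD event that needs the extended select bits: for event
		 * 0x130, bits 8..11 of the select are encoded at [35:32]. */
		uint64_t eventsel = ((0x130ULL & 0xf00) << 24) | (0x130ULL & 0xff);

		unsigned old_config = eventsel & X86_RAW_EVENT_MASK_DEMO; /* high bits lost */
		uint64_t new_config = eventsel & AMD64_RAW_EVENT_MASK_DEMO;

		printf("old: %#x (event truncated to 0x30)\n", old_config);
		printf("new: %#llx (full 12-bit event select)\n",
		       (unsigned long long)new_config);
		return 0;
	}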
arch/x86/kvm/svm/avic.c

@@ -269,6 +269,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+void avic_ring_doorbell(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Note, the vCPU could get migrated to a different pCPU at any point,
+	 * which could result in signalling the wrong/previous pCPU.  But if
+	 * that happens the vCPU is guaranteed to do a VMRUN (after being
+	 * migrated) and thus will process pending interrupts, i.e. a doorbell
+	 * is not needed (and the spurious one is harmless).
+	 */
+	int cpu = READ_ONCE(vcpu->cpu);
+
+	if (cpu != get_cpu())
+		wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
+	put_cpu();
+}
+
 static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
 				   u32 icrl, u32 icrh)
 {
@@ -284,8 +300,13 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
 					GET_APIC_DEST_FIELD(icrh),
-					icrl & APIC_DEST_MASK))
-			kvm_vcpu_wake_up(vcpu);
+					icrl & APIC_DEST_MASK)) {
+			vcpu->arch.apic->irr_pending = true;
+			svm_complete_interrupt_delivery(vcpu,
+							icrl & APIC_MODE_MASK,
+							icrl & APIC_INT_LEVELTRIG,
+							icrl & APIC_VECTOR_MASK);
+		}
 	}
 }
 
@@ -647,52 +668,6 @@ void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 	return;
 }
 
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
-{
-	if (!vcpu->arch.apicv_active)
-		return -1;
-
-	kvm_lapic_set_irr(vec, vcpu->arch.apic);
-
-	/*
-	 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
-	 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
-	 * the read of guest_mode, which guarantees that either VMRUN will see
-	 * and process the new vIRR entry, or that the below code will signal
-	 * the doorbell if the vCPU is already running in the guest.
-	 */
-	smp_mb__after_atomic();
-
-	/*
-	 * Signal the doorbell to tell hardware to inject the IRQ if the vCPU
-	 * is in the guest.  If the vCPU is not in the guest, hardware will
-	 * automatically process AVIC interrupts at VMRUN.
-	 */
-	if (vcpu->mode == IN_GUEST_MODE) {
-		int cpu = READ_ONCE(vcpu->cpu);
-
-		/*
-		 * Note, the vCPU could get migrated to a different pCPU at any
-		 * point, which could result in signalling the wrong/previous
-		 * pCPU.  But if that happens the vCPU is guaranteed to do a
-		 * VMRUN (after being migrated) and thus will process pending
-		 * interrupts, i.e. a doorbell is not needed (and the spurious
-		 * one is harmless).
-		 */
-		if (cpu != get_cpu())
-			wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL,
-			       kvm_cpu_get_apicid(cpu));
-		put_cpu();
-	} else {
-		/*
-		 * Wake the vCPU if it was blocking.  KVM will then detect the
-		 * pending IRQ when checking if the vCPU has a wake event.
-		 */
-		kvm_vcpu_wake_up(vcpu);
-	}
-
-	return 0;
-}
-
 bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
 	return false;
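avic_ring_doorbell(), now a helper shared with svm.c, only writes the doorbell MSR when the target vCPU runs on a different pCPU; in the kernel, get_cpu()/put_cpu() also disable and re-enable preemption so the comparison cannot be invalidated mid-check. A userspace sketch of that decision, with hypothetical stubs in place of the kernel primitives and an illustrative MSR value:

	#include <stdint.h>
	#include <stdio.h>

	#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b /* value shown for illustration */

	/* Hypothetical stand-ins for the kernel primitives. */
	static int this_cpu;
	static int get_cpu(void)  { return this_cpu; } /* also disables preemption in the kernel */
	static void put_cpu(void) { }                  /* re-enables preemption */
	static void wrmsrl(uint32_t msr, uint64_t val)
	{
		printf("wrmsr %#x <- apicid %llu\n", msr, (unsigned long long)val);
	}
	static uint64_t kvm_cpu_get_apicid(int cpu) { return (uint64_t)cpu; }

	static void ring_doorbell_demo(int target_vcpu_cpu)
	{
		/* A stale cpu id is harmless: a migrated vCPU re-enters the
		 * guest via VMRUN, which processes pending AVIC interrupts. */
		int cpu = target_vcpu_cpu;    /* READ_ONCE(vcpu->cpu) */

		if (cpu != get_cpu())
			wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
		put_cpu();
	}

	int main(void)
	{
		this_cpu = 0;
		ring_doorbell_demo(0); /* same pCPU: nothing to do */
		ring_doorbell_demo(2); /* remote pCPU: ring its doorbell */
		return 0;
	}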
arch/x86/kvm/svm/svm.c

@@ -3311,21 +3311,55 @@ static void svm_inject_irq(struct kvm_vcpu *vcpu)
 		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
 
-static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
-				  int trig_mode, int vector)
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+				     int trig_mode, int vector)
 {
-	struct kvm_vcpu *vcpu = apic->vcpu;
+	/*
+	 * vcpu->arch.apicv_active must be read after vcpu->mode.
+	 * Pairs with smp_store_release in vcpu_enter_guest.
+	 */
+	bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
 
-	if (svm_deliver_avic_intr(vcpu, vector)) {
-		kvm_lapic_set_irr(vector, apic);
+	if (!READ_ONCE(vcpu->arch.apicv_active)) {
+		/* Process the interrupt via inject_pending_event */
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		kvm_vcpu_kick(vcpu);
+		return;
+	}
+
+	trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+	if (in_guest_mode) {
+		/*
+		 * Signal the doorbell to tell hardware to inject the IRQ.  If
+		 * the vCPU exits the guest before the doorbell chimes, hardware
+		 * will automatically process AVIC interrupts at the next VMRUN.
+		 */
+		avic_ring_doorbell(vcpu);
 	} else {
-		trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
-					   trig_mode, vector);
+		/*
+		 * Wake the vCPU if it was blocking.  KVM will then detect the
+		 * pending IRQ when checking if the vCPU has a wake event.
+		 */
+		kvm_vcpu_wake_up(vcpu);
 	}
 }
 
+static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+				  int trig_mode, int vector)
+{
+	kvm_lapic_set_irr(vector, apic);
+
+	/*
+	 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
+	 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
+	 * the read of guest_mode.  This guarantees that either VMRUN will see
+	 * and process the new vIRR entry, or that svm_complete_interrupt_delivery
+	 * will signal the doorbell if the CPU has already entered the guest.
+	 */
+	smp_mb__after_atomic();
+	svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
+}
+
 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
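The comments in this hunk describe a store-buffering handshake: the sender writes the vIRR and then reads vcpu->mode, while the vCPU writes vcpu->mode and then (via VMRUN) scans the vIRR. With a full barrier between each side's store and load, at most one side can miss the other's write, so either VMRUN consumes the new vIRR entry or the sender observes IN_GUEST_MODE and rings the doorbell. A userspace model in C11 atomics (names and structure are illustrative, and the apicv_active check is omitted for brevity):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <threads.h>

	enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

	static atomic_int  vcpu_mode = OUTSIDE_GUEST_MODE;
	static atomic_bool virr_bit;      /* vIRR entry set by kvm_lapic_set_irr() */
	static atomic_bool doorbell;      /* stands in for avic_ring_doorbell() */
	static atomic_bool vmrun_saw_irq; /* what VMRUN observed in the vIRR */

	/* Sender: svm_deliver_interrupt() -> svm_complete_interrupt_delivery(). */
	static int sender(void *arg)
	{
		(void)arg;
		atomic_store_explicit(&virr_bit, true, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
		if (atomic_load_explicit(&vcpu_mode, memory_order_acquire) == IN_GUEST_MODE)
			atomic_store(&doorbell, true);     /* hardware injects the IRQ */
		return 0;
	}

	/* vCPU: vcpu_enter_guest() publishing IN_GUEST_MODE, then VMRUN. */
	static int vcpu(void *arg)
	{
		(void)arg;
		atomic_store_explicit(&vcpu_mode, IN_GUEST_MODE, memory_order_release);
		atomic_thread_fence(memory_order_seq_cst); /* barrier after setting mode */
		atomic_store(&vmrun_saw_irq, atomic_load(&virr_bit)); /* VMRUN scans vIRR */
		return 0;
	}

	int main(void)
	{
		thrd_t a, b;
		thrd_create(&a, sender, NULL);
		thrd_create(&b, vcpu, NULL);
		thrd_join(a, NULL);
		thrd_join(b, NULL);
		/* The invariant the barriers buy: the IRQ is never lost. */
		printf("IRQ delivered: %s\n",
		       atomic_load(&vmrun_saw_irq) ? "seen by VMRUN" :
		       atomic_load(&doorbell)      ? "via doorbell"  : "LOST (bug)");
		return 0;
	}

However the two threads interleave, the "LOST" outcome is forbidden: it would require both the sender to miss IN_GUEST_MODE and VMRUN to miss the vIRR bit, which the paired barriers rule out.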
arch/x86/kvm/svm/svm.h

@@ -499,6 +499,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
 			  int read, int write);
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+				     int trig_mode, int vec);
 
 /* nested.c */
@@ -582,12 +584,12 @@ bool svm_check_apicv_inhibit_reasons(ulong bit);
 void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
 void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
 bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
 int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 		       uint32_t guest_irq, bool set);
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void avic_ring_doorbell(struct kvm_vcpu *vcpu);
 
 /* sev.c */
arch/x86/kvm/x86.c

@@ -9994,7 +9994,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * result in virtual interrupt delivery.
 	 */
 	local_irq_disable();
-	vcpu->mode = IN_GUEST_MODE;
+
+	/* Store vcpu->apicv_active before vcpu->mode.  */
+	smp_store_release(&vcpu->mode, IN_GUEST_MODE);
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
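This is the other half of the pairing modeled above: vcpu_enter_guest() now publishes IN_GUEST_MODE with release semantics, so a remote sender whose smp_load_acquire(&vcpu->mode) observes IN_GUEST_MODE is also guaranteed to observe the apicv_active value written before guest entry, closing the window in which an interrupt could be delivered to an AVIC-inhibited vCPU without queueing a KVM_REQ_EVENT.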
tools/testing/selftests/kvm/Makefile

@@ -83,7 +83,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
-TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
 TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
 TEST_GEN_PROGS_x86_64 += x86_64/amx_test
 TEST_GEN_PROGS_x86_64 += access_tracking_perf_test