Commit 57e45ea4 authored Apr 02, 2021 by Paolo Bonzini

Merge branch 'kvm-tdp-fix-flushes' into HEAD

Parents: cb9b6a1b 33a31641
Showing 3 changed files with 42 additions and 17 deletions:

  arch/x86/kvm/mmu/mmu.c       +5  -4
  arch/x86/kvm/mmu/tdp_mmu.c  +14 -12
  arch/x86/kvm/mmu/tdp_mmu.h  +23  -1
arch/x86/kvm/mmu/mmu.c

@@ -5939,6 +5939,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 	struct kvm_mmu_page *sp;
 	unsigned int ratio;
 	LIST_HEAD(invalid_list);
+	bool flush = false;
 	ulong to_zap;

 	rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5960,19 +5961,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 				      lpage_disallowed_link);
 		WARN_ON_ONCE(!sp->lpage_disallowed);
 		if (is_tdp_mmu_page(sp)) {
-			kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
-				sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+			flush = kvm_tdp_mmu_zap_sp(kvm, sp);
 		} else {
 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 			WARN_ON_ONCE(sp->lpage_disallowed);
 		}

 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_commit_zap_page(kvm, &invalid_list);
+			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 			cond_resched_rwlock_write(&kvm->mmu_lock);
+			flush = false;
 		}
 	}
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

 	write_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, rcu_idx);
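In the hunk above, kvm_recover_nx_lpages() now tracks whether the TDP MMU zap left a TLB flush pending: kvm_tdp_mmu_zap_sp() returns that state into a local flush flag, kvm_mmu_remote_flush_or_zap() performs the flush before the loop yields mmu_lock, and the flag is cleared after yielding. Below is a minimal standalone sketch of that flush-before-yield pattern, not kernel code; zap_one(), should_yield() and flush_tlbs() are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: pretend every other zap removes a mapping and so
 * leaves a flush pending. */
static bool zap_one(int i)
{
	printf("zap %d\n", i);
	return (i % 2) == 0;
}

/* Hypothetical stand-in: pretend the lock becomes contended every third
 * iteration. */
static bool should_yield(int i)
{
	return (i % 3) == 2;
}

static void flush_tlbs(void)
{
	printf("  TLB flush\n");
}

int main(void)
{
	bool flush = false;
	int i;

	for (i = 0; i < 10; i++) {
		/* A zap may or may not leave a flush pending. */
		flush |= zap_one(i);

		if (should_yield(i)) {
			/*
			 * Perform any pending flush *before* dropping the
			 * lock, then clear the flag: once flushed, nothing
			 * is pending any more.
			 */
			if (flush)
				flush_tlbs();
			flush = false;
			/* cond_resched()/lock break would go here */
		}
	}

	/* Final flush for anything still pending after the loop. */
	if (flush)
		flush_tlbs();
	return 0;
}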
arch/x86/kvm/mmu/tdp_mmu.c

@@ -86,7 +86,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)

 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield);
+			  gfn_t start, gfn_t end, bool can_yield, bool flush);

 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -99,7 +99,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)

 	list_del(&root->link);

-	zap_gfn_range(kvm, root, 0, max_gfn, false);
+	zap_gfn_range(kvm, root, 0, max_gfn, false, false);

 	free_page((unsigned long)root->spt);
 	kmem_cache_free(mmu_page_header_cache, root);
@@ -664,20 +664,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup. Note, in some use cases a flush may be
+ * required by prior actions. Ensure the pending flush is performed prior to
+ * yielding.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield)
+			  gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
 	struct tdp_iter iter;
-	bool flush_needed = false;

 	rcu_read_lock();

 	tdp_root_for_each_pte(iter, root, start, end) {
 		if (can_yield &&
-		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
-			flush_needed = false;
+		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+			flush = false;
 			continue;
 		}

@@ -695,11 +696,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;

 		tdp_mmu_set_spte(kvm, &iter, 0);
-		flush_needed = true;
+		flush = true;
 	}

 	rcu_read_unlock();
-	return flush_needed;
+	return flush;
 }

 /*
@@ -708,13 +709,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+				 bool can_yield)
 {
 	struct kvm_mmu_page *root;
 	bool flush = false;

 	for_each_tdp_mmu_root_yield_safe(kvm, root)
-		flush |= zap_gfn_range(kvm, root, start, end, true);
+		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);

 	return flush;
 }
@@ -931,7 +933,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
 				     struct kvm_mmu_page *root, gfn_t start,
 				     gfn_t end, unsigned long unused)
 {
-	return zap_gfn_range(kvm, root, start, end, false);
+	return zap_gfn_range(kvm, root, start, end, false, false);
 }

 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
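The zap_gfn_range() change above replaces the function-local flush_needed with a flush parameter that the caller threads in and gets back, so a flush owed by earlier work (for example a previous root walked by __kvm_tdp_mmu_zap_gfn_range()) is performed before the walker yields. Below is a minimal standalone sketch of that chaining, not kernel code; process_root() and do_flush() are hypothetical stand-ins with a simulated yield point.

#include <stdbool.h>
#include <stdio.h>

static void do_flush(void)
{
	printf("  flush\n");
}

/*
 * Returns true if work done here, or pending-flush state inherited from the
 * caller, still needs a flush when we return. Any pending flush is honoured
 * before the (simulated) yield point.
 */
static bool process_root(int root, bool flush)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (i == 1) {		/* simulated yield point */
			if (flush)
				do_flush();
			flush = false;
			/* the walker would drop and re-take the lock here */
		}
		if (root == 0)		/* pretend only root 0 zaps anything */
			flush = true;
	}
	return flush;
}

int main(void)
{
	bool flush = false;
	int root;

	/*
	 * Chaining flush through the callee (flush = f(root, flush)) lets the
	 * callee see and perform a flush left over from a previous root; the
	 * old form, flush |= f(root), kept that state invisible to the callee.
	 */
	for (root = 0; root < 3; root++)
		flush = process_root(root, flush);

	if (flush)
		do_flush();
	return 0;
}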
arch/x86/kvm/mmu/tdp_mmu.h

@@ -8,7 +8,29 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);

-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+				 bool can_yield);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
+					     gfn_t end)
+{
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+
+	/*
+	 * Don't allow yielding, as the caller may have a flush pending. Note,
+	 * if mmu_lock is held for write, zapping will never yield in this case,
+	 * but explicitly disallow it for safety. The TDP MMU does not yield
+	 * until it has made forward progress (steps sideways), and when zapping
+	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
+	 * requirement), its "step sideways" will always step beyond the bounds
+	 * of the shadow page's gfn range and stop iterating before yielding.
+	 */
+	lockdep_assert_held_write(&kvm->mmu_lock);
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+}
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);

 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
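The header change above splits the API into a parameterised worker plus two convenience wrappers: __kvm_tdp_mmu_zap_gfn_range() takes can_yield, kvm_tdp_mmu_zap_gfn_range() keeps the old yielding behaviour, and kvm_tdp_mmu_zap_sp() forbids yielding because its caller may still have a flush pending. Below is a minimal standalone sketch of that shape, not kernel code; __zap_range(), zap_range() and zap_single() are hypothetical stand-ins that only mirror the kernel's double-underscore naming convention.

#include <stdbool.h>
#include <stdio.h>

/* Parameterised worker: callers choose whether yielding is allowed. */
static bool __zap_range(unsigned long start, unsigned long end, bool can_yield)
{
	printf("zap [%lu, %lu) can_yield=%d\n", start, end, can_yield);
	return end > start;	/* pretend something was zapped, flush needed */
}

/* General-purpose wrapper: arbitrary ranges, yielding allowed. */
static inline bool zap_range(unsigned long start, unsigned long end)
{
	return __zap_range(start, end, true);
}

/*
 * Single-page wrapper: the work is bounded, and the caller may already have
 * a flush pending, so yielding is explicitly disallowed.
 */
static inline bool zap_single(unsigned long pfn)
{
	return __zap_range(pfn, pfn + 1, false);
}

int main(void)
{
	bool flush;

	flush = zap_range(0, 512);
	flush |= zap_single(42);
	if (flush)
		printf("flush\n");
	return 0;
}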