Commit 657f1d86, authored Apr 02, 2021 by Paolo Bonzini

Merge branch 'kvm-tdp-fix-rcu' into HEAD

parents 57e45ea4 08889894
Showing 4 changed files with 41 additions and 38 deletions (+41 -38)
arch/x86/kvm/mmu/mmu_internal.h   +5  -0
arch/x86/kvm/mmu/tdp_iter.c       +18 -12
arch/x86/kvm/mmu/tdp_iter.h       +3  -1
arch/x86/kvm/mmu/tdp_mmu.c        +15 -25
arch/x86/kvm/mmu/mmu_internal.h
@@ -88,6 +88,11 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
        return to_shadow_page(__pa(sptep));
 }
 
+static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
+{
+       return sp->role.smm ? 1 : 0;
+}
+
 static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 {
        /*
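This helper moves here from tdp_mmu.c (see the last file in this commit) so that tdp_iter.c can derive the address space ID of a root when a walk starts. As a minimal standalone illustration of the mapping it encodes — the types below are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Illustrative stand-in for struct kvm_mmu_page; only the smm bit matters. */
struct mmu_page {
        struct {
                unsigned smm : 1;       /* page belongs to the SMM address space */
        } role;
};

/* Mirrors kvm_mmu_page_as_id(): role.smm selects address space 1. */
static int page_as_id(const struct mmu_page *sp)
{
        return sp->role.smm ? 1 : 0;
}

int main(void)
{
        /* x86 KVM keeps two memslot sets; the as_id indexes between them. */
        const char *address_spaces[2] = { "regular", "smm" };
        struct mmu_page sp = { .role = { .smm = 1 } };

        printf("page belongs to the %s address space\n",
               address_spaces[page_as_id(&sp)]);
        return 0;
}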
arch/x86/kvm/mmu/tdp_iter.c
@@ -20,6 +20,21 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
        return gfn & -KVM_PAGES_PER_HPAGE(level);
 }
 
+/*
+ * Return the TDP iterator to the root PT and allow it to continue its
+ * traversal over the paging structure from there.
+ */
+void tdp_iter_restart(struct tdp_iter *iter)
+{
+       iter->yielded_gfn = iter->next_last_level_gfn;
+       iter->level = iter->root_level;
+
+       iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
+       tdp_iter_refresh_sptep(iter);
+
+       iter->valid = true;
+}
+
 /*
  * Sets a TDP iterator to walk a pre-order traversal of the paging structure
  * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
@@ -31,16 +46,12 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
        WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
 
        iter->next_last_level_gfn = next_last_level_gfn;
-       iter->yielded_gfn = iter->next_last_level_gfn;
        iter->root_level = root_level;
        iter->min_level = min_level;
-       iter->level = root_level;
-       iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt;
+       iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt;
+       iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt));
 
-       iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
-       tdp_iter_refresh_sptep(iter);
-
-       iter->valid = true;
+       tdp_iter_restart(iter);
 }
 
 /*
@@ -159,8 +170,3 @@ void tdp_iter_next(struct tdp_iter *iter)
                iter->valid = false;
 }
-
-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter)
-{
-       return iter->pt_path[iter->root_level - 1];
-}
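The net effect in this file: tdp_iter_start() now records only the fixed walk parameters and delegates all re-derivable cursor state to tdp_iter_restart(), so tdp_mmu_iter_cond_resched() (in tdp_mmu.c below) can resume a walk after yielding the MMU lock without repeating the setup. A standalone model of that split, with illustrative names rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct tdp_iter, reduced to the fields the split touches. */
struct walk_iter {
        /* fixed by start() for the lifetime of the walk */
        unsigned long target;           /* ~ next_last_level_gfn */
        int root_level;
        /* re-derived by restart() */
        unsigned long pos;              /* ~ gfn, rounded to the current level */
        int level;
        bool valid;
};

/* ~ round_gfn_for_level(): round down to a level-aligned boundary,
 * assuming 9 bits of page-table index per level as on x86-64. */
static unsigned long round_for_level(unsigned long pos, int level)
{
        return pos & ~((1UL << (9 * (level - 1))) - 1);
}

/* ~ tdp_iter_restart(): rebuild cursor state from the fixed parameters. */
static void walk_iter_restart(struct walk_iter *it)
{
        it->level = it->root_level;
        it->pos = round_for_level(it->target, it->level);
        it->valid = true;
}

/* ~ tdp_iter_start(): record the fixed parameters once, then restart. */
static void walk_iter_start(struct walk_iter *it, int root_level,
                            unsigned long target)
{
        it->root_level = root_level;
        it->target = target;
        walk_iter_restart(it);
}

int main(void)
{
        struct walk_iter it;

        walk_iter_start(&it, 4, 0x12345);
        printf("level %d, pos %#lx\n", it.level, it.pos);

        /* e.g. after dropping and re-taking a lock: no re-setup needed */
        walk_iter_restart(&it);
        printf("level %d, pos %#lx\n", it.level, it.pos);
        return 0;
}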
arch/x86/kvm/mmu/tdp_iter.h
@@ -36,6 +36,8 @@ struct tdp_iter {
        int min_level;
        /* The iterator's current level within the paging structure */
        int level;
+       /* The address space ID, i.e. SMM vs. regular. */
+       int as_id;
        /* A snapshot of the value at sptep */
        u64 old_spte;
        /*
@@ -62,6 +64,6 @@ tdp_ptep_t spte_to_child_pt(u64 pte, int level);
 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
                    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter);
+void tdp_iter_restart(struct tdp_iter *iter);
 
 #endif /* __KVM_X86_MMU_TDP_ITER_H */
arch/x86/kvm/mmu/tdp_mmu.c
@@ -190,11 +190,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level,
                                bool shared);
 
-static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
-{
-       return sp->role.smm ? 1 : 0;
-}
-
 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
 {
        if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
@@ -287,11 +282,16 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
  *
  * Given a page table that has been removed from the TDP paging structure,
  * iterates through the page table to clear SPTEs and free child page tables.
+ *
+ * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
+ * protection. Since this thread removed it from the paging structure,
+ * this thread will be responsible for ensuring the page is freed. Hence the
+ * early rcu_dereferences in the function.
  */
-static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
+static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
                                        bool shared)
 {
-       struct kvm_mmu_page *sp = sptep_to_sp(pt);
+       struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
        int level = sp->role.level;
        gfn_t base_gfn = sp->gfn;
        u64 old_child_spte;
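Changing the parameter from u64 *pt to tdp_ptep_t pt is the point of the 'kvm-tdp-fix-rcu' branch: tdp_ptep_t is an __rcu-annotated pointer, so sparse flags any plain dereference and each access must go through rcu_dereference(), as in the conversion above and the loop in the next hunk. A userspace sketch of the same discipline, with C11 atomics standing in for RCU and all names illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t spte_t;

/* ~ tdp_ptep_t: a pointer that must never be dereferenced directly. */
typedef _Atomic(spte_t *) pt_ptep_t;

/* ~ rcu_dereference(): the one sanctioned way to read the pointer. */
static spte_t *ptep_deref(pt_ptep_t *ptep)
{
        return atomic_load_explicit(ptep, memory_order_consume);
}

int main(void)
{
        static spte_t page_table[512];
        pt_ptep_t pt;

        page_table[3] = 0xdeadbeef;
        atomic_store_explicit(&pt, page_table, memory_order_release);

        /* like the loop body in the next hunk: dereference once, then index */
        spte_t *sptep = ptep_deref(&pt) + 3;
        printf("spte[3] = %#llx\n", (unsigned long long)*sptep);
        return 0;
}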
@@ -304,7 +304,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
        tdp_mmu_unlink_page(kvm, sp, shared);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-               sptep = pt + i;
+               sptep = rcu_dereference(pt) + i;
                gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
 
                if (shared) {
@@ -478,10 +478,6 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
                                           struct tdp_iter *iter,
                                           u64 new_spte)
 {
-       u64 *root_pt = tdp_iter_root_pt(iter);
-       struct kvm_mmu_page *root = sptep_to_sp(root_pt);
-       int as_id = kvm_mmu_page_as_id(root);
-
        lockdep_assert_held_read(&kvm->mmu_lock);
 
        /*
@@ -495,8 +491,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
                      new_spte) != iter->old_spte)
                return false;
 
-       handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
-                           iter->level, true);
+       handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+                           new_spte, iter->level, true);
 
        return true;
 }
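For context, the call whose tail appears at the top of this hunk is a cmpxchg64() on the SPTE: the update only lands if the entry still holds iter->old_spte, which is what lets this path run with the MMU lock held for read. A standalone model of that compare-and-exchange step using C11 atomics (illustrative names, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t spte_t;

/* ~ tdp_mmu_set_spte_atomic(): install new_spte only if the entry still
 * holds old_spte; a concurrent writer makes this fail and the caller
 * retries the walk. */
static bool set_spte_atomic(_Atomic spte_t *sptep, spte_t old_spte,
                            spte_t new_spte)
{
        /* cmpxchg64() returns the old value; the C11 variant returns
         * success/failure directly. */
        return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
}

int main(void)
{
        _Atomic spte_t spte = 0x1000;

        /* succeeds: the snapshot matches the live value */
        printf("first:  %s\n", set_spte_atomic(&spte, 0x1000, 0x2000)
               ? "ok" : "raced");
        /* fails: the snapshot (0x1000) is stale, spte is now 0x2000 */
        printf("second: %s\n", set_spte_atomic(&spte, 0x1000, 0x3000)
               ? "ok" : "raced");
        return 0;
}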
@@ -524,7 +520,7 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
         * here since the SPTE is going from non-present
         * to non-present.
         */
-       WRITE_ONCE(*iter->sptep, 0);
+       WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
 
        return true;
 }
@@ -550,10 +546,6 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                      u64 new_spte, bool record_acc_track,
                                      bool record_dirty_log)
 {
-       tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
-       struct kvm_mmu_page *root = sptep_to_sp(root_pt);
-       int as_id = kvm_mmu_page_as_id(root);
-
        lockdep_assert_held_write(&kvm->mmu_lock);
 
        /*
@@ -567,13 +559,13 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
        WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
 
-       __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
-                             iter->level, false);
+       __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+                             new_spte, iter->level, false);
        if (record_acc_track)
                handle_changed_spte_acc_track(iter->old_spte, new_spte,
                                              iter->level);
        if (record_dirty_log)
-               handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
+               handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
                                              iter->old_spte, new_spte,
                                              iter->level);
 }
@@ -645,9 +637,7 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
                WARN_ON(iter->gfn > iter->next_last_level_gfn);
 
-               tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
-                              iter->root_level, iter->min_level,
-                              iter->next_last_level_gfn);
+               tdp_iter_restart(iter);
 
                return true;
        }