Commit abb9e0b8 authored Aug 22, 2008 by Avi Kivity
KVM: MMU: Convert the paging mode shadow walk to use the generic walker
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 140754bc
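For orientation, the sketch below illustrates the callback pattern this commit converts FNAME(fetch) to: a generic walk loop visits the shadow page-table levels and invokes a per-level entry() callback, and the callback recovers its caller-specific state with container_of(). This is a minimal, self-contained stand-in, not the kernel's code: generic_walk, walk_levels, my_walker, my_entry and stop_level are illustrative names standing in for kvm_shadow_walk, walk_shadow, shadow_walker and FNAME(shadow_walk_entry), and the real entry callback takes extra arguments (vcpu, the spte pointer) that are omitted here.

/*
 * Minimal sketch of the "generic walker" callback pattern (simplified
 * names, not the kernel's definitions).
 */
#include <stdio.h>
#include <stddef.h>

/* Recover the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic walker: knows nothing but the per-level callback. */
struct generic_walk {
	/* Return nonzero to stop the walk at this level. */
	int (*entry)(struct generic_walk *walk, unsigned long addr, int level);
};

/* The generic walk loop, standing in for walk_shadow(). */
static void walk_levels(struct generic_walk *walker, unsigned long addr,
			int top_level)
{
	int level;

	for (level = top_level; level >= 1; --level)
		if (walker->entry(walker, addr, level))
			break;
}

/* Caller-specific context, embedding the generic walker. */
struct my_walker {
	struct generic_walk walker;	/* recovered via container_of() */
	int stop_level;			/* illustrative per-caller state */
	int visited;
};

/* Per-level callback: the analogue of FNAME(shadow_walk_entry). */
static int my_entry(struct generic_walk *_gw, unsigned long addr, int level)
{
	struct my_walker *w = container_of(_gw, struct my_walker, walker);

	w->visited++;
	printf("visit level %d for addr 0x%lx\n", level, addr);
	return level == w->stop_level;	/* nonzero terminates the walk */
}

int main(void)
{
	struct my_walker w = {
		.walker = { .entry = my_entry },
		.stop_level = 1,
	};

	walk_levels(&w.walker, 0x1234000ul, 4);
	printf("visited %d levels\n", w.visited);
	return 0;
}

The point is only that the derived struct embeds the generic walker, so per-caller state travels with the callback; in the diff below, struct shadow_walker plays that role for the arguments that FNAME(fetch) used to thread through its hand-rolled loop.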
Showing 1 changed file with 86 additions and 72 deletions

arch/x86/kvm/paging_tmpl.h  +86 -72
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -25,6 +25,7 @@
 #if PTTYPE == 64
 	#define pt_element_t u64
 	#define guest_walker guest_walker64
+	#define shadow_walker shadow_walker64
 	#define FNAME(name) paging##64_##name
 	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
 	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
@@ -41,6 +42,7 @@
 #elif PTTYPE == 32
 	#define pt_element_t u32
 	#define guest_walker guest_walker32
+	#define shadow_walker shadow_walker32
 	#define FNAME(name) paging##32_##name
 	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
 	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
@@ -71,6 +73,17 @@ struct guest_walker {
 	u32 error_code;
 };
 
+struct shadow_walker {
+	struct kvm_shadow_walk walker;
+	struct guest_walker *guest_walker;
+	int user_fault;
+	int write_fault;
+	int largepage;
+	int *ptwrite;
+	pfn_t pfn;
+	u64 *sptep;
+};
+
 static gfn_t gpte_to_gfn(pt_element_t gpte)
 {
 	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -272,86 +285,86 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *walker,
-			 int user_fault, int write_fault, int largepage,
-			 int *ptwrite, pfn_t pfn)
+static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
+				    struct kvm_vcpu *vcpu, gva_t addr,
+				    u64 *sptep, int level)
 {
-	hpa_t shadow_addr;
-	int level;
-	u64 *shadow_ent;
-	unsigned access = walker->pt_access;
+	struct shadow_walker *sw =
+		container_of(_sw, struct shadow_walker, walker);
+	struct guest_walker *gw = sw->guest_walker;
+	unsigned access = gw->pt_access;
+	struct kvm_mmu_page *shadow_page;
+	u64 spte;
+	int metaphysical;
+	gfn_t table_gfn;
+	int r;
+	pt_element_t curr_pte;
+
+	if (level == PT_PAGE_TABLE_LEVEL
+	    || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
+		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
+			     sw->user_fault, sw->write_fault,
+			     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+			     sw->ptwrite, sw->largepage, gw->gfn, sw->pfn,
+			     false);
+		sw->sptep = sptep;
+		return 1;
+	}
 
-	if (!is_present_pte(walker->ptes[walker->level - 1]))
-		return NULL;
+	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+		return 0;
 
-	shadow_addr = vcpu->arch.mmu.root_hpa;
-	level = vcpu->arch.mmu.shadow_root_level;
-	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
-		shadow_addr &= PT64_BASE_ADDR_MASK;
-		--level;
-	}
+	if (is_large_pte(*sptep))
+		rmap_remove(vcpu->kvm, sptep);
 
-	for (; ; --level) {
-		u32 index = SHADOW_PT_INDEX(addr, level);
-		struct kvm_mmu_page *shadow_page;
-		u64 shadow_pte;
-		int metaphysical;
-		gfn_t table_gfn;
-
-		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
-		if (level == PT_PAGE_TABLE_LEVEL)
-			break;
-
-		if (largepage && level == PT_DIRECTORY_LEVEL)
-			break;
-
-		if (is_shadow_present_pte(*shadow_ent)
-		    && !is_large_pte(*shadow_ent)) {
-			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
-			continue;
-		}
-
-		if (is_large_pte(*shadow_ent))
-			rmap_remove(vcpu->kvm, shadow_ent);
-
-		if (level - 1 == PT_PAGE_TABLE_LEVEL
-		    && walker->level == PT_DIRECTORY_LEVEL) {
-			metaphysical = 1;
-			if (!is_dirty_pte(walker->ptes[level - 1]))
-				access &= ~ACC_WRITE_MASK;
-			table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
-		} else {
-			metaphysical = 0;
-			table_gfn = walker->table_gfn[level - 2];
-		}
-		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-					       metaphysical, access,
-					       shadow_ent);
-		if (!metaphysical) {
-			int r;
-			pt_element_t curr_pte;
-			r = kvm_read_guest_atomic(vcpu->kvm,
-						  walker->pte_gpa[level - 2],
-						  &curr_pte, sizeof(curr_pte));
-			if (r || curr_pte != walker->ptes[level - 2]) {
-				kvm_release_pfn_clean(pfn);
-				return NULL;
-			}
+	if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
+		metaphysical = 1;
+		if (!is_dirty_pte(gw->ptes[level - 1]))
+			access &= ~ACC_WRITE_MASK;
+		table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+	} else {
+		metaphysical = 0;
+		table_gfn = gw->table_gfn[level - 2];
+	}
+	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+				       metaphysical, access, sptep);
+	if (!metaphysical) {
+		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
+					  &curr_pte, sizeof(curr_pte));
+		if (r || curr_pte != gw->ptes[level - 2]) {
+			kvm_release_pfn_clean(sw->pfn);
+			sw->sptep = NULL;
+			return 1;
 		}
-		shadow_addr = __pa(shadow_page->spt);
-		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
-			| PT_WRITABLE_MASK | PT_USER_MASK;
-		set_shadow_pte(shadow_ent, shadow_pte);
 	}
 
-	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
-		     user_fault, write_fault,
-		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-		     ptwrite, largepage, walker->gfn, pfn, false);
+	spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
+		| PT_WRITABLE_MASK | PT_USER_MASK;
+	*sptep = spte;
+	return 0;
+}
+
+static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+			 struct guest_walker *guest_walker,
+			 int user_fault, int write_fault, int largepage,
+			 int *ptwrite, pfn_t pfn)
+{
+	struct shadow_walker walker = {
+		.walker = { .entry = FNAME(shadow_walk_entry), },
+		.guest_walker = guest_walker,
+		.user_fault = user_fault,
+		.write_fault = write_fault,
+		.largepage = largepage,
+		.ptwrite = ptwrite,
+		.pfn = pfn,
+	};
+
+	if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
+		return NULL;
+
+	walk_shadow(&walker.walker, vcpu, addr);
 
-	return shadow_ent;
+	return walker.sptep;
 }
 
 /*
@@ -499,6 +512,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 
 #undef pt_element_t
 #undef guest_walker
+#undef shadow_walker
 #undef FNAME
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX