Commit c3a62df4 authored by Marc Zyngier

Merge branch kvm-arm64/pgtable-fixes-6.4 into kvmarm-master/fixes

* kvm-arm64/pgtable-fixes-6.4:
  : .
  : Fixes for concurrent S2 mapping race from Oliver:
  :
  : "So it appears that there is a race between two parallel stage-2 map
  : walkers that could lead to mapping the incorrect PA for a given IPA, as
  : the IPA -> PA relationship picks up an unintended offset. This series
  : eliminates the problem by using the current IPA of the walk as the
  : source-of-truth regarding where we are in a map operation."
  : .
  KVM: arm64: Constify start/end/phys fields of the pgtable walker data
  KVM: arm64: Infer PA offset from VA in hyp map walker
  KVM: arm64: Infer the PA offset from IPA in stage-2 map walker
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 9a48c597 1ea24415
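
As context for the diff below (this sketch is not part of the commit itself): rather than advancing a mutable data->phys each time a leaf is installed, the walkers now derive the PA for the current visit from how far the IPA has advanced since the start of the walk, so a retried or skipped visit can no longer skew the IPA -> PA relationship. A minimal standalone C sketch of that invariant, using the hypothetical names walk_start, walk_phys and cur_ipa in place of data->start, data->phys and ctx->addr:

#include <stdint.h>

/* PA for the current visit, derived only from the walk's fixed inputs. */
static uint64_t map_walker_phys(uint64_t walk_start, uint64_t walk_phys,
				uint64_t cur_ipa)
{
	/* The IPA -> PA offset stays constant for the whole map operation. */
	return walk_phys + (cur_ipa - walk_start);
}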
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -209,6 +209,7 @@ struct kvm_pgtable_visit_ctx {
 	kvm_pte_t old;
 	void *arg;
 	struct kvm_pgtable_mm_ops *mm_ops;
+	u64 start;
 	u64 addr;
 	u64 end;
 	u32 level;
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -58,8 +58,9 @@
 struct kvm_pgtable_walk_data {
 	struct kvm_pgtable_walker *walker;
 
+	const u64 start;
 	u64 addr;
-	u64 end;
+	const u64 end;
 };
 
 static bool kvm_phys_is_valid(u64 phys)
@@ -201,6 +202,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 		.old = READ_ONCE(*ptep),
 		.arg = data->walker->arg,
 		.mm_ops = mm_ops,
+		.start = data->start,
 		.addr = data->addr,
 		.end = data->end,
 		.level = level,
@@ -293,6 +295,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		     struct kvm_pgtable_walker *walker)
 {
 	struct kvm_pgtable_walk_data walk_data = {
+		.start = ALIGN_DOWN(addr, PAGE_SIZE),
 		.addr = ALIGN_DOWN(addr, PAGE_SIZE),
 		.end = PAGE_ALIGN(walk_data.addr + size),
 		.walker = walker,
@@ -349,7 +352,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
 }
 
 struct hyp_map_data {
-	u64 phys;
+	const u64 phys;
 	kvm_pte_t attr;
 };
@@ -407,13 +410,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 				    struct hyp_map_data *data)
 {
+	u64 phys = data->phys + (ctx->addr - ctx->start);
 	kvm_pte_t new;
-	u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
 
 	if (!kvm_block_mapping_supported(ctx, phys))
 		return false;
 
-	data->phys += granule;
 	new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
 	if (ctx->old == new)
 		return true;
@@ -576,7 +578,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
 }
 
 struct stage2_map_data {
-	u64 phys;
+	const u64 phys;
 	kvm_pte_t attr;
 	u8 owner_id;
@@ -794,20 +796,43 @@ static bool stage2_pte_executable(kvm_pte_t pte)
 	return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
 }
 
+static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
+				       const struct stage2_map_data *data)
+{
+	u64 phys = data->phys;
+
+	/*
+	 * Stage-2 walks to update ownership data are communicated to the map
+	 * walker using an invalid PA. Avoid offsetting an already invalid PA,
+	 * which could overflow and make the address valid again.
+	 */
+	if (!kvm_phys_is_valid(phys))
+		return phys;
+
+	/*
+	 * Otherwise, work out the correct PA based on how far the walk has
+	 * gotten.
+	 */
+	return phys + (ctx->addr - ctx->start);
+}
+
 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
 					struct stage2_map_data *data)
 {
+	u64 phys = stage2_map_walker_phys_addr(ctx, data);
+
 	if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
 		return false;
 
-	return kvm_block_mapping_supported(ctx, data->phys);
+	return kvm_block_mapping_supported(ctx, phys);
 }
 
 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 				      struct stage2_map_data *data)
 {
 	kvm_pte_t new;
-	u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
+	u64 phys = stage2_map_walker_phys_addr(ctx, data);
+	u64 granule = kvm_granule_size(ctx->level);
 	struct kvm_pgtable *pgt = data->mmu->pgt;
 	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
@@ -841,8 +866,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 
 	stage2_make_pte(ctx, new);
 
-	if (kvm_phys_is_valid(phys))
-		data->phys += granule;
 	return 0;
 }