Commit 3005f6f2 authored by Ricardo Koller, committed by Oliver Upton

KVM: arm64: Open-code kvm_mmu_write_protect_pt_masked()

Move the functionality of kvm_mmu_write_protect_pt_masked() into its
caller, kvm_arch_mmu_enable_log_dirty_pt_masked(). This prepares for a
subsequent commit that will share some of the code in
kvm_arch_mmu_enable_log_dirty_pt_masked().

Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20230426172330.1439644-11-ricarkol@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent e7bf7a49
@@ -1078,28 +1078,6 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-/**
- * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
- * @kvm:	The KVM pointer
- * @slot:	The memory slot associated with mask
- * @gfn_offset:	The gfn offset in memory slot
- * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
- *		slot to be write protected
- *
- * Walks bits set in mask write protects the associated pte's. Caller must
- * acquire kvm_mmu_lock.
- */
-static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
-		struct kvm_memory_slot *slot,
-		gfn_t gfn_offset, unsigned long mask)
-{
-	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
-	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
-	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
-
-	stage2_wp_range(&kvm->arch.mmu, start, end);
-}
-
 /**
  * kvm_mmu_split_memory_region() - split the stage 2 blocks into PAGE_SIZE
  *				   pages for memory slot
@@ -1129,17 +1107,27 @@ static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
 }
 
 /*
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * dirty pages.
+ * kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages.
  * @kvm:	The KVM pointer
  * @slot:	The memory slot associated with mask
  * @gfn_offset:	The gfn offset in memory slot
  * @mask:	The mask of pages at offset 'gfn_offset' in this memory
  *		slot to enable dirty logging on
  *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
+ * Writes protect selected pages to enable dirty logging for them. Caller must
+ * acquire kvm->mmu_lock.
  */
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		struct kvm_memory_slot *slot,
 		gfn_t gfn_offset, unsigned long mask)
 {
-	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
+	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+
+	lockdep_assert_held_write(&kvm->mmu_lock);
+
+	stage2_wp_range(&kvm->arch.mmu, start, end);
 }
 
 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
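For readers skimming the diff, the range computation being open-coded works as
follows: mask is one word of the dirty bitmap covering up to 64 consecutive
pages starting at gfn_offset; __ffs(mask) gives the index of the lowest set
bit and __fls(mask) the highest, so stage2_wp_range() write-protects a single
contiguous span from the first to the last dirty page. Any clear bits in
between are write-protected too, which is harmless for correctness but can
cause spurious write faults. Below is a minimal, hypothetical userspace sketch
of the same arithmetic; my_ffs()/my_fls() stand in for the kernel's
__ffs()/__fls(), and the base_gfn and mask values are made up for
illustration:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Userspace stand-ins for the kernel's __ffs()/__fls(): bit index of the
 * lowest / highest set bit. Assumes 64-bit unsigned long; as with the
 * kernel helpers, mask must be nonzero. */
static unsigned long my_ffs(unsigned long mask) { return __builtin_ctzl(mask); }
static unsigned long my_fls(unsigned long mask) { return 63 - __builtin_clzl(mask); }

int main(void)
{
	uint64_t base_gfn = 0x1000;	/* hypothetical slot->base_gfn + gfn_offset */
	unsigned long mask = 0x00f0;	/* bits 4..7 set: pages 4..7 are dirty */

	/* Same computation as the open-coded body above. */
	uint64_t start = (base_gfn + my_ffs(mask)) << PAGE_SHIFT;
	uint64_t end = (base_gfn + my_fls(mask) + 1) << PAGE_SHIFT;

	printf("write-protect range [0x%" PRIx64 ", 0x%" PRIx64 ")\n", start, end);
	return 0;
}

With this mask the sketch prints [0x1004000, 0x1008000): the four dirty pages
collapse into one 16 KiB write-protect call rather than four per-page calls.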