Commit 12748007 authored by Christian Borntraeger

KVM: s390/mm: Make pages accessible before destroying the guest

Before we destroy the secure configuration, we had better make all
pages accessible again. This also happens during reboot, where we reboot
into a non-secure guest that can then go into secure mode again. As this
"new" secure guest will have a new ID, we cannot reuse the old page
state.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
parent fa0c5eab
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -149,4 +149,5 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr);
 int gmap_mark_unmergeable(void);
+void s390_reset_acc(struct mm_struct *mm);
 #endif				/* _ASM_S390_GMAP_H */
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -140,6 +140,9 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 {
	int cc;

+	/* make all pages accessible before destroying the guest */
+	s390_reset_acc(kvm->mm);
+
	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2650,3 +2650,38 @@ void s390_reset_cmma(struct mm_struct *mm)
	up_write(&mm->mmap_sem);
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
+
+/*
+ * make inaccessible pages accessible again
+ */
+static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
+			    unsigned long next, struct mm_walk *walk)
+{
+	pte_t pte = READ_ONCE(*ptep);
+
+	if (pte_present(pte))
+		WARN_ON_ONCE(uv_convert_from_secure(pte_val(pte) & PAGE_MASK));
+	return 0;
+}
+
+static const struct mm_walk_ops reset_acc_walk_ops = {
+	.pte_entry = __s390_reset_acc,
+};
+
+#include <linux/sched/mm.h>
+void s390_reset_acc(struct mm_struct *mm)
+{
+	/*
+	 * we might be called during
+	 * reset:                              we walk the pages and clear
+	 * close of all kvm file descriptors:  we walk the pages and clear
+	 * exit of process on fd closure:      vma already gone, do nothing
+	 */
+	if (!mmget_not_zero(mm))
+		return;
+	down_read(&mm->mmap_sem);
+	walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+}
+EXPORT_SYMBOL_GPL(s390_reset_acc);
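
Taken together, the two code hunks establish the teardown order sketched
below. This is only an annotated condensation of the pv.c and gmap.c
changes above, not additional kernel code, and the trailing parts of
kvm_s390_pv_deinit_vm() that the diff does not show are left out.

	/*
	 * Step 1: export every guest page. s390_reset_acc() takes a
	 * reference on kvm->mm, walks all present ptes under the mmap_sem
	 * read lock and issues uv_convert_from_secure() for each page, so
	 * the pages become accessible again while the secure configuration
	 * (and with it the guest's ID) still exists.
	 */
	s390_reset_acc(kvm->mm);

	/* Step 2: only now destroy the secure configuration itself. */
	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	/* ... remainder of kvm_s390_pv_deinit_vm() unchanged ... */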