Commit 553de4db authored by David Matlack, committed by Greg Kroah-Hartman

kvm: fix potentially corrupt mmio cache

commit ee3d1570 upstream.

vcpu exits and memslot mutations can run concurrently as long as the
vcpu does not acquire the slots mutex. Thus it is theoretically possible
for memslots to change underneath a vcpu that is handling an exit.

If we increment the memslot generation number again after
synchronize_srcu_expedited(), vcpus can safely cache the memslot generation
without maintaining a single rcu_dereference through an entire vm exit.
This matters because much of the x86/kvm code does not maintain a single
rcu_dereference of the current memslots during each exit.

We can prevent the following case:

   vcpu (CPU 0)                             | thread (CPU 1)
--------------------------------------------+--------------------------
1  vm exit                                  |
2  srcu_read_lock(&kvm->srcu)               |
3  decide to cache something based on       |
     old memslots                           |
4                                           | change memslots
                                            | (increments generation)
5                                           | synchronize_srcu(&kvm->srcu);
6  retrieve generation # from new memslots  |
7  tag cache with new memslot generation    |
8  srcu_read_unlock(&kvm->srcu)             |
...                                         |
   <action based on cache occurs even       |
    though the caching decision was based   |
    on the old memslots>                    |
...                                         |
   <action *continues* to occur until next  |
    memslot generation change, which may    |
    be never>                               |
                                            |

By incrementing the generation after synchronizing with kvm->srcu readers,
we ensure that the generation retrieved in (6) will become invalid soon
after (8).

Keeping the existing increment is not strictly necessary, but we keep it,
moving it from update_memslots to install_new_memslots for consistency.
It invalidates old cached MMIOs immediately, instead of having to wait for
the end of synchronize_srcu_expedited(), which makes the code more clearly
correct in case CPU 1 is preempted right after synchronize_srcu() returns.
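
For reference, the ordering this gives install_new_memslots (condensed from
the install_new_memslots hunk further down; comments abbreviated and the
surrounding function omitted) is:

	WARN_ON(old_memslots->generation & 1);
	slots->generation = old_memslots->generation + 1;  /* odd: SPTE caching disabled */
	update_memslots(slots, new);
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);            /* wait out current readers */
	slots->generation++;                               /* even again, and newer than any
	                                                      generation a racing exit cached */
	kvm_arch_memslots_updated(kvm);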

To avoid halving the generation space in SPTEs, always presume that the
low bit of the generation is zero when reconstructing a generation number
out of an SPTE.  This effectively disables MMIO caching in SPTEs during
the call to synchronize_srcu_expedited.  Using the low bit this way is
somewhat like a seqcount: the protected thing is a cache, and instead of
retrying we simply punt if we observe the low bit to be 1.
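
To make the seqcount analogy concrete, here is a minimal userspace model of
the scheme (a sketch only; the names and structure are not KVM's, but the
low-bit handling mirrors the description above):

#include <assert.h>
#include <stdbool.h>

/* Toy generation-tagged cache: the low bit of the generation acts as an
 * "update in progress" flag and is never stored in a cache tag.
 */
static unsigned int generation;         /* even except during an update */

static unsigned int cache_tag(void)
{
        return generation & ~1u;        /* drop the low bit, as the SPTE encoding does */
}

static bool cache_valid(unsigned int tag)
{
        /* A tag taken while an update was in flight (odd generation) can
         * never equal the current generation: during the update they differ
         * in the low bit, and afterwards the second increment has moved
         * the generation past the tag.
         */
        return tag == generation;
}

int main(void)
{
        unsigned int tag;

        generation = 6;                 /* steady state: even */
        tag = cache_tag();
        assert(cache_valid(tag));       /* normal fast path */

        generation++;                   /* writer: update begins (7) */
        tag = cache_tag();              /* racing reader tags with 6 */
        assert(!cache_valid(tag));      /* punt while the update is in flight */

        generation++;                   /* writer: update done (8) */
        assert(!cache_valid(tag));      /* the stale tag stays invalid */
        return 0;
}
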
Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bb15dea0
@@ -425,6 +425,20 @@ fault through the slow path.
 Since only 19 bits are used to store generation-number on mmio spte, all
 pages are zapped when there is an overflow.
 
+Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
+times, the last one happening when the generation number is retrieved and
+stored into the MMIO spte.  Thus, the MMIO spte might be created based on
+out-of-date information, but with an up-to-date generation number.
+
+To avoid this, the generation number is incremented again after synchronize_srcu
+returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
+memslot update, while some SRCU readers might be using the old copy.  We do not
+want to use an MMIO spte created with an odd generation number, and we can do
+this without losing a bit in the MMIO spte.  The low bit of the generation is
+not stored in the MMIO spte and is presumed zero when it is extracted out of
+the spte.  If KVM is unlucky and creates an MMIO spte while the low bit is 1,
+the next access to the spte will always be a cache miss.
+
 
 Further reading
 ===============
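
As a worked illustration of the paragraph added above (the numbers are made
up): suppose kvm_memslots(kvm)->generation is 6 when a memslot update begins.
install_new_memslots raises it to 7, so an MMIO spte created in this window is
tagged with 7 minus its low bit, i.e. 6.  While the update is in flight the
current generation is 7 and the comparison 6 == 7 fails; after the final
increment the generation is 8 and 6 == 8 fails as well, so the spte is always
treated as a cache miss and never reused with stale memslot information.
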
@@ -199,16 +199,20 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
 /*
- * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number,
- * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation
- * number.
+ * the low bit of the generation number is always presumed to be zero.
+ * This disables mmio caching during memslot updates.  The concept is
+ * similar to a seqcount but instead of retrying the access we just punt
+ * and ignore the cache.
+ *
+ * spte bits 3-11 are used as bits 1-9 of the generation number,
+ * the bits 52-61 are used as bits 10-19 of the generation number.
  */
-#define MMIO_SPTE_GEN_LOW_SHIFT		3
+#define MMIO_SPTE_GEN_LOW_SHIFT		2
 #define MMIO_SPTE_GEN_HIGH_SHIFT	52
 
-#define MMIO_GEN_SHIFT			19
-#define MMIO_GEN_LOW_SHIFT		9
-#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 1)
+#define MMIO_GEN_SHIFT			20
+#define MMIO_GEN_LOW_SHIFT		10
+#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 2)
 #define MMIO_GEN_MASK			((1 << MMIO_GEN_SHIFT) - 1)
 #define MMIO_MAX_GEN			((1 << MMIO_GEN_SHIFT) - 1)

@@ -4433,7 +4437,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
 	 * The very rare case: if the generation-number is round,
 	 * zap all shadow pages.
 	 */
-	if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
+	if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
 		printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
 		kvm_mmu_invalidate_zap_all_pages(kvm);
 	}
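
The packing these constants imply can be checked with a small standalone
sketch.  The helper names below are illustrative rather than the kernel's own;
only the #define values are taken from the hunk above.  The asserts show that
the generation's low bit is dropped on the way into the spte and reads back as
zero on the way out:

#include <assert.h>
#include <stdint.h>

#define MMIO_SPTE_GEN_LOW_SHIFT   2
#define MMIO_SPTE_GEN_HIGH_SHIFT  52
#define MMIO_GEN_LOW_SHIFT        10
#define MMIO_GEN_LOW_MASK         ((1 << MMIO_GEN_LOW_SHIFT) - 2)

/* Pack generation bits 1-9 into spte bits 3-11 and bits 10-19 into
 * spte bits 52-61; bit 0 is simply not stored.
 */
static uint64_t pack_gen(unsigned int gen)
{
        uint64_t spte = 0;

        spte |= (uint64_t)(gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
        spte |= ((uint64_t)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
        return spte;
}

/* Recover the generation; the missing bit 0 reads back as zero. */
static unsigned int unpack_gen(uint64_t spte)
{
        unsigned int gen;

        gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
        gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
        return gen;
}

int main(void)
{
        /* An even generation survives the round trip unchanged... */
        assert(unpack_gen(pack_gen(0x912a6)) == 0x912a6);
        /* ...an odd one (memslot update in flight) comes back with bit 0
         * cleared, so it can never equal the live generation.
         */
        assert(unpack_gen(pack_gen(0x912a7)) == 0x912a6);
        return 0;
}
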
@@ -95,8 +95,6 @@ static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
-static void update_memslots(struct kvm_memslots *slots,
-			    struct kvm_memory_slot *new, u64 last_generation);
 static void kvm_release_pfn_dirty(pfn_t pfn);
 static void mark_page_dirty_in_slot(struct kvm *kvm,

@@ -685,8 +683,7 @@ static void sort_memslots(struct kvm_memslots *slots)
 }
 
 static void update_memslots(struct kvm_memslots *slots,
-			    struct kvm_memory_slot *new,
-			    u64 last_generation)
+			    struct kvm_memory_slot *new)
 {
 	if (new) {
 		int id = new->id;

@@ -697,8 +694,6 @@ static void update_memslots(struct kvm_memslots *slots,
 		if (new->npages != npages)
 			sort_memslots(slots);
 	}
-
-	slots->generation = last_generation + 1;
 }
 
 static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)

@@ -720,10 +715,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 {
 	struct kvm_memslots *old_memslots = kvm->memslots;
 
-	update_memslots(slots, new, kvm->memslots->generation);
+	/*
+	 * Set the low bit in the generation, which disables SPTE caching
+	 * until the end of synchronize_srcu_expedited.
+	 */
+	WARN_ON(old_memslots->generation & 1);
+	slots->generation = old_memslots->generation + 1;
+
+	update_memslots(slots, new);
 	rcu_assign_pointer(kvm->memslots, slots);
 	synchronize_srcu_expedited(&kvm->srcu);
+
+	/*
+	 * Increment the new memslot generation a second time. This prevents
+	 * vm exits that race with memslot updates from caching a memslot
+	 * generation that will (potentially) be valid forever.
+	 */
+	slots->generation++;
+
 	kvm_arch_memslots_updated(kvm);
 
 	return old_memslots;