Commit 0f7f8489 authored by David Hildenbrand, committed by Christian Borntraeger

s390/mm: fix races on gmap_shadow creation

Before any thread is allowed to use a gmap_shadow, it has to be fully
initialized. However, for invalidation to work properly, we have to
register the new gmap_shadow before we protect the parent gmap table.

Because locking is tricky and we have to avoid duplicate gmaps, let's
introduce an "initialized" field that signals to other threads whether a
given gmap_shadow can already be used or whether they have to retry.

Let's properly return errors using ERR_PTR() instead of simply returning
NULL, so a caller can react to the error accordingly.
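
For illustration, a minimal sketch of how a caller might consume the new
convention; the surrounding code and the retry policy are hypothetical and
not part of this patch, while ERR_PTR()/IS_ERR()/PTR_ERR() are the standard
helpers from <linux/err.h>:

	struct gmap *sg;

	do {
		/* another CPU may still be initializing the shadow */
		sg = gmap_shadow(parent, asce);
	} while (sg == ERR_PTR(-EAGAIN));
	if (IS_ERR(sg))
		return PTR_ERR(sg);	/* e.g. -ENOMEM or -EFAULT */
	/* sg is fully initialized and we hold a reference to it */

A real caller would typically not busy-loop like this, but back off or
return to its own caller before retrying.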
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 998f637c
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -27,6 +27,7 @@
  * @parent: pointer to the parent gmap for shadow guest address spaces
  * @orig_asce: ASCE for which the shadow page table has been created
  * @removed: flag to indicate if a shadow guest address space has been removed
+ * @initialized: flag to indicate if a shadow guest address space can be used
  */
 struct gmap {
 	struct list_head list;
@@ -49,6 +50,7 @@ struct gmap {
 	struct gmap *parent;
 	unsigned long orig_asce;
 	bool removed;
+	bool initialized;
 };
 
 /**
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1384,7 +1384,8 @@ static void gmap_unshadow(struct gmap *sg)
  * @asce: ASCE for which the shadow table is created
  *
  * Returns the pointer to a gmap if a shadow table with the given asce is
- * already available, otherwise NULL
+ * already available, ERR_PTR(-EAGAIN) if another one is just being created,
+ * otherwise NULL
  */
 static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
 {
@@ -1393,6 +1394,8 @@ static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
 	list_for_each_entry(sg, &parent->children, list) {
 		if (sg->orig_asce != asce || sg->removed)
 			continue;
+		if (!sg->initialized)
+			return ERR_PTR(-EAGAIN);
 		atomic_inc(&sg->ref_count);
 		return sg;
 	}
@@ -1409,8 +1412,9 @@ static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
  * The shadow table will be removed automatically on any change to the
  * PTE mapping for the source table.
  *
- * Returns a guest address space structure, NULL if out of memory or if
- * anything goes wrong while protecting the top level pages.
+ * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
+ * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
+ * parent gmap table could not be protected.
  */
 struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce)
 {
@@ -1428,30 +1432,37 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce)
 	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
 	new = gmap_alloc(limit);
 	if (!new)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	new->mm = parent->mm;
 	new->parent = gmap_get(parent);
 	new->orig_asce = asce;
+	new->initialized = false;
+	spin_lock(&parent->shadow_lock);
+	/* Recheck if another CPU created the same shadow */
+	sg = gmap_find_shadow(parent, asce);
+	if (sg) {
+		spin_unlock(&parent->shadow_lock);
+		gmap_free(new);
+		return sg;
+	}
+	atomic_set(&new->ref_count, 2);
+	list_add(&new->list, &parent->children);
+	spin_unlock(&parent->shadow_lock);
+	/* protect after insertion, so it will get properly invalidated */
 	down_read(&parent->mm->mmap_sem);
 	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
 				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
 				PROT_READ, PGSTE_VSIE_BIT);
 	up_read(&parent->mm->mmap_sem);
+	spin_lock(&parent->shadow_lock);
+	new->initialized = true;
 	if (rc) {
-		atomic_set(&new->ref_count, 2);
-		spin_lock(&parent->shadow_lock);
-		/* Recheck if another CPU created the same shadow */
-		sg = gmap_find_shadow(parent, asce);
-		if (!sg) {
-			list_add(&new->list, &parent->children);
-			sg = new;
-			new = NULL;
-		}
-		spin_unlock(&parent->shadow_lock);
+		list_del(&new->list);
+		gmap_free(new);
+		new = ERR_PTR(rc);
 	}
-	if (new)
-		gmap_free(new);
-	return sg;
+	spin_unlock(&parent->shadow_lock);
+	return new;
 }
 EXPORT_SYMBOL_GPL(gmap_shadow);
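
The ordering above generalizes beyond gmaps: allocate outside the lock,
recheck for a racing creator under the lock, publish the half-initialized
object so invalidation already sees it, and only mark it initialized once
the slow protection step has finished. Below is a reduced, self-contained
userspace sketch of that pattern; all names (struct shadow, find_shadow(),
make_shadow(), protect_parent_range()) are invented stand-ins, and a pthread
mutex replaces the shadow_lock spinlock:

	#include <errno.h>
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>

	/* Invented stand-in for struct gmap; "key" plays the role of orig_asce. */
	struct shadow {
		struct shadow *next;
		unsigned long key;
		bool initialized;	/* published, but may it be used yet? */
	};

	static struct shadow *children;		/* parent->children, reduced */
	static pthread_mutex_t shadow_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Find an existing shadow; -EAGAIN while another thread still builds it. */
	static int find_shadow(unsigned long key, struct shadow **out)
	{
		struct shadow *sg;

		*out = NULL;
		for (sg = children; sg; sg = sg->next) {
			if (sg->key != key)
				continue;
			if (!sg->initialized)
				return -EAGAIN;	/* creator not finished yet */
			*out = sg;
			return 0;
		}
		return 0;
	}

	static int protect_parent_range(unsigned long key)
	{
		(void)key;
		return 0;	/* stub for gmap_protect_range(); may fail */
	}

	/* Create-or-find with the publish-then-initialize ordering of the patch. */
	static int make_shadow(unsigned long key, struct shadow **out)
	{
		struct shadow *new, **pp;
		int rc;

		new = calloc(1, sizeof(*new));
		if (!new)
			return -ENOMEM;
		new->key = key;
		new->initialized = false;

		pthread_mutex_lock(&shadow_lock);
		rc = find_shadow(key, out);	/* recheck under the lock */
		if (rc || *out) {
			pthread_mutex_unlock(&shadow_lock);
			free(new);
			return rc;		/* hit, or -EAGAIN to retry */
		}
		new->next = children;		/* publish before protecting, */
		children = new;			/* so invalidation sees it */
		pthread_mutex_unlock(&shadow_lock);

		rc = protect_parent_range(key);	/* slow part, outside the lock */

		pthread_mutex_lock(&shadow_lock);
		new->initialized = true;
		if (rc) {			/* protection failed: unpublish */
			for (pp = &children; *pp != new; pp = &(*pp)->next)
				;
			*pp = new->next;
			free(new);
			new = NULL;
		}
		pthread_mutex_unlock(&shadow_lock);
		*out = new;
		return rc;
	}

A second thread calling make_shadow() with the same key either finds the
finished object or sees -EAGAIN while the first thread is still between
publish and initialize, and simply retries, mirroring how gmap_find_shadow()
now behaves for a not-yet-initialized gmap_shadow.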