Commit f28a4b4d authored by Martin Schwidefsky

s390/mm: use a single lock for the fields in mm_context_t

The three locks 'lock', 'pgtable_lock' and 'gmap_lock' in the
mm_context_t can be reduced to a single lock.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 60f07c8e
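
For orientation before the hunks: the single context.lock now serializes both pgtable_list (4K pages that still have a free 2K page-table half) and gmap_list (the guest address spaces of this mm). Below is a minimal sketch of the resulting structure, reconstructed from the hunks that follow; the field order and the elided trailing fields are assumptions, the authoritative definition lives in arch/s390/include/asm/mmu.h:

/* Sketch reconstructed from the diff below; not the verbatim header. */
typedef struct {
	spinlock_t lock;		/* protects pgtable_list and gmap_list */
	cpumask_t cpu_attach_mask;
	atomic_t flush_count;
	unsigned int flush_mm;
	struct list_head pgtable_list;	/* 4K pages with a free 2K half */
	struct list_head gmap_list;	/* gmaps (guest ASCEs) of this mm */
	unsigned long gmap_asce;
	unsigned long asce;
	/* remaining fields unchanged by this patch */
} mm_context_t;

Note that the gmap paths below take the lock with plain spin_lock() while the page-table paths use spin_lock_bh(), matching how the two removed locks were taken.
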
arch/s390/include/asm/mmu.h
@@ -9,9 +9,7 @@ typedef struct {
 	cpumask_t cpu_attach_mask;
 	atomic_t flush_count;
 	unsigned int flush_mm;
-	spinlock_t pgtable_lock;
 	struct list_head pgtable_list;
-	spinlock_t gmap_lock;
 	struct list_head gmap_list;
 	unsigned long gmap_asce;
 	unsigned long asce;
@@ -29,10 +27,7 @@ typedef struct {
 #define INIT_MM_CONTEXT(name)						\
 	.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),	\
-	.context.pgtable_lock =						\
-		__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),	\
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
-	.context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \
 	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 static inline int tprot(unsigned long addr)
...
arch/s390/include/asm/mmu_context.h
@@ -18,9 +18,7 @@ static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.lock);
-	spin_lock_init(&mm->context.pgtable_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	spin_lock_init(&mm->context.gmap_lock);
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.flush_count, 0);
...
arch/s390/mm/gmap.c
@@ -100,14 +100,14 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 	if (!gmap)
 		return NULL;
 	gmap->mm = mm;
-	spin_lock(&mm->context.gmap_lock);
+	spin_lock(&mm->context.lock);
 	list_add_rcu(&gmap->list, &mm->context.gmap_list);
 	if (list_is_singular(&mm->context.gmap_list))
 		gmap_asce = gmap->asce;
 	else
 		gmap_asce = -1UL;
 	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
-	spin_unlock(&mm->context.gmap_lock);
+	spin_unlock(&mm->context.lock);
 	return gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_create);
@@ -248,7 +248,7 @@ void gmap_remove(struct gmap *gmap)
 		spin_unlock(&gmap->shadow_lock);
 	}
 	/* Remove gmap from the pre-mm list */
-	spin_lock(&gmap->mm->context.gmap_lock);
+	spin_lock(&gmap->mm->context.lock);
 	list_del_rcu(&gmap->list);
 	if (list_empty(&gmap->mm->context.gmap_list))
 		gmap_asce = 0;
@@ -258,7 +258,7 @@ void gmap_remove(struct gmap *gmap)
 	else
 		gmap_asce = -1UL;
 	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
-	spin_unlock(&gmap->mm->context.gmap_lock);
+	spin_unlock(&gmap->mm->context.lock);
 	synchronize_rcu();
 	/* Put reference */
 	gmap_put(gmap);
...
arch/s390/mm/pgtable.c
@@ -188,7 +188,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	/* Try to get a fragment of a 4K page as a 2K page table */
 	if (!mm_alloc_pgste(mm)) {
 		table = NULL;
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
@@ -203,7 +203,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				list_del(&page->lru);
 			}
 		}
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 		if (table)
 			return table;
 	}
@@ -227,9 +227,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		/* Return the first 2K fragment of the page */
 		atomic_set(&page->_mapcount, 1);
 		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 	}
 	return table;
 }
@@ -243,13 +243,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	if (!mm_alloc_pgste(mm)) {
 		/* Free 2K page table fragment of a 4K page */
 		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
 		if (mask & 3)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 		if (mask != 0)
 			return;
 	}
@@ -275,13 +275,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 		return;
 	}
 	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
-	spin_lock_bh(&mm->context.pgtable_lock);
+	spin_lock_bh(&mm->context.lock);
 	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
 	if (mask & 3)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
 		list_del(&page->lru);
-	spin_unlock_bh(&mm->context.pgtable_lock);
+	spin_unlock_bh(&mm->context.lock);
 	table = (unsigned long *) (__pa(table) | (1U << bit));
 	tlb_remove_table(tlb, table);
 }
...
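
A closing note on the pgtable.c hunks: an s390 page table is 2K (PTRS_PER_PTE = 256 entries of 8 bytes), so two tables share one 4K page and page->_mapcount is reused as a bitmask of allocated halves. Bits 0 and 1 mark the halves in use; page_table_free_rcu() additionally sets a pending-free marker in the upper nibble (the 0x11U << bit). Here is a standalone user-space sketch of that accounting, with a plain integer and a hypothetical xor_bits() helper standing in for the kernel's atomic_xor_bits():

#include <assert.h>

/* Toy stand-in for atomic_xor_bits(): toggle bits, return the new mask. */
static unsigned int xor_bits(unsigned int *mask, unsigned int bits)
{
	*mask ^= bits;
	return *mask;
}

int main(void)
{
	unsigned int mask = 0;			/* fresh 4K page, no tables */

	xor_bits(&mask, 1U << 0);		/* allocate first 2K half */
	xor_bits(&mask, 1U << 1);		/* allocate second 2K half */

	/* Free the first half: bit 1 stays set, so the page goes back on
	 * pgtable_list as a source of free fragments (mask & 3 != 0). */
	assert((xor_bits(&mask, 1U << 0) & 3) != 0);

	/* Free the second half: mask drops to 0, the page leaves the list
	 * and can be released (the "if (mask != 0) return;" test above). */
	assert((xor_bits(&mask, 1U << 1) & 3) == 0);
	return 0;
}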