Commit 5b9c017d authored by Andrew Morton, committed by Jaroslav Kysela

[PATCH] ppc64: Convert mm_context_t to a struct

From: David Gibson <david@gibson.dropbear.id.au>

Converts the mm_context_t on ppc64 to be a struct.  This lets us separate
the low_hpages flag into its own field rather than folding it into the
actual context id.  That makes things neater, since the flag is
conceptually separate and has different semantics: for example, it should be
propagated across a fork, whereas the context ID obviously shouldn't be.  The
mm->context field is the only place to put arch-specific information in the
mm_struct.

This patch will also make some interesting extensions to the hugepage
support much easier, such as allowing dynamic resizing of the hugepage
address space, or using special pagetables for hugepages.
parent 99e7e863
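To make the fork semantics concrete, here is a minimal userspace C sketch of the idea.  This is not the kernel code: only the mm_context_t field names come from the patch below, and next_id/init_new_context here are simplified stand-ins for the real context allocator.  With the struct form, duplicating the parent's context on fork carries low_hpages along automatically, while handing out a fresh context only needs to touch the id field:

/* Userspace illustration only -- models the semantics, not the kernel code. */
#include <assert.h>
#include <stdio.h>

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
	int low_hpages;		/* the flag now lives in its own field */
} mm_context_t;

static mm_context_id_t next_id = 1;	/* stand-in for the context queue */

/* Mirrors init_new_context() after the patch: only the id is assigned,
 * so a flag copied from the parent mm is left untouched. */
static void init_new_context(mm_context_t *ctx)
{
	ctx->id = next_id++;
}

int main(void)
{
	mm_context_t parent = { .id = next_id++, .low_hpages = 1 };

	mm_context_t child = parent;	/* fork: the child starts as a copy */
	init_new_context(&child);	/* ...then gets its own context id */

	assert(child.id != parent.id);	/* the context ID is not inherited */
	assert(child.low_hpages == 1);	/* but the hugepage flag propagates */
	printf("parent id=%lu, child id=%lu, child low_hpages=%d\n",
	       parent.id, child.id, child.low_hpages);
	return 0;
}

Under the old encoding, init_new_context() had to mask CONTEXT_LOW_HPAGES out of the freshly allocated value and OR it back in by hand, which is exactly what the removed lines in the init_new_context() hunk below were doing.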
@@ -184,13 +184,13 @@ int ste_allocate(unsigned long ea)
/* Kernel or user address? */
if (REGION_ID(ea) >= KERNEL_REGION_ID) {
vsid = get_kernel_vsid(ea);
- context = REGION_ID(ea);
+ context = KERNEL_CONTEXT(ea);
} else {
if (!current->mm)
return 1;
context = current->mm->context;
- vsid = get_vsid(context, ea);
+ vsid = get_vsid(context.id, ea);
}
esid = GET_ESID(ea);
@@ -223,7 +223,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
return;
- vsid = get_vsid(mm->context, pc);
+ vsid = get_vsid(mm->context.id, pc);
__ste_allocate(pc_esid, vsid);
if (pc_esid == stack_esid)
@@ -231,7 +231,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
return;
- vsid = get_vsid(mm->context, stack);
+ vsid = get_vsid(mm->context.id, stack);
__ste_allocate(stack_esid, vsid);
if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
@@ -240,7 +240,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(unmapped_base) ||
(REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
return;
- vsid = get_vsid(mm->context, unmapped_base);
+ vsid = get_vsid(mm->context.id, unmapped_base);
__ste_allocate(unmapped_base_esid, vsid);
/* Order update */
@@ -406,14 +406,14 @@ int slb_allocate(unsigned long ea)
/* Kernel or user address? */
if (REGION_ID(ea) >= KERNEL_REGION_ID) {
- context = REGION_ID(ea);
+ context = KERNEL_CONTEXT(ea);
vsid = get_kernel_vsid(ea);
} else {
if (unlikely(!current->mm))
return 1;
context = current->mm->context;
- vsid = get_vsid(context, ea);
+ vsid = get_vsid(context.id, ea);
}
esid = GET_ESID(ea);
@@ -444,7 +444,7 @@ static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
return;
- vsid = get_vsid(mm->context, pc);
+ vsid = get_vsid(mm->context.id, pc);
__slb_allocate(pc_esid, vsid, mm->context);
if (pc_esid == stack_esid)
@@ -452,7 +452,7 @@ static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
return;
- vsid = get_vsid(mm->context, stack);
+ vsid = get_vsid(mm->context.id, stack);
__slb_allocate(stack_esid, vsid, mm->context);
if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
@@ -461,7 +461,7 @@ static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(unmapped_base) ||
(REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
return;
- vsid = get_vsid(mm->context, unmapped_base);
+ vsid = get_vsid(mm->context.id, unmapped_base);
__slb_allocate(unmapped_base_esid, vsid, mm->context);
}
@@ -265,7 +265,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
if (mm == NULL)
return 1;
- vsid = get_vsid(mm->context, ea);
+ vsid = get_vsid(mm->context.id, ea);
break;
case IO_REGION_ID:
mm = &ioremap_mm;
@@ -244,7 +244,7 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
struct vm_area_struct *vma;
unsigned long addr;
- if (mm->context & CONTEXT_LOW_HPAGES)
+ if (mm->context.low_hpages)
return 0; /* The window is already open */
/* Check no VMAs are in the region */
@@ -281,7 +281,7 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
/* FIXME: do we need to scan for PTEs too? */
- mm->context |= CONTEXT_LOW_HPAGES;
+ mm->context.low_hpages = 1;
/* the context change must make it to memory before the slbia,
* so that further SLB misses do the right thing. */
@@ -589,7 +589,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
}
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
@@ -778,7 +777,7 @@ static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
BUG_ON(hugepte_bad(pte));
BUG_ON(!in_hugepage_area(context, ea));
- vsid = get_vsid(context, ea);
+ vsid = get_vsid(context.id, ea);
va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> LARGE_PAGE_SHIFT;
@@ -794,7 +794,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
if (!ptep)
return;
- vsid = get_vsid(vma->vm_mm->context, ea);
+ vsid = get_vsid(vma->vm_mm->context.id, ea);
tmp = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
@@ -62,7 +62,7 @@ void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
addr = ptep_to_address(ptep);
if (REGION_ID(addr) == USER_REGION_ID)
- context = mm->context;
+ context = mm->context.id;
i = batch->index;
/*
@@ -18,15 +18,25 @@
#ifndef __ASSEMBLY__
- /* Default "unsigned long" context */
- typedef unsigned long mm_context_t;
+ /* Time to allow for more things here */
+ typedef unsigned long mm_context_id_t;
+ typedef struct {
+ mm_context_id_t id;
+ #ifdef CONFIG_HUGETLB_PAGE
+ int low_hpages;
+ #endif
+ } mm_context_t;
#ifdef CONFIG_HUGETLB_PAGE
- #define CONTEXT_LOW_HPAGES (1UL<<63)
+ #define KERNEL_LOW_HPAGES .low_hpages = 0,
#else
- #define CONTEXT_LOW_HPAGES 0
+ #define KERNEL_LOW_HPAGES
#endif
+ #define KERNEL_CONTEXT(ea) ({ \
+ mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES}; \
+ ctx; })
/*
* Hardware Segment Lookaside Buffer Entry
* This structure has been padded out to two 64b doublewords (actual SLBE's are
@@ -52,7 +52,7 @@ struct mmu_context_queue_t {
long head;
long tail;
long size;
- mm_context_t elements[LAST_USER_CONTEXT];
+ mm_context_id_t elements[LAST_USER_CONTEXT];
};
extern struct mmu_context_queue_t mmu_context_queue;
@@ -83,7 +83,6 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
long head;
unsigned long flags;
/* This does the right thing across a fork (I hope) */
- unsigned long low_hpages = mm->context & CONTEXT_LOW_HPAGES;
spin_lock_irqsave(&mmu_context_queue.lock, flags);
@@ -93,8 +92,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
}
head = mmu_context_queue.head;
- mm->context = mmu_context_queue.elements[head];
- mm->context |= low_hpages;
+ mm->context.id = mmu_context_queue.elements[head];
head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
mmu_context_queue.head = head;
@@ -132,8 +130,7 @@ destroy_context(struct mm_struct *mm)
#endif
mmu_context_queue.size++;
- mmu_context_queue.elements[index] =
- mm->context & ~CONTEXT_LOW_HPAGES;
+ mmu_context_queue.elements[index] = mm->context.id;
spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
}
@@ -212,8 +209,6 @@ get_vsid( unsigned long context, unsigned long ea )
{
unsigned long ordinal, vsid;
- context &= ~CONTEXT_LOW_HPAGES;
ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
@@ -32,6 +32,7 @@
/* For 64-bit processes the hugepage range is 1T-1.5T */
#define TASK_HPAGE_BASE (0x0000010000000000UL)
#define TASK_HPAGE_END (0x0000018000000000UL)
+ /* For 32-bit processes the hugepage range is 2-3G */
#define TASK_HPAGE_BASE_32 (0x80000000UL)
#define TASK_HPAGE_END_32 (0xc0000000UL)
@@ -39,7 +40,7 @@
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(addr, len) \
( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
- ((current->mm->context & CONTEXT_LOW_HPAGES) && \
+ (current->mm->context.low_hpages && \
(addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
#define hugetlb_free_pgtables free_pgtables
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -47,7 +48,7 @@
#define in_hugepage_area(context, addr) \
((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) && \
((((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
- (((context) & CONTEXT_LOW_HPAGES) && \
+ ((context).low_hpages && \
(((addr) >= TASK_HPAGE_BASE_32) && ((addr) < TASK_HPAGE_END_32)))))
#else /* !CONFIG_HUGETLB_PAGE */
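Incidentally, the new KERNEL_CONTEXT() helper added above relies on a GCC statement expression (the ({ ... }) form) so that a temporary mm_context_t can be built and returned by value from a macro.  A self-contained sketch of the same idiom follows; the field names come from the patch, but REGION_ID() here is a stub for illustration rather than the real ppc64 definition:

/* Standalone illustration of the KERNEL_CONTEXT() statement-expression idiom. */
#include <stdio.h>

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
	int low_hpages;
} mm_context_t;

#define REGION_ID(ea)		((unsigned long)(ea) >> 60)	/* stub */
#define KERNEL_LOW_HPAGES	.low_hpages = 0,

/* The ({ ... }) block evaluates to its last expression, ctx. */
#define KERNEL_CONTEXT(ea) ({ \
	mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES }; \
	ctx; })

int main(void)
{
	mm_context_t c = KERNEL_CONTEXT(0xC000000000000000UL);

	printf("id=%lu, low_hpages=%d\n", c.id, c.low_hpages);
	return 0;
}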