Commit 4722476b authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: mm_context.addr_limit is only used on hash

Radix keeps no meaningful state in addr_limit, so remove it from radix
code and rename to slb_addr_limit to make it clear it applies to hash
only.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 85e3f1ad
...@@ -606,7 +606,7 @@ extern void slb_set_size(u16 size); ...@@ -606,7 +606,7 @@ extern void slb_set_size(u16 size);
/* 4 bits per slice and we have one slice per 1TB */ /* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41) #define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41)
#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.addr_limit >> 41) #define TASK_SLICE_ARRAY_SZ(x) ((x)->context.slb_addr_limit >> 41)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
......
...@@ -92,7 +92,7 @@ typedef struct { ...@@ -92,7 +92,7 @@ typedef struct {
#ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC_MM_SLICES
u64 low_slices_psize; /* SLB page size encodings */ u64 low_slices_psize; /* SLB page size encodings */
unsigned char high_slices_psize[SLICE_ARRAY_SIZE]; unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
unsigned long addr_limit; unsigned long slb_addr_limit;
#else #else
u16 sllp; /* SLB page size encoding */ u16 sllp; /* SLB page size encoding */
#endif #endif
......
...@@ -143,7 +143,7 @@ struct paca_struct { ...@@ -143,7 +143,7 @@ struct paca_struct {
#ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC_MM_SLICES
u64 mm_ctx_low_slices_psize; u64 mm_ctx_low_slices_psize;
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE]; unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
unsigned long addr_limit; unsigned long mm_ctx_slb_addr_limit;
#else #else
u16 mm_ctx_user_psize; u16 mm_ctx_user_psize;
u16 mm_ctx_sllp; u16 mm_ctx_sllp;
......
...@@ -185,7 +185,7 @@ int main(void) ...@@ -185,7 +185,7 @@ int main(void)
#ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC_MM_SLICES
OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize); OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize); OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
DEFINE(PACA_ADDR_LIMIT, offsetof(struct paca_struct, addr_limit)); OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */ #endif /* CONFIG_PPC_MM_SLICES */
#endif #endif
......
...@@ -262,8 +262,8 @@ void copy_mm_to_paca(struct mm_struct *mm) ...@@ -262,8 +262,8 @@ void copy_mm_to_paca(struct mm_struct *mm)
get_paca()->mm_ctx_id = context->id; get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC_MM_SLICES
VM_BUG_ON(!mm->context.addr_limit); VM_BUG_ON(!mm->context.slb_addr_limit);
get_paca()->addr_limit = mm->context.addr_limit; get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize; get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
memcpy(&get_paca()->mm_ctx_high_slices_psize, memcpy(&get_paca()->mm_ctx_high_slices_psize,
&context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm)); &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
......
...@@ -898,7 +898,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -898,7 +898,8 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC_MM_SLICES
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64; if (!radix_enabled())
init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else #else
#error "context.addr_limit not initialized." #error "context.addr_limit not initialized."
#endif #endif
......
...@@ -60,16 +60,10 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ...@@ -60,16 +60,10 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return -EINVAL; return -EINVAL;
if (len > high_limit) if (len > high_limit)
return -ENOMEM; return -ENOMEM;
if (fixed) { if (fixed) {
if (addr > high_limit - len) if (addr > high_limit - len)
return -ENOMEM; return -ENOMEM;
}
if (unlikely(addr > mm->context.addr_limit &&
mm->context.addr_limit != TASK_SIZE))
mm->context.addr_limit = TASK_SIZE;
if (fixed) {
if (prepare_hugepage_range(file, addr, len)) if (prepare_hugepage_range(file, addr, len))
return -EINVAL; return -EINVAL;
return addr; return addr;
......
...@@ -116,17 +116,12 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, ...@@ -116,17 +116,12 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (len > high_limit) if (len > high_limit)
return -ENOMEM; return -ENOMEM;
if (fixed) { if (fixed) {
if (addr > high_limit - len) if (addr > high_limit - len)
return -ENOMEM; return -ENOMEM;
}
if (unlikely(addr > mm->context.addr_limit &&
mm->context.addr_limit != TASK_SIZE))
mm->context.addr_limit = TASK_SIZE;
if (fixed)
return addr; return addr;
}
if (addr) { if (addr) {
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
...@@ -165,17 +160,12 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, ...@@ -165,17 +160,12 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
if (len > high_limit) if (len > high_limit)
return -ENOMEM; return -ENOMEM;
if (fixed) { if (fixed) {
if (addr > high_limit - len) if (addr > high_limit - len)
return -ENOMEM; return -ENOMEM;
}
if (unlikely(addr > mm->context.addr_limit &&
mm->context.addr_limit != TASK_SIZE))
mm->context.addr_limit = TASK_SIZE;
if (fixed)
return addr; return addr;
}
if (addr) { if (addr) {
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
......
...@@ -96,8 +96,8 @@ static int hash__init_new_context(struct mm_struct *mm) ...@@ -96,8 +96,8 @@ static int hash__init_new_context(struct mm_struct *mm)
* In the case of exec, use the default limit, * In the case of exec, use the default limit,
* otherwise inherit it from the mm we are duplicating. * otherwise inherit it from the mm we are duplicating.
*/ */
if (!mm->context.addr_limit) if (!mm->context.slb_addr_limit)
mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
/* /*
* The old code would re-promote on fork, we don't do that when using * The old code would re-promote on fork, we don't do that when using
......
...@@ -167,7 +167,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ...@@ -167,7 +167,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
/* /*
* user space make sure we are within the allowed limit * user space make sure we are within the allowed limit
*/ */
ld r11,PACA_ADDR_LIMIT(r13) ld r11,PACA_SLB_ADDR_LIMIT(r13)
cmpld r3,r11 cmpld r3,r11
bge- 8f bge- 8f
......
...@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, ...@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
if ((mm->context.addr_limit - len) < addr) if ((mm->context.slb_addr_limit - len) < addr)
return 0; return 0;
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
return (!vma || (addr + len) <= vm_start_gap(vma)); return (!vma || (addr + len) <= vm_start_gap(vma));
...@@ -133,10 +133,10 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret) ...@@ -133,10 +133,10 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
if (!slice_low_has_vma(mm, i)) if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i; ret->low_slices |= 1u << i;
if (mm->context.addr_limit <= SLICE_LOW_TOP) if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
return; return;
for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
if (!slice_high_has_vma(mm, i)) if (!slice_high_has_vma(mm, i))
__set_bit(i, ret->high_slices); __set_bit(i, ret->high_slices);
} }
...@@ -157,7 +157,7 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma ...@@ -157,7 +157,7 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
ret->low_slices |= 1u << i; ret->low_slices |= 1u << i;
hpsizes = mm->context.high_slices_psize; hpsizes = mm->context.high_slices_psize;
for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) { for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
mask_index = i & 0x1; mask_index = i & 0x1;
index = i >> 1; index = i >> 1;
if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize) if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
...@@ -169,7 +169,7 @@ static int slice_check_fit(struct mm_struct *mm, ...@@ -169,7 +169,7 @@ static int slice_check_fit(struct mm_struct *mm,
struct slice_mask mask, struct slice_mask available) struct slice_mask mask, struct slice_mask available)
{ {
DECLARE_BITMAP(result, SLICE_NUM_HIGH); DECLARE_BITMAP(result, SLICE_NUM_HIGH);
unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit); unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
bitmap_and(result, mask.high_slices, bitmap_and(result, mask.high_slices,
available.high_slices, slice_count); available.high_slices, slice_count);
...@@ -219,7 +219,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz ...@@ -219,7 +219,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
mm->context.low_slices_psize = lpsizes; mm->context.low_slices_psize = lpsizes;
hpsizes = mm->context.high_slices_psize; hpsizes = mm->context.high_slices_psize;
for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) { for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
mask_index = i & 0x1; mask_index = i & 0x1;
index = i >> 1; index = i >> 1;
if (test_bit(i, mask.high_slices)) if (test_bit(i, mask.high_slices))
...@@ -329,8 +329,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, ...@@ -329,8 +329,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* Only for that request for which high_limit is above * Only for that request for which high_limit is above
* DEFAULT_MAP_WINDOW we should apply this. * DEFAULT_MAP_WINDOW we should apply this.
*/ */
if (high_limit > DEFAULT_MAP_WINDOW) if (high_limit > DEFAULT_MAP_WINDOW)
addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW; addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
while (addr > PAGE_SIZE) { while (addr > PAGE_SIZE) {
info.high_limit = addr; info.high_limit = addr;
...@@ -432,8 +432,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, ...@@ -432,8 +432,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
return -ENOMEM; return -ENOMEM;
} }
if (high_limit > mm->context.addr_limit) { if (high_limit > mm->context.slb_addr_limit) {
mm->context.addr_limit = high_limit; mm->context.slb_addr_limit = high_limit;
on_each_cpu(slice_flush_segments, mm, 1); on_each_cpu(slice_flush_segments, mm, 1);
} }
...@@ -452,7 +452,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, ...@@ -452,7 +452,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Sanity checks */ /* Sanity checks */
BUG_ON(mm->task_size == 0); BUG_ON(mm->task_size == 0);
BUG_ON(mm->context.addr_limit == 0); BUG_ON(mm->context.slb_addr_limit == 0);
VM_BUG_ON(radix_enabled()); VM_BUG_ON(radix_enabled());
slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment