Commit c102f076 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/vdso: Replace vdso_base by vdso

All architectures except s390 use a void pointer named 'vdso'
to reference the VDSO mapping.

In a subsequent patch, the VDSO data page will be placed in front of
the VDSO text; vdso_base will then no longer point to the VDSO text.

To avoid confusion between vdso_base and the VDSO text, rename
vdso_base to vdso and make it a void __user *.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8e6cefe474aa4ceba028abb729485cd46c140990.1601197618.git.christophe.leroy@csgroup.eu
parent 526a9c4a
...@@ -90,7 +90,7 @@ struct hash_pte { ...@@ -90,7 +90,7 @@ struct hash_pte {
typedef struct { typedef struct {
unsigned long id; unsigned long id;
unsigned long vdso_base; void __user *vdso;
} mm_context_t; } mm_context_t;
void update_bats(void); void update_bats(void);
......
...@@ -111,7 +111,7 @@ typedef struct { ...@@ -111,7 +111,7 @@ typedef struct {
struct hash_mm_context *hash_context; struct hash_mm_context *hash_context;
unsigned long vdso_base; void __user *vdso;
/* /*
* pagetable fragment support * pagetable fragment support
*/ */
......
...@@ -169,7 +169,7 @@ do { \ ...@@ -169,7 +169,7 @@ do { \
NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \ VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\
ARCH_DLINFO_CACHE_GEOMETRY; \ ARCH_DLINFO_CACHE_GEOMETRY; \
} while (0) } while (0)
......
...@@ -262,8 +262,10 @@ extern void arch_exit_mmap(struct mm_struct *mm); ...@@ -262,8 +262,10 @@ extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm, static inline void arch_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
if (start <= mm->context.vdso_base && mm->context.vdso_base < end) unsigned long vdso_base = (unsigned long)mm->context.vdso;
mm->context.vdso_base = 0;
if (start <= vdso_base && vdso_base < end)
mm->context.vdso = NULL;
} }
#ifdef CONFIG_PPC_MEM_KEYS #ifdef CONFIG_PPC_MEM_KEYS
......
...@@ -57,7 +57,7 @@ ...@@ -57,7 +57,7 @@
typedef struct { typedef struct {
unsigned int id; unsigned int id;
unsigned int active; unsigned int active;
unsigned long vdso_base; void __user *vdso;
} mm_context_t; } mm_context_t;
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -108,7 +108,7 @@ extern unsigned int tlb_44x_index; ...@@ -108,7 +108,7 @@ extern unsigned int tlb_44x_index;
typedef struct { typedef struct {
unsigned int id; unsigned int id;
unsigned int active; unsigned int active;
unsigned long vdso_base; void __user *vdso;
} mm_context_t; } mm_context_t;
/* patch sites */ /* patch sites */
......
...@@ -181,7 +181,7 @@ void mmu_pin_tlb(unsigned long top, bool readonly); ...@@ -181,7 +181,7 @@ void mmu_pin_tlb(unsigned long top, bool readonly);
typedef struct { typedef struct {
unsigned int id; unsigned int id;
unsigned int active; unsigned int active;
unsigned long vdso_base; void __user *vdso;
void *pte_frag; void *pte_frag;
} mm_context_t; } mm_context_t;
......
...@@ -238,7 +238,7 @@ extern unsigned int tlbcam_index; ...@@ -238,7 +238,7 @@ extern unsigned int tlbcam_index;
typedef struct { typedef struct {
unsigned int id; unsigned int id;
unsigned int active; unsigned int active;
unsigned long vdso_base; void __user *vdso;
} mm_context_t; } mm_context_t;
/* Page size definitions, common between 32 and 64-bit /* Page size definitions, common between 32 and 64-bit
......
...@@ -801,8 +801,8 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, ...@@ -801,8 +801,8 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
} }
/* Save user registers on the stack */ /* Save user registers on the stack */
if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) { if (vdso32_rt_sigtramp && tsk->mm->context.vdso) {
tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp; tramp = (unsigned long)tsk->mm->context.vdso + vdso32_rt_sigtramp;
} else { } else {
tramp = (unsigned long)mctx->mc_pad; tramp = (unsigned long)mctx->mc_pad;
/* Set up the sigreturn trampoline: li r0,sigret; sc */ /* Set up the sigreturn trampoline: li r0,sigret; sc */
...@@ -901,8 +901,8 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, ...@@ -901,8 +901,8 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
else else
unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed); unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
if (vdso32_sigtramp && tsk->mm->context.vdso_base) { if (vdso32_sigtramp && tsk->mm->context.vdso) {
tramp = tsk->mm->context.vdso_base + vdso32_sigtramp; tramp = (unsigned long)tsk->mm->context.vdso + vdso32_sigtramp;
} else { } else {
tramp = (unsigned long)mctx->mc_pad; tramp = (unsigned long)mctx->mc_pad;
/* Set up the sigreturn trampoline: li r0,sigret; sc */ /* Set up the sigreturn trampoline: li r0,sigret; sc */
......
...@@ -854,8 +854,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, ...@@ -854,8 +854,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
tsk->thread.fp_state.fpscr = 0; tsk->thread.fp_state.fpscr = 0;
/* Set up to return from userspace. */ /* Set up to return from userspace. */
if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) { if (vdso64_rt_sigtramp && tsk->mm->context.vdso) {
regs->nip = tsk->mm->context.vdso_base + vdso64_rt_sigtramp; regs->nip = (unsigned long)tsk->mm->context.vdso + vdso64_rt_sigtramp;
} else { } else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err) if (err)
......
...@@ -123,7 +123,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc ...@@ -123,7 +123,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
if (new_size != text_size + PAGE_SIZE) if (new_size != text_size + PAGE_SIZE)
return -EINVAL; return -EINVAL;
current->mm->context.vdso_base = new_vma->vm_start; current->mm->context.vdso = (void __user *)new_vma->vm_start;
return 0; return 0;
} }
...@@ -198,7 +198,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int ...@@ -198,7 +198,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
* install_special_mapping or the perf counter mmap tracking code * install_special_mapping or the perf counter mmap tracking code
* will fail to recognise it as a vDSO. * will fail to recognise it as a vDSO.
*/ */
current->mm->context.vdso_base = vdso_base; mm->context.vdso = (void __user *)vdso_base;
/* /*
* our vma flags don't have VM_WRITE so by default, the process isn't * our vma flags don't have VM_WRITE so by default, the process isn't
...@@ -221,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -221,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
int rc; int rc;
mm->context.vdso_base = 0; mm->context.vdso = NULL;
if (!vdso_ready) if (!vdso_ready)
return 0; return 0;
...@@ -231,7 +231,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -231,7 +231,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
rc = __arch_setup_additional_pages(bprm, uses_interp); rc = __arch_setup_additional_pages(bprm, uses_interp);
if (rc) if (rc)
mm->context.vdso_base = 0; mm->context.vdso = NULL;
mmap_write_unlock(mm); mmap_write_unlock(mm);
return rc; return rc;
......
...@@ -59,8 +59,8 @@ static int is_sigreturn_32_address(unsigned int nip, unsigned int fp) ...@@ -59,8 +59,8 @@ static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{ {
if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad)) if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
return 1; return 1;
if (vdso32_sigtramp && current->mm->context.vdso_base && if (vdso32_sigtramp && current->mm->context.vdso &&
nip == current->mm->context.vdso_base + vdso32_sigtramp) nip == (unsigned long)current->mm->context.vdso + vdso32_sigtramp)
return 1; return 1;
return 0; return 0;
} }
...@@ -70,8 +70,8 @@ static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp) ...@@ -70,8 +70,8 @@ static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
if (nip == fp + offsetof(struct rt_signal_frame_32, if (nip == fp + offsetof(struct rt_signal_frame_32,
uc.uc_mcontext.mc_pad)) uc.uc_mcontext.mc_pad))
return 1; return 1;
if (vdso32_rt_sigtramp && current->mm->context.vdso_base && if (vdso32_rt_sigtramp && current->mm->context.vdso &&
nip == current->mm->context.vdso_base + vdso32_rt_sigtramp) nip == (unsigned long)current->mm->context.vdso + vdso32_rt_sigtramp)
return 1; return 1;
return 0; return 0;
} }
......
...@@ -68,8 +68,8 @@ static int is_sigreturn_64_address(unsigned long nip, unsigned long fp) ...@@ -68,8 +68,8 @@ static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{ {
if (nip == fp + offsetof(struct signal_frame_64, tramp)) if (nip == fp + offsetof(struct signal_frame_64, tramp))
return 1; return 1;
if (vdso64_rt_sigtramp && current->mm->context.vdso_base && if (vdso64_rt_sigtramp && current->mm->context.vdso &&
nip == current->mm->context.vdso_base + vdso64_rt_sigtramp) nip == (unsigned long)current->mm->context.vdso + vdso64_rt_sigtramp)
return 1; return 1;
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment