Commit 313ce674 authored by Chris Metcalf

arch/tile: support TIF_NOTIFY_RESUME

This support is required for CONFIG_KEYS, the NFSv4 in-kernel DNS resolver, etc.
The change is slightly more complex than the minimal one, since
I took advantage of having to go into the assembly code anyway
to move a bunch of logic into C: specifically, the schedule(),
do_async_page_fault(), do_signal(), and single_step_once() support,
in addition to the TIF_NOTIFY_RESUME support itself.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 93013a0f
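
(Background for reviewers: TIF_NOTIFY_RESUME is the generic "run a callback just
before returning to userspace" flag; subsystems such as CONFIG_KEYS arm it with
set_notify_resume() from <linux/tracehook.h>. A minimal sketch of the producer
side follows; poke_task() is a hypothetical name for illustration, not part of
this commit.)

    #include <linux/sched.h>
    #include <linux/tracehook.h>

    /* Hypothetical example, not from this commit: ask @tsk to run its
     * notify-resume callbacks the next time it returns to userspace.
     * set_notify_resume() sets TIF_NOTIFY_RESUME on the task, which the
     * new do_work_pending() below consumes via tracehook_notify_resume().
     */
    static void poke_task(struct task_struct *tsk)
    {
            set_notify_resume(tsk);
    }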
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -215,6 +215,8 @@ static inline void release_thread(struct task_struct *dead_task)
 extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
+extern int do_work_pending(struct pt_regs *regs, u32 flags);
+
 /*
  * Return saved (kernel) PC of a blocked thread.
 ...
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -125,6 +125,7 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
 #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SECCOMP		6	/* secure computing */
 #define TIF_MEMDIE		7	/* OOM killer at work */
+#define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
@@ -134,10 +135,12 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_MEMDIE		(1<<TIF_MEMDIE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 
 /* Work to do on any return to user space. */
 #define _TIF_ALLWORK_MASK \
-	(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB)
+	(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
+	 _TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
 
 /*
  * Thread-synchronous status.
 ...
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -15,10 +15,14 @@
 #ifndef _ASM_TILE_TRAPS_H
 #define _ASM_TILE_TRAPS_H
 
+#include <arch/chip.h>
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *, int fault_num,
 		   unsigned long address, unsigned long write);
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
 void do_async_page_fault(struct pt_regs *);
+#endif
 
 #ifndef __tilegx__
 /*
 ...
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -851,14 +851,27 @@ STD_ENTRY(interrupt_return)
 	/* Check to see if there is any work to do before returning to user. */
 	{
 	 addi	r29, r32, THREAD_INFO_FLAGS_OFFSET
-	 moveli	r28, lo16(_TIF_ALLWORK_MASK)
+	 moveli	r1, lo16(_TIF_ALLWORK_MASK)
 	}
 	{
 	 lw	r29, r29
-	 auli	r28, r28, ha16(_TIF_ALLWORK_MASK)
+	 auli	r1, r1, ha16(_TIF_ALLWORK_MASK)
 	}
-	and	r28, r29, r28
-	bnz	r28, .Lwork_pending
+	and	r1, r29, r1
+	bzt	r1, .Lrestore_all
+
+	/*
+	 * Make sure we have all the registers saved for signal
+	 * handling or single-step.  Call out to C code to figure out
+	 * exactly what we need to do for each flag bit, then if
+	 * necessary, reload the flags and recheck.
+	 */
+	push_extra_callee_saves r0
+	{
+	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
+	 jal	do_work_pending
+	}
+	bnz	r0, .Lresume_userspace
 
 	/*
 	 * In the NMI case we
 ...
@@ -1099,99 +1112,6 @@ STD_ENTRY(interrupt_return)
 	pop_reg r50
 	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
 	j .Lcontinue_restore_regs
-
-.Lwork_pending:
-	/* Mask the reschedule flag */
-	andi	r28, r29, _TIF_NEED_RESCHED
-
-	{
-	 /*
-	  * If the NEED_RESCHED flag is set, we call schedule(), which
-	  * may drop this context right here and go do something else.
-	  * On return, jump back to .Lresume_userspace and recheck.
-	  */
-	 bz	r28, .Lasync_tlb
-
-	 /* Mask the async-tlb flag */
-	 andi	r28, r29, _TIF_ASYNC_TLB
-	}
-
-	jal	schedule
-	FEEDBACK_REENTER(interrupt_return)
-
-	/* Reload the flags and check again */
-	j	.Lresume_userspace
-
-.Lasync_tlb:
-	{
-	 bz	r28, .Lneed_sigpending
-
-	 /* Mask the sigpending flag */
-	 andi	r28, r29, _TIF_SIGPENDING
-	}
-
-	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	jal	do_async_page_fault
-	FEEDBACK_REENTER(interrupt_return)
-
-	/*
-	 * Go restart the "resume userspace" process.  We may have
-	 * fired a signal, and we need to disable interrupts again.
-	 */
-	j	.Lresume_userspace
-
-.Lneed_sigpending:
-	/*
-	 * At this point we are either doing signal handling or single-step,
-	 * so either way make sure we have all the registers saved.
-	 */
-	push_extra_callee_saves r0
-
-	{
-	 /* If no signal pending, skip to singlestep check */
-	 bz	r28, .Lneed_singlestep
-
-	 /* Mask the singlestep flag */
-	 andi	r28, r29, _TIF_SINGLESTEP
-	}
-
-	jal	do_signal
-	FEEDBACK_REENTER(interrupt_return)
-
-	/* Reload the flags and check again */
-	j	.Lresume_userspace
-
-.Lneed_singlestep:
-	{
-	 /* Get a pointer to the EX1 field */
-	 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
-
-	 /* If we get here, our bit must be set. */
-	 bz	r28, .Lwork_confusion
-	}
-	/* If we are in priv mode, don't single step */
-	lw	r28, r29
-	andi	r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK	/* mask off ICS */
-	bnz	r28, .Lrestore_all
-
-	/* Allow interrupts within the single step code */
-	TRACE_IRQS_ON	/* Note: clobbers registers r0-r29 */
-	IRQ_ENABLE(r20, r21)
-
-	/* try to single-step the current instruction */
-	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	jal	single_step_once
-	FEEDBACK_REENTER(interrupt_return)
-
-	/* Re-disable interrupts.  TRACE_IRQS_OFF in .Lrestore_all. */
-	IRQ_DISABLE(r20,r21)
-
-	j	.Lrestore_all
-
-.Lwork_confusion:
-	move	r0, r28
-	panic	"thread_info allwork flags unhandled on userspace resume: %#x"
-
 STD_ENDPROC(interrupt_return)
 
 /*
 ...
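
(The net effect of the two hunks above, restated as C-style pseudocode: the old
.Lwork_pending branch chain becomes a loop between the assembly and the new C
helper. This is a hedged sketch of the control flow, not code from the commit.)

    /* Sketch of the new interrupt_return work loop (entered with IRQs off). */
    for (;;) {
            u32 flags = current_thread_info()->flags;
            if (!(flags & _TIF_ALLWORK_MASK))
                    break;                  /* .Lrestore_all: back to user */
            /* push_extra_callee_saves: full register state for the C code */
            if (!do_work_pending(regs, flags))
                    break;                  /* single-step case: no recheck */
            /* .Lresume_userspace: re-disable IRQs, reload flags, retry */
    }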
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -25,10 +25,13 @@
 #include <linux/hardirq.h>
 #include <linux/syscalls.h>
 #include <linux/kernel.h>
+#include <linux/tracehook.h>
+#include <linux/signal.h>
 #include <asm/system.h>
 #include <asm/stack.h>
 #include <asm/homecache.h>
 #include <asm/syscalls.h>
+#include <asm/traps.h>
 #ifdef CONFIG_HARDWALL
 #include <asm/hardwall.h>
 #endif
@@ -546,6 +549,51 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	return __switch_to(prev, next, next_current_ksp0(next));
 }
 
+/*
+ * This routine is called on return from interrupt if any of the
+ * TIF_WORK_MASK flags are set in thread_info->flags.  It is
+ * entered with interrupts disabled so we don't miss an event
+ * that modified the thread_info flags.  If any flag is set, we
+ * handle it and return, and the calling assembly code will
+ * re-disable interrupts, reload the thread flags, and call back
+ * if more flags need to be handled.
+ *
+ * We return whether we need to check the thread_info flags again
+ * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
+ * important that it be tested last, and then claim that we don't
+ * need to recheck the flags.
+ */
+int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+{
+	if (thread_info_flags & _TIF_NEED_RESCHED) {
+		schedule();
+		return 1;
+	}
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
+	if (thread_info_flags & _TIF_ASYNC_TLB) {
+		do_async_page_fault(regs);
+		return 1;
+	}
+#endif
+	if (thread_info_flags & _TIF_SIGPENDING) {
+		do_signal(regs);
+		return 1;
+	}
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+		return 1;
+	}
+	if (thread_info_flags & _TIF_SINGLESTEP) {
+		if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
+			single_step_once(regs);
+		return 0;
+	}
+	panic("work_pending: bad flags %#x\n", thread_info_flags);
+}
+
 /* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 		void __user *, parent_tidptr, void __user *, child_tidptr,
 ...
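
(Why the comment above insists that TIF_SINGLESTEP be tested last and return 0:
do_work_pending() never clears that flag, so any ordering that returned 1 for it
would make the assembly reload the still-set flags and call back forever. A
hedged illustration of the broken reordering, not code from the commit:)

    /* BROKEN variant, for illustration only -- do not do this: */
    if (thread_info_flags & _TIF_SINGLESTEP) {
            single_step_once(regs);
            return 1;       /* bug: flag never cleared => infinite recheck */
    }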
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -318,6 +318,14 @@ void single_step_once(struct pt_regs *regs)
 		"	.popsection\n"
 	);
 
+	/*
+	 * Enable interrupts here to allow touching userspace and the like.
+	 * The callers expect this: do_trap() already has interrupts
+	 * enabled, and do_work_pending() handles functions that enable
+	 * interrupts internally.
+	 */
+	local_irq_enable();
+
 	if (state == NULL) {
 		/* allocate a page of writable, executable memory */
 		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
 ...
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -732,6 +732,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 		panic("Bad fault number %d in do_page_fault", fault_num);
 	}
 
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
 	if (EX1_PL(regs->ex1) != USER_PL) {
 		struct async_tlb *async;
 		switch (fault_num) {
 ...
@@ -775,6 +776,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 			return;
 		}
 	}
+#endif
 
 	handle_page_fault(regs, fault_num, is_page_fault, address, write);
 }
 ...
@@ -801,8 +803,6 @@ static void handle_async_page_fault(struct pt_regs *regs,
 				  async->address, async->is_write);
 	}
 }
-#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
-
 
 /*
  * This routine effectively re-issues asynchronous page faults
 ...
@@ -824,6 +824,8 @@ void do_async_page_fault(struct pt_regs *regs)
 	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
 #endif
 }
+#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
+
 
 void vmalloc_sync_all(void)
 {
 ...