Commit 6916ce3f authored by Ralf Baechle, committed by Ingo Molnar

sched, MIPS: Get rid of finish_arch_switch()

MIPS was using finish_arch_switch() as a hook to restore and initialize
CPU context for all threads, even newly created kernel and user threads.
This is, however, entirely solvable within switch_to(), so get rid of
finish_arch_switch(), which is in the way of scheduler cleanups.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7baa7aec
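
Background on why the restore work now sits before resume(): a newly created task does not return through switch_to() the way an already-running task does. It begins life at ret_from_kernel_thread or ret_from_fork, which go through schedule_tail() and finish_task_switch() (the path from which the finish_arch_switch() hook used to be called), so anything placed after resume() inside switch_to() would never run for a new thread. The outline below is a simplified, assumed sketch of that ordering: illustrative C only, not the actual kernel/sched/core.c code, and the helper name sketch_context_switch() is made up.

/*
 * Simplified sketch (assumption, for illustration): roughly how the
 * scheduler core drove the two hooks around the time of this commit.
 */
static void sketch_context_switch(struct task_struct *prev,
				  struct task_struct *next)
{
	/*
	 * "current" is still prev here.  With this patch, switch_to()
	 * sets up DSP, userlocal and (non-lazy) COP2 state for next at
	 * this point, i.e. before resume() is reached inside the macro.
	 */
	switch_to(prev, next, prev);

	/*
	 * Only a task that was previously switched out resumes here.
	 * A brand new task instead starts at ret_from_kernel_thread /
	 * ret_from_fork, which call schedule_tail() and then
	 * finish_task_switch(); that is why the old code relied on the
	 * finish_arch_switch() hook to run even for new threads.
	 */
	finish_task_switch(prev);
}
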
@@ -83,45 +83,43 @@ do { if (cpu_has_rw_llb) { \
 	} \
 } while (0)
 
+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
 #define switch_to(prev, next, last) \
 do { \
-	u32 __c0_stat; \
 	s32 __fpsave = FP_SAVE_NONE; \
 	__mips_mt_fpaff_switch_to(prev); \
-	if (cpu_has_dsp) \
+	if (cpu_has_dsp) { \
 		__save_dsp(prev); \
-	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \
-		if (cop2_lazy_restore) \
-			KSTK_STATUS(prev) &= ~ST0_CU2; \
-		__c0_stat = read_c0_status(); \
-		write_c0_status(__c0_stat | ST0_CU2); \
-		cop2_save(prev); \
-		write_c0_status(__c0_stat & ~ST0_CU2); \
+		__restore_dsp(next); \
+	} \
+	if (cop2_present) { \
+		set_c0_status(ST0_CU2); \
+		if ((KSTK_STATUS(prev) & ST0_CU2)) { \
+			if (cop2_lazy_restore) \
+				KSTK_STATUS(prev) &= ~ST0_CU2; \
+			cop2_save(prev); \
+		} \
+		if (KSTK_STATUS(next) & ST0_CU2 && \
+		    !cop2_lazy_restore) { \
+			cop2_restore(next); \
+		} \
+		clear_c0_status(ST0_CU2); \
 	} \
 	__clear_software_ll_bit(); \
 	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
 		__fpsave = FP_SAVE_SCALAR; \
 	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
 		__fpsave = FP_SAVE_VECTOR; \
-	(last) = resume(prev, next, task_thread_info(next), __fpsave); \
-} while (0)
-
-#define finish_arch_switch(prev) \
-do { \
-	u32 __c0_stat; \
-	if (cop2_present && !cop2_lazy_restore && \
-			(KSTK_STATUS(current) & ST0_CU2)) { \
-		__c0_stat = read_c0_status(); \
-		write_c0_status(__c0_stat | ST0_CU2); \
-		cop2_restore(current); \
-		write_c0_status(__c0_stat & ~ST0_CU2); \
-	} \
-	if (cpu_has_dsp) \
-		__restore_dsp(current); \
 	if (cpu_has_userlocal) \
-		write_c0_userlocal(current_thread_info()->tp_value); \
+		write_c0_userlocal(task_thread_info(next)->tp_value); \
 	__restore_watch(); \
 	disable_msa(); \
+	(last) = resume(prev, next, task_thread_info(next), __fpsave); \
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */