Commit 51889d22 authored by Petr Mladek

Merge branch 'rework/kthreads' into for-linus

parents 93d17c1c 07a22b61
@@ -581,7 +581,6 @@ void __handle_sysrq(int key, bool check_mask)
 rcu_sysrq_start();
 rcu_read_lock();
-printk_prefer_direct_enter();
 /*
  * Raise the apparent loglevel to maximum so that the sysrq header
  * is shown to provide the user with positive feedback. We do not
@@ -623,7 +622,6 @@ void __handle_sysrq(int key, bool check_mask)
 pr_cont("\n");
 console_loglevel = orig_log_level;
 }
-printk_prefer_direct_exit();
 rcu_read_unlock();
 rcu_sysrq_end();
...
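
For context: printk_prefer_direct_enter()/exit() came from the printk kthread rework that this merge reverts. A minimal sketch of the calling convention being deleted here (illustrative only; the message below is a made-up example):

	printk_prefer_direct_enter();	/* ask printk to print directly, bypassing the printer kthreads */
	pr_emerg("sysrq: emergency state\n");	/* urgent output reaches the consoles immediately */
	printk_prefer_direct_exit();	/* restore normal deferred printing */

The pair maintained a counter rather than a flag, so bracketed sections could nest across the call paths seen throughout this diff.
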
@@ -16,7 +16,6 @@
 #include <linux/atomic.h>
 #include <linux/types.h>
-#include <linux/mutex.h>
 struct vc_data;
 struct console_font_op;
@@ -154,22 +153,6 @@ struct console {
 uint ospeed;
 u64 seq;
 unsigned long dropped;
-struct task_struct *thread;
-bool blocked;
-
-/*
- * The per-console lock is used by printing kthreads to synchronize
- * this console with callers of console_lock(). This is necessary in
- * order to allow printing kthreads to run in parallel to each other,
- * while each safely accesses the @blocked field and synchronizes
- * against direct printing via console_lock/console_unlock.
- *
- * Note: For synchronizing against direct printing via
- * console_trylock/console_unlock, see the static global
- * variable @console_kthreads_active.
- */
-struct mutex lock;
 void *data;
 struct console *next;
 };
...
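
The removed comment documents the locking scheme this merge backs out. A simplified reconstruction of how a per-console printer kthread was intended to use the @lock/@blocked pair (a sketch, not code from the tree; the helper name is invented):

	/* Sketch: a kthread checks whether direct printing has claimed its console. */
	static bool printer_may_print(struct console *con)
	{
		mutex_lock(&con->lock);
		if (con->blocked) {		/* a console_lock() holder owns this console */
			mutex_unlock(&con->lock);
			return false;		/* yield to direct printing */
		}
		return true;			/* print under con->lock, unlock afterwards */
	}

Because each kthread took only its own console's mutex, kthreads for different consoles could print in parallel, which was the point of the design.
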
@@ -169,11 +169,7 @@ extern void __printk_safe_exit(void);
 #define printk_deferred_enter __printk_safe_enter
 #define printk_deferred_exit __printk_safe_exit
-extern void printk_prefer_direct_enter(void);
-extern void printk_prefer_direct_exit(void);
-
 extern bool pr_flush(int timeout_ms, bool reset_on_progress);
-extern void try_block_console_kthreads(int timeout_ms);
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -225,23 +221,11 @@ static inline void printk_deferred_exit(void)
 {
 }
-static inline void printk_prefer_direct_enter(void)
-{
-}
-
-static inline void printk_prefer_direct_exit(void)
-{
-}
-
 static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
 {
 	return true;
 }
-static inline void try_block_console_kthreads(int timeout_ms)
-{
-}
 static inline int printk_ratelimit(void)
 {
 	return 0;
...
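
Both hunks follow this header's standard CONFIG_PRINTK pattern: a real declaration in one branch and an empty static inline stub in the other, so callers never need #ifdef guards. Schematically (using pr_flush(), which survives the revert):

	#ifdef CONFIG_PRINTK
	extern bool pr_flush(int timeout_ms, bool reset_on_progress);
	#else
	static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
	{
		return true;	/* nothing to flush when printk is compiled out */
	}
	#endif

This is why each reverted symbol is deleted twice: once from the extern block and once from the stub block.
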
@@ -127,8 +127,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * complain:
  */
 if (sysctl_hung_task_warnings) {
-printk_prefer_direct_enter();
 if (sysctl_hung_task_warnings > 0)
 	sysctl_hung_task_warnings--;
 pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
@@ -144,8 +142,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 if (sysctl_hung_task_all_cpu_backtrace)
 	hung_task_show_all_bt = true;
-printk_prefer_direct_exit();
 }
 touch_nmi_watchdog();
@@ -208,17 +204,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 }
 unlock:
 rcu_read_unlock();
-if (hung_task_show_lock) {
-	printk_prefer_direct_enter();
+if (hung_task_show_lock)
 	debug_show_all_locks();
-	printk_prefer_direct_exit();
-}
 if (hung_task_show_all_bt) {
 	hung_task_show_all_bt = false;
-	printk_prefer_direct_enter();
 	trigger_all_cpu_backtrace();
-	printk_prefer_direct_exit();
 }
 if (hung_task_call_panic)
...
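
Note the brace change in the last hunk: with the enter/exit pair gone, the if body is back to a single statement, so kernel coding style drops the braces:

	if (hung_task_show_lock)
		debug_show_all_locks();
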
@@ -297,7 +297,6 @@ void panic(const char *fmt, ...)
  * unfortunately means it may not be hardened to work in a
  * panic situation.
  */
-try_block_console_kthreads(10000);
 smp_send_stop();
 } else {
 /*
@@ -305,7 +304,6 @@ void panic(const char *fmt, ...)
  * kmsg_dump, we will need architecture-dependent extra
  * work in addition to stopping other CPUs.
  */
-try_block_console_kthreads(10000);
 crash_smp_send_stop();
 }
@@ -605,8 +603,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 {
 disable_trace_on_warning();
-printk_prefer_direct_enter();
 if (file)
 	pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
 		raw_smp_processor_id(), current->pid, file, line,
@@ -636,8 +632,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 /* Just a warning, don't kill lockdep. */
 add_taint(taint, LOCKDEP_STILL_OK);
-printk_prefer_direct_exit();
 }
 #ifndef __WARN_FLAGS
...
@@ -20,8 +20,6 @@ enum printk_info_flags {
 LOG_CONT = 8, /* text is a fragment of a continuation line */
 };
-extern bool block_console_kthreads;
 __printf(4, 0)
 int vprintk_store(int facility, int level,
 		  const struct dev_printk_info *dev_info,
...
This diff is collapsed.
@@ -8,9 +8,7 @@
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/printk.h>
-#include <linux/console.h>
 #include <linux/kprobes.h>
-#include <linux/delay.h>
 #include "internal.h"
@@ -52,33 +50,3 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	return vprintk_default(fmt, args);
 }
 EXPORT_SYMBOL(vprintk);
-
-/**
- * try_block_console_kthreads() - Try to block console kthreads and
- *	make the global console_lock() available
- *
- * @timeout_ms: The maximum time (in ms) to wait.
- *
- * Prevent console kthreads from starting to process new messages. Wait
- * until the global console_lock() becomes available.
- *
- * Context: Can be called in any context.
- */
-void try_block_console_kthreads(int timeout_ms)
-{
-	block_console_kthreads = true;
-
-	/* Do not wait when the console lock could not be safely taken. */
-	if (this_cpu_read(printk_context) || in_nmi())
-		return;
-
-	while (timeout_ms > 0) {
-		if (console_trylock()) {
-			console_unlock();
-			return;
-		}
-
-		udelay(1000);
-		timeout_ms -= 1;
-	}
-}
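
The deleted helper was the shutdown-path guard added by the now-reverted kthread series: set the global block_console_kthreads flag, then poll console_trylock() roughly once per millisecond until the lock can be taken or the budget runs out. Its callers, visible in the panic() and reboot hunks of this merge, passed a fixed 10-second budget:

	/* Give printing kthreads up to 10 s to stop, then proceed regardless. */
	try_block_console_kthreads(10000);
	smp_send_stop();	/* next step on the panic() path shown earlier */

Skipping the wait in printk-recursion and NMI context kept the helper callable from any context, at the cost of being only best effort.
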
@@ -647,7 +647,6 @@ static void print_cpu_stall(unsigned long gps)
  * See Documentation/RCU/stallwarn.rst for info on how to debug
  * RCU CPU stall warnings.
  */
-printk_prefer_direct_enter();
 trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
 pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
 raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
@@ -685,7 +684,6 @@ static void print_cpu_stall(unsigned long gps)
  */
 set_tsk_need_resched(current);
 set_preempt_need_resched();
-printk_prefer_direct_exit();
 }
 static void check_cpu_stall(struct rcu_data *rdp)
...
@@ -82,7 +82,6 @@ void kernel_restart_prepare(char *cmd)
 {
 blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 system_state = SYSTEM_RESTART;
-try_block_console_kthreads(10000);
 usermodehelper_disable();
 device_shutdown();
 }
@@ -271,7 +270,6 @@ static void kernel_shutdown_prepare(enum system_states state)
 blocking_notifier_call_chain(&reboot_notifier_list,
 		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
 system_state = state;
-try_block_console_kthreads(10000);
 usermodehelper_disable();
 device_shutdown();
 }
@@ -821,11 +819,9 @@ static int __orderly_reboot(void)
 ret = run_cmd(reboot_cmd);
 if (ret) {
-	printk_prefer_direct_enter();
 	pr_warn("Failed to start orderly reboot: forcing the issue\n");
 	emergency_sync();
 	kernel_restart(NULL);
-	printk_prefer_direct_exit();
 }
 return ret;
@@ -838,7 +834,6 @@ static int __orderly_poweroff(bool force)
 ret = run_cmd(poweroff_cmd);
 if (ret && force) {
-	printk_prefer_direct_enter();
 	pr_warn("Failed to start orderly shutdown: forcing the issue\n");
 	/*
@@ -848,7 +843,6 @@ static int __orderly_poweroff(bool force)
 	 */
 	emergency_sync();
 	kernel_power_off();
-	printk_prefer_direct_exit();
 }
 return ret;
@@ -906,8 +900,6 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
  */
 static void hw_failure_emergency_poweroff_func(struct work_struct *work)
 {
-printk_prefer_direct_enter();
 /*
  * We have reached here after the emergency shutdown waiting period has
  * expired. This means orderly_poweroff has not been able to shut off
@@ -924,8 +916,6 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
  */
 pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
 emergency_restart();
-printk_prefer_direct_exit();
 }
 static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
@@ -964,13 +954,11 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
 {
 static atomic_t allow_proceed = ATOMIC_INIT(1);
-printk_prefer_direct_enter();
 pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
 /* Shutdown should be initiated only once. */
 if (!atomic_dec_and_test(&allow_proceed))
-	goto out;
+	return;
 /*
  * Queue a backup emergency shutdown in the event of
@@ -978,8 +966,6 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
  */
 hw_failure_emergency_poweroff(ms_until_forced);
 orderly_poweroff(true);
-
-out:
-printk_prefer_direct_exit();
 }
 EXPORT_SYMBOL_GPL(hw_protection_shutdown);
...
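
One structural detail in hw_protection_shutdown(): removing the printk_prefer_direct_exit() cleanup also removes the need for the out: label, so the early bail-out becomes a plain return. The once-only guard itself is untouched; its shape, restated for reference:

	static atomic_t allow_proceed = ATOMIC_INIT(1);

	/* The first caller decrements 1 -> 0 and proceeds; every later caller
	 * sees the test fail and returns without starting a second shutdown. */
	if (!atomic_dec_and_test(&allow_proceed))
		return;
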
@@ -424,8 +424,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 /* Start period for the next softlockup warning. */
 update_report_ts();
-printk_prefer_direct_enter();
 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 	smp_processor_id(), duration,
 	current->comm, task_pid_nr(current));
@@ -444,8 +442,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
 if (softlockup_panic)
 	panic("softlockup: hung tasks");
-printk_prefer_direct_exit();
 }
 return HRTIMER_RESTART;
...
@@ -135,8 +135,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
 if (__this_cpu_read(hard_watchdog_warn) == true)
 	return;
-printk_prefer_direct_enter();
 pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
 	this_cpu);
 print_modules();
@@ -157,8 +155,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
 if (hardlockup_panic)
 	nmi_panic(regs, "Hard LOCKUP");
-printk_prefer_direct_exit();
 __this_cpu_write(hard_watchdog_warn, true);
 return;
 }
...