Commit 7f60ba38 authored by Linus Torvalds

Merge tag 'for-v3.7' of git://git.infradead.org/users/cbou/linux-pstore

Pull pstore changes from Anton Vorontsov:

 1) We no longer hook into the function tracer's "high level"
    infrastructure in an ad-hoc way, and no longer use its debugfs
    knobs; pstore/ftrace now has its own enable/disable knob (a short
    userspace sketch of driving it follows below).  The change slightly
    touches the kernel/trace directory, but it got the needed ack from
    Steven Rostedt:

      http://lkml.org/lkml/2012/8/21/688

 2) Added a MAINTAINERS entry;

 3) A bunch of fixes, nothing special.

* tag 'for-v3.7' of git://git.infradead.org/users/cbou/linux-pstore:
  pstore: Avoid recursive spinlocks in the oops_in_progress case
  pstore/ftrace: Convert to its own enable/disable debugfs knob
  pstore/ram: Add missing platform_device_unregister
  MAINTAINERS: Add pstore maintainers
  pstore/ram: Mark ramoops_pstore_write_buf() as notrace
  pstore/ram: Fix printk format warning
  pstore/ram: Fix possible NULL dereference
parents e665faa4 80c9d03c
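A minimal userspace sketch of driving the new knob from (1), assuming debugfs is mounted at /sys/kernel/debug and the kernel was built with CONFIG_PSTORE_FTRACE; the helper below is illustrative only, not part of this merge:

/* Illustrative only: enable pstore ftrace recording via the new debugfs knob.
 * Assumes debugfs is mounted at /sys/kernel/debug; run as root. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/pstore/record_ftrace", O_WRONLY);

	if (fd < 0) {
		perror("open record_ftrace");
		return 1;
	}
	/* The knob accepts "0" or "1" (the kernel parses it with base 2). */
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

After a forced reboot the recorded call chain shows up as the "ftrace-ramoops" file once pstore is mounted, as the Documentation hunk below describes.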
......@@ -102,9 +102,7 @@ related hangs. The functions call chain log is stored in a "ftrace-ramoops"
file. Here is an example of usage:
# mount -t debugfs debugfs /sys/kernel/debug/
# cd /sys/kernel/debug/tracing
# echo function > current_tracer
# echo 1 > options/func_pstore
# echo 1 > /sys/kernel/debug/pstore/record_ftrace
# reboot -f
[...]
# mount -t pstore pstore /mnt/
......
......@@ -5550,6 +5550,18 @@ L: cbe-oss-dev@lists.ozlabs.org
S: Maintained
F: drivers/block/ps3vram.c

PSTORE FILESYSTEM
M: Anton Vorontsov <cbouatmailru@gmail.com>
M: Colin Cross <ccross@android.com>
M: Kees Cook <keescook@chromium.org>
M: Tony Luck <tony.luck@intel.com>
S: Maintained
T: git git://git.infradead.org/users/cbou/linux-pstore.git
F: fs/pstore/
F: include/linux/pstore*
F: drivers/firmware/efivars.c
F: drivers/acpi/apei/erst.c

PTP HARDWARE CLOCK SUPPORT
M: Richard Cochran <richardcochran@gmail.com>
S: Maintained
......
......@@ -23,6 +23,7 @@ config PSTORE_FTRACE
bool "Persistent function tracer"
depends on PSTORE
depends on FUNCTION_TRACER
depends on DEBUG_FS
help
With this option kernel traces function calls into a persistent
ram buffer that can be decoded and dumped after reboot through
......
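The Kconfig help above describes decoding and dumping the trace through the pstore filesystem after a reboot. A minimal read-side sketch, assuming pstore is already mounted at /mnt as in the Documentation example above (illustrative only, not part of the patch):

/* Illustrative only: stream the persisted function-call log to stdout.
 * Assumes pstore is mounted at /mnt (mount -t pstore pstore /mnt). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/mnt/ftrace-ramoops", "r");
	char line[256];

	if (!f) {
		perror("fopen /mnt/ftrace-ramoops");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}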
......@@ -17,19 +17,113 @@
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/cache.h>
#include <asm/barrier.h>
#include "internal.h"
void notrace pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
static void notrace pstore_ftrace_call(unsigned long ip,
				       unsigned long parent_ip)
{
	unsigned long flags;
	struct pstore_ftrace_record rec = {};

	if (unlikely(oops_in_progress))
		return;

	local_irq_save(flags);

	rec.ip = ip;
	rec.parent_ip = parent_ip;
	pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
	psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec,
			  sizeof(rec), psinfo);

	local_irq_restore(flags);
}

static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
	.func	= pstore_ftrace_call,
};

static DEFINE_MUTEX(pstore_ftrace_lock);
static bool pstore_ftrace_enabled;

static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	u8 on;
	ssize_t ret;

	ret = kstrtou8_from_user(buf, count, 2, &on);
	if (ret)
		return ret;

	mutex_lock(&pstore_ftrace_lock);

	if (!on ^ pstore_ftrace_enabled)
		goto out;

	if (on)
		ret = register_ftrace_function(&pstore_ftrace_ops);
	else
		ret = unregister_ftrace_function(&pstore_ftrace_ops);
	if (ret) {
		pr_err("%s: unable to %sregister ftrace ops: %zd\n",
		       __func__, on ? "" : "un", ret);
		goto err;
	}

	pstore_ftrace_enabled = on;
out:
	ret = count;
err:
	mutex_unlock(&pstore_ftrace_lock);

	return ret;
}

static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
				       size_t count, loff_t *ppos)
{
	char val[] = { '0' + pstore_ftrace_enabled, '\n' };

	return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
}

static const struct file_operations pstore_knob_fops = {
	.open	= simple_open,
	.read	= pstore_ftrace_knob_read,
	.write	= pstore_ftrace_knob_write,
};

void pstore_register_ftrace(void)
{
	struct dentry *dir;
	struct dentry *file;

	if (!psinfo->write_buf)
		return;

	dir = debugfs_create_dir("pstore", NULL);
	if (!dir) {
		pr_err("%s: unable to create pstore directory\n", __func__);
		return;
	}

	file = debugfs_create_file("record_ftrace", 0600, dir, NULL,
				   &pstore_knob_fops);
	if (!file) {
		pr_err("%s: unable to create record_ftrace file\n", __func__);
		goto err_file;
	}

	return;

err_file:
	debugfs_remove(dir);
}
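One subtlety in pstore_ftrace_knob_write() above: kstrtou8_from_user(..., 2, &on) parses the input in base 2, so on is always 0 or 1, and the early exit `if (!on ^ pstore_ftrace_enabled)` fires exactly when the requested state already matches the current one. A standalone illustration of that equivalence (variable names here are hypothetical, not from the patch):

/* Illustration only: for on in {0, 1} and a bool enabled, (!on ^ enabled)
 * is true exactly when on == enabled, i.e. "no change requested", which is
 * why the knob write path skips (un)registration in that case. */
#include <assert.h>
#include <stdbool.h>

int main(void)
{
	for (unsigned on = 0; on <= 1; on++) {
		for (unsigned e = 0; e <= 1; e++) {
			bool enabled = e;
			bool skip = !on ^ enabled;

			assert(skip == (on == enabled));
		}
	}
	return 0;
}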
......@@ -39,6 +39,12 @@ pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
#endif
}

#ifdef CONFIG_PSTORE_FTRACE
extern void pstore_register_ftrace(void);
#else
static inline void pstore_register_ftrace(void) {}
#endif

extern struct pstore_info *psinfo;

extern void pstore_set_kmsg_bytes(int);
......
......@@ -164,7 +164,13 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
		if (c > psinfo->bufsize)
			c = psinfo->bufsize;

		spin_lock_irqsave(&psinfo->buf_lock, flags);
		if (oops_in_progress) {
			if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
				break;
		} else {
			spin_lock_irqsave(&psinfo->buf_lock, flags);
		}
		memcpy(psinfo->buf, s, c);
		psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo);
		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
......@@ -236,6 +242,7 @@ int pstore_register(struct pstore_info *psi)
	kmsg_dump_register(&pstore_dumper);

	pstore_register_console();
	pstore_register_ftrace();

	if (pstore_update_ms >= 0) {
		pstore_timer.expires = jiffies +
......
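The pstore_console_write() hunk above is the "Avoid recursive spinlocks in the oops_in_progress case" fix: while an oops is being printed, this path can run on a CPU that already holds buf_lock, so an unconditional spin_lock_irqsave() could deadlock, and the message is dropped instead when the lock cannot be taken. A minimal sketch of that pattern with hypothetical names (only the spinlock helpers and oops_in_progress are real kernel symbols):

/* Sketch only: a writer that may be re-entered on the same CPU while an
 * oops is in progress must never spin on its own lock; try once and drop
 * the message instead. Struct and function names are illustrative. */
#include <linux/kernel.h>	/* oops_in_progress, min() */
#include <linux/spinlock.h>
#include <linux/string.h>

struct log_sink {
	spinlock_t	lock;
	char		buf[256];
};

static void log_sink_write(struct log_sink *ls, const char *s, size_t c)
{
	unsigned long flags;

	if (oops_in_progress) {
		/* The lock may already be held on this CPU; spinning here
		 * would deadlock, so try once and give up on failure. */
		if (!spin_trylock_irqsave(&ls->lock, flags))
			return;
	} else {
		spin_lock_irqsave(&ls->lock, flags);
	}

	memcpy(ls->buf, s, min(c, sizeof(ls->buf)));
	spin_unlock_irqrestore(&ls->lock, flags);
}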
......@@ -32,6 +32,7 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/pstore_ram.h>
#define RAMOOPS_KERNMSG_HDR "===="
......@@ -181,12 +182,11 @@ static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz)
	return len;
}

static int ramoops_pstore_write_buf(enum pstore_type_id type,
				    enum kmsg_dump_reason reason,
				    u64 *id, unsigned int part,
				    const char *buf, size_t size,
				    struct pstore_info *psi)
static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
					    enum kmsg_dump_reason reason,
					    u64 *id, unsigned int part,
					    const char *buf, size_t size,
					    struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;
	struct persistent_ram_zone *prz = cxt->przs[cxt->dump_write_cnt];
......@@ -406,7 +406,7 @@ static int __devinit ramoops_probe(struct platform_device *pdev)
		goto fail_init_fprz;

	if (!cxt->przs && !cxt->cprz && !cxt->fprz) {
		pr_err("memory size too small, minimum is %lu\n",
		pr_err("memory size too small, minimum is %zu\n",
			cxt->console_size + cxt->record_size +
			cxt->ftrace_size);
		goto fail_cnt;
......@@ -414,13 +414,14 @@ static int __devinit ramoops_probe(struct platform_device *pdev)
	cxt->pstore.data = cxt;
	/*
	 * Console can handle any buffer size, so prefer dumps buffer
	 * size since usually it is smaller.
	 * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
	 * have to handle dumps, we must have at least record_size buffer. And
	 * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
	 * ZERO_SIZE_PTR).
	 */
	if (cxt->przs)
		cxt->pstore.bufsize = cxt->przs[0]->buffer_size;
	else
		cxt->pstore.bufsize = cxt->cprz->buffer_size;
	if (cxt->console_size)
		cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
	cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);

	cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
	spin_lock_init(&cxt->pstore.buf_lock);
	if (!cxt->pstore.buf) {
......@@ -537,6 +538,7 @@ postcore_initcall(ramoops_init);
static void __exit ramoops_exit(void)
{
	platform_driver_unregister(&ramoops_driver);
	platform_device_unregister(dummy);
	kfree(dummy_data);
}
module_exit(ramoops_exit);
......
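Two notes on the ram.c hunks above: ramoops_pstore_write_buf() gains notrace because it now sits in the pstore ftrace callback's path, so tracing it would recurse; and the probe now sizes pstore.buf from console_size/record_size instead of unconditionally dereferencing przs[0] or cprz, either of which can be NULL. A standalone illustration of the resulting bufsize choice (hypothetical helper, not from the patch):

/* Illustration only: how the probe above picks pstore.bufsize after this
 * merge -- 1024 bytes (LOG_LINE_MAX) when console logging is enabled, and
 * never smaller than record_size; 0 means kmalloc() yields ZERO_SIZE_PTR. */
#include <stdio.h>
#include <stddef.h>

static size_t ramoops_bufsize(size_t console_size, size_t record_size)
{
	size_t bufsize = 0;

	if (console_size)
		bufsize = 1024;		/* LOG_LINE_MAX */
	if (record_size > bufsize)	/* max(record_size, bufsize) */
		bufsize = record_size;
	return bufsize;
}

int main(void)
{
	/* console only, dumps only, and both */
	printf("%zu %zu %zu\n", ramoops_bufsize(4096, 0),
	       ramoops_bufsize(0, 65536), ramoops_bufsize(4096, 65536));
	return 0;
}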
......@@ -64,14 +64,6 @@ struct pstore_info {
	void		*data;
};

#ifdef CONFIG_PSTORE_FTRACE
extern void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip);
#else
static inline void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
{ }
#endif

#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
#else
......
......@@ -13,7 +13,6 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/pstore.h>
#include <linux/fs.h>
#include "trace.h"
......@@ -76,10 +75,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
	preempt_enable_notrace();
}

/* Our two options */
/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
	TRACE_FUNC_OPT_PSTORE	= 0x2,
};

static struct tracer_flags func_flags;
......@@ -109,12 +107,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/*
		 * So far tracing doesn't support multiple buffers, so
		 * we make an explicit call for now.
		 */
		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
			pstore_ftrace_call(ip, parent_ip);
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}
......@@ -180,9 +172,6 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
	{ } /* Always set a last empty entry */
};
......@@ -235,8 +224,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
			register_ftrace_function(&trace_ops);
		}
		break;
	case TRACE_FUNC_OPT_PSTORE:
		break;
	default:
		return -EINVAL;
......