Commit c17488d0 authored by Linus Torvalds

Merge tag 'trace-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Not much new with tracing for this release.  Mostly just clean ups and
  minor fixes.

  Here's what else is new:

   - A new TRACE_EVENT_FN_COND macro, combining both _FN and _COND for
     those that want both.

   - New selftest to test instance creation and deletion

   - Better debug output when ftrace fails"

* tag 'trace-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (24 commits)
  ftrace: Fix the race between ftrace and insmod
  ftrace: Add infrastructure for delayed enabling of module functions
  x86: ftrace: Fix the comments for ftrace_modify_code_direct()
  tracing: Fix comment to use tracing_on over tracing_enable
  metag: ftrace: Fix the comments for ftrace_modify_code
  sh: ftrace: Fix the comments for ftrace_modify_code()
  ia64: ftrace: Fix the comments for ftrace_modify_code()
  ftrace: Clean up ftrace_module_init() code
  ftrace: Join functions ftrace_module_init() and ftrace_init_module()
  tracing: Introduce TRACE_EVENT_FN_COND macro
  tracing: Use seq_buf_used() in seq_buf_to_user() instead of len
  bpf: Constify bpf_verifier_ops structure
  ftrace: Have ftrace_ops_get_func() handle RCU and PER_CPU flags too
  ftrace: Remove use of control list and ops
  ftrace: Fix output of enabled_functions for showing tramp
  ftrace: Fix a typo in comment
  ftrace: Show all tramps registered to a record on ftrace_bug()
  ftrace: Add variable ftrace_expected for archs to show expected code
  ftrace: Add new type to distinguish what kind of ftrace_bug()
  tracing: Update cond flag when enabling or disabling a trigger
  ...
parents 34a9304a 5156dca3
@@ -97,13 +97,11 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char replaced[MCOUNT_INSN_SIZE];
/*
- * Note: Due to modules and __init, code can
- * disappear and change, we need to protect against faulting
- * as well as code changing. We do this by using the
- * probe_kernel_* functions.
- *
- * No real locking needed, this code is run through
- * kstop_machine, or before SMP starts.
+ * Note:
+ * We are paranoid about modifying text, as if a bug was to happen, it
+ * could cause us to read or write to someplace that could cause harm.
+ * Carefully read and modify the code with probe_kernel_*(), and make
+ * sure what we read is what we expected it to be before modifying it.
*/
if (!do_check)
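The rewritten comment describes the pattern that all four architectures touched by this series implement. As a condensed, illustrative sketch (the function name is not from this diff), the fault-safe verify-then-modify sequence looks like:

    static int modify_code_checked(unsigned long ip, const unsigned char *old_code,
                                   const unsigned char *new_code)
    {
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /* read the live text without risking a fault on vanished code */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
            return -EFAULT;

        /* refuse to patch if the text is not what we expected */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
            return -EINVAL;

        /* write the replacement, again tolerating faults */
        if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
            return -EPERM;

        return 0;
    }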
@@ -54,12 +54,11 @@ static int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
unsigned char replaced[MCOUNT_INSN_SIZE];
/*
- * Note: Due to modules and __init, code can
- * disappear and change, we need to protect against faulting
- * as well as code changing.
- *
- * No real locking needed, this code is run through
- * kstop_machine.
+ * Note:
+ * We are paranoid about modifying text, as if a bug was to happen, it
+ * could cause us to read or write to someplace that could cause harm.
+ * Carefully read and modify the code with probe_kernel_*(), and make
+ * sure what we read is what we expected it to be before modifying it.
*/
/* read the text we want to modify */
@@ -212,13 +212,11 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char replaced[MCOUNT_INSN_SIZE];
/*
- * Note: Due to modules and __init, code can
- * disappear and change, we need to protect against faulting
- * as well as code changing. We do this by using the
- * probe_kernel_* functions.
- *
- * No real locking needed, this code is run through
- * kstop_machine, or before SMP starts.
+ * Note:
+ * We are paranoid about modifying text, as if a bug was to happen, it
+ * could cause us to read or write to someplace that could cause harm.
+ * Carefully read and modify the code with probe_kernel_*(), and make
+ * sure what we read is what we expected it to be before modifying it.
*/
/* read the text we want to modify */
@@ -105,14 +105,14 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
{
unsigned char replaced[MCOUNT_INSN_SIZE];
+ ftrace_expected = old_code;
+
/*
- * Note: Due to modules and __init, code can
- * disappear and change, we need to protect against faulting
- * as well as code changing. We do this by using the
- * probe_kernel_* functions.
- *
- * No real locking needed, this code is run through
- * kstop_machine, or before SMP starts.
+ * Note:
+ * We are paranoid about modifying text, as if a bug was to happen, it
+ * could cause us to read or write to someplace that could cause harm.
+ * Carefully read and modify the code with probe_kernel_*(), and make
+ * sure what we read is what we expected it to be before modifying it.
*/
/* read the text we want to modify */
@@ -154,6 +154,8 @@ int ftrace_make_nop(struct module *mod,
if (addr == MCOUNT_ADDR)
return ftrace_modify_code_direct(rec->ip, old, new);
+ ftrace_expected = NULL;
+
/* Normal cases use add_brk_on_nop */
WARN_ONCE(1, "invalid use of ftrace_make_nop");
return -EINVAL;
@@ -220,6 +222,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
WARN_ON(1);
+ ftrace_expected = NULL;
return -EINVAL;
}
@@ -314,6 +317,8 @@ static int add_break(unsigned long ip, const char *old)
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
+ ftrace_expected = old;
+
/* Make sure it is what we expect it to be */
if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
return -EINVAL;
@@ -413,6 +418,8 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
ftrace_addr = ftrace_get_addr_curr(rec);
nop = ftrace_call_replace(ip, ftrace_addr);
+ ftrace_expected = nop;
+
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
return -EINVAL;
}
@@ -76,8 +76,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ENABLED - set/unset when ftrace_ops is registered/unregistered
* DYNAMIC - set when ftrace_ops is registered to denote dynamically
* allocated ftrace_ops which need special care
- * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
- * could be controled by following calls:
+ * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
+ * could be controlled by following calls:
* ftrace_function_local_enable
* ftrace_function_local_disable
* SAVE_REGS - The ftrace_ops wants regs saved at each function called
@@ -121,7 +121,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_DYNAMIC = 1 << 1,
- FTRACE_OPS_FL_CONTROL = 1 << 2,
+ FTRACE_OPS_FL_PER_CPU = 1 << 2,
FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
@@ -134,6 +134,7 @@ enum {
FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
FTRACE_OPS_FL_IPMODIFY = 1 << 13,
FTRACE_OPS_FL_PID = 1 << 14,
+ FTRACE_OPS_FL_RCU = 1 << 15,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -146,11 +147,11 @@ struct ftrace_ops_hash {
#endif
/*
- * Note, ftrace_ops can be referenced outside of RCU protection.
- * (Although, for perf, the control ops prevent that). If ftrace_ops is
- * allocated and not part of kernel core data, the unregistering of it will
- * perform a scheduling on all CPUs to make sure that there are no more users.
- * Depending on the load of the system that may take a bit of time.
+ * Note, ftrace_ops can be referenced outside of RCU protection, unless
+ * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
+ * core data, the unregistering of it will perform a scheduling on all CPUs
+ * to make sure that there are no more users. Depending on the load of the
+ * system that may take a bit of time.
*
* Any private data added must also take care not to be freed and if private
* data is added to a ftrace_ops that is in core code, the user of the
@@ -196,34 +197,34 @@ int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
/**
- * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ * ftrace_function_local_enable - enable ftrace_ops on current cpu
*
* This function enables tracing on current cpu by decreasing
* the per cpu control variable.
* It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
- if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
return;
(*this_cpu_ptr(ops->disabled))--;
}
/**
- * ftrace_function_local_disable - enable controlled ftrace_ops on current cpu
+ * ftrace_function_local_disable - disable ftrace_ops on current cpu
*
- * This function enables tracing on current cpu by decreasing
+ * This function disables tracing on current cpu by increasing
* the per cpu control variable.
* It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
- if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
return;
(*this_cpu_ptr(ops->disabled))++;
@@ -235,12 +236,12 @@ static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
*
* This function returns value of ftrace_ops::disabled on current cpu.
* It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
- WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+ WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
return *this_cpu_ptr(ops->disabled);
}
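For context, a hedged usage sketch of a per-CPU-controlled ops (all names below are hypothetical, and it is assumed, as with the old CONTROL flag, that registration allocates the per-CPU ops->disabled counter):

    static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs)
    {
        /* runs for traced functions while enabled on this CPU */
    }

    static struct ftrace_ops my_ops = {
        .func  = my_trace_func,
        .flags = FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU,
    };

    /* ... after register_ftrace_function(&my_ops): ... */
    preempt_disable();
    ftrace_function_local_enable(&my_ops);   /* decrement this CPU's disabled counter */
    /* ... tracing active on this CPU ... */
    ftrace_function_local_disable(&my_ops);  /* increment it again */
    preempt_enable();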
@@ -296,6 +297,21 @@ int ftrace_arch_code_modify_post_process(void);
struct dyn_ftrace;
+ enum ftrace_bug_type {
+ FTRACE_BUG_UNKNOWN,
+ FTRACE_BUG_INIT,
+ FTRACE_BUG_NOP,
+ FTRACE_BUG_CALL,
+ FTRACE_BUG_UPDATE,
+ };
+ extern enum ftrace_bug_type ftrace_bug_type;
+
+ /*
+ * Archs can set this to point to a variable that holds the value that was
+ * expected at the call site before calling ftrace_bug().
+ */
+ extern const void *ftrace_expected;
+
void ftrace_bug(int err, struct dyn_ftrace *rec);
struct seq_file;
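Read together with the x86 hunks earlier in this merge, the arch-side protocol is, schematically (condensed from those hunks, not new code):

    ftrace_expected = old_code;    /* the bytes we believe live at rec->ip */
    if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
        return -EINVAL;            /* caller reports via ftrace_bug(), which can
                                      now print both actual and expected code */
    ...
    ftrace_expected = NULL;        /* cleared on paths that compare nothing */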
@@ -341,6 +357,7 @@ bool is_ftrace_trampoline(unsigned long addr);
* REGS - the record wants the function to save regs
* REGS_EN - the function is set up to save regs.
* IPMODIFY - the record allows for the IP address to be changed.
+ * DISABLED - the record is not ready to be touched yet
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been
@@ -355,10 +372,11 @@ enum {
FTRACE_FL_TRAMP = (1UL << 28),
FTRACE_FL_TRAMP_EN = (1UL << 27),
FTRACE_FL_IPMODIFY = (1UL << 26),
+ FTRACE_FL_DISABLED = (1UL << 25),
};
- #define FTRACE_REF_MAX_SHIFT 26
- #define FTRACE_FL_BITS 6
+ #define FTRACE_REF_MAX_SHIFT 25
+ #define FTRACE_FL_BITS 7
#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
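With DISABLED taking bit 25, the top seven bits of dyn_ftrace::flags are flag bits and the low 25 bits remain the reference count. Per the full enum in this header (the upper bits come from the surrounding file, not this hunk), the resulting layout is:

    /*
     * bit 31: ENABLED     bit 30: REGS      bit 29: REGS_EN
     * bit 28: TRAMP       bit 27: TRAMP_EN  bit 26: IPMODIFY
     * bit 25: DISABLED    bits 0-24: ref count, FTRACE_REF_MAX = (1UL << 25) - 1
     */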
@@ -479,6 +479,10 @@ extern void syscall_unregfunc(void);
#define TRACE_EVENT_FN(name, proto, args, struct, \
assign, print, reg, unreg) \
DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ #define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \
+ assign, print, reg, unreg) \
+ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ PARAMS(args), PARAMS(cond))
#define TRACE_EVENT_CONDITION(name, proto, args, cond, \
struct, assign, print) \
DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
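A hedged example of the new macro in use (the event and its reg/unreg callbacks are hypothetical): it takes TRACE_EVENT_FN's argument list with a TP_CONDITION() inserted after TP_ARGS():

    TRACE_EVENT_FN_COND(sample_event,
        TP_PROTO(int cpu, unsigned long len),
        TP_ARGS(cpu, len),
        TP_CONDITION(len > 0),               /* emit the event only when true */
        TP_STRUCT__entry(
            __field(int, cpu)
            __field(unsigned long, len)
        ),
        TP_fast_assign(
            __entry->cpu = cpu;
            __entry->len = len;
        ),
        TP_printk("cpu=%d len=%lu", __entry->cpu, __entry->len),
        sample_event_reg, sample_event_unreg /* called on first/last enable */
    );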
@@ -40,6 +40,11 @@
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
+ #undef TRACE_EVENT_FN_COND
+ #define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \
+ assign, print, reg, unreg) \
+ DEFINE_TRACE_FN(name, reg, unreg)
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
DEFINE_TRACE(name)
@@ -93,6 +98,7 @@
#undef TRACE_EVENT
#undef TRACE_EVENT_FN
+ #undef TRACE_EVENT_FN_COND
#undef TRACE_EVENT_CONDITION
#undef DECLARE_EVENT_CLASS
#undef DEFINE_EVENT
@@ -123,6 +123,12 @@ TRACE_MAKE_SYSTEM_STR();
TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
+ #undef TRACE_EVENT_FN_COND
+ #define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \
+ assign, print, reg, unreg) \
+ TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+ PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value) \
__TRACE_EVENT_FLAGS(name, value)
@@ -316,7 +316,7 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type
return true;
}
- static struct bpf_verifier_ops kprobe_prog_ops = {
+ static const struct bpf_verifier_ops kprobe_prog_ops = {
.get_func_proto = kprobe_prog_func_proto,
.is_valid_access = kprobe_prog_is_valid_access,
};
This diff is collapsed.
@@ -1001,17 +1001,13 @@ static int rb_head_page_replace(struct buffer_page *old,
/*
* rb_tail_page_update - move the tail page forward
- *
- * Returns 1 if moved tail page, 0 if someone else did.
*/
- static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *tail_page,
struct buffer_page *next_page)
{
- struct buffer_page *old_tail;
unsigned long old_entries;
unsigned long old_write;
- int ret = 0;
/*
* The tail page now needs to be moved forward.
@@ -1036,7 +1032,7 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
* it is, then it is up to us to update the tail
* pointer.
*/
- if (tail_page == cpu_buffer->tail_page) {
+ if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
/* Zero the write counter */
unsigned long val = old_write & ~RB_WRITE_MASK;
unsigned long eval = old_entries & ~RB_WRITE_MASK;
@@ -1061,14 +1057,9 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
*/
local_set(&next_page->page->commit, 0);
- old_tail = cmpxchg(&cpu_buffer->tail_page,
- tail_page, next_page);
- if (old_tail == tail_page)
- ret = 1;
+ /* Again, either we update tail_page or an interrupt does */
+ (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
}
- return ret;
}
static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
@@ -2036,12 +2027,15 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* the tail page would have moved.
*/
if (ret == RB_PAGE_NORMAL) {
+ struct buffer_page *buffer_tail_page;
+
+ buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
/*
* If the tail had moved passed next, then we need
* to reset the pointer.
*/
- if (cpu_buffer->tail_page != tail_page &&
- cpu_buffer->tail_page != next_page)
+ if (buffer_tail_page != tail_page &&
+ buffer_tail_page != next_page)
rb_head_page_set_normal(cpu_buffer, new_head,
next_page,
RB_PAGE_HEAD);
@@ -2135,6 +2129,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
local_sub(length, &tail_page->write);
}
+ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
+
/*
* This is the slow path, force gcc not to inline it.
*/
@@ -2147,7 +2143,6 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer *buffer = cpu_buffer->buffer;
struct buffer_page *next_page;
int ret;
- u64 ts;
next_page = tail_page;
@@ -2221,20 +2216,17 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
}
}
- ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
- if (ret) {
- /*
- * Nested commits always have zero deltas, so
- * just reread the time stamp
- */
- ts = rb_time_stamp(buffer);
- next_page->page->time_stamp = ts;
- }
+ rb_tail_page_update(cpu_buffer, tail_page, next_page);
out_again:
rb_reset_tail(cpu_buffer, tail, info);
+ /* Commit what we have for now. */
+ rb_end_commit(cpu_buffer);
+
+ /* rb_end_commit() decs committing */
+ local_inc(&cpu_buffer->committing);
+
/* fail and let the caller try again */
return ERR_PTR(-EAGAIN);
@@ -2362,7 +2354,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
addr = (unsigned long)event;
addr &= PAGE_MASK;
- bpage = cpu_buffer->tail_page;
+ bpage = READ_ONCE(cpu_buffer->tail_page);
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
unsigned long write_mask =
@@ -2410,7 +2402,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
again:
max_count = cpu_buffer->nr_pages * 100;
- while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
+ while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
if (RB_WARN_ON(cpu_buffer, !(--max_count)))
return;
if (RB_WARN_ON(cpu_buffer,
@@ -2419,6 +2411,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
+ /* Only update the write stamp if the page has an event */
+ if (rb_page_write(cpu_buffer->commit_page))
cpu_buffer->write_stamp =
cpu_buffer->commit_page->page->time_stamp;
/* add barrier to keep gcc from optimizing too much */
@@ -2443,7 +2437,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
* and pushed the tail page forward, we will be left with
* a dangling commit that will never go forward.
*/
- if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+ if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
goto again;
}
@@ -2699,7 +2693,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
if (unlikely(info->add_timestamp))
info->length += RB_LEN_TIME_EXTEND;
- tail_page = info->tail_page = cpu_buffer->tail_page;
+ /* Don't let the compiler play games with cpu_buffer->tail_page */
+ tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
write = local_add_return(info->length, &tail_page->write);
/* set write to only the index of the write */
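All of the ring-buffer hunks above funnel lockless reads of cpu_buffer->tail_page through READ_ONCE(). A minimal illustration of the failure mode this guards against (variable names hypothetical):

    /*
     * Without READ_ONCE() the compiler may reload the pointer between the
     * two statements, so they could observe two different pages.
     */
    struct buffer_page *tail = READ_ONCE(cpu_buffer->tail_page);

    if (tail == expected)   /* compare a stable snapshot ...       */
        consume(tail);      /* ... and use that same snapshot here */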
@@ -363,8 +363,8 @@ struct trace_option_dentry {
* @name: the name chosen to select it on the available_tracers file
* @init: called when one switches to this tracer (echo name > current_tracer)
* @reset: called when one switches to another tracer
- * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
- * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @start: called when tracing is unpaused (echo 1 > tracing_on)
+ * @stop: called when tracing is paused (echo 0 > tracing_on)
* @update_thresh: called when tracing_thresh is updated
* @open: called when the trace file is opened
* @pipe_open: called when the trace_pipe file is opened
@@ -467,8 +467,6 @@ enum {
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
- TRACE_CONTROL_BIT,
-
TRACE_BRANCH_BIT,
/*
* Abuse of the trace_recursion.
@@ -334,7 +334,7 @@ static int perf_ftrace_function_register(struct perf_event *event)
{
struct ftrace_ops *ops = &event->ftrace_ops;
- ops->flags |= FTRACE_OPS_FL_CONTROL;
+ ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
ops->func = perf_ftrace_function_call;
return register_ftrace_function(ops);
}
@@ -538,11 +538,12 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
list_add_rcu(&data->list, &file->triggers);
ret++;
- update_cond_flag(file);
if (trace_event_trigger_enable_disable(file, 1) < 0) {
list_del_rcu(&data->list);
update_cond_flag(file);
ret--;
}
+ update_cond_flag(file);
out:
return ret;
}
@@ -570,8 +571,8 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
unregistered = true;
list_del_rcu(&data->list);
- update_cond_flag(file);
trace_event_trigger_enable_disable(file, 0);
+ update_cond_flag(file);
break;
}
}
@@ -1314,11 +1315,12 @@ static int event_enable_register_trigger(char *glob,
list_add_rcu(&data->list, &file->triggers);
ret++;
- update_cond_flag(file);
if (trace_event_trigger_enable_disable(file, 1) < 0) {
list_del_rcu(&data->list);
update_cond_flag(file);
ret--;
}
+ update_cond_flag(file);
out:
return ret;
}
@@ -1339,8 +1341,8 @@ static void event_enable_unregister_trigger(char *glob,
(enable_data->file == test_enable_data->file)) {
unregistered = true;
list_del_rcu(&data->list);
- update_cond_flag(file);
trace_event_trigger_enable_disable(file, 0);
+ update_cond_flag(file);
break;
}
}
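The four hunks above make the same ordering change in all four paths: update_cond_flag() now runs after trace_event_trigger_enable_disable() has actually flipped the trigger's state (the remaining call on the register error path covers backing the trigger out), so the event file's condition flag is recomputed against the trigger's real enabled state.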
@@ -306,10 +306,12 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
if (!cnt)
return 0;
- if (s->len <= s->readpos)
+ len = seq_buf_used(s);
+
+ if (len <= s->readpos)
return -EBUSY;
- len = seq_buf_used(s) - s->readpos;
+ len -= s->readpos;
if (cnt > len)
cnt = len;
ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
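The fix matters because seq_buf::len can run past the buffer size once the buffer overflows, while seq_buf_used() clamps it; its definition in seq_buf.h is essentially:

    static inline unsigned int seq_buf_used(struct seq_buf *s)
    {
        return min(s->len, s->size);
    }

The new selftest below is the one the merge message mentions; it exercises trace-instance creation and deletion.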
#!/bin/sh
# description: Test creation and deletion of trace instances
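# Note: exit_unsupported and $FAIL are supplied by the ftracetest harness
# that sources this script; they are not defined here.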
if [ ! -d instances ] ; then
echo "no instance directory with this kernel"
exit_unsupported;
fi
fail() { # mesg
rmdir x y z 2>/dev/null
echo $1
set -e
exit $FAIL
}
cd instances
# we don't want to fail on error
set +e
mkdir x
rmdir x
result=$?
if [ $result -ne 0 ]; then
echo "instance rmdir not supported"
exit_unsupported
fi
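# Create and remove three instances in a tight loop; five copies of this
# run in parallel below to stress concurrent instance creation and deletion.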
instance_slam() {
while :; do
mkdir x
mkdir y
mkdir z
rmdir x
rmdir y
rmdir z
done 2>/dev/null
}
instance_slam &
x=`jobs -l`
p1=`echo $x | cut -d' ' -f2`
echo $p1
instance_slam &
x=`jobs -l | tail -1`
p2=`echo $x | cut -d' ' -f2`
echo $p2
instance_slam &
x=`jobs -l | tail -1`
p3=`echo $x | cut -d' ' -f2`
echo $p3
instance_slam &
x=`jobs -l | tail -1`
p4=`echo $x | cut -d' ' -f2`
echo $p4
instance_slam &
x=`jobs -l | tail -1`
p5=`echo $x | cut -d' ' -f2`
echo $p5
ls -lR >/dev/null
sleep 1
kill -1 $p1
kill -1 $p2
kill -1 $p3
kill -1 $p4
kill -1 $p5
echo "Wait for processes to finish"
wait $p1 $p2 $p3 $p4 $p5
echo "all processes finished, wait for cleanup"
mkdir x y z
ls x y z
rmdir x y z
for d in x y z; do
if [ -d $d ]; then
fail "instance $d still exists"
fi
done
set -e
exit 0