Commit c7e9ad7d authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:

 - fix the perf build, by fixing the rbtree.c sharing bug between kernel
   and tools/perf by creating a local copy of rbtree.c (more will be
   done for v4.3)

 - fix an AUX buffer (Intel-PT support) refcounting bug

 - fix copy_from_user_nmi() return value

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Fix copy_from_user_nmi() return if range is not ok
  perf: Fix AUX buffer refcounting
  tools: Copy rbtree_augmented.h from the kernel
  tools: Move rbtree.h from tools/perf/
  tools: Copy lib/rbtree.c to tools/lib/
  perf tools: Copy rbtree.h from the kernel
  tools: Adopt {READ,WRITE_ONCE} from the kernel
parents 1c4c7159 ebf2d268
arch/x86/lib/usercopy.c
@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
	unsigned long ret;

	if (__range_not_ok(from, n, TASK_SIZE))
-		return 0;
+		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context
...
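Context for the fix above: copy_from_user_nmi() follows the copy_from_user() convention of returning the number of bytes that were *not* copied. Returning 0 on a failed __range_not_ok() check therefore made an invalid range look like a fully successful copy. A minimal caller sketch (hypothetical, not part of this patch) shows the corrected semantics:

/* Hypothetical caller: with the old "return 0", a rejected
 * range looked like all 'len' bytes had been copied. */
static int read_user_bytes(void *buf, const void __user *uptr,
			   unsigned long len)
{
	unsigned long left = copy_from_user_nmi(buf, uptr, len);

	if (left == len)
		return -EFAULT;		/* nothing was copied */

	return len - left;		/* bytes actually copied */
}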
kernel/events/core.c
@@ -4358,14 +4358,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
	rcu_read_unlock();
}

-static void rb_free_rcu(struct rcu_head *rcu_head)
-{
-	struct ring_buffer *rb;
-
-	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
-	rb_free(rb);
-}
-
struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
	struct ring_buffer *rb;
...
kernel/events/internal.h
@@ -11,6 +11,7 @@
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
+	struct irq_work			irq_work;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
@@ -55,6 +56,15 @@ struct ring_buffer {
};

extern void rb_free(struct ring_buffer *rb);
+
+static inline void rb_free_rcu(struct rcu_head *rcu_head)
+{
+	struct ring_buffer *rb;
+
+	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+	rb_free(rb);
+}
+
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
...
kernel/events/ring_buffer.c
@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
	rcu_read_unlock();
}

+static void rb_irq_work(struct irq_work *work);
+
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
+	init_irq_work(&rb->irq_work, rb_irq_work);
+}
+
+static void ring_buffer_put_async(struct ring_buffer *rb)
+{
+	if (!atomic_dec_and_test(&rb->refcount))
+		return;
+
+	rb->rcu_head.next = (void *)rb;
+	irq_work_queue(&rb->irq_work);
}

/*
@@ -319,7 +331,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
	rb_free_aux(rb);

err:
-	ring_buffer_put(rb);
+	ring_buffer_put_async(rb);
	handle->event = NULL;

	return NULL;
@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
	local_set(&rb->aux_nest, 0);

	rb_free_aux(rb);
-	ring_buffer_put(rb);
+	ring_buffer_put_async(rb);
}

/*
@@ -557,7 +569,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
+		irq_work_queue(&rb->irq_work);
+}
+
+static void rb_irq_work(struct irq_work *work)
+{
+	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
+
+	if (!atomic_read(&rb->aux_refcount))
		__rb_free_aux(rb);
+
+	if (rb->rcu_head.next == (void *)rb)
+		call_rcu(&rb->rcu_head, rb_free_rcu);
}

#ifndef CONFIG_PERF_USE_VMALLOC
...
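What the changes above implement: the last reference to a perf ring buffer can now be dropped from NMI context (the AUX output paths), where freeing memory is not allowed, so ring_buffer_put_async() and rb_free_aux() only queue an irq_work and the real teardown runs later from rb_irq_work() in IRQ context; rb->rcu_head.next is pressed into service as a flag telling the handler whether the buffer itself still needs its RCU-deferred free. A stripped-down sketch of the same deferred-free idiom (illustrative only; struct obj and its helpers are made up):

#include <linux/irq_work.h>
#include <linux/slab.h>

struct obj {
	atomic_t	refcount;
	struct irq_work	irq_work;	/* set up with init_irq_work(&o->irq_work,
					 * obj_irq_work) at allocation time */
};

static void obj_irq_work(struct irq_work *work)
{
	struct obj *o = container_of(work, struct obj, irq_work);

	kfree(o);			/* IRQ context: safe to free here */
}

static void obj_put_async(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		irq_work_queue(&o->irq_work);	/* caller may be in NMI context */
}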
tools/include/linux/compiler.h
@@ -41,4 +41,62 @@
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#include <linux/types.h>
static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
default:
barrier();
__builtin_memcpy((void *)res, (const void *)p, size);
barrier();
}
}
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
default:
barrier();
__builtin_memcpy((void *)p, (const void *)res, size);
barrier();
}
}
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
* WRITE_ONCE or ACCESS_ONCE() in different C statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*/
#define READ_ONCE(x) \
({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
#define WRITE_ONCE(x, val) \
({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
#endif /* _TOOLS_LINUX_COMPILER_H */
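A short usage sketch for the READ_ONCE()/WRITE_ONCE() macros added above (illustrative; the 'stop' flag is made up). The point is that the compiler must emit exactly one load or store per invocation and may not cache or refetch the value:

static int stop;

void request_stop(void)			/* e.g. from an irq/signal path */
{
	WRITE_ONCE(stop, 1);
}

void worker(void)
{
	/* A plain 'while (!stop)' could legally be compiled into a single
	 * load hoisted out of the loop; READ_ONCE() forces a fresh load on
	 * every iteration. */
	while (!READ_ONCE(stop))
		;	/* do work */
}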
tools/include/linux/export.h:
#ifndef _TOOLS_LINUX_EXPORT_H_
#define _TOOLS_LINUX_EXPORT_H_
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#define EXPORT_SYMBOL_GPL_FUTURE(sym)
#define EXPORT_UNUSED_SYMBOL(sym)
#define EXPORT_UNUSED_SYMBOL_GPL(sym)
#endif
tools/include/linux/rbtree.h (new file):
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/include/linux/rbtree.h
To use rbtrees you'll have to implement your own insert and search cores.
This will avoid us to use callbacks and to drop drammatically performances.
I know it's not the cleaner way, but in C (not in C++) to get
performances and genericity...
See Documentation/rbtree.txt for documentation and samples.
*/
#ifndef __TOOLS_LINUX_PERF_RBTREE_H
#define __TOOLS_LINUX_PERF_RBTREE_H
#include <linux/kernel.h>
#include <linux/stddef.h>
struct rb_node {
unsigned long __rb_parent_color;
struct rb_node *rb_right;
struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */
struct rb_root {
struct rb_node *rb_node;
};
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
((node)->__rb_parent_color = (unsigned long)(node))
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
*rb_link = node;
}
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
/*
* Handy for checking that we are not deleting an entry that is
* already in a list, found in block/{blk-throttle,cfq-iosched}.c,
* probably should be moved to lib/rbtree.c...
*/
static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
RB_CLEAR_NODE(n);
}
#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
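As the header comment says, users implement their own insert and search cores. A minimal insert core in the style of Documentation/rbtree.txt (struct mytype and its 'key' field are made up for illustration):

struct mytype {
	struct rb_node node;
	long key;
};

/* Returns 0 on success, -1 if the key is already present. */
static int my_insert(struct rb_root *root, struct mytype *data)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct mytype *this = rb_entry(*link, struct mytype, node);

		parent = *link;
		if (data->key < this->key)
			link = &(*link)->rb_left;
		else if (data->key > this->key)
			link = &(*link)->rb_right;
		else
			return -1;
	}

	rb_link_node(&data->node, parent, link);	/* link in place... */
	rb_insert_color(&data->node, root);		/* ...then rebalance */
	return 0;
}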
tools/include/linux/rbtree_augmented.h (new file):
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
tools/linux/include/linux/rbtree_augmented.h
Copied from:
linux/include/linux/rbtree_augmented.h
*/
#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H
#define _TOOLS_LINUX_RBTREE_AUGMENTED_H
#include <linux/compiler.h>
#include <linux/rbtree.h>
/*
* Please note - only struct rb_augment_callbacks and the prototypes for
* rb_insert_augmented() and rb_erase_augmented() are intended to be public.
* The rest are implementation details you are not expected to depend on.
*
* See Documentation/rbtree.txt for documentation and samples.
*/
struct rb_augment_callbacks {
void (*propagate)(struct rb_node *node, struct rb_node *stop);
void (*copy)(struct rb_node *old, struct rb_node *new);
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
/*
* Fixup the rbtree and update the augmented information when rebalancing.
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
* rb_augment_inserted() instead of the usual rb_insert_color() call.
* If rb_augment_inserted() rebalances the rbtree, it will callback into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
__rb_insert_augmented(node, root, augment->rotate);
}
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
rbtype, rbaugmented, rbcompute) \
static inline void \
rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
{ \
while (rb != stop) { \
rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
rbtype augmented = rbcompute(node); \
if (node->rbaugmented == augmented) \
break; \
node->rbaugmented = augmented; \
rb = rb_parent(&node->rbfield); \
} \
} \
static inline void \
rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
new->rbaugmented = old->rbaugmented; \
} \
static void \
rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
new->rbaugmented = old->rbaugmented; \
old->rbaugmented = rbcompute(old); \
} \
rbstatic const struct rb_augment_callbacks rbname = { \
rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
};
#define RB_RED 0
#define RB_BLACK 1
#define __rb_parent(pc) ((struct rb_node *)(pc & ~3))
#define __rb_color(pc) ((pc) & 1)
#define __rb_is_black(pc) __rb_color(pc)
#define __rb_is_red(pc) (!__rb_color(pc))
#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
}
static inline void rb_set_parent_color(struct rb_node *rb,
struct rb_node *p, int color)
{
rb->__rb_parent_color = (unsigned long)p | color;
}
static inline void
__rb_change_child(struct rb_node *old, struct rb_node *new,
struct rb_node *parent, struct rb_root *root)
{
if (parent) {
if (parent->rb_left == old)
parent->rb_left = new;
else
parent->rb_right = new;
} else
root->rb_node = new;
}
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right, *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
*
* Note that if there is one child it must be red due to 5)
* and node must be black due to 4). We adjust colors locally
* so as to bypass __rb_erase_color() later on.
*/
pc = node->__rb_parent_color;
parent = __rb_parent(pc);
__rb_change_child(node, child, parent, root);
if (child) {
child->__rb_parent_color = pc;
rebalance = NULL;
} else
rebalance = __rb_is_black(pc) ? parent : NULL;
tmp = parent;
} else if (!child) {
/* Still case 1, but this time the child is node->rb_left */
tmp->__rb_parent_color = pc = node->__rb_parent_color;
parent = __rb_parent(pc);
__rb_change_child(node, tmp, parent, root);
rebalance = NULL;
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
tmp = child->rb_left;
if (!tmp) {
/*
* Case 2: node's successor is its right child
*
* (n) (s)
* / \ / \
* (x) (s) -> (x) (c)
* \
* (c)
*/
parent = successor;
child2 = successor->rb_right;
augment->copy(node, successor);
} else {
/*
* Case 3: node's successor is leftmost under
* node's right child subtree
*
* (n) (s)
* / \ / \
* (x) (y) -> (x) (y)
* / /
* (p) (p)
* / /
* (s) (c)
* \
* (c)
*/
do {
parent = successor;
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
parent->rb_left = child2 = successor->rb_right;
successor->rb_right = child;
rb_set_parent(child, successor);
augment->copy(node, successor);
augment->propagate(parent, successor);
}
successor->rb_left = tmp = node->rb_left;
rb_set_parent(tmp, successor);
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
rebalance = NULL;
} else {
unsigned long pc2 = successor->__rb_parent_color;
successor->__rb_parent_color = pc;
rebalance = __rb_is_black(pc2) ? parent : NULL;
}
tmp = successor;
}
augment->propagate(tmp, NULL);
return rebalance;
}
static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
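A sketch of how RB_DECLARE_CALLBACKS is meant to be used, here maintaining a per-subtree maximum (struct mynode and mynode_compute_max are illustrative; the kernel's interval tree is the canonical real user):

struct mynode {
	struct rb_node rb;
	long value;
	long subtree_max;	/* max of 'value' over this subtree */
};

static long mynode_compute_max(struct mynode *node)
{
	long max = node->value, m;

	if (node->rb.rb_left) {
		m = rb_entry(node->rb.rb_left, struct mynode, rb)->subtree_max;
		if (m > max)
			max = m;
	}
	if (node->rb.rb_right) {
		m = rb_entry(node->rb.rb_right, struct mynode, rb)->subtree_max;
		if (m > max)
			max = m;
	}
	return max;
}

RB_DECLARE_CALLBACKS(static, mynode_callbacks, struct mynode, rb,
		     long, subtree_max, mynode_compute_max)

Insertion then updates subtree_max on the path down, links the node with rb_link_node(), and calls rb_insert_augmented(&node->rb, root, &mynode_callbacks); removal goes through rb_erase_augmented() with the same callbacks.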
tools/lib/rbtree.c (new file): this diff is collapsed.
tools/perf/MANIFEST
@@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h
tools/arch/x86/include/asm/rmwcc.h
tools/lib/traceevent
tools/lib/api
+tools/lib/rbtree.c
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c
@@ -44,6 +45,8 @@ tools/include/linux/kernel.h
tools/include/linux/list.h
tools/include/linux/log2.h
tools/include/linux/poison.h
+tools/include/linux/rbtree.h
+tools/include/linux/rbtree_augmented.h
tools/include/linux/types.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
@@ -51,12 +54,10 @@ include/asm-generic/bitops/fls64.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/linux/perf_event.h
-include/linux/rbtree.h
include/linux/list.h
include/linux/hash.h
include/linux/stringify.h
lib/hweight.c
-lib/rbtree.c
include/linux/swab.h
arch/*/include/asm/unistd*.h
arch/*/include/uapi/asm/unistd*.h
@@ -65,7 +66,6 @@ arch/*/lib/memcpy*.S
arch/*/lib/memset*.S
include/linux/poison.h
include/linux/hw_breakpoint.h
-include/linux/rbtree_augmented.h
include/uapi/linux/perf_event.h
include/uapi/linux/const.h
include/uapi/linux/swab.h
...
tools/perf/util/Build
@@ -139,7 +139,7 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
	$(call rule_mkdir)
	$(call if_changed_dep,cc_o_c)

-$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE
+$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
	$(call rule_mkdir)
	$(call if_changed_dep,cc_o_c)
...
tools/perf/util/include/linux/rbtree.h (deleted):
#ifndef __TOOLS_LINUX_PERF_RBTREE_H
#define __TOOLS_LINUX_PERF_RBTREE_H
#include <stdbool.h>
#include "../../../../include/linux/rbtree.h"
/*
* Handy for checking that we are not deleting an entry that is
* already in a list, found in block/{blk-throttle,cfq-iosched}.c,
* probably should be moved to lib/rbtree.c...
*/
static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
RB_CLEAR_NODE(n);
}
#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
tools/perf/util/include/linux/rbtree_augmented.h (deleted):
#include <stdbool.h>
#include "../../../../include/linux/rbtree_augmented.h"