Commit 25a3720c authored by Elena Reshetova's avatar Elena Reshetova Committed by Arnaldo Carvalho de Melo

perf evlist: Convert perf_mmap.refcnt from atomic_t to refcount_t

The refcount_t type and corresponding API should be used instead of
atomic_t when the variable is used as a reference counter.

This allows one to avoid accidental refcounter overflows that might lead
to use-after-free situations.
Signed-off-by: default avatarElena Reshetova <elena.reshetova@intel.com>
Signed-off-by: default avatarDavid Windsor <dwindsor@gmail.com>
Signed-off-by: default avatarHans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: default avatarKees Cook <keescook@chromium.org>
Tested-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Windsor <dwindsor@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hans Liljestrand <ishkamiel@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matija Glavinic Pecotic <matija.glavinic-pecotic.ext@nokia.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: alsa-devel@alsa-project.org
Link: http://lkml.kernel.org/r/1487691303-31858-8-git-send-email-elena.reshetova@intel.com
Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
parent ead05e8f
...@@ -777,7 +777,7 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messu ...@@ -777,7 +777,7 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messu
/* /*
* Check if event was unmapped due to a POLLHUP/POLLERR. * Check if event was unmapped due to a POLLHUP/POLLERR.
*/ */
if (!atomic_read(&md->refcnt)) if (!refcount_read(&md->refcnt))
return NULL; return NULL;
head = perf_mmap__read_head(md); head = perf_mmap__read_head(md);
...@@ -794,7 +794,7 @@ perf_mmap__read_backward(struct perf_mmap *md) ...@@ -794,7 +794,7 @@ perf_mmap__read_backward(struct perf_mmap *md)
/* /*
* Check if event was unmapped due to a POLLHUP/POLLERR. * Check if event was unmapped due to a POLLHUP/POLLERR.
*/ */
if (!atomic_read(&md->refcnt)) if (!refcount_read(&md->refcnt))
return NULL; return NULL;
head = perf_mmap__read_head(md); head = perf_mmap__read_head(md);
...@@ -856,7 +856,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md) ...@@ -856,7 +856,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md)
{ {
u64 head; u64 head;
if (!atomic_read(&md->refcnt)) if (!refcount_read(&md->refcnt))
return; return;
head = perf_mmap__read_head(md); head = perf_mmap__read_head(md);
...@@ -875,14 +875,14 @@ static bool perf_mmap__empty(struct perf_mmap *md) ...@@ -875,14 +875,14 @@ static bool perf_mmap__empty(struct perf_mmap *md)
static void perf_mmap__get(struct perf_mmap *map) static void perf_mmap__get(struct perf_mmap *map)
{ {
atomic_inc(&map->refcnt); refcount_inc(&map->refcnt);
} }
static void perf_mmap__put(struct perf_mmap *md) static void perf_mmap__put(struct perf_mmap *md)
{ {
BUG_ON(md->base && atomic_read(&md->refcnt) == 0); BUG_ON(md->base && refcount_read(&md->refcnt) == 0);
if (atomic_dec_and_test(&md->refcnt)) if (refcount_dec_and_test(&md->refcnt))
perf_mmap__munmap(md); perf_mmap__munmap(md);
} }
...@@ -894,7 +894,7 @@ void perf_mmap__consume(struct perf_mmap *md, bool overwrite) ...@@ -894,7 +894,7 @@ void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
perf_mmap__write_tail(md, old); perf_mmap__write_tail(md, old);
} }
if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md)) if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
perf_mmap__put(md); perf_mmap__put(md);
} }
...@@ -937,7 +937,7 @@ static void perf_mmap__munmap(struct perf_mmap *map) ...@@ -937,7 +937,7 @@ static void perf_mmap__munmap(struct perf_mmap *map)
munmap(map->base, perf_mmap__mmap_len(map)); munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL; map->base = NULL;
map->fd = -1; map->fd = -1;
atomic_set(&map->refcnt, 0); refcount_set(&map->refcnt, 0);
} }
auxtrace_mmap__munmap(&map->auxtrace_mmap); auxtrace_mmap__munmap(&map->auxtrace_mmap);
} }
...@@ -1001,7 +1001,7 @@ static int perf_mmap__mmap(struct perf_mmap *map, ...@@ -1001,7 +1001,7 @@ static int perf_mmap__mmap(struct perf_mmap *map,
* evlist layer can't just drop it when filtering events in * evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd(). * perf_evlist__filter_pollfd().
*/ */
atomic_set(&map->refcnt, 2); refcount_set(&map->refcnt, 2);
map->prev = 0; map->prev = 0;
map->mask = mp->mask; map->mask = mp->mask;
map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
......
#ifndef __PERF_EVLIST_H #ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1 #define __PERF_EVLIST_H 1
#include <linux/atomic.h> #include <linux/refcount.h>
#include <linux/list.h> #include <linux/list.h>
#include <api/fd/array.h> #include <api/fd/array.h>
#include <stdio.h> #include <stdio.h>
...@@ -29,7 +29,7 @@ struct perf_mmap { ...@@ -29,7 +29,7 @@ struct perf_mmap {
void *base; void *base;
int mask; int mask;
int fd; int fd;
atomic_t refcnt; refcount_t refcnt;
u64 prev; u64 prev;
struct auxtrace_mmap auxtrace_mmap; struct auxtrace_mmap auxtrace_mmap;
char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8))); char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment