Commit 6abb11ae authored by Markus Metzger, committed by Ingo Molnar

x86, bts, ptrace: move BTS buffer allocation from ds.c into ptrace.c

Impact: restructure DS memory allocation to be done by the usage site of DS

Require pre-allocated buffers in ds.h.

Move the BTS buffer allocation for ptrace into ptrace.c.
The pointer to the allocated buffer is stored in the traced task's
task_struct together with the handle returned by ds_request_bts().

Remove the memory accounting code.
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ca0002a1
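With this change, ds.c no longer allocates, accounts for, or frees BTS buffer memory; the caller owns the buffer for the lifetime of the tracer. A minimal sketch of the resulting calling convention, modeled on the ptrace_bts_config() and ptrace_disable() hunks below (the helper names and the module-scope variables are illustrative, not part of the commit):

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/ds.h>

/* Illustrative state; in the commit these live in the traced task's task_struct. */
static struct bts_tracer *tracer;
static void *bts_buffer;

static int bts_start(struct task_struct *task, size_t size)
{
	/* The caller allocates the (non-pageable) buffer up front... */
	bts_buffer = kzalloc(size, GFP_KERNEL);
	if (!bts_buffer)
		return -ENOMEM;

	/* ...and hands it to ds_request_bts(); a NULL base is no longer accepted. */
	tracer = ds_request_bts(task, bts_buffer, size,
				/* ovfl = */ NULL, /* th = */ (size_t)-1);
	if (IS_ERR(tracer)) {
		int error = PTR_ERR(tracer);

		kfree(bts_buffer);
		bts_buffer = NULL;
		tracer = NULL;
		return error;
	}

	return 0;
}

static void bts_stop(void)
{
	/* ds_release_bts() no longer frees the buffer, so the caller must. */
	(void)ds_release_bts(tracer);
	kfree(bts_buffer);

	tracer = NULL;
	bts_buffer = NULL;
}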
arch/x86/include/asm/ds.h
@@ -7,13 +7,12 @@
  *
  * It manages:
  * - per-thread and per-cpu allocation of BTS and PEBS
- * - buffer memory allocation (optional)
- * - buffer overflow handling
+ * - buffer overflow handling (to be done)
  * - buffer access
  *
  * It assumes:
- * - get_task_struct on all parameter tasks
- * - current is allowed to trace parameter tasks
+ * - get_task_struct on all traced tasks
+ * - current is allowed to trace tasks
  *
  *
  * Copyright (C) 2007-2008 Intel Corporation.
@@ -54,8 +53,7 @@ typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);
  * task: the task to request recording for;
  *       NULL for per-cpu recording on the current cpu
  * base: the base pointer for the (non-pageable) buffer;
- *       NULL if buffer allocation requested
- * size: the size of the requested or provided buffer in bytes
+ * size: the size of the provided buffer in bytes
  * ovfl: pointer to a function to be called on buffer overflow;
  *       NULL if cyclic buffer requested
  * th: the interrupt threshold in records from the end of the buffer;
@@ -72,8 +70,6 @@ extern struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 /*
  * Release BTS or PEBS resources
  *
- * Frees buffers allocated on ds_request.
- *
  * Returns 0 on success; -Eerrno otherwise
  *
  * tracer: the tracer handle returned from ds_request_~()
arch/x86/kernel/ds.c
@@ -7,13 +7,12 @@
  *
  * It manages:
  * - per-thread and per-cpu allocation of BTS and PEBS
- * - buffer memory allocation (optional)
- * - buffer overflow handling
+ * - buffer overflow handling (to be done)
  * - buffer access
  *
  * It assumes:
- * - get_task_struct on all parameter tasks
- * - current is allowed to trace parameter tasks
+ * - get_task_struct on all traced tasks
+ * - current is allowed to trace tasks
  *
  *
  * Copyright (C) 2007-2008 Intel Corporation.
@@ -57,8 +56,6 @@ struct ds_tracer {
 	/* the buffer provided on ds_request() and its size in bytes */
 	void *buffer;
 	size_t size;
-	/* the number of allocated pages for on-request allocated buffers */
-	unsigned int pages;
 };
 
 struct bts_tracer {
@@ -141,8 +138,7 @@ static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
 /*
- * Locking is done only for allocating BTS or PEBS resources and for
- * guarding context and buffer memory allocation.
+ * Locking is done only for allocating BTS or PEBS resources.
  */
 static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
@@ -292,50 +288,6 @@ static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
 }
 
-/*
- * Allocate a non-pageable buffer of the parameter size.
- * Checks the memory and the locked memory rlimit.
- *
- * Returns the buffer, if successful;
- *         NULL, if out of memory or rlimit exceeded.
- *
- * size: the requested buffer size in bytes
- * pages (out): if not NULL, contains the number of pages reserved
- */
-static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
-{
-	unsigned long rlim, vm, pgsz;
-	void *buffer = NULL;
-
-	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	down_write(&current->mm->mmap_sem);
-
-	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-	vm   = current->mm->total_vm + pgsz;
-	if (rlim < vm)
-		goto out;
-
-	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-	vm   = current->mm->locked_vm + pgsz;
-	if (rlim < vm)
-		goto out;
-
-	buffer = kzalloc(size, GFP_KERNEL);
-	if (!buffer)
-		goto out;
-
-	current->mm->total_vm  += pgsz;
-	current->mm->locked_vm += pgsz;
-
-	if (pages)
-		*pages = pgsz;
-
- out:
-	up_write(&current->mm->mmap_sem);
-	return buffer;
-}
-
 static void ds_install_ds_config(struct ds_context *context,
 				 enum ds_qualifier qual,
 				 void *base, size_t size, size_t ith)
@@ -382,6 +334,10 @@ static int ds_request(struct ds_tracer *tracer, enum ds_qualifier qual,
 	if (!ds_cfg.sizeof_ds)
 		goto out;
 
+	error = -EINVAL;
+	if (!base)
+		goto out;
+
 	/* we require some space to do alignment adjustments below */
 	error = -EINVAL;
 	if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual]))
@@ -395,13 +351,6 @@ static int ds_request(struct ds_tracer *tracer, enum ds_qualifier qual,
 		goto out;
 	}
 
-	error = -ENOMEM;
-	if (!base) {
-		base = ds_allocate_buffer(size, &tracer->pages);
-		if (!base)
-			goto out;
-	}
-
 	tracer->buffer = base;
 	tracer->size = size;
@@ -466,7 +415,7 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
 	return tracer;
 
  out_tracer:
-	(void)ds_release_bts(tracer);
+	kfree(tracer);
  out:
 	return ERR_PTR(error);
 }
@@ -496,31 +445,18 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 	return tracer;
 
  out_tracer:
-	(void)ds_release_pebs(tracer);
+	kfree(tracer);
  out:
 	return ERR_PTR(error);
 }
 
 static void ds_release(struct ds_tracer *tracer, enum ds_qualifier qual)
 {
-	if (tracer->context) {
-		BUG_ON(tracer->context->owner[qual] != tracer);
-		tracer->context->owner[qual] = NULL;
-
-		put_tracer(tracer->context->task);
-		ds_put_context(tracer->context);
-	}
-
-	if (tracer->pages) {
-		kfree(tracer->buffer);
-
-		down_write(&current->mm->mmap_sem);
-
-		current->mm->total_vm  -= tracer->pages;
-		current->mm->locked_vm -= tracer->pages;
-
-		up_write(&current->mm->mmap_sem);
-	}
+	BUG_ON(tracer->context->owner[qual] != tracer);
+	tracer->context->owner[qual] = NULL;
+
+	put_tracer(tracer->context->task);
+	ds_put_context(tracer->context);
 }
 
 int ds_release_bts(struct bts_tracer *tracer)
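Note the fail-fast check added to ds_request() above: a NULL base is now rejected with -EINVAL instead of triggering an in-kernel allocation. A hypothetical legacy caller that still passed NULL would see something like the following (sketch, not from the commit; assumes DS hardware support is configured, so the earlier -ENODEV check does not fire):

/* Sketch: what a caller relying on the removed in-kernel allocation now gets back. */
static long bts_request_legacy(struct task_struct *task, size_t size)
{
	/* base == NULL used to mean "allocate for me"; it is now an error. */
	struct bts_tracer *tracer =
		ds_request_bts(task, /* base = */ NULL, size,
			       /* ovfl = */ NULL, /* th = */ (size_t)-1);

	return IS_ERR(tracer) ? PTR_ERR(tracer) : 0;	/* expected: -EINVAL */
}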
arch/x86/kernel/ptrace.c
@@ -758,6 +758,10 @@ static int ptrace_bts_config(struct task_struct *child,
 	bts_ovfl_callback_t ovfl = NULL;
 	unsigned int sig = 0;
 
+	error = -EINVAL;
+	if (cfg.size < (10 * bts_cfg.sizeof_bts))
+		goto errout;
+
 	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
 		if (!cfg.signal)
 			goto errout;
@@ -768,14 +772,26 @@ static int ptrace_bts_config(struct task_struct *child,
 		sig = cfg.signal;
 	}
 
-	if (child->bts)
+	if (child->bts) {
 		(void)ds_release_bts(child->bts);
+		kfree(child->bts_buffer);
+
+		child->bts = NULL;
+		child->bts_buffer = NULL;
+	}
+
+	error = -ENOMEM;
+	child->bts_buffer = kzalloc(cfg.size, GFP_KERNEL);
+	if (!child->bts_buffer)
+		goto errout;
 
-	child->bts = ds_request_bts(child, /* base = */ NULL, cfg.size,
+	child->bts = ds_request_bts(child, child->bts_buffer, cfg.size,
 				    ovfl, /* th = */ (size_t)-1);
 	if (IS_ERR(child->bts)) {
 		error = PTR_ERR(child->bts);
+		kfree(child->bts_buffer);
 		child->bts = NULL;
+		child->bts_buffer = NULL;
 		goto errout;
 	}
@@ -972,6 +988,8 @@ void ptrace_disable(struct task_struct *child)
 #ifdef CONFIG_X86_PTRACE_BTS
 	if (child->bts) {
 		(void)ds_release_bts(child->bts);
+		kfree(child->bts_buffer);
+		child->bts_buffer = NULL;
 
 		child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
 		if (!child->thread.debugctlmsr)
include/linux/sched.h
@@ -1168,6 +1168,10 @@ struct task_struct {
 	 * This field actually belongs to the ptracer task.
 	 */
 	struct bts_tracer *bts;
+	/*
+	 * The buffer to hold the BTS data.
+	 */
+	void *bts_buffer;
 #endif /* CONFIG_X86_PTRACE_BTS */
 
 	/* PID/PID hash table linkage. */