Commit 789f90fc authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: per user mlock gift

Instead of a per-process mlock gift for perf-counters, use a
per-user gift so that there is less of a DoS potential.

[ Impact: allow less worst-case unprivileged memory consumption ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <20090515132018.496182835@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 548e1ddf
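
In concrete terms, and assuming 4 KiB pages (PAGE_SHIFT = 12): the old per-counter gift of 128 KiB came to 128 >> (PAGE_SHIFT - 10) = 32 free pages for every mmap()ed counter, which an unprivileged user could multiply simply by opening more counters across more processes. The new gift is 512 KiB per user, i.e. 512 >> 2 = 128 pages total; whatever a user maps beyond that is charged against RLIMIT_MEMLOCK as before. A runnable sketch of this arithmetic follows the diff below.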
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -674,6 +674,10 @@ struct user_struct {
 	struct work_struct work;
 #endif
 #endif
+
+#ifdef CONFIG_PERF_COUNTERS
+	atomic_long_t locked_vm;
+#endif
 };
 
 extern int uids_sysfs_init(void);
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -45,7 +45,7 @@ static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
-int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */
+int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -1522,6 +1522,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
 	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
 				      &counter->mmap_mutex)) {
+		struct user_struct *user = current_user();
+
+		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
 		vma->vm_mm->locked_vm -= counter->data->nr_locked;
 		perf_mmap_data_free(counter);
 		mutex_unlock(&counter->mmap_mutex);
@@ -1537,11 +1540,13 @@ static struct vm_operations_struct perf_mmap_vmops = {
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_counter *counter = file->private_data;
+	struct user_struct *user = current_user();
 	unsigned long vma_size;
 	unsigned long nr_pages;
+	unsigned long user_locked, user_lock_limit;
 	unsigned long locked, lock_limit;
+	long user_extra, extra;
 	int ret = 0;
-	long extra;
 
 	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
 		return -EINVAL;
@@ -1569,15 +1574,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 	}
 
-	extra = nr_pages /* + 1 only account the data pages */;
-	extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
-	if (extra < 0)
-		extra = 0;
-
-	locked = vma->vm_mm->locked_vm + extra;
+	user_extra = nr_pages + 1;
+	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+
+	extra = 0;
+	if (user_locked > user_lock_limit)
+		extra = user_locked - user_lock_limit;
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
 	lock_limit >>= PAGE_SHIFT;
+	locked = vma->vm_mm->locked_vm + extra;
 
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
 		ret = -EPERM;
@@ -1590,6 +1597,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 
 	atomic_set(&counter->mmap_count, 1);
+	atomic_long_add(user_extra, &user->locked_vm);
 	vma->vm_mm->locked_vm += extra;
 	counter->data->nr_locked = extra;
 unlock:
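
To see the new accounting end to end, here is a minimal user-space sketch of the logic perf_mmap() and perf_mmap_close() now share. It is an illustration only: PAGE_SHIFT = 12 is assumed, user_locked_vm is a plain variable standing in for the atomic user->locked_vm, and charge() is an invented name that folds the gift arithmetic into one function.

/* sketch.c - model of the per-user mlock gift; build with: cc -o sketch sketch.c */
#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed: 4 KiB pages */

static long sysctl_perf_counter_mlock = 512;	/* 'free' kb per user */
static long user_locked_vm;			/* stands in for user->locked_vm */

/*
 * Model of the charge perf_mmap() computes: consume the per-user gift
 * first, and return the pages that must still pass the RLIMIT_MEMLOCK
 * check (the 'extra' that goes into vma->vm_mm->locked_vm).
 */
static long charge(long nr_pages)
{
	long user_extra = nr_pages + 1;	/* data pages plus control page */
	long user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
	long user_locked = user_locked_vm + user_extra;
	long extra = 0;

	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	user_locked_vm += user_extra;	/* mirrors the atomic_long_add() */
	return extra;
}

int main(void)
{
	/* Two 64-page buffers from one user: the first fits inside the
	 * 128-page gift, the second spills two pages onto the rlimit. */
	printf("first  mmap charges %ld pages\n", charge(64));
	printf("second mmap charges %ld pages\n", charge(64));
	return 0;
}

The first call prints 0 and the second prints 2: once the user-wide gift is exhausted, further mappings are charged to the process rlimit, so running many small counters from many processes no longer multiplies the free allowance. That is the point of moving the counter into user_struct.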