Commit 27a3b735 authored by Linus Torvalds's avatar Linus Torvalds

Merge branches 'core-urgent-for-linus', 'perf-urgent-for-linus' and...

Merge branches 'core-urgent-for-linus', 'perf-urgent-for-linus' and 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  debugobjects: Fix boot crash when kmemleak and debugobjects enabled

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  jump_label: Fix jump_label update for modules
  oprofile, x86: Fix race in nmi handler while starting counters

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Disable (revert) SCHED_LOAD_SCALE increase
  sched, cgroups: Fix MIN_SHARES on 64-bit boxen
@@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
 static int nmi_start(void)
 {
 	get_online_cpus();
-	on_each_cpu(nmi_cpu_start, NULL, 1);
 	ctr_running = 1;
+	/* make ctr_running visible to the nmi handler: */
+	smp_mb();
+	on_each_cpu(nmi_cpu_start, NULL, 1);
 	put_online_cpus();
 	return 0;
 }
@@ -504,15 +506,18 @@ static int nmi_setup(void)
 	nmi_enabled = 0;
 	ctr_running = 0;
-	barrier();
+	/* make variables visible to the nmi handler: */
+	smp_mb();
 
 	err = register_die_notifier(&profile_exceptions_nb);
 	if (err)
 		goto fail;
 
 	get_online_cpus();
 	register_cpu_notifier(&oprofile_cpu_nb);
-	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	nmi_enabled = 1;
+	/* make nmi_enabled visible to the nmi handler: */
+	smp_mb();
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	put_online_cpus();
 
 	return 0;
@@ -531,7 +536,8 @@ static void nmi_shutdown(void)
 	nmi_enabled = 0;
 	ctr_running = 0;
 	put_online_cpus();
-	barrier();
+	/* make variables visible to the nmi handler: */
+	smp_mb();
 	unregister_die_notifier(&profile_exceptions_nb);
 	msrs = &get_cpu_var(cpu_msrs);
 	model->shutdown(msrs);
......
@@ -808,7 +808,7 @@ enum cpu_idle_type {
  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
  * increased costs.
  */
-#if BITS_PER_LONG > 32
+#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
 # define SCHED_LOAD_RESOLUTION	10
 # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
 # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
......
@@ -375,15 +375,19 @@ int jump_label_text_reserved(void *start, void *end)
 static void jump_label_update(struct jump_label_key *key, int enable)
 {
-	struct jump_entry *entry = key->entries;
-
-	/* if there are no users, entry can be NULL */
-	if (entry)
-		__jump_label_update(key, entry, __stop___jump_table, enable);
+	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
 
 #ifdef CONFIG_MODULES
+	struct module *mod = __module_address((jump_label_t)key);
+
 	__jump_label_mod_update(key, enable);
+
+	if (mod)
+		stop = mod->jump_entries + mod->num_jump_entries;
 #endif
+	/* if there are no users, entry can be NULL */
+	if (entry)
+		__jump_label_update(key, entry, stop, enable);
 }
 #endif
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
  * (The default weight is 1024 - so there's no practical
  * limitation from this.)
  */
-#define MIN_SHARES	2
-#define MAX_SHARES	(1UL << (18 + SCHED_LOAD_RESOLUTION))
+#define MIN_SHARES	(1UL <<  1)
+#define MAX_SHARES	(1UL << 18)
 
 static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
@@ -8450,10 +8450,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	if (!tg->se[0])
 		return -EINVAL;
 
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	else if (shares > MAX_SHARES)
-		shares = MAX_SHARES;
+	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 
 	mutex_lock(&shares_mutex);
 	if (tg->shares == shares)
......
@@ -198,7 +198,7 @@ static void free_object(struct debug_obj *obj)
 	 * initialized:
 	 */
 	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
-		sched = !work_pending(&debug_obj_work);
+		sched = keventd_up() && !work_pending(&debug_obj_work);
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
 	obj_pool_used--;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment