Commit 3e5963bc authored by Michael Neuling, committed by Rafael J. Wysocki

cpufreq: powernv: Define per_cpu chip pointer to optimize hot-path

Commit 96c4726f "cpufreq: powernv: Remove cpu_to_chip_id() from
hot-path" introduced a 'core_to_chip_map' array to cache the chip-ids
of all cores.

Replace this with a per-CPU variable that stores the pointer to the
chip-array. This removes the linear lookup and provides a neater and
simpler solution.
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent ed72662a
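Before the diff, a minimal sketch of the per-CPU pointer pattern the patch adopts may help: each CPU caches a pointer to its chip entry once at init time, so the throttle-check hot path becomes a single this_cpu_read() instead of a linear scan over nr_chips. This is an illustrative, trimmed-down sketch, not the driver's actual code; the struct layout and the helper names cache_chip_pointers() and this_cpu_chip() are invented here for clarity.

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Illustrative, cut-down chip descriptor (the real driver's struct has more fields). */
struct chip {
	unsigned int id;
	bool throttled;
	struct cpumask mask;		/* CPUs belonging to this chip */
};

static struct chip *chips;		/* one entry per chip, allocated at init */
static int nr_chips;

/* Per-CPU cache: each CPU points straight at its own chip entry. */
static DEFINE_PER_CPU(struct chip *, chip_info);

/* Init path (hypothetical helper): walk each chip's CPU mask once and record the pointer. */
static void cache_chip_pointers(void)
{
	unsigned int cpu;
	int i;

	for (i = 0; i < nr_chips; i++)
		for_each_cpu(cpu, &chips[i].mask)
			per_cpu(chip_info, cpu) = &chips[i];
}

/* Hot path (hypothetical helper): O(1) lookup, no loop over nr_chips. */
static struct chip *this_cpu_chip(void)
{
	return this_cpu_read(chip_info);
}

With the pointers cached this way, powernv_cpufreq_throttle_check() in the diff below reduces to chip = this_cpu_read(chip_info), and the core_to_chip_map array together with its allocation and error-handling labels can be dropped.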
@@ -44,7 +44,6 @@
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
 static bool rebooting, throttled, occ_reset;
-static unsigned int *core_to_chip_map;
 
 static const char * const throttle_reason[] = {
 	"No throttling",
@@ -65,6 +64,7 @@ static struct chip {
 } *chips;
 
 static int nr_chips;
+static DEFINE_PER_CPU(struct chip *, chip_info);
 
 /*
  * Note: The set of pstates consists of contiguous integers, the
@@ -324,34 +324,31 @@ static inline unsigned int get_nominal_index(void)
 static void powernv_cpufreq_throttle_check(void *data)
 {
+	struct chip *chip;
 	unsigned int cpu = smp_processor_id();
-	unsigned int chip_id = core_to_chip_map[cpu_core_index_of_thread(cpu)];
 	unsigned long pmsr;
-	int pmsr_pmax, i;
+	int pmsr_pmax;
 
 	pmsr = get_pmspr(SPRN_PMSR);
-
-	for (i = 0; i < nr_chips; i++)
-		if (chips[i].id == chip_id)
-			break;
+	chip = this_cpu_read(chip_info);
 
 	/* Check for Pmax Capping */
 	pmsr_pmax = (s8)PMSR_MAX(pmsr);
 	if (pmsr_pmax != powernv_pstate_info.max) {
-		if (chips[i].throttled)
+		if (chip->throttled)
 			goto next;
-		chips[i].throttled = true;
+		chip->throttled = true;
 		if (pmsr_pmax < powernv_pstate_info.nominal)
 			pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
-				     cpu, chips[i].id, pmsr_pmax,
+				     cpu, chip->id, pmsr_pmax,
 				     powernv_pstate_info.nominal);
-		trace_powernv_throttle(chips[i].id,
-				       throttle_reason[chips[i].throttle_reason],
-				       pmsr_pmax);
-	} else if (chips[i].throttled) {
-		chips[i].throttled = false;
-		trace_powernv_throttle(chips[i].id,
-				       throttle_reason[chips[i].throttle_reason],
+		trace_powernv_throttle(chip->id,
+				       throttle_reason[chip->throttle_reason],
+				       pmsr_pmax);
+	} else if (chip->throttled) {
+		chip->throttled = false;
+		trace_powernv_throttle(chip->id,
+				       throttle_reason[chip->throttle_reason],
 				       pmsr_pmax);
 	}
@@ -558,47 +555,34 @@ static int init_chip_info(void)
 	unsigned int chip[256];
 	unsigned int cpu, i;
 	unsigned int prev_chip_id = UINT_MAX;
-	cpumask_t cpu_mask;
-	int ret = -ENOMEM;
-
-	core_to_chip_map = kcalloc(cpu_nr_cores(), sizeof(unsigned int),
-				   GFP_KERNEL);
-	if (!core_to_chip_map)
-		goto out;
 
-	cpumask_copy(&cpu_mask, cpu_possible_mask);
-	for_each_cpu(cpu, &cpu_mask) {
+	for_each_possible_cpu(cpu) {
 		unsigned int id = cpu_to_chip_id(cpu);
 
 		if (prev_chip_id != id) {
 			prev_chip_id = id;
 			chip[nr_chips++] = id;
 		}
-		core_to_chip_map[cpu_core_index_of_thread(cpu)] = id;
-		cpumask_andnot(&cpu_mask, &cpu_mask, cpu_sibling_mask(cpu));
 	}
 
 	chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
 	if (!chips)
-		goto free_chip_map;
+		return -ENOMEM;
 
 	for (i = 0; i < nr_chips; i++) {
 		chips[i].id = chip[i];
 		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
 		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+		for_each_cpu(cpu, &chips[i].mask)
+			per_cpu(chip_info, cpu) = &chips[i];
 	}
 
 	return 0;
-free_chip_map:
-	kfree(core_to_chip_map);
-out:
-	return ret;
 }
 
 static inline void clean_chip_info(void)
 {
 	kfree(chips);
-	kfree(core_to_chip_map);
 }
 
 static inline void unregister_all_notifiers(void)