Commit 4ca4f26a authored by Juri Lelli, committed by Greg Kroah-Hartman

arm,arm64,drivers: add a prefix to drivers arch_topology interfaces

Now that some functions that deal with arch topology information live
under drivers, there is a clash of naming that might create confusion.

Tidy things up by creating a topology namespace for interfaces used by
arch code; achieve this by prepending a 'topology_' prefix to driver
interfaces.
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Acked-by: Russell King <rmk+kernel@armlinux.org.uk>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 615ffd63
...@@ -111,7 +111,7 @@ static void __init parse_dt_topology(void) ...@@ -111,7 +111,7 @@ static void __init parse_dt_topology(void)
continue; continue;
} }
if (parse_cpu_capacity(cn, cpu)) { if (topology_parse_cpu_capacity(cn, cpu)) {
of_node_put(cn); of_node_put(cn);
continue; continue;
} }
...@@ -160,7 +160,7 @@ static void __init parse_dt_topology(void) ...@@ -160,7 +160,7 @@ static void __init parse_dt_topology(void)
>> (SCHED_CAPACITY_SHIFT-1)) + 1; >> (SCHED_CAPACITY_SHIFT-1)) + 1;
if (cap_from_dt) if (cap_from_dt)
normalize_cpu_capacity(); topology_normalize_cpu_scale();
} }
/* /*
...@@ -173,10 +173,10 @@ static void update_cpu_capacity(unsigned int cpu) ...@@ -173,10 +173,10 @@ static void update_cpu_capacity(unsigned int cpu)
if (!cpu_capacity(cpu) || cap_from_dt) if (!cpu_capacity(cpu) || cap_from_dt)
return; return;
set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity); topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
pr_info("CPU%u: update cpu_capacity %lu\n", pr_info("CPU%u: update cpu_capacity %lu\n",
cpu, arch_scale_cpu_capacity(NULL, cpu)); cpu, topology_get_cpu_scale(NULL, cpu));
} }
#else #else
......
...@@ -39,7 +39,7 @@ static int __init get_cpu_for_node(struct device_node *node) ...@@ -39,7 +39,7 @@ static int __init get_cpu_for_node(struct device_node *node)
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
if (of_get_cpu_node(cpu, NULL) == cpu_node) { if (of_get_cpu_node(cpu, NULL) == cpu_node) {
parse_cpu_capacity(cpu_node, cpu); topology_parse_cpu_capacity(cpu_node, cpu);
of_node_put(cpu_node); of_node_put(cpu_node);
return cpu; return cpu;
} }
...@@ -191,7 +191,7 @@ static int __init parse_dt_topology(void) ...@@ -191,7 +191,7 @@ static int __init parse_dt_topology(void)
if (ret != 0) if (ret != 0)
goto out_map; goto out_map;
normalize_cpu_capacity(); topology_normalize_cpu_scale();
/* /*
* Check that all cores are in the topology; the SMP code will * Check that all cores are in the topology; the SMP code will
......
...@@ -25,12 +25,12 @@ ...@@ -25,12 +25,12 @@
static DEFINE_MUTEX(cpu_scale_mutex); static DEFINE_MUTEX(cpu_scale_mutex);
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
{ {
return per_cpu(cpu_scale, cpu); return per_cpu(cpu_scale, cpu);
} }
void set_capacity_scale(unsigned int cpu, unsigned long capacity) void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{ {
per_cpu(cpu_scale, cpu) = capacity; per_cpu(cpu_scale, cpu) = capacity;
} }
...@@ -42,7 +42,7 @@ static ssize_t cpu_capacity_show(struct device *dev, ...@@ -42,7 +42,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
struct cpu *cpu = container_of(dev, struct cpu, dev); struct cpu *cpu = container_of(dev, struct cpu, dev);
return sprintf(buf, "%lu\n", return sprintf(buf, "%lu\n",
arch_scale_cpu_capacity(NULL, cpu->dev.id)); topology_get_cpu_scale(NULL, cpu->dev.id));
} }
static ssize_t cpu_capacity_store(struct device *dev, static ssize_t cpu_capacity_store(struct device *dev,
...@@ -67,7 +67,7 @@ static ssize_t cpu_capacity_store(struct device *dev, ...@@ -67,7 +67,7 @@ static ssize_t cpu_capacity_store(struct device *dev,
mutex_lock(&cpu_scale_mutex); mutex_lock(&cpu_scale_mutex);
for_each_cpu(i, &cpu_topology[this_cpu].core_sibling) for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
set_capacity_scale(i, new_capacity); topology_set_cpu_scale(i, new_capacity);
mutex_unlock(&cpu_scale_mutex); mutex_unlock(&cpu_scale_mutex);
return count; return count;
...@@ -98,7 +98,7 @@ static u32 capacity_scale; ...@@ -98,7 +98,7 @@ static u32 capacity_scale;
static u32 *raw_capacity; static u32 *raw_capacity;
static bool cap_parsing_failed; static bool cap_parsing_failed;
void normalize_cpu_capacity(void) void topology_normalize_cpu_scale(void)
{ {
u64 capacity; u64 capacity;
int cpu; int cpu;
...@@ -113,14 +113,14 @@ void normalize_cpu_capacity(void) ...@@ -113,14 +113,14 @@ void normalize_cpu_capacity(void)
cpu, raw_capacity[cpu]); cpu, raw_capacity[cpu]);
capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT) capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
/ capacity_scale; / capacity_scale;
set_capacity_scale(cpu, capacity); topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
cpu, arch_scale_cpu_capacity(NULL, cpu)); cpu, topology_get_cpu_scale(NULL, cpu));
} }
mutex_unlock(&cpu_scale_mutex); mutex_unlock(&cpu_scale_mutex);
} }
int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu) int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{ {
int ret = 1; int ret = 1;
u32 cpu_capacity; u32 cpu_capacity;
...@@ -185,12 +185,12 @@ init_cpu_capacity_callback(struct notifier_block *nb, ...@@ -185,12 +185,12 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpus_to_visit, cpus_to_visit,
policy->related_cpus); policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus) { for_each_cpu(cpu, policy->related_cpus) {
raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) * raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
policy->cpuinfo.max_freq / 1000UL; policy->cpuinfo.max_freq / 1000UL;
capacity_scale = max(raw_capacity[cpu], capacity_scale); capacity_scale = max(raw_capacity[cpu], capacity_scale);
} }
if (cpumask_empty(cpus_to_visit)) { if (cpumask_empty(cpus_to_visit)) {
normalize_cpu_capacity(); topology_normalize_cpu_scale();
kfree(raw_capacity); kfree(raw_capacity);
pr_debug("cpu_capacity: parsing done\n"); pr_debug("cpu_capacity: parsing done\n");
cap_parsing_done = true; cap_parsing_done = true;
......
...@@ -4,14 +4,14 @@ ...@@ -4,14 +4,14 @@
#ifndef _LINUX_ARCH_TOPOLOGY_H_ #ifndef _LINUX_ARCH_TOPOLOGY_H_
#define _LINUX_ARCH_TOPOLOGY_H_ #define _LINUX_ARCH_TOPOLOGY_H_
void normalize_cpu_capacity(void); void topology_normalize_cpu_scale(void);
struct device_node; struct device_node;
int parse_cpu_capacity(struct device_node *cpu_node, int cpu); int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
struct sched_domain; struct sched_domain;
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu); unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
void set_capacity_scale(unsigned int cpu, unsigned long capacity); void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */ #endif /* _LINUX_ARCH_TOPOLOGY_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.