Commit 52b8b8d7 authored by Ingo Molnar

Merge branch 'x86/numa' into x86/mm

Merge reason: consolidate it into the more generic x86/mm tree to prevent conflicts.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents 02ac81a8 14392fd3
@@ -1705,7 +1705,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
depends on NUMA
config USE_PERCPU_NUMA_NODE_ID
def_bool X86_64
def_bool y
depends on NUMA
menu "Power management and ACPI options"
...
@@ -306,8 +306,6 @@ struct apic {
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
void (*setup_portio_remap)(void);
@@ -355,6 +353,23 @@ struct apic {
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
#ifdef CONFIG_X86_32
/*
* Called very early during boot from get_smp_config(). It should
* return the logical apicid. x86_[bios]_cpu_to_apicid is
* initialized before this function is called.
*
* If logical apicid can't be determined that early, the function
* may return BAD_APICID. Logical apicid will be configured after
* init_apic_ldr() while bringing up CPUs. Note that NUMA affinity
* won't be applied properly during early boot in this case.
*/
int (*x86_32_early_logical_apicid)(int cpu);
/* determine CPU -> NUMA node mapping */
int (*x86_32_numa_cpu_node)(int cpu);
#endif
};
/*
@@ -502,6 +517,11 @@ extern struct apic apic_noop;
extern struct apic apic_default;
static inline int noop_x86_32_early_logical_apicid(int cpu)
{
return BAD_APICID;
}
/*
* Set up the logical destination ID.
*
@@ -521,7 +541,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
return cpuid_apic >> index_msb;
}
extern int default_apicid_to_node(int logical_apicid);
extern int default_x86_32_numa_cpu_node(int cpu);
#endif
@@ -557,12 +577,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma
*retmap = *phys_map;
}
/* Mapping from cpu number to logical apicid */
static inline int default_cpu_to_logical_apicid(int cpu)
{
return 1 << cpu;
}
static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -595,8 +609,4 @@ extern int default_check_phys_apicid_present(int phys_apicid);
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_32
extern u8 cpu_2_logical_apicid[NR_CPUS];
#endif
#endif /* _ASM_X86_APIC_H */
@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
int vector);
/* Avoid include hell */
#define NMI_VECTOR 0x02
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector)
}
#ifdef CONFIG_X86_32
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_allbutself(int vector);
...
@@ -25,7 +25,6 @@ extern int pic_mode;
#define MAX_IRQ_SOURCES 256
extern unsigned int def_to_bigsmp;
extern u8 apicid_2_node[];
#ifdef CONFIG_X86_NUMAQ
extern int mp_bus_id_to_node[MAX_MP_BUSSES];
@@ -33,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
#endif
#define MAX_APICID 256
#else /* CONFIG_X86_64: */
#define MAX_MP_BUSSES 256
...
#ifndef _ASM_X86_NUMA_H
#define _ASM_X86_NUMA_H
#include <asm/topology.h>
#include <asm/apicdef.h>
#ifdef CONFIG_NUMA
/*
* __apicid_to_node[] stores the raw mapping between physical apicid and
* node and is used to initialize cpu_to_node mapping.
*
* The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
* should be accessed by the accessors - set_apicid_to_node() and
* numa_cpu_node().
*/
extern s16 __apicid_to_node[MAX_LOCAL_APIC];
static inline void set_apicid_to_node(int apicid, s16 node)
{
__apicid_to_node[apicid] = node;
}
#else /* CONFIG_NUMA */
static inline void set_apicid_to_node(int apicid, s16 node)
{
}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_X86_32
# include "numa_32.h"
#else
# include "numa_64.h"
#endif
#ifdef CONFIG_NUMA
extern void __cpuinit numa_set_node(int cpu, int node);
extern void __cpuinit numa_clear_node(int cpu);
extern void __init numa_init_array(void);
extern void __init init_cpu_to_node(void);
extern void __cpuinit numa_add_cpu(int cpu);
extern void __cpuinit numa_remove_cpu(int cpu);
#else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
static inline void numa_init_array(void) { }
static inline void init_cpu_to_node(void) { }
static inline void numa_add_cpu(int cpu) { }
static inline void numa_remove_cpu(int cpu) { }
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
#endif
#endif /* _ASM_X86_NUMA_H */
@@ -4,7 +4,12 @@
extern int numa_off;
extern int pxm_to_nid(int pxm);
extern void numa_remove_cpu(int cpu);
#ifdef CONFIG_NUMA
extern int __cpuinit numa_cpu_node(int cpu);
#else /* CONFIG_NUMA */
static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
#endif /* CONFIG_NUMA */
#ifdef CONFIG_HIGHMEM
extern void set_highmem_pages_init(void);
...
@@ -2,7 +2,6 @@
#define _ASM_X86_NUMA_64_H
#include <linux/nodemask.h>
#include <asm/apicdef.h>
struct bootnode {
u64 start;
@@ -14,11 +13,8 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks,
#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
extern void numa_init_array(void);
extern int numa_off;
extern s16 apicid_to_node[MAX_LOCAL_APIC];
extern unsigned long numa_free_all_bootmem(void);
extern void setup_node_bootmem(int nodeid, unsigned long start,
unsigned long end);
@@ -31,11 +27,7 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
*/
#define NODE_MIN_SIZE (4*1024*1024)
extern void __init init_cpu_to_node(void);
extern int __cpuinit numa_cpu_node(int cpu);
extern void __cpuinit numa_set_node(int cpu, int node);
extern void __cpuinit numa_clear_node(int cpu);
extern void __cpuinit numa_add_cpu(int cpu);
extern void __cpuinit numa_remove_cpu(int cpu);
#ifdef CONFIG_NUMA_EMU
#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
@@ -43,11 +35,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
void numa_emu_cmdline(char *);
#endif /* CONFIG_NUMA_EMU */
#else
static inline void init_cpu_to_node(void) { }
static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
static inline void numa_add_cpu(int cpu, int node) { }
static inline void numa_remove_cpu(int cpu) { }
#endif
#endif /* _ASM_X86_NUMA_64_H */
@@ -38,6 +38,9 @@ static inline struct cpumask *cpu_core_mask(int cpu)
DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
#endif
/* Static state in head.S used to set up a CPU */
extern unsigned long stack_start; /* Initial stack pointer address */
...
@@ -47,21 +47,6 @@
#include <asm/mpspec.h>
#ifdef CONFIG_X86_32
/* Mappings between logical cpu number and node number */
extern int cpu_to_node_map[];
/* Returns the number of the node containing CPU 'cpu' */
static inline int __cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
#define early_cpu_to_node __cpu_to_node
#define cpu_to_node __cpu_to_node
#else /* CONFIG_X86_64 */
/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
@@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu)
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif /* CONFIG_X86_64 */
/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
...
@@ -589,14 +589,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
nid = acpi_get_node(handle);
if (nid == -1 || !node_online(nid))
return;
#ifdef CONFIG_X86_64
set_apicid_to_node(physid, nid);
apicid_to_node[physid] = nid;
numa_set_node(cpu, nid);
#else /* CONFIG_X86_32 */
apicid_2_node[physid] = nid;
cpu_to_node_map[cpu] = nid;
#endif
#endif
}
...
@@ -78,6 +78,15 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
#ifdef CONFIG_X86_32
/*
* On x86_32, the mapping between cpu and logical apicid may vary
* depending on apic in use. The following early percpu variable is
* used for the mapping. This is where the behaviors of x86_64 and 32
* actually diverge. Let's keep it ugly for now.
*/
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);
/*
* Knob to control our willingness to enable the local APIC.
*
@@ -1237,6 +1246,19 @@ void __cpuinit setup_local_APIC(void)
*/
apic->init_apic_ldr();
#ifdef CONFIG_X86_32
/*
* APIC LDR is initialized. If logical_apicid mapping was
* initialized during get_smp_config(), make sure it matches the
* actual value.
*/
i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
/* always use the value from LDR */
early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
logical_smp_processor_id();
#endif
/*
* Set Task Priority to 'accept all'. We never change this
* later on.
@@ -1972,7 +1994,10 @@ void __cpuinit generic_processor_info(int apicid, int version)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif
#ifdef CONFIG_X86_32
early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
}
@@ -1993,10 +2018,14 @@ void default_init_apic_ldr(void)
}
#ifdef CONFIG_X86_32
int default_apicid_to_node(int logical_apicid)
int default_x86_32_numa_cpu_node(int cpu)
{
#ifdef CONFIG_SMP
#ifdef CONFIG_NUMA
return apicid_2_node[hard_smp_processor_id()];
int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
if (apicid != BAD_APICID)
return __apicid_to_node[apicid];
return NUMA_NO_NODE;
#else
return 0;
#endif
...
@@ -185,8 +185,6 @@ struct apic apic_flat = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = NULL,
.cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
@@ -337,8 +335,6 @@ struct apic apic_physflat = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = NULL,
.cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
...
@@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void)
return 0;
}
static int noop_cpu_to_logical_apicid(int cpu)
{
return 0;
}
static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
{
return 0;
@@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
cpumask_set_cpu(cpu, retmask);
}
int noop_apicid_to_node(int logical_apicid)
{
/* we're always on node 0 */
return 0;
}
static u32 noop_apic_read(u32 reg)
{
WARN_ON_ONCE((cpu_has_apic && !disable_apic));
@@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v)
WARN_ON_ONCE(cpu_has_apic && !disable_apic);
}
#ifdef CONFIG_X86_32
static int noop_x86_32_numa_cpu_node(int cpu)
{
/* we're always on node 0 */
return 0;
}
#endif
struct apic apic_noop = {
.name = "noop",
.probe = noop_probe,
@@ -153,9 +150,7 @@ struct apic apic_noop = {
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = noop_apicid_to_node,
.cpu_to_logical_apicid = noop_cpu_to_logical_apicid,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
@@ -197,4 +192,9 @@ struct apic apic_noop = {
.icr_write = noop_apic_icr_write,
.wait_icr_idle = noop_apic_wait_icr_idle,
.safe_wait_icr_idle = noop_safe_apic_wait_icr_idle,
#ifdef CONFIG_X86_32
.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
.x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node,
#endif
};
@@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit)
return 1;
}
static int bigsmp_early_logical_apicid(int cpu)
{
/* on bigsmp, logical apicid is the same as physical */
return early_per_cpu(x86_cpu_to_apicid, cpu);
}
static inline unsigned long calculate_ldr(int cpu)
{
unsigned long val, id;
@@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void)
nr_ioapics);
}
static int bigsmp_apicid_to_node(int logical_apicid)
{
return apicid_2_node[hard_smp_processor_id()];
}
static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids)
@@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu)
return BAD_APICID;
}
/* Mapping from cpu number to logical apicid */
static inline int bigsmp_cpu_to_logical_apicid(int cpu)
{
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return cpu_physical_id(cpu);
}
static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
/* For clustered we don't have a good way to do this yet - hack */
@@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
int cpu = cpumask_first(cpumask);
if (cpu < nr_cpu_ids)
return cpu_physical_id(cpu);
return BAD_APICID;
}
static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
*/
for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
return cpu_physical_id(cpu);
}
return bigsmp_cpu_to_logical_apicid(cpu);
return BAD_APICID;
}
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -219,8 +216,6 @@ struct apic apic_bigsmp = {
.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
.setup_apic_routing = bigsmp_setup_apic_routing,
.multi_timer_check = NULL,
.apicid_to_node = bigsmp_apicid_to_node,
.cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
.setup_portio_remap = NULL,
@@ -256,4 +251,7 @@ struct apic apic_bigsmp = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
.x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};
@@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit)
return physid_isset(bit, phys_cpu_present_map);
}
static int es7000_early_logical_apicid(int cpu)
{
/* on es7000, logical apicid is the same as physical */
return early_per_cpu(x86_bios_cpu_apicid, cpu);
}
static unsigned long calculate_ldr(int cpu)
{
unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
@@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void)
nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
}
static int es7000_apicid_to_node(int logical_apicid)
static int es7000_numa_cpu_node(int cpu)
{
return 0;
}
static int es7000_cpu_present_to_apicid(int mps_cpu)
{
if (!mps_cpu)
@@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap)
++cpu_id;
}
/* Mapping from cpu number to logical apicid */
static int es7000_cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return cpu_2_logical_apicid[cpu];
#else
return logical_smp_processor_id();
#endif
}
static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
/* For clustered we don't have a good way to do this yet - hack */
@@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
* The cpus in the mask must all be on the apic cluster.
*/
for_each_cpu(cpu, cpumask) {
int new_apicid = es7000_cpu_to_logical_apicid(cpu);
int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
WARN(1, "Not a valid mask!");
@@ -578,7 +571,7 @@ static unsigned int
es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
const struct cpumask *andmask)
{
int apicid = es7000_cpu_to_logical_apicid(0);
int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
cpumask_var_t cpumask;
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = {
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
.setup_apic_routing = es7000_setup_apic_routing,
.multi_timer_check = NULL,
.apicid_to_node = es7000_apicid_to_node,
.cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
.setup_portio_remap = NULL,
@@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
.x86_32_early_logical_apicid = es7000_early_logical_apicid,
.x86_32_numa_cpu_node = es7000_numa_cpu_node,
};
struct apic __refdata apic_es7000 = {
@@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = {
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
.setup_apic_routing = es7000_setup_apic_routing,
.multi_timer_check = NULL,
.apicid_to_node = es7000_apicid_to_node,
.cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
.setup_portio_remap = NULL,
@@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
.x86_32_early_logical_apicid = es7000_early_logical_apicid,
.x86_32_numa_cpu_node = es7000_numa_cpu_node,
};
@@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
local_irq_restore(flags);
}
#ifdef CONFIG_X86_32
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector)
{
@@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
local_irq_save(flags);
for_each_cpu(query_cpu, mask)
__default_send_IPI_dest_field(
apic->cpu_to_logical_apicid(query_cpu), vector,
apic->dest_logical);
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, apic->dest_logical);
local_irq_restore(flags);
}
@@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
if (query_cpu == this_cpu)
continue;
__default_send_IPI_dest_field(
apic->cpu_to_logical_apicid(query_cpu), vector,
apic->dest_logical);
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, apic->dest_logical);
}
local_irq_restore(flags);
}
#ifdef CONFIG_X86_32
/*
* This is only used on smaller machines.
*/
...
@@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask
return physids_promote(0xFUL, retmap);
}
static inline int numaq_cpu_to_logical_apicid(int cpu)
{
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return cpu_2_logical_apicid[cpu];
}
/*
* Supporting over 60 cpus on NUMA-Q requires a locality-dependent
* cpu to APIC ID relation to properly interact with the intelligent
@@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid)
return logical_apicid >> 4;
}
static int numaq_numa_cpu_node(int cpu)
{
int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
if (logical_apicid != BAD_APICID)
return numaq_apicid_to_node(logical_apicid);
return NUMA_NO_NODE;
}
static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap)
{
int node = numaq_apicid_to_node(logical_apicid);
@@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = {
.ioapic_phys_id_map = numaq_ioapic_phys_id_map,
.setup_apic_routing = numaq_setup_apic_routing,
.multi_timer_check = numaq_multi_timer_check,
.apicid_to_node = numaq_apicid_to_node,
.cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
.cpu_present_to_apicid = numaq_cpu_present_to_apicid,
.apicid_to_cpu_present = numaq_apicid_to_cpu_present,
.setup_portio_remap = numaq_setup_portio_remap,
@@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
.x86_32_numa_cpu_node = numaq_numa_cpu_node,
};
@@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void)
apic->setup_apic_routing();
}
static int default_x86_32_early_logical_apicid(int cpu)
{
return 1 << cpu;
}
static void setup_apic_flat_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
@@ -130,8 +135,6 @@ struct apic apic_default = {
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = setup_apic_flat_routing,
.multi_timer_check = NULL,
.apicid_to_node = default_apicid_to_node,
.cpu_to_logical_apicid = default_cpu_to_logical_apicid,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
.setup_portio_remap = NULL,
@@ -167,6 +170,9 @@ struct apic apic_default = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
.x86_32_early_logical_apicid = default_x86_32_early_logical_apicid,
.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};
extern struct apic apic_numaq;
...
@@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit)
return 1;
}
static void summit_init_apic_ldr(void)
static int summit_early_logical_apicid(int cpu)
{
unsigned long val, id;
int count = 0;
u8 my_id = (u8)hard_smp_processor_id();
u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu);
u8 my_cluster = APIC_CLUSTER(my_id);
#ifdef CONFIG_SMP
u8 lid;
@@ -206,7 +205,7 @@ static void summit_init_apic_ldr(void)
/* Create logical APIC IDs by counting CPUs already in cluster. */
for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
lid = cpu_2_logical_apicid[i];
lid = early_per_cpu(x86_cpu_to_logical_apicid, i);
if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
++count;
}
@@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void)
/* We only have a 4 wide bitmap in cluster mode. If a deranged
* BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
id = my_cluster | (1UL << count);
return my_cluster | (1UL << count);
}
static void summit_init_apic_ldr(void)
{
int cpu = smp_processor_id();
unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
unsigned long val;
apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
val |= SET_APIC_LOGICAL_ID(id);
@@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void)
nr_ioapics);
}
static int summit_apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
return apicid_2_node[hard_smp_processor_id()];
#else
return 0;
#endif
}
/* Mapping from cpu number to logical apicid */
static inline int summit_cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return cpu_2_logical_apicid[cpu];
#else
return logical_smp_processor_id();
#endif
}
static int summit_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids)
@@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
* The cpus in the mask must all be on the apic cluster.
*/
for_each_cpu(cpu, cpumask) {
int new_apicid = summit_cpu_to_logical_apicid(cpu);
int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
printk("%s: Not a valid mask!\n", __func__);
@@ -301,7 +287,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
const struct cpumask *andmask)
{
int apicid = summit_cpu_to_logical_apicid(0);
int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
cpumask_var_t cpumask;
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -528,8 +514,6 @@ struct apic apic_summit = {
.ioapic_phys_id_map = summit_ioapic_phys_id_map,
.setup_apic_routing = summit_setup_apic_routing,
.multi_timer_check = NULL,
.apicid_to_node = summit_apicid_to_node,
.cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
.cpu_present_to_apicid = summit_cpu_present_to_apicid,
.apicid_to_cpu_present = summit_apicid_to_cpu_present,
.setup_portio_remap = NULL,
@@ -565,4 +549,7 @@ struct apic apic_summit = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
.x86_32_early_logical_apicid = summit_early_logical_apicid,
.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};
@@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = NULL,
.cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
...
@@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = NULL,
.cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
...
@@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = NULL,
.cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
...
@@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
}
#endif
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#ifdef CONFIG_NUMA
/*
* To workaround broken NUMA config. Read the comment in
* srat_detect_node().
*/
static int __cpuinit nearby_node(int apicid)
{
int i, node;
for (i = apicid - 1; i >= 0; i--) {
node = apicid_to_node[i];
node = __apicid_to_node[i];
if (node != NUMA_NO_NODE && node_online(node))
return node;
}
for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
node = apicid_to_node[i];
node = __apicid_to_node[i];
if (node != NUMA_NO_NODE && node_online(node))
return node;
}
@@ -334,31 +338,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id);
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#ifdef CONFIG_NUMA
int cpu = smp_processor_id();
int node;
unsigned apicid = c->apicid;
node = per_cpu(cpu_llc_id, cpu);
node = numa_cpu_node(cpu);
if (node == NUMA_NO_NODE)
node = per_cpu(cpu_llc_id, cpu);
if (apicid_to_node[apicid] != NUMA_NO_NODE)
node = apicid_to_node[apicid];
if (!node_online(node)) {
/* Two possibilities here:
- The CPU is missing memory and no node was created.
In that case try picking one from a nearby CPU
- The APIC IDs differ from the HyperTransport node IDs
which the K8 northbridge parsing fills in.
Assume they are all increased by a constant offset,
but in the same order as the HT nodeids.
If that doesn't result in a usable node fall back to the
path for the previous case. */
/*
* Two possibilities here:
*
* - The CPU is missing memory and no node was created. In
* that case try picking one from a nearby CPU.
*
* - The APIC IDs differ from the HyperTransport node IDs
* which the K8 northbridge parsing fills in. Assume
* they are all increased by a constant offset, but in
* the same order as the HT nodeids. If that doesn't
* result in a usable node fall back to the path for the
* previous case.
*
* This workaround operates directly on the mapping between
* APIC ID and NUMA node, assuming certain relationship
* between APIC ID, HT node ID and NUMA topology. As going
* through CPU mapping may alter the outcome, directly
* access __apicid_to_node[].
*/
int ht_nodeid = c->initial_apicid;
if (ht_nodeid >= 0 &&
apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = apicid_to_node[ht_nodeid];
__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = __apicid_to_node[ht_nodeid];
/* Pick a nearby node */
if (!node_online(node))
node = nearby_node(apicid);
...
@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
select_idle_routine(c);
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#ifdef CONFIG_NUMA
numa_add_cpu(smp_processor_id());
#endif
}
...
@@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#ifdef CONFIG_NUMA
unsigned node;
int cpu = smp_processor_id();
int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
/* Don't do the funky fallback heuristics the AMD version employs
for now. */
node = apicid_to_node[apicid];
node = numa_cpu_node(cpu);
if (node == NUMA_NO_NODE || !node_online(node)) {
/* reuse the value from init_cpu_to_node() */
node = cpu_to_node(cpu);
...
@@ -1047,9 +1047,7 @@ void __init setup_arch(char **cmdline_p)
prefill_possible_map();
#ifdef CONFIG_X86_64
init_cpu_to_node();
#endif
init_apic_mappings();
ioapic_and_gsi_init();
...
@@ -225,10 +225,15 @@ void __init setup_per_cpu_areas(void)
per_cpu(x86_bios_cpu_apicid, cpu) =
early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_32
per_cpu(x86_cpu_to_logical_apicid, cpu) =
early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
per_cpu(irq_stack_ptr, cpu) =
per_cpu(irq_stack_union.irq_stack, cpu) +
IRQ_STACK_SIZE - 64;
#endif
#ifdef CONFIG_NUMA
per_cpu(x86_cpu_to_node_map, cpu) =
early_per_cpu_map(x86_cpu_to_node_map, cpu);
@@ -241,7 +246,6 @@ void __init setup_per_cpu_areas(void)
* So set them all (boot cpu and all APs).
*/
set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
#endif
/*
* Up to this point, the boot CPU has been using .init.data
@@ -256,7 +260,10 @@ void __init setup_per_cpu_areas(void)
early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
#ifdef CONFIG_X86_32
early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
...
@@ -71,10 +71,6 @@
#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>
#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
#endif
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
@@ -136,62 +132,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
atomic_t init_deasserted;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);
/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
cpu_to_node_map[cpu] = node;
}
/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
int node;
printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
for (node = 0; node < MAX_NUMNODES; node++)
cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})
#endif
#ifdef CONFIG_X86_32
static int boot_cpu_logical_apicid;
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = BAD_APICID };
static void map_cpu_to_logical_apicid(void)
{
int cpu = smp_processor_id();
int apicid = logical_smp_processor_id();
int node = apic->apicid_to_node(apicid);
if (!node_online(node))
node = first_online_node;
cpu_2_logical_apicid[cpu] = apicid;
map_cpu_to_node(cpu, node);
}
void numa_remove_cpu(int cpu)
{
cpu_2_logical_apicid[cpu] = BAD_APICID;
unmap_cpu_to_node(cpu);
}
#else
#define map_cpu_to_logical_apicid() do {} while (0)
#endif
/*
* Report back to the Boot Processor.
* Running on AP.
@@ -259,7 +199,6 @@ static void __cpuinit smp_callin(void)
apic->smp_callin_clear_local_apic();
setup_local_APIC();
end_local_APIC_setup();
map_cpu_to_logical_apicid();
/*
* Need to setup vector mappings before we enable interrupts.
@@ -960,7 +899,6 @@ static __init void disable_smp(void)
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
else
physid_set_mask_of_physid(0, &phys_cpu_present_map);
map_cpu_to_logical_apicid();
cpumask_set_cpu(0, cpu_sibling_mask(0));
cpumask_set_cpu(0, cpu_core_mask(0));
}
@@ -1096,9 +1034,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
#ifdef CONFIG_X86_32
boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
current_thread_info()->cpu = 0; /* needed? */
for_each_possible_cpu(i) {
zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
@@ -1139,8 +1075,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
end_local_APIC_setup();
map_cpu_to_logical_apicid();
if (apic->setup_portio_remap)
apic->setup_portio_remap();
...
@@ -247,7 +247,7 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
__acpi_map_pxm_to_node(nid, i);
#endif
}
memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
}
#endif /* CONFIG_NUMA_EMU */
@@ -287,7 +287,7 @@ int __init amd_scan_nodes(void)
int j;
for (j = apicid_base; j < cores + apicid_base; j++)
apicid_to_node[(i << bits) + j] = i;
set_apicid_to_node((i << bits) + j, i);
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
}
...
@@ -26,11 +26,49 @@ static __init int numa_setup(char *opt)
early_param("numa", numa_setup);
/*
* Which logical CPUs are on which nodes
* apicid, cpu, node mappings
*/
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
/*
* Map cpu index to node index
*/
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
void __cpuinit numa_set_node(int cpu, int node)
{
int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
/* early setting, no percpu area yet */
if (cpu_to_node_map) {
cpu_to_node_map[cpu] = node;
return;
}
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
dump_stack();
return;
}
#endif
per_cpu(x86_cpu_to_node_map, cpu) = node;
if (node != NUMA_NO_NODE)
set_cpu_numa_node(cpu, node);
}
void __cpuinit numa_clear_node(int cpu)
{
numa_set_node(cpu, NUMA_NO_NODE);
}
/*
* Allocate node_to_cpumask_map based on number of available nodes
* Requires node_possible_map to be valid.
@@ -57,7 +95,174 @@ void __init setup_node_to_cpumask_map(void)
pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
/*
* There are unfortunately some poorly designed mainboards around that
* only connect memory to a single CPU. This breaks the 1:1 cpu->node
* mapping. To avoid this fill in the mapping for all possible CPUs,
* as the number of CPUs is not known yet. We round robin the existing
* nodes.
*/
void __init numa_init_array(void)
{
int rr, i;
rr = first_node(node_online_map);
for (i = 0; i < nr_cpu_ids; i++) {
if (early_cpu_to_node(i) != NUMA_NO_NODE)
continue;
numa_set_node(i, rr);
rr = next_node(rr, node_online_map);
if (rr == MAX_NUMNODES)
rr = first_node(node_online_map);
}
}
static __init int find_near_online_node(int node)
{
int n, val;
int min_val = INT_MAX;
int best_node = -1;
for_each_online_node(n) {
val = node_distance(node, n);
if (val < min_val) {
min_val = val;
best_node = n;
}
}
return best_node;
}
/*
* Setup early cpu_to_node.
*
* Populate cpu_to_node[] only if x86_cpu_to_apicid[],
* and apicid_to_node[] tables have valid entries for a CPU.
* This means we skip cpu_to_node[] initialisation for NUMA
* emulation and faking node case (when running a kernel compiled
* for NUMA on a non NUMA box), which is OK as cpu_to_node[]
* is already initialized in a round robin manner at numa_init_array,
* prior to this call, and this initialization is good enough
* for the fake NUMA cases.
*
* Called before the per_cpu areas are setup.
*/
void __init init_cpu_to_node(void)
{
int cpu;
u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
BUG_ON(cpu_to_apicid == NULL);
for_each_possible_cpu(cpu) {
int node = numa_cpu_node(cpu);
if (node == NUMA_NO_NODE)
continue;
if (!node_online(node))
node = find_near_online_node(node);
numa_set_node(cpu, node);
}
}
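As a rough user-space sketch of the offline-node fallback used by init_cpu_to_node() above (a hypothetical SLIT-style distance table stands in for node_distance(); this is not the kernel implementation):

#include <stdio.h>
#include <limits.h>

#define NR_NODES 3

/* hypothetical SLIT-style distances: dist[from][to], lower means closer */
static const int node_dist_tbl[NR_NODES][NR_NODES] = {
	{ 10, 20, 30 },
	{ 20, 10, 20 },
	{ 30, 20, 10 },
};

/* same idea as find_near_online_node(): pick the closest online node */
static int nearest_online(int node, const int *online, int nr_online)
{
	int best = -1, best_dist = INT_MAX, i;

	for (i = 0; i < nr_online; i++) {
		int d = node_dist_tbl[node][online[i]];
		if (d < best_dist) {
			best_dist = d;
			best = online[i];
		}
	}
	return best;
}

int main(void)
{
	int online[] = { 0, 1 };	/* assume node 2 is not online */

	/* node 2 is closer to node 1 (20) than to node 0 (30), so CPUs mapped to it land on node 1 */
	printf("node 2 -> online node %d\n", nearest_online(2, online, 2));
	return 0;
}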
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
void __cpuinit numa_remove_cpu(int cpu)
{
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
int __cpu_to_node(int cpu)
{
if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
printk(KERN_WARNING
"cpu_to_node(%d): usage too early!\n", cpu);
dump_stack();
return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
}
return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);
/*
* Same function as cpu_to_node() but used if called before the
* per_cpu areas are setup.
*/
int early_cpu_to_node(int cpu)
{
if (early_per_cpu_ptr(x86_cpu_to_node_map))
return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
if (!cpu_possible(cpu)) {
printk(KERN_WARNING
"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
dump_stack();
return NUMA_NO_NODE;
}
return per_cpu(x86_cpu_to_node_map, cpu);
}
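A minimal sketch of the underlying "early static map, then per-CPU copy" pattern that __cpu_to_node() and early_cpu_to_node() rely on, with hypothetical names standing in for the early_per_cpu machinery:

#include <stdio.h>

#define NR_CPUS       4
#define NUMA_NO_NODE  (-1)

/* filled in during early boot, before per-CPU areas exist */
static int early_map[NR_CPUS] = { 0, 0, 1, 1 };
static int *early_map_ptr = early_map;	/* would be cleared once per-CPU areas are set up */

/* stands in for per_cpu(x86_cpu_to_node_map, cpu) */
static int percpu_node[NR_CPUS];

static int my_early_cpu_to_node(int cpu)
{
	if (early_map_ptr)		/* early: read the static table */
		return early_map_ptr[cpu];
	return percpu_node[cpu];	/* later: read the per-CPU copy */
}

int main(void)
{
	printf("cpu 2 -> node %d\n", my_early_cpu_to_node(2));
	return 0;
}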
struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
int node = early_cpu_to_node(cpu);
struct cpumask *mask;
char buf[64];
if (node == NUMA_NO_NODE) {
/* early_cpu_to_node() already emits a warning and trace */
return NULL;
}
mask = node_to_cpumask_map[node];
if (!mask) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
return NULL;
}
cpulist_scnprintf(buf, sizeof(buf), mask);
printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
enable ? "numa_add_cpu" : "numa_remove_cpu",
cpu, node, buf);
return mask;
}
# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
struct cpumask *mask;
mask = debug_cpumask_set_cpu(cpu, enable);
if (!mask)
return;
if (enable)
cpumask_set_cpu(cpu, mask);
else
cpumask_clear_cpu(cpu, mask);
}
void __cpuinit numa_add_cpu(int cpu)
{
numa_set_cpumask(cpu, 1);
}
void __cpuinit numa_remove_cpu(int cpu)
{
numa_set_cpumask(cpu, 0);
}
# endif /* !CONFIG_NUMA_EMU */
/* /*
* Returns a pointer to the bitmask of CPUs on Node 'node'. * Returns a pointer to the bitmask of CPUs on Node 'node'.
*/ */
...@@ -80,4 +285,5 @@ const struct cpumask *cpumask_of_node(int node) ...@@ -80,4 +285,5 @@ const struct cpumask *cpumask_of_node(int node)
return node_to_cpumask_map[node]; return node_to_cpumask_map[node];
} }
EXPORT_SYMBOL(cpumask_of_node); EXPORT_SYMBOL(cpumask_of_node);
#endif
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
...@@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); ...@@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
static unsigned long kva_start_pfn; static unsigned long kva_start_pfn;
static unsigned long kva_pages; static unsigned long kva_pages;
int __cpuinit numa_cpu_node(int cpu)
{
return apic->x86_32_numa_cpu_node(cpu);
}
/* /*
* FLAT - support for basic PC memory model with discontig enabled, essentially * FLAT - support for basic PC memory model with discontig enabled, essentially
* a single node with all available processors in it with a flat * a single node with all available processors in it with a flat
...@@ -361,6 +367,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, ...@@ -361,6 +367,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
*/ */
get_memcfg_numa(); get_memcfg_numa();
numa_init_array();
kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
......
...@@ -26,19 +26,9 @@ EXPORT_SYMBOL(node_data); ...@@ -26,19 +26,9 @@ EXPORT_SYMBOL(node_data);
struct memnode memnode; struct memnode memnode;
s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size; static unsigned long __initdata nodemap_size;
/*
* Map cpu index to node index
*/
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
/* /*
* Given a shift value, try to populate memnodemap[] * Given a shift value, try to populate memnodemap[]
* Returns : * Returns :
...@@ -234,28 +224,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) ...@@ -234,28 +224,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
node_set_online(nodeid); node_set_online(nodeid);
} }
/*
* There are unfortunately some poorly designed mainboards around that
* only connect memory to a single CPU. This breaks the 1:1 cpu->node
* mapping. To avoid this fill in the mapping for all possible CPUs,
* as the number of CPUs is not known yet. We round robin the existing
* nodes.
*/
void __init numa_init_array(void)
{
int rr, i;
rr = first_node(node_online_map);
for (i = 0; i < nr_cpu_ids; i++) {
if (early_cpu_to_node(i) != NUMA_NO_NODE)
continue;
numa_set_node(i, rr);
rr = next_node(rr, node_online_map);
if (rr == MAX_NUMNODES)
rr = first_node(node_online_map);
}
}
#ifdef CONFIG_NUMA_EMU #ifdef CONFIG_NUMA_EMU
/* Numa emulation */ /* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata; static struct bootnode nodes[MAX_NUMNODES] __initdata;
...@@ -676,115 +644,33 @@ unsigned long __init numa_free_all_bootmem(void) ...@@ -676,115 +644,33 @@ unsigned long __init numa_free_all_bootmem(void)
return pages; return pages;
} }
#ifdef CONFIG_NUMA int __cpuinit numa_cpu_node(int cpu)
static __init int find_near_online_node(int node)
{ {
int n, val; int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
int min_val = INT_MAX;
int best_node = -1;
for_each_online_node(n) {
val = node_distance(node, n);
if (val < min_val) {
min_val = val;
best_node = n;
}
}
return best_node; if (apicid != BAD_APICID)
return __apicid_to_node[apicid];
return NUMA_NO_NODE;
} }
/* /*
* Setup early cpu_to_node. * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
* of 64bit specific data structures. The distinction is artificial and
* should be removed. numa_{add|remove}_cpu() are implemented in numa.c
* for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
* enabled.
* *
* Populate cpu_to_node[] only if x86_cpu_to_apicid[], * NUMA emulation is planned to be made generic and the following and other
* and apicid_to_node[] tables have valid entries for a CPU. * related code should be moved to numa.c.
* This means we skip cpu_to_node[] initialisation for NUMA
* emulation and faking node case (when running a kernel compiled
* for NUMA on a non NUMA box), which is OK as cpu_to_node[]
* is already initialized in a round robin manner at numa_init_array,
* prior to this call, and this initialization is good enough
* for the fake NUMA cases.
*
* Called before the per_cpu areas are setup.
*/ */
void __init init_cpu_to_node(void) #ifdef CONFIG_NUMA_EMU
{ # ifndef CONFIG_DEBUG_PER_CPU_MAPS
int cpu;
u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
BUG_ON(cpu_to_apicid == NULL);
for_each_possible_cpu(cpu) {
int node;
u16 apicid = cpu_to_apicid[cpu];
if (apicid == BAD_APICID)
continue;
node = apicid_to_node[apicid];
if (node == NUMA_NO_NODE)
continue;
if (!node_online(node))
node = find_near_online_node(node);
numa_set_node(cpu, node);
}
}
#endif
void __cpuinit numa_set_node(int cpu, int node)
{
int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
/* early setting, no percpu area yet */
if (cpu_to_node_map) {
cpu_to_node_map[cpu] = node;
return;
}
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
dump_stack();
return;
}
#endif
per_cpu(x86_cpu_to_node_map, cpu) = node;
if (node != NUMA_NO_NODE)
set_cpu_numa_node(cpu, node);
}
void __cpuinit numa_clear_node(int cpu)
{
numa_set_node(cpu, NUMA_NO_NODE);
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
#ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
void __cpuinit numa_remove_cpu(int cpu)
{
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
#else
void __cpuinit numa_add_cpu(int cpu) void __cpuinit numa_add_cpu(int cpu)
{ {
unsigned long addr; unsigned long addr;
u16 apicid; int physnid, nid;
int physnid;
int nid = NUMA_NO_NODE;
apicid = early_per_cpu(x86_cpu_to_apicid, cpu); nid = numa_cpu_node(cpu);
if (apicid != BAD_APICID)
nid = apicid_to_node[apicid];
if (nid == NUMA_NO_NODE) if (nid == NUMA_NO_NODE)
nid = early_cpu_to_node(cpu); nid = early_cpu_to_node(cpu);
BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
...@@ -818,53 +704,17 @@ void __cpuinit numa_remove_cpu(int cpu) ...@@ -818,53 +704,17 @@ void __cpuinit numa_remove_cpu(int cpu)
for_each_online_node(i) for_each_online_node(i)
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
} }
#endif /* !CONFIG_NUMA_EMU */ # else /* !CONFIG_DEBUG_PER_CPU_MAPS */
#else /* CONFIG_DEBUG_PER_CPU_MAPS */
static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
int node = early_cpu_to_node(cpu);
struct cpumask *mask;
char buf[64];
mask = node_to_cpumask_map[node];
if (!mask) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
return NULL;
}
cpulist_scnprintf(buf, sizeof(buf), mask);
printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
enable ? "numa_add_cpu" : "numa_remove_cpu",
cpu, node, buf);
return mask;
}
/*
* --------- debug versions of the numa functions ---------
*/
#ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
struct cpumask *mask;
mask = debug_cpumask_set_cpu(cpu, enable);
if (!mask)
return;
if (enable)
cpumask_set_cpu(cpu, mask);
else
cpumask_clear_cpu(cpu, mask);
}
#else
static void __cpuinit numa_set_cpumask(int cpu, int enable) static void __cpuinit numa_set_cpumask(int cpu, int enable)
{ {
int node = early_cpu_to_node(cpu); int node = early_cpu_to_node(cpu);
struct cpumask *mask; struct cpumask *mask;
int i; int i;
if (node == NUMA_NO_NODE) {
/* early_cpu_to_node() already emits a warning and trace */
return;
}
for_each_online_node(i) { for_each_online_node(i) {
unsigned long addr; unsigned long addr;
...@@ -882,7 +732,6 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) ...@@ -882,7 +732,6 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
cpumask_clear_cpu(cpu, mask); cpumask_clear_cpu(cpu, mask);
} }
} }
#endif /* CONFIG_NUMA_EMU */
void __cpuinit numa_add_cpu(int cpu) void __cpuinit numa_add_cpu(int cpu)
{ {
...@@ -893,39 +742,5 @@ void __cpuinit numa_remove_cpu(int cpu) ...@@ -893,39 +742,5 @@ void __cpuinit numa_remove_cpu(int cpu)
{ {
numa_set_cpumask(cpu, 0); numa_set_cpumask(cpu, 0);
} }
# endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
int __cpu_to_node(int cpu) #endif /* CONFIG_NUMA_EMU */
{
if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
printk(KERN_WARNING
"cpu_to_node(%d): usage too early!\n", cpu);
dump_stack();
return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
}
return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);
/*
* Same function as cpu_to_node() but used if called before the
* per_cpu areas are setup.
*/
int early_cpu_to_node(int cpu)
{
if (early_per_cpu_ptr(x86_cpu_to_node_map))
return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
if (!cpu_possible(cpu)) {
printk(KERN_WARNING
"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
dump_stack();
return NUMA_NO_NODE;
}
return per_cpu(x86_cpu_to_node_map, cpu);
}
/*
* --------- end of debug versions of the numa functions ---------
*/
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
...@@ -57,7 +57,7 @@ struct node_memory_chunk_s { ...@@ -57,7 +57,7 @@ struct node_memory_chunk_s {
static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS]; static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];
static int __initdata num_memory_chunks; /* total number of memory chunks */ static int __initdata num_memory_chunks; /* total number of memory chunks */
static u8 __initdata apicid_to_pxm[MAX_APICID]; static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC];
int acpi_numa __initdata; int acpi_numa __initdata;
...@@ -254,8 +254,8 @@ int __init get_memcfg_from_srat(void) ...@@ -254,8 +254,8 @@ int __init get_memcfg_from_srat(void)
printk(KERN_DEBUG "Number of memory chunks in system = %d\n", printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
num_memory_chunks); num_memory_chunks);
for (i = 0; i < MAX_APICID; i++) for (i = 0; i < MAX_LOCAL_APIC; i++)
apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]); set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i]));
for (j = 0; j < num_memory_chunks; j++){ for (j = 0; j < num_memory_chunks; j++){
struct node_memory_chunk_s * chunk = &node_memory_chunk[j]; struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
......
...@@ -79,7 +79,7 @@ static __init void bad_srat(void) ...@@ -79,7 +79,7 @@ static __init void bad_srat(void)
printk(KERN_ERR "SRAT: SRAT not used.\n"); printk(KERN_ERR "SRAT: SRAT not used.\n");
acpi_numa = -1; acpi_numa = -1;
for (i = 0; i < MAX_LOCAL_APIC; i++) for (i = 0; i < MAX_LOCAL_APIC; i++)
apicid_to_node[i] = NUMA_NO_NODE; set_apicid_to_node(i, NUMA_NO_NODE);
for (i = 0; i < MAX_NUMNODES; i++) { for (i = 0; i < MAX_NUMNODES; i++) {
nodes[i].start = nodes[i].end = 0; nodes[i].start = nodes[i].end = 0;
nodes_add[i].start = nodes_add[i].end = 0; nodes_add[i].start = nodes_add[i].end = 0;
...@@ -138,7 +138,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) ...@@ -138,7 +138,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
return; return;
} }
apicid_to_node[apic_id] = node; set_apicid_to_node(apic_id, node);
node_set(node, cpu_nodes_parsed); node_set(node, cpu_nodes_parsed);
acpi_numa = 1; acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
...@@ -178,7 +178,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) ...@@ -178,7 +178,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
return; return;
} }
apicid_to_node[apic_id] = node; set_apicid_to_node(apic_id, node);
node_set(node, cpu_nodes_parsed); node_set(node, cpu_nodes_parsed);
acpi_numa = 1; acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
...@@ -523,7 +523,7 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) ...@@ -523,7 +523,7 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
* node, it must now point to the fake node ID. * node, it must now point to the fake node ID.
*/ */
for (j = 0; j < MAX_LOCAL_APIC; j++) for (j = 0; j < MAX_LOCAL_APIC; j++)
if (apicid_to_node[j] == nid && if (__apicid_to_node[j] == nid &&
fake_apicid_to_node[j] == NUMA_NO_NODE) fake_apicid_to_node[j] == NUMA_NO_NODE)
fake_apicid_to_node[j] = i; fake_apicid_to_node[j] = i;
} }
...@@ -534,13 +534,13 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) ...@@ -534,13 +534,13 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
* value. * value.
*/ */
for (i = 0; i < MAX_LOCAL_APIC; i++) for (i = 0; i < MAX_LOCAL_APIC; i++)
if (apicid_to_node[i] != NUMA_NO_NODE && if (__apicid_to_node[i] != NUMA_NO_NODE &&
fake_apicid_to_node[i] == NUMA_NO_NODE) fake_apicid_to_node[i] == NUMA_NO_NODE)
fake_apicid_to_node[i] = 0; fake_apicid_to_node[i] = 0;
for (i = 0; i < num_nodes; i++) for (i = 0; i < num_nodes; i++)
__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
nodes_clear(nodes_parsed); nodes_clear(nodes_parsed);
for (i = 0; i < num_nodes; i++) for (i = 0; i < num_nodes; i++)
......