Commit 85d605d9 authored by Alex Williamson, committed by Tony Luck

[IA64] discontig.c: fixup pxm_to_nid_map

I'd like to be able to use pxm_to_nid_map in several places to
discover proximity domain to node id associations.  Unfortunately, after
reassign_cpu_only_nodes() plays with the node space, the pxm_to_nid_map
doesn't necessarily reflect reality.  This fixes up the table so it's
still valid.  Note that nid_to_pxm_map is still potentially broken: it
has a one-to-many problem if the above function combines several
proximity domains into a single node.  Thanks to Bob Picco for the base
patch.
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 65fd90f1
...@@ -53,11 +53,12 @@ static struct early_node_data mem_data[NR_NODES] __initdata; ...@@ -53,11 +53,12 @@ static struct early_node_data mem_data[NR_NODES] __initdata;
static void __init reassign_cpu_only_nodes(void) static void __init reassign_cpu_only_nodes(void)
{ {
struct node_memblk_s *p; struct node_memblk_s *p;
int i, j, k, nnode, nid, cpu, cpunid; int i, j, k, nnode, nid, cpu, cpunid, pxm;
u8 cslit, slit; u8 cslit, slit;
static DECLARE_BITMAP(nodes_with_mem, NR_NODES) __initdata; static DECLARE_BITMAP(nodes_with_mem, NR_NODES) __initdata;
static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata; static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata;
static int node_flip[NR_NODES] __initdata; static int node_flip[NR_NODES] __initdata;
static int old_nid_map[NR_CPUS] __initdata;
for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++) for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
if (!test_bit(p->nid, (void *) nodes_with_mem)) { if (!test_bit(p->nid, (void *) nodes_with_mem)) {
...@@ -104,9 +105,14 @@ static void __init reassign_cpu_only_nodes(void) ...@@ -104,9 +105,14 @@ static void __init reassign_cpu_only_nodes(void)
for (cpu = 0; cpu < NR_CPUS; cpu++) for (cpu = 0; cpu < NR_CPUS; cpu++)
if (node_cpuid[cpu].nid == i) { if (node_cpuid[cpu].nid == i) {
/* For nodes not being reassigned just fix the cpu's nid. */ /*
* For nodes not being reassigned just
* fix the cpu's nid and reverse pxm map
*/
if (cpunid < numnodes) { if (cpunid < numnodes) {
node_cpuid[cpu].nid = cpunid; pxm = nid_to_pxm_map[i];
pxm_to_nid_map[pxm] =
node_cpuid[cpu].nid = cpunid;
continue; continue;
} }
...@@ -126,6 +132,8 @@ static void __init reassign_cpu_only_nodes(void) ...@@ -126,6 +132,8 @@ static void __init reassign_cpu_only_nodes(void)
} }
} }
/* save old nid map so we can update the pxm */
old_nid_map[cpu] = node_cpuid[cpu].nid;
node_cpuid[cpu].nid = k; node_cpuid[cpu].nid = k;
} }
} }
...@@ -134,14 +142,19 @@ static void __init reassign_cpu_only_nodes(void) ...@@ -134,14 +142,19 @@ static void __init reassign_cpu_only_nodes(void)
* Fixup temporary nid values for CPU-only nodes. * Fixup temporary nid values for CPU-only nodes.
*/ */
for (cpu = 0; cpu < NR_CPUS; cpu++) for (cpu = 0; cpu < NR_CPUS; cpu++)
if (node_cpuid[cpu].nid == (numnodes + numnodes)) if (node_cpuid[cpu].nid == (numnodes + numnodes)) {
node_cpuid[cpu].nid = nnode - 1; pxm = nid_to_pxm_map[old_nid_map[cpu]];
else pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = nnode - 1;
for (i = 0; i < nnode; i++) } else {
if (node_flip[i] == (node_cpuid[cpu].nid - numnodes)) { for (i = 0; i < nnode; i++) {
node_cpuid[cpu].nid = i; if (node_flip[i] != (node_cpuid[cpu].nid - numnodes))
break; continue;
}
pxm = nid_to_pxm_map[old_nid_map[cpu]];
pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = i;
break;
}
}
/* /*
* Fix numa_slit by compressing from larger * Fix numa_slit by compressing from larger
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment