If we fail to parse min_common_depth from the device tree, we boot with NUMA disabled. Reflect that by setting the numa_enabled variable to false. Also, switch all min_common_depth failure checks to an if (!numa_enabled) check.
This helps us avoid checking for both conditions in different code paths.

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/mm/numa.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 27f792c0df68..848b4663c7ad 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -212,7 +212,7 @@ static int associativity_to_nid(const __be32 *associativity)
 {
 	int nid = NUMA_NO_NODE;
 
-	if (min_common_depth == -1 || !numa_enabled)
+	if (!numa_enabled)
 		goto out;
 
 	if (of_read_number(associativity, 1) >= min_common_depth)
@@ -628,8 +628,14 @@ static int __init parse_numa_properties(void)
 
 	min_common_depth = find_min_common_depth();
 
-	if (min_common_depth < 0)
+	if (min_common_depth < 0) {
+		/*
+		 * If we fail to parse min_common_depth from the device
+		 * tree, mark NUMA as disabled and boot with NUMA disabled.
+		 */
+		numa_enabled = false;
 		return min_common_depth;
+	}
 
 	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
 
@@ -745,7 +751,7 @@ void __init dump_numa_cpu_topology(void)
 	unsigned int node;
 	unsigned int cpu, count;
 
-	if (min_common_depth == -1 || !numa_enabled)
+	if (!numa_enabled)
 		return;
 
 	for_each_online_node(node) {
@@ -810,7 +816,7 @@ static void __init find_possible_nodes(void)
 	struct device_node *rtas;
 	u32 numnodes, i;
 
-	if (min_common_depth <= 0 || !numa_enabled)
+	if (!numa_enabled)
 		return;
 
 	rtas = of_find_node_by_path("/rtas");
@@ -1012,7 +1018,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	struct device_node *memory = NULL;
 	int nid;
 
-	if (!numa_enabled || (min_common_depth < 0))
+	if (!numa_enabled)
 		return first_online_node;
 
 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
-- 
2.21.0
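
For context, a minimal user-space sketch of the control flow this patch establishes (not kernel code; the globals and the find_min_common_depth() stand-in below are hypothetical): the device-tree parse failure is recorded once in numa_enabled, and every later guard reduces to a single check.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical user-space stand-ins for the kernel state this patch touches. */
static bool numa_enabled = true;
static int min_common_depth;

/* Stand-in for find_min_common_depth(); pretend device-tree parsing failed. */
static int find_min_common_depth(void)
{
	return -1;
}

static int parse_numa_properties(void)
{
	min_common_depth = find_min_common_depth();
	if (min_common_depth < 0) {
		/* Record the failure once, so callers only test numa_enabled. */
		numa_enabled = false;
		return min_common_depth;
	}
	return 0;
}

int main(void)
{
	parse_numa_properties();

	/*
	 * Every guard that previously also tested min_common_depth now
	 * collapses to this single check.
	 */
	if (!numa_enabled)
		printf("booting with NUMA disabled\n");

	return 0;
}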