Set min_common_depth = -1 if NUMA is disabled. This helps us avoid checking both min_common_depth and numa_enabled in different code paths.
Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/mm/numa.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f6d68baeaa96..c84062a390cc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -212,7 +212,7 @@ static int associativity_to_nid(const __be32 *associativity)
 {
 	int nid = NUMA_NO_NODE;
 
-	if (min_common_depth == -1 || !numa_enabled)
+	if (min_common_depth == -1)
 		goto out;
 
 	if (of_read_number(associativity, 1) >= min_common_depth)
@@ -625,6 +625,7 @@ static int __init parse_numa_properties(void)
 
 	if (numa_enabled == 0) {
 		printk(KERN_WARNING "NUMA disabled by user\n");
+		min_common_depth = -1;
 		return -1;
 	}
 
@@ -747,7 +748,7 @@ void __init dump_numa_cpu_topology(void)
 	unsigned int node;
 	unsigned int cpu, count;
 
-	if (min_common_depth == -1 || !numa_enabled)
+	if (min_common_depth == -1)
 		return;
 
 	for_each_online_node(node) {
@@ -812,7 +813,7 @@ static void __init find_possible_nodes(void)
 	struct device_node *rtas;
 	u32 numnodes, i;
 
-	if (min_common_depth <= 0 || !numa_enabled)
+	if (min_common_depth <= 0)
 		return;
 
 	rtas = of_find_node_by_path("/rtas");
@@ -1014,7 +1015,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	struct device_node *memory = NULL;
 	int nid;
 
-	if (!numa_enabled || (min_common_depth < 0))
+	if (min_common_depth < 0)
 		return first_online_node;
 
 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
-- 
2.21.0
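For readers skimming the diff, here is a minimal userspace sketch (not the kernel code itself) of the pattern the patch applies: the init path folds the "NUMA disabled" condition into the same variable callers already test, so later checks need only one test instead of two. The variable names mirror the kernel ones, but the surrounding scaffolding (main(), the fake depth value, the return codes) is invented purely for illustration.

/*
 * Sketch of the consolidation pattern: record "disabled" in
 * min_common_depth once, then test only min_common_depth later.
 */
#include <stdio.h>
#include <stdbool.h>

static int min_common_depth = -1;	/* -1 means "NUMA not usable" */
static bool numa_enabled = true;	/* e.g. cleared by a boot parameter */

static int parse_numa_properties(void)
{
	if (!numa_enabled) {
		printf("NUMA disabled by user\n");
		min_common_depth = -1;	/* fold the flag into the depth */
		return -1;
	}
	min_common_depth = 2;		/* pretend firmware reported depth 2 */
	return 0;
}

/* Callers now need only the single check. */
static int associativity_to_nid(void)
{
	if (min_common_depth == -1)	/* was: == -1 || !numa_enabled */
		return -1;		/* stands in for NUMA_NO_NODE */
	return 0;			/* some valid node id */
}

int main(void)
{
	parse_numa_properties();
	printf("nid = %d\n", associativity_to_nid());
	return 0;
}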