Hi Michael,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on powerpc/next]
[also build test ERROR on v4.20-rc3 next-20181119]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Michael-Bringmann/powerpc-numa-Perform-full-re-add-of-CPU-for-PRRN-VPHN-topology-update/20181119-224033
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=7.2.0 make.cross ARCH=powerpc 

All errors/warnings (new ones prefixed by >>):

   arch/powerpc/mm/numa.c: In function 'numa_update_cpu_topology':
>> arch/powerpc/mm/numa.c:1361:4: error: implicit declaration of function 'dlpar_cpu_readd'; did you mean 'raw_cpu_read'? [-Werror=implicit-function-declaration]
       dlpar_cpu_readd(cpu);
       ^~~~~~~~~~~~~~~
       raw_cpu_read
   arch/powerpc/mm/numa.c: In function 'topology_schedule_update':
>> arch/powerpc/mm/numa.c:1451:2: warning: this 'if' clause does not guard... [-Wmisleading-indentation]
     if (!topology_update_in_progress);
     ^~
   arch/powerpc/mm/numa.c:1452:3: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the 'if'
      schedule_work(&topology_work);
      ^~~~~~~~~~~~~
   cc1: some warnings being treated as errors
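
The implicit-declaration error suggests that, with this config, none of the headers included by arch/powerpc/mm/numa.c provides a prototype for dlpar_cpu_readd(). A minimal sketch of one way to address it is below; the header location, the config guard and the int return type are assumptions for illustration only, not necessarily how your patch lays things out:

        /* e.g. in a powerpc header visible to numa.c (header choice is a guess) */
        #ifdef CONFIG_HOTPLUG_CPU
        int dlpar_cpu_readd(int cpu);           /* assumed to be defined in pseries hotplug-cpu code */
        #else
        static inline int dlpar_cpu_readd(int cpu) { return 0; }   /* no-op when CPU hotplug is disabled */
        #endif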

vim +1361 arch/powerpc/mm/numa.c

  1298  
  1299  /*
  1300   * Update the node maps and sysfs entries for each cpu whose home node
  1301   * has changed. Returns 1 when the topology has changed, and 0 otherwise.
  1302   *
  1303   * readd_cpus: Also readd any CPUs that have changed affinity
  1304   */
  1305  static int numa_update_cpu_topology(bool readd_cpus)
  1306  {
  1307          unsigned int cpu, sibling, changed = 0;
  1308          struct topology_update_data *updates, *ud;
  1309          cpumask_t updated_cpus;
  1310          struct device *dev;
  1311          int weight, new_nid, i = 0;
  1312  
  1313          if ((!prrn_enabled && !vphn_enabled && topology_inited) ||
  1314                  topology_update_in_progress)
  1315                  return 0;
  1316  
  1317          weight = cpumask_weight(&cpu_associativity_changes_mask);
  1318          if (!weight)
  1319                  return 0;
  1320  
  1321          updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
  1322          if (!updates)
  1323                  return 0;
  1324  
  1325          topology_update_in_progress = 1;
  1326  
  1327          cpumask_clear(&updated_cpus);
  1328  
  1329          for_each_cpu(cpu, &cpu_associativity_changes_mask) {
  1330                  /*
  1331                   * If siblings aren't flagged for changes, updates list
  1332                   * will be too short. Skip on this update and set for next
  1333                   * update.
  1334                   */
  1335                  if (!cpumask_subset(cpu_sibling_mask(cpu),
  1336                                          &cpu_associativity_changes_mask)) {
  1337                          pr_info("Sibling bits not set for associativity "
  1338                                          "change, cpu%d\n", cpu);
  1339                          cpumask_or(&cpu_associativity_changes_mask,
  1340                                          &cpu_associativity_changes_mask,
  1341                                          cpu_sibling_mask(cpu));
  1342                          cpu = cpu_last_thread_sibling(cpu);
  1343                          continue;
  1344                  }
  1345  
  1346                  new_nid = find_and_online_cpu_nid(cpu);
  1347  
  1348                  if ((new_nid == numa_cpu_lookup_table[cpu]) ||
  1349                          !cpu_present(cpu)) {
  1350                          cpumask_andnot(&cpu_associativity_changes_mask,
  1351                                          &cpu_associativity_changes_mask,
  1352                                          cpu_sibling_mask(cpu));
  1353                          if (cpu_present(cpu))
  1354                                  dbg("Assoc chg gives same node %d for cpu%d\n",
  1355                                          new_nid, cpu);
  1356                          cpu = cpu_last_thread_sibling(cpu);
  1357                          continue;
  1358                  }
  1359  
  1360                  if (readd_cpus)
> 1361                          dlpar_cpu_readd(cpu);
  1362  
  1363                  for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
  1364                          ud = &updates[i++];
  1365                          ud->next = &updates[i];
  1366                          ud->cpu = sibling;
  1367                          ud->new_nid = new_nid;
  1368                          ud->old_nid = numa_cpu_lookup_table[sibling];
  1369                          cpumask_set_cpu(sibling, &updated_cpus);
  1370                  }
  1371                  cpu = cpu_last_thread_sibling(cpu);
  1372          }
  1373  
  1374          /*
  1375           * Prevent processing of 'updates' from overflowing array
  1376           * where last entry filled in a 'next' pointer.
  1377           */
  1378          if (i)
  1379                  updates[i-1].next = NULL;
  1380  
  1381          pr_debug("Topology update for the following CPUs:\n");
  1382          if (cpumask_weight(&updated_cpus)) {
  1383                  for (ud = &updates[0]; ud; ud = ud->next) {
  1384                          pr_debug("cpu %d moving from node %d "
  1385                                            "to %d\n", ud->cpu,
  1386                                            ud->old_nid, ud->new_nid);
  1387                  }
  1388          }
  1389  
  1390          /*
  1391           * In cases where we have nothing to update (because the updates list
  1392           * is too short or because the new topology is same as the old one),
  1393           * skip invoking update_cpu_topology() via stop-machine(). This is
  1394           * necessary (and not just a fast-path optimization) since stop-machine
  1395           * can end up electing a random CPU to run update_cpu_topology(), and
  1396           * thus trick us into setting up incorrect cpu-node mappings (since
  1397           * 'updates' is kzalloc()'ed).
  1398           *
  1399           * And for the similar reason, we will skip all the following updating.
  1400           */
  1401          if (!cpumask_weight(&updated_cpus))
  1402                  goto out;
  1403  
  1404          stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
  1405  
  1406          /*
  1407           * Update the numa-cpu lookup table with the new mappings, even for
  1408           * offline CPUs. It is best to perform this update from the stop-
  1409           * machine context.
  1410           */
  1411          stop_machine(update_lookup_table, &updates[0],
  1412                       cpumask_of(raw_smp_processor_id()));
  1413  
  1414          for (ud = &updates[0]; ud; ud = ud->next) {
  1415                  unregister_cpu_under_node(ud->cpu, ud->old_nid);
  1416                  register_cpu_under_node(ud->cpu, ud->new_nid);
  1417  
  1418                  dev = get_cpu_device(ud->cpu);
  1419                  if (dev)
  1420                          kobject_uevent(&dev->kobj, KOBJ_CHANGE);
  1421                  cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
  1422                  changed = 1;
  1423          }
  1424  
  1425  out:
  1426          topology_changed = changed;
  1427          topology_update_in_progress = 0;
  1428          kfree(updates);
  1429          return changed;
  1430  }
  1431  
  1432  int arch_update_cpu_topology(void)
  1433  {
  1434          int changed = topology_changed;
  1435  
  1436          topology_changed = 0;
  1437          return changed;
  1438  }
  1439  
  1440  static void topology_work_fn(struct work_struct *work)
  1441  {
  1442          lock_device_hotplug();
  1443          if (numa_update_cpu_topology(true))
  1444                  rebuild_sched_domains();
  1445          unlock_device_hotplug();
  1446  }
  1447  static DECLARE_WORK(topology_work, topology_work_fn);
  1448  
  1449  void topology_schedule_update(void)
  1450  {
> 1451          if (!topology_update_in_progress);
  1452                  schedule_work(&topology_work);
  1453  }
  1454  
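
For the -Wmisleading-indentation warning, the stray semicolon after the 'if' condition at line 1451 terminates the statement, so schedule_work() runs unconditionally. Presumably the intent is the following (sketch, using the same names as in the patch):

        void topology_schedule_update(void)
        {
                if (!topology_update_in_progress)
                        schedule_work(&topology_work);
        }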

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
