Hi Athira,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on powerpc/next]
[also build test WARNING on v5.13 next-20210709]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting the patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    
https://github.com/0day-ci/linux/commits/Athira-Rajeev/powerpc-perf-Enable-PMU-counters-post-partition-migration-if-PMU-is-active/20210711-150741
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
        chmod +x ~/bin/make.cross
        # 
https://github.com/0day-ci/linux/commit/2050c82afb3abd9eaa57fee45e71e7fccabfb81f
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review 
Athira-Rajeev/powerpc-perf-Enable-PMU-counters-post-partition-migration-if-PMU-is-active/20210711-150741
        git checkout 2050c82afb3abd9eaa57fee45e71e7fccabfb81f
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross 
ARCH=powerpc 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <l...@intel.com>

All warnings (new ones prefixed by >>):

>> arch/powerpc/perf/core-book3s.c:1343:6: warning: no previous prototype for 
>> 'mobility_pmu_disable' [-Wmissing-prototypes]
    1343 | void mobility_pmu_disable(void)
         |      ^~~~~~~~~~~~~~~~~~~~
>> arch/powerpc/perf/core-book3s.c:1537:6: warning: no previous prototype for 
>> 'mobility_pmu_enable' [-Wmissing-prototypes]
    1537 | void mobility_pmu_enable(void)
         |      ^~~~~~~~~~~~~~~~~~~


vim +/mobility_pmu_disable +1343 arch/powerpc/perf/core-book3s.c

  1337  
  1338  /*
  1339   * Called from powerpc mobility code
  1340   * before migration to disable counters
  1341   * if the PMU is active.
  1342   */
> 1343  void mobility_pmu_disable(void)
  1344  {
  1345          struct cpu_hw_events *cpuhw;
  1346  
  1347          cpuhw = this_cpu_ptr(&cpu_hw_events);
  1348          if (cpuhw->n_events != 0) {
  1349                  power_pmu_disable(NULL);
  1350                  cpuhw->migrate = 1;
  1351          }
  1352  }
  1353  
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 * Also called (with pmu == NULL) from mobility_pmu_enable() after
 * partition migration; cpuhw->migrate then forces a full reprogram.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val, mmcr0;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;
	bool ebb;

	if (!ppmu)
		return;
	local_irq_save(flags);

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	/* Nothing to do unless a matching power_pmu_disable() ran first. */
	if (!cpuhw->disabled)
		goto out;

	if (cpuhw->n_events == 0) {
		ppc_set_pmu_inuse(0);
		goto out;
	}

	cpuhw->disabled = 0;

	/*
	 * EBB requires an exclusive group and all events must have the EBB
	 * flag set, or not set, so we can just check a single event. Also we
	 * know we have at least one event.
	 */
	ebb = is_ebb_event(cpuhw->event[0]);

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 * While reenabling PMU during partition migration, continue
	 * with normal flow (migrate set means registers were lost).
	 */
	if (!cpuhw->n_added && !cpuhw->migrate) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
		if (ppmu->flags & PPMU_ARCH_31)
			mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);
		goto out_enable;
	}

	/*
	 * Clear all MMCR settings and recompute them for the new set of events.
	 */
	memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));

	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       &cpuhw->mmcr, cpuhw->event, ppmu->flags)) {
		/* shouldn't ever get here: events were validated on add */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	if (!(ppmu->flags & PPMU_ARCH_207S)) {
		/*
		 * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
		 * bits for the first event. We have already checked that all
		 * events have the same value for these bits as the first event.
		 */
		event = cpuhw->event[0];
		if (event->attr.exclude_user)
			cpuhw->mmcr.mmcr0 |= MMCR0_FCP;
		if (event->attr.exclude_kernel)
			cpuhw->mmcr.mmcr0 |= freeze_events_kernel;
		if (event->attr.exclude_hv)
			cpuhw->mmcr.mmcr0 |= MMCR0_FCHV;
	}

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr.mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);
	if (ppmu->flags & PPMU_ARCH_207S)
		mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2);

	if (ppmu->flags & PPMU_ARCH_31)
		mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 * While enabling PMU during partition migration,
	 * skip power_pmu_read since all event count settings
	 * needs to be re-initialised after migration.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if ((event->hw.idx && event->hw.idx != hwc_index[i] + 1) || (cpuhw->migrate)) {
			if (!cpuhw->migrate)
				power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			/* Limited PMCs are collected separately below. */
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}

		if (ebb)
			/* EBB events restore their own saved count. */
			val = local64_read(&event->hw.prev_count);
		else {
			val = 0;
			if (event->hw.sample_period) {
				/* Arm the PMC so it overflows after 'left' counts. */
				left = local64_read(&event->hw.period_left);
				if (left < 0x80000000L)
					val = 0x80000000L - left;
			}
			local64_set(&event->hw.prev_count, val);
		}

		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);

		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr.mmcr0 |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	pmao_restore_workaround(ebb);

	mmcr0 = ebb_switch_in(ebb, cpuhw);

	/* Order SPR writes above before unfreezing the counters. */
	mb();
	if (cpuhw->bhrb_users)
		ppmu->config_bhrb(cpuhw->bhrb_filter);

	write_mmcr0(cpuhw, mmcr0);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra);
	}

 out:

	local_irq_restore(flags);
}
  1531  
  1532  /*
  1533   * Called from powerpc mobility code
  1534   * during migration completion to
  1535   * enable back PMU counters.
  1536   */
> 1537  void mobility_pmu_enable(void)
  1538  {
  1539          struct cpu_hw_events *cpuhw;
  1540  
  1541          cpuhw = this_cpu_ptr(&cpu_hw_events);
  1542          power_pmu_enable(NULL);
  1543          cpuhw->migrate = 0;
  1544  }
  1545  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

Attachment: .config.gz
Description: application/gzip

Reply via email to