FYI, we noticed the following changes on

https://github.com/0day-ci/linux
Jaegeuk-Kim/f2fs-avoid-multiple-node-page-writes-due-to-inline_data/20160124-041711
commit ea32c36edcf58f8c27653f3e5bc41f8c0b41e235 ("f2fs: avoid multiple node page writes due to inline_data")


=========================================================================================
compiler/cpufreq_governor/disk/filesize/fs/iterations/kconfig/nr_directories/nr_files_per_directory/nr_threads/rootfs/sync_method/tbox_group/test_size/testcase:
  gcc-4.9/performance/1HDD/9B/f2fs/1x/x86_64-rhel/16d/256fpd/32t/debian-x86_64-2015-02-07.cgz/fsyncBeforeClose/lkp-ne04/400M/fsmark

commit: 
  7fdec82af6a9e190e53d07a1463d2a9ac49a8750
  ea32c36edcf58f8c27653f3e5bc41f8c0b41e235

7fdec82af6a9e190 ea32c36edcf58f8c27653f3e5b 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
      1278 ±  0%      -3.2%       1236 ±  0%  fsmark.files_per_sec
   1874824 ±  0%     -41.5%    1097506 ±  0%  fsmark.time.file_system_outputs
     14748 ±  2%      +9.2%      16099 ±  1%  fsmark.time.involuntary_context_switches
     44.50 ±  4%     +19.1%      53.00 ±  0%  fsmark.time.percent_of_cpu_this_job_got
     36.30 ±  1%     +18.6%      43.03 ±  0%  fsmark.time.system_time
    870163 ±  0%     +19.6%    1041002 ±  0%  fsmark.time.voluntary_context_switches
     22186 ±  1%     +10.4%      24490 ±  1%  vmstat.system.cs
    326553 ±  4%     -10.6%     292026 ±  0%  meminfo.Active
    292915 ±  4%     -11.9%     258035 ±  0%  meminfo.Active(file)
     30038 ± 10%     +17.1%      35170 ± 13%  softirqs.RCU
     31822 ±  9%     +16.4%      37037 ± 13%  softirqs.SCHED
    143471 ±  4%     -15.6%     121104 ±  2%  numa-meminfo.node1.Active
     17081 ± 18%     -21.7%      13370 ± 13%  numa-meminfo.node1.Active(anon)
    126389 ±  2%     -14.8%     107733 ±  2%  numa-meminfo.node1.Active(file)
     16928 ± 18%     -21.9%      13222 ± 13%  numa-meminfo.node1.AnonPages
     75337 ± 20%     -42.1%      43626 ±  1%  numa-vmstat.node0.nr_dirtied
      4266 ± 18%     -21.8%       3338 ± 13%  numa-vmstat.node1.nr_active_anon
     31597 ±  2%     -14.8%      26931 ±  2%  numa-vmstat.node1.nr_active_file
      4226 ± 18%     -22.0%       3297 ± 13%  numa-vmstat.node1.nr_anon_pages
     73228 ±  4%     -11.9%      64502 ±  0%  proc-vmstat.nr_active_file
    233617 ±  0%     -41.5%     136640 ±  0%  proc-vmstat.nr_dirtied
      6178 ±  0%     -11.1%       5489 ±  0%  proc-vmstat.numa_pte_updates
     50891 ±  0%     -39.8%      30627 ±  0%  proc-vmstat.pgactivate
   1874824 ±  0%     -41.5%    1097506 ±  0%  time.file_system_outputs
     44.50 ±  4%     +19.1%      53.00 ±  0%  time.percent_of_cpu_this_job_got
     36.30 ±  1%     +18.6%      43.03 ±  0%  time.system_time
    870163 ±  0%     +19.6%    1041002 ±  0%  time.voluntary_context_switches
  63126565 ±  2%     +16.8%   73717213 ±  1%  cpuidle.C1-NHM.time
  49681942 ±  1%     +34.6%   66867204 ±  3%  cpuidle.C1E-NHM.time
     56832 ±  1%     +20.6%      68527 ±  5%  cpuidle.C1E-NHM.usage
 3.428e+08 ±  1%     +14.9%  3.938e+08 ±  3%  cpuidle.C3-NHM.time
    314858 ±  3%     +32.6%     417584 ± 11%  cpuidle.C3-NHM.usage
     10.96 ±  6%      +6.8%      11.70 ±  1%  turbostat.%Busy
     20.82 ±  5%     +15.7%      24.10 ±  1%  turbostat.CPU%c1
     28.15 ±  4%     +13.0%      31.82 ±  3%  turbostat.CPU%c3
     40.07 ±  7%     -19.2%      32.38 ±  4%  turbostat.CPU%c6
      9.22 ±  6%     -23.4%       7.06 ±  3%  turbostat.Pkg%pc3
     36043 ±  6%    -100.0%       0.00 ± -1%  latency_stats.avg.call_rwsem_down_read_failed.f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].f2fs_write_cache_pages.[f2fs].f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync
      7336 ±  0%    +698.9%      58613 ±  1%  latency_stats.hits.call_rwsem_down_read_failed.get_node_info.[f2fs].f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
     22886 ±  0%    -100.0%       0.00 ± -1%  latency_stats.hits.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].__get_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_write_inline_data.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].f2fs_write_cache_pages.[f2fs].f2fs_write_data_pages.[f2fs].do_writepages
      0.00 ± -1%      +Inf%      79634 ±  0%  latency_stats.hits.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].__get_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_write_inline_data.[f2fs].f2fs_write_end.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs]
     75051 ±  0%    -100.0%       0.00 ± -1%  latency_stats.hits.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].__get_node_page.[f2fs].get_node_page.[f2fs].get_node_page.[f2fs].update_inode_page.[f2fs].f2fs_write_end.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs]
     17214 ±  0%    +321.2%      72498 ±  0%  latency_stats.hits.call_rwsem_down_read_failed.is_checkpointed_node.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
     11958 ±  0%    +454.6%      66321 ±  0%  latency_stats.hits.call_rwsem_down_read_failed.need_dentry_mark.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
     74681 ±  0%    -100.0%       8.25 ± 61%  latency_stats.hits.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
    130560 ± 11%    -100.0%       0.00 ± -1%  latency_stats.max.call_rwsem_down_read_failed.f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].f2fs_write_cache_pages.[f2fs].f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync
    270069 ± 14%    -100.0%     125.25 ± 81%  latency_stats.max.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
  12155740 ±  1%    -100.0%       0.00 ± -1%  latency_stats.sum.call_rwsem_down_read_failed.f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].f2fs_write_cache_pages.[f2fs].f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync
   1498927 ±  1%    +858.3%   14364772 ±  0%  latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
     22102 ±  9%    +187.5%      63548 ± 19%  latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].__get_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].do_write_data_page.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].f2fs_write_cache_pages.[f2fs].f2fs_write_data_pages.[f2fs].do_writepages
   5128495 ±  1%    -100.0%       0.00 ± -1%  latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].__get_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_write_inline_data.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].f2fs_write_cache_pages.[f2fs].f2fs_write_data_pages.[f2fs].do_writepages
      0.00 ± -1%      +Inf%   29676287 ±  0%  latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].__get_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_write_inline_data.[f2fs].f2fs_write_end.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs]
  24113264 ±  1%    -100.0%       0.00 ± -1%  latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].__get_node_page.[f2fs].get_node_page.[f2fs].get_node_page.[f2fs].update_inode_page.[f2fs].f2fs_write_end.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs]
     11707 ±  6%    +229.8%      38617 ± 24%  latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].write_data_page.[f2fs].do_write_data_page.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].f2fs_write_cache_pages.[f2fs].f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_fdatawrite.sync_dirty_inodes.[f2fs]
   3670765 ±  0%    +549.9%   23857878 ±  0%  latency_stats.sum.call_rwsem_down_read_failed.is_checkpointed_node.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
   2546436 ±  0%    +680.8%   19883041 ±  0%  latency_stats.sum.call_rwsem_down_read_failed.need_dentry_mark.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
   5243465 ±  2%    +258.7%   18809634 ±  1%  latency_stats.sum.call_rwsem_down_read_failed.need_inode_block_update.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
     13216 ± 86%     -58.0%       5547 ± 26%  latency_stats.sum.call_rwsem_down_write_failed.f2fs_init_extent_tree.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
   3020759 ±  2%    +333.5%   13096379 ±  0%  latency_stats.sum.call_rwsem_down_write_failed.set_node_addr.[f2fs].f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
 1.722e+08 ±  2%    -100.0%     630.50 ± 92%  latency_stats.sum.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
    896.92 ± 10%     +23.0%       1103 ±  6%  sched_debug.cfs_rq:/.exec_clock.12
      1663 ±  9%     +40.9%       2343 ± 24%  sched_debug.cfs_rq:/.exec_clock.2
    767.96 ±  8%     +16.5%     894.94 ±  3%  sched_debug.cfs_rq:/.exec_clock.min
      3.12 ± 17%     -88.0%       0.38 ±110%  sched_debug.cfs_rq:/.load_avg.min
      3666 ± 13%     +35.9%       4980 ±  6%  sched_debug.cfs_rq:/.min_vruntime.12
      4101 ± 10%     +60.7%       6589 ± 26%  sched_debug.cfs_rq:/.min_vruntime.14
      4702 ±  7%     +50.3%       7067 ± 12%  sched_debug.cfs_rq:/.min_vruntime.15
      7043 ± 14%     +25.9%       8870 ± 11%  sched_debug.cfs_rq:/.min_vruntime.2
      4100 ±  4%     +48.3%       6081 ± 15%  sched_debug.cfs_rq:/.min_vruntime.9
      6575 ±  3%     +17.8%       7744 ±  3%  sched_debug.cfs_rq:/.min_vruntime.avg
      3239 ±  3%     +31.0%       4242 ±  9%  sched_debug.cfs_rq:/.min_vruntime.min
      0.33 ±141%   +1175.0%       4.25 ± 69%  sched_debug.cfs_rq:/.nr_spread_over.2
     -4695 ±-17%     -40.2%      -2807 ±-39%  sched_debug.cfs_rq:/.spread0.15
    136.75 ± 26%     -25.8%     101.50 ± 15%  sched_debug.cfs_rq:/.util_avg.3
     48.62 ± 21%     -80.7%       9.38 ± 59%  sched_debug.cfs_rq:/.util_avg.min
     43.46 ± 16%     +60.2%      69.63 ± 21%  sched_debug.cfs_rq:/.util_avg.stddev
      9236 ±  4%      +9.8%      10143 ±  6%  sched_debug.cpu.nr_load_updates.10
      9179 ±  4%     +11.8%      10258 ±  5%  sched_debug.cpu.nr_load_updates.14
      9296 ±  4%      +9.8%      10211 ±  4%  sched_debug.cpu.nr_load_updates.8
     64213 ±  7%     +12.9%      72481 ±  7%  sched_debug.cpu.nr_switches.0
     49843 ±  2%     +19.5%      59574 ±  8%  sched_debug.cpu.nr_switches.1
     35136 ± 10%     +23.1%      43259 ±  5%  sched_debug.cpu.nr_switches.10
     27937 ±  6%     +23.4%      34467 ±  6%  sched_debug.cpu.nr_switches.11
     38343 ± 10%     +21.4%      46565 ± 10%  sched_debug.cpu.nr_switches.12
     35454 ± 10%     +21.8%      43192 ±  3%  sched_debug.cpu.nr_switches.14
     29114 ±  7%     +29.6%      37725 ± 20%  sched_debug.cpu.nr_switches.15
     58979 ±  5%     +17.3%      69170 ±  7%  sched_debug.cpu.nr_switches.2
     56391 ±  5%     +23.7%      69761 ±  8%  sched_debug.cpu.nr_switches.6
     35766 ±  9%     +25.9%      45013 ±  2%  sched_debug.cpu.nr_switches.8
     43509 ±  5%     +16.8%      50818 ±  3%  sched_debug.cpu.nr_switches.avg
     65222 ±  7%     +12.5%      73372 ±  7%  sched_debug.cpu.nr_switches.max
     27549 ±  6%     +18.2%      32567 ±  3%  sched_debug.cpu.nr_switches.min
     -6534 ± -5%     +70.0%     -11111 ± -2%  sched_debug.cpu.nr_uninterruptible.0
   -958.25 ±-13%     +42.1%      -1361 ±-10%  sched_debug.cpu.nr_uninterruptible.1
      1716 ± 20%     +46.1%       2507 ±  6%  sched_debug.cpu.nr_uninterruptible.10
      1373 ±  8%     +56.0%       2143 ± 10%  sched_debug.cpu.nr_uninterruptible.11
      1599 ±  7%     +71.7%       2744 ±  6%  sched_debug.cpu.nr_uninterruptible.12
      1351 ± 20%     +52.4%       2060 ±  3%  sched_debug.cpu.nr_uninterruptible.13
      1824 ±  8%     +40.8%       2568 ±  4%  sched_debug.cpu.nr_uninterruptible.14
      1334 ± 10%     +68.0%       2241 ±  4%  sched_debug.cpu.nr_uninterruptible.15
     -1140 ±-14%     +44.6%      -1649 ± -4%  sched_debug.cpu.nr_uninterruptible.5
     -1139 ±-15%     +41.1%      -1607 ±-17%  sched_debug.cpu.nr_uninterruptible.7
      1167 ± 13%     +70.0%       1983 ±  4%  sched_debug.cpu.nr_uninterruptible.8
      1319 ±  7%     +46.4%       1930 ±  6%  sched_debug.cpu.nr_uninterruptible.9
      1956 ±  8%     +41.9%       2775 ±  5%  sched_debug.cpu.nr_uninterruptible.max
     -6537 ± -5%     +70.0%     -11114 ± -2%  sched_debug.cpu.nr_uninterruptible.min
      2035 ±  1%     +62.9%       3315 ±  1%  sched_debug.cpu.nr_uninterruptible.stddev
     51578 ±  2%     +17.3%      60512 ±  7%  sched_debug.cpu.sched_count.1
     35156 ± 10%     +24.2%      43650 ±  4%  sched_debug.cpu.sched_count.10
     27959 ±  6%     +25.4%      35049 ±  4%  sched_debug.cpu.sched_count.11
     38363 ± 10%     +25.0%      47965 ±  7%  sched_debug.cpu.sched_count.12
     35618 ±  9%     +21.8%      43369 ±  2%  sched_debug.cpu.sched_count.14
     29134 ±  7%     +34.3%      39119 ± 18%  sched_debug.cpu.sched_count.15
     50741 ±  8%     +30.2%      66049 ± 23%  sched_debug.cpu.sched_count.3
     62138 ±  8%     +14.3%      71037 ±  6%  sched_debug.cpu.sched_count.6
     36028 ±  9%     +25.0%      45039 ±  2%  sched_debug.cpu.sched_count.8
     27623 ±  6%     +18.4%      32700 ±  3%  sched_debug.cpu.sched_count.min
     28067 ±  8%     +10.2%      30921 ±  8%  sched_debug.cpu.sched_goidle.0
     21211 ±  3%     +20.1%      25465 ±  8%  sched_debug.cpu.sched_goidle.1
     13529 ± 10%     +20.1%      16248 ±  6%  sched_debug.cpu.sched_goidle.10
     11159 ±  5%     +21.5%      13554 ±  6%  sched_debug.cpu.sched_goidle.11
     13666 ± 11%     +18.9%      16252 ±  3%  sched_debug.cpu.sched_goidle.14
     11607 ±  8%     +29.2%      15001 ± 25%  sched_debug.cpu.sched_goidle.15
     25465 ±  6%     +15.8%      29484 ±  8%  sched_debug.cpu.sched_goidle.2
     24349 ±  5%     +22.5%      29827 ± 10%  sched_debug.cpu.sched_goidle.6
     13756 ±  9%     +23.4%      16972 ±  3%  sched_debug.cpu.sched_goidle.8
     18266 ±  4%     +14.8%      20973 ±  4%  sched_debug.cpu.sched_goidle.avg
     28513 ±  7%     +11.4%      31770 ±  7%  sched_debug.cpu.sched_goidle.max
     11005 ±  5%     +15.1%      12672 ±  3%  sched_debug.cpu.sched_goidle.min
     15905 ±  9%     +22.8%      19533 ±  8%  sched_debug.cpu.ttwu_count.12
     13379 ± 10%     +22.5%      16391 ±  9%  sched_debug.cpu.ttwu_count.13
     15038 ± 11%     +29.0%      19395 ±  8%  sched_debug.cpu.ttwu_count.14
     26020 ±  4%     +22.2%      31789 ±  3%  sched_debug.cpu.ttwu_count.2
     25547 ±  7%     +16.5%      29762 ±  5%  sched_debug.cpu.ttwu_count.3
     26667 ±  5%     +31.7%      35117 ±  9%  sched_debug.cpu.ttwu_count.4
     26022 ±  9%     +15.1%      29953 ±  3%  sched_debug.cpu.ttwu_count.5
     27546 ±  6%     +31.4%      36193 ±  8%  sched_debug.cpu.ttwu_count.6
     25556 ±  5%     +24.9%      31908 ± 13%  sched_debug.cpu.ttwu_count.7
     24624 ±  5%     +18.5%      29191 ±  3%  sched_debug.cpu.ttwu_count.avg
     11891 ±  7%     +23.4%      14671 ±  5%  sched_debug.cpu.ttwu_count.min
     21466 ±  2%     +33.3%      28616 ±  2%  sched_debug.cpu.ttwu_local.0
      6323 ±  9%     +29.1%       8161 ±  3%  sched_debug.cpu.ttwu_local.1
      4485 ±  9%     +31.7%       5907 ±  6%  sched_debug.cpu.ttwu_local.10
      3238 ±  3%     +25.5%       4063 ±  6%  sched_debug.cpu.ttwu_local.11
      5061 ± 10%     +22.7%       6211 ±  9%  sched_debug.cpu.ttwu_local.12
      3264 ± 10%     +28.0%       4178 ±  5%  sched_debug.cpu.ttwu_local.13
      4339 ±  9%     +35.0%       5859 ±  4%  sched_debug.cpu.ttwu_local.14
      3638 ± 10%     +21.4%       4415 ±  5%  sched_debug.cpu.ttwu_local.15
      6344 ±  6%     +37.4%       8716 ±  2%  sched_debug.cpu.ttwu_local.2
      5839 ±  9%     +28.8%       7523 ±  6%  sched_debug.cpu.ttwu_local.3
      6459 ±  6%     +48.7%       9602 ± 13%  sched_debug.cpu.ttwu_local.4
      5643 ±  9%     +29.8%       7322 ±  5%  sched_debug.cpu.ttwu_local.5
      6341 ±  4%     +58.9%      10079 ± 14%  sched_debug.cpu.ttwu_local.6
      5599 ±  4%     +34.9%       7552 ±  5%  sched_debug.cpu.ttwu_local.7
      4901 ±  6%     +28.0%       6275 ±  5%  sched_debug.cpu.ttwu_local.8
      6031 ±  3%     +33.5%       8053 ±  3%  sched_debug.cpu.ttwu_local.avg
     21563 ±  2%     +34.9%      29080 ±  3%  sched_debug.cpu.ttwu_local.max
      3061 ±  6%     +29.6%       3966 ±  4%  sched_debug.cpu.ttwu_local.min
      4253 ±  3%     +36.5%       5807 ±  4%  sched_debug.cpu.ttwu_local.stddev


lkp-ne04: Nehalem-EP
Memory: 12G


                            fsmark.time.file_system_outputs

  1.9e+06 **-**-*-**--***-****-***-****-***-***-****-***-****-***-***-****-**
          |    *    *                                                       |
  1.8e+06 ++                                                                |
  1.7e+06 ++                                                                |
          |                                                                 |
  1.6e+06 ++                                                                |
  1.5e+06 ++                                                                |
          |                                                                 |
  1.4e+06 ++                                                                |
  1.3e+06 ++                                                                |
          |                                                                 |
  1.2e+06 ++                                                                |
  1.1e+06 ++                        O    OO OO                              |
          OO OOOO OOO OOO OOOO OOO O OO O     O                             |
    1e+06 ++----------------------------------------------------------------+


                               proc-vmstat.nr_dirtied

  240000 ++----------------*--*---------------------------------------------+
         **.***.***.****.**  * *.***.***.****.***.***.***.***.****.***.***.**
  220000 ++                                                                 |
         |                                                                  |
         |                                                                  |
  200000 ++                                                                 |
         |                                                                  |
  180000 ++                                                                 |
         |                                                                  |
  160000 ++                                                                 |
         |                                                                  |
         |                                                                  |
  140000 OO OOO OOO OOOO OOO OOO OOO OOO OOOO O                             |
         |                                                                  |
  120000 ++-----------------------------------------------------------------+


                               proc-vmstat.pgactivate

  55000 ++------------------------------------------------------------------+
        |                                                                   |
        **.***.***.* *.*                                              .** .**
  50000 ++          *  :                                             *   *  |
        |               :                                            :      |
        |               **.***.***.***.***.***.***.***.***.* *.***.**       |
  45000 ++                                                  *               |
        |                                                                   |
  40000 ++                                                                  |
        OO OOO OOO OOO OOO OOO                                              |
        |                                                                   |
  35000 ++                                                                  |
        |                                                                   |
        |                                                                   |
  30000 ++---------------------OOO-OOO-OOO-OOO------------------------------+


        [*] bisect-good sample
        [O] bisect-bad  sample

To reproduce:

        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/lkp install job.yaml  # job file is attached in this email
        bin/lkp run     job.yaml
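
The hot path this job exercises is 32 threads creating 9-byte files and fsync()ing each one before close (sync_method: fsyncBeforeClose). For reference, a minimal standalone sketch of that I/O pattern in C — file names, per-thread file counts, and the flat directory layout here are illustrative only, not the exact fsmark parameters:

/*
 * Minimal sketch of the fsmark I/O pattern used by this job: many
 * threads creating tiny (9-byte) files and calling fsync() before
 * close().  Paths and counts are illustrative; this is NOT the
 * actual fsmark source.
 *
 * Build: gcc -O2 -pthread reproduce_sketch.c -o reproduce_sketch
 */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NR_THREADS       32   /* nr_threads: 32t */
#define FILES_PER_THREAD 1024 /* illustrative; fsmark derives this from test_size */
#define FILE_SIZE        9    /* filesize: 9B */

static void *worker(void *arg)
{
        long id = (long)arg;
        char path[64];
        char buf[FILE_SIZE] = { 0 };

        for (int i = 0; i < FILES_PER_THREAD; i++) {
                snprintf(path, sizeof(path), "t%ld-f%d", id, i);

                int fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
                if (fd < 0) {
                        perror("open");
                        return NULL;
                }
                if (write(fd, buf, FILE_SIZE) != FILE_SIZE)
                        perror("write");
                if (fsync(fd) < 0)  /* sync_method: fsyncBeforeClose */
                        perror("fsync");
                close(fd);
        }
        return NULL;
}

int main(void)
{
        pthread_t tid[NR_THREADS];

        for (long i = 0; i < NR_THREADS; i++)
                pthread_create(&tid[i], NULL, worker, (void *)i);
        for (int i = 0; i < NR_THREADS; i++)
                pthread_join(tid[i], NULL);
        return 0;
}

Run it from a directory on the f2fs mount under test. This only approximates the pattern; for the exact layout (16 directories, 256 files per directory, 400M total), use the attached job file with lkp as shown above.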


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: fsmark
default-monitors:
  wait: activate-monitor
  kmsg: 
  uptime: 
  iostat: 
  vmstat: 
  numa-numastat: 
  numa-vmstat: 
  numa-meminfo: 
  proc-vmstat: 
  proc-stat:
    interval: 10
  meminfo: 
  slabinfo: 
  interrupts: 
  lock_stat: 
  latency_stats: 
  softirqs: 
  bdi_dev_mapping: 
  diskstats: 
  nfsstat: 
  cpuidle: 
  cpufreq-stats: 
  turbostat: 
  pmeter: 
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer: 
  watchdog: 
commit: ea32c36edcf58f8c27653f3e5bc41f8c0b41e235
model: Nehalem-EP
memory: 12G
hdd_partitions: "/dev/disk/by-id/ata-ST3500514NS_9WJ03EBA-part3"
swap_partitions: "/dev/disk/by-id/ata-ST3120026AS_5MS07HA2-part2"
rootfs_partition: "/dev/disk/by-id/ata-ST3500514NS_9WJ03EBA-part1"
category: benchmark
iterations: 1x
nr_threads: 32t
disk: 1HDD
fs: f2fs
fs2: 
fsmark:
  filesize: 9B
  test_size: 400M
  sync_method: fsyncBeforeClose
  nr_directories: 16d
  nr_files_per_directory: 256fpd
queue: bisect
testbox: lkp-ne04
tbox_group: lkp-ne04
kconfig: x86_64-rhel
enqueue_time: 2016-01-29 15:11:06.121815543 +08:00
id: 03a2baf75031b61e95f6ab2b0a9f4990556eb7c3
user: lkp
compiler: gcc-4.9
head_commit: 27c52311cb3c7dd66ca9bb6b0c0b63ac2bc72051
base_commit: 92e963f50fc74041b5e9e744c330dca48e04f08d
branch: linux-devel/devel-hourly-2016012903
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/fsmark/performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd/lkp-ne04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ea32c36edcf58f8c27653f3e5bc41f8c0b41e235/1"
job_file: "/lkp/scheduled/lkp-ne04/bisect_fsmark-performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd-debian-x86_64-2015-02-07.cgz-x86_64-rhel-ea32c36edcf58f8c27653f3e5bc41f8c0b41e235-20160129-123190-1swp517-1.yaml"
nr_cpu: "$(nproc)"
max_uptime: 873.02
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-ne04/bisect_fsmark-performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd-debian-x86_64-2015-02-07.cgz-x86_64-rhel-ea32c36edcf58f8c27653f3e5bc41f8c0b41e235-20160129-123190-1swp517-1.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2016012903
- commit=ea32c36edcf58f8c27653f3e5bc41f8c0b41e235
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/ea32c36edcf58f8c27653f3e5bc41f8c0b41e235/vmlinuz-4.4.0-05594-gea32c36
- max_uptime=873
- RESULT_ROOT=/result/fsmark/performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd/lkp-ne04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ea32c36edcf58f8c27653f3e5bc41f8c0b41e235/1
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/ea32c36edcf58f8c27653f3e5bc41f8c0b41e235/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs2.cgz,/lkp/benchmarks/fsmark.cgz"
linux_headers_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/ea32c36edcf58f8c27653f3e5bc41f8c0b41e235/linux-headers.cgz"
repeat_to: 2
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/ea32c36edcf58f8c27653f3e5bc41f8c0b41e235/vmlinuz-4.4.0-05594-gea32c36"
dequeue_time: 2016-01-29 15:33:21.154043484 +08:00
job_state: finished
loadavg: 22.81 8.07 2.89 2/252 3232
start_time: '1454052843'
end_time: '1454052926'
version: "/lkp/lkp/.src-20160127-223853"

Attachment: reproduce.sh
Description: Bourne shell script