FYI, we noticed a -2.8% regression of pigz.throughput with your commit,

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit 3a47d5124a957358274e9ca7b115b2f3a914f56d ("sched/fair: Fix fairness issue on migration")


=========================================================================================
blocksize/compiler/cpufreq_governor/kconfig/nr_threads/rootfs/tbox_group/testcase:
  128K/gcc-4.9/performance/x86_64-rhel/100%/debian-x86_64-2015-02-07.cgz/lkp-snb01/pigz

commit: 
  2f5177f0fd7e531b26d54633be62d1d4cb94621c
  3a47d5124a957358274e9ca7b115b2f3a914f56d
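
The change under test and its parent (the two kernels compared below) can be inspected directly in the tree referenced above, e.g.:

        git show 3a47d5124a957358274e9ca7b115b2f3a914f56d
        git diff --stat 2f5177f0fd7e531b26d54633be62d1d4cb94621c 3a47d5124a957358274e9ca7b115b2f3a914f56d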

2f5177f0fd7e531b 3a47d5124a957358274e9ca7b1 
---------------- -------------------------- 
       fail:runs  %reproduction    fail:runs
         %stddev     %change         %stddev
             \          |                \  
 3.565e+08 ±  0%      -2.8%  3.466e+08 ±  0%  pigz.throughput
   3148624 ±  1%     -61.5%    1213138 ±  1%  pigz.time.involuntary_context_switches
      3172 ±  0%      -4.9%       3017 ±  0%  pigz.time.percent_of_cpu_this_job_got
    108.01 ±  0%     -12.7%      94.28 ±  0%  pigz.time.system_time
      9416 ±  0%      -4.8%       8963 ±  0%  pigz.time.user_time
   4435433 ±  0%     -25.4%    3308219 ±  0%  pigz.time.voluntary_context_switches
     22606 ±  6%      -8.5%      20692 ±  0%  meminfo.Shmem
      6211 ± 51%     +65.1%      10253 ± 13%  numa-meminfo.node1.AnonHugePages
     82109 ±  4%    +111.8%     173874 ±  5%  softirqs.SCHED
  17775779 ±  2%      -8.2%   16323423 ±  5%  numa-numastat.node0.local_node
  17777847 ±  2%      -8.2%   16324614 ±  5%  numa-numastat.node0.numa_hit
  10837718 ± 18%     -23.4%    8299086 ±  3%  numa-vmstat.node0.numa_hit
  10771465 ± 18%     -23.6%    8232199 ±  3%  numa-vmstat.node0.numa_local
      6147 ±  4%      +9.7%       6742 ±  4%  slabinfo.anon_vma.active_objs
      6147 ±  4%      +9.7%       6742 ±  4%  slabinfo.anon_vma.num_objs
      5652 ±  6%      -8.5%       5171 ±  0%  proc-vmstat.nr_shmem
     33999 ±  6%     +40.2%      47675 ± 14%  proc-vmstat.numa_pages_migrated
     33999 ±  6%     +40.2%      47675 ± 14%  proc-vmstat.pgmigrate_success
   3148624 ±  1%     -61.5%    1213138 ±  1%  time.involuntary_context_switches
    108.01 ±  0%     -12.7%      94.28 ±  0%  time.system_time
   4435433 ±  0%     -25.4%    3308219 ±  0%  time.voluntary_context_switches
      0.64 ±  5%    +707.0%       5.17 ±  1%  turbostat.CPU%c1
      0.01 ±  0%    +675.0%       0.08 ±  5%  turbostat.CPU%c3
     16.03 ±101%     -99.6%       0.06 ±100%  turbostat.Pkg%pc2
  14964297 ± 21%    +256.4%   53332691 ±  3%  cpuidle.C1-SNB.time
     58475 ±  5%    +229.5%     192657 ±  1%  cpuidle.C1-SNB.usage
  18841654 ± 15%    +709.6%  1.525e+08 ±  2%  cpuidle.C1E-SNB.time
     66706 ± 10%    +629.5%     486650 ±  1%  cpuidle.C1E-SNB.usage
   4314242 ± 12%    +575.3%   29134227 ±  1%  cpuidle.C3-SNB.time
     14262 ± 10%    +543.3%      91745 ±  2%  cpuidle.C3-SNB.usage
    133764 ± 27%    +434.9%     715504 ±  1%  cpuidle.C7-SNB.usage
 3.929e+08 ±  1%    -100.0%       0.00 ± -1%  latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
   9059435 ±103%    -100.0%     160.00 ± 44%  latency_stats.avg.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_access.[nfsv4].nfs4_proc_access.[nfsv4].nfs_do_access.nfs_permission.__inode_permission.inode_permission.link_path_walk
 4.578e+08 ±  5%    -100.0%       0.00 ± -1%  latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
  51327106 ±102%    -100.0%     257.25 ± 51%  latency_stats.max.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_access.[nfsv4].nfs4_proc_access.[nfsv4].nfs_do_access.nfs_permission.__inode_permission.inode_permission.link_path_walk
 7.858e+08 ±  1%    -100.0%       0.00 ± -1%  latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
     20816 ± 25%    +361.7%      96106 ± 13%  latency_stats.sum.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
 1.014e+08 ±107%    -100.0%       1445 ± 44%  latency_stats.sum.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_access.[nfsv4].nfs4_proc_access.[nfsv4].nfs_do_access.nfs_permission.__inode_permission.inode_permission.link_path_walk
    172662 ± 14%     -18.4%     140966 ±  0%  sched_debug.cfs_rq:/.exec_clock.avg
    172933 ± 14%     -18.0%     141779 ±  0%  sched_debug.cfs_rq:/.exec_clock.max
    171995 ± 14%     -18.6%     139924 ±  0%  sched_debug.cfs_rq:/.exec_clock.min
    179.82 ± 33%    +180.3%     504.10 ± 34%  sched_debug.cfs_rq:/.exec_clock.stddev
     18.12 ± 27%     -67.6%       5.88 ± 35%  sched_debug.cfs_rq:/.load.min
   5948149 ± 14%     -23.8%    4531737 ±  0%  sched_debug.cfs_rq:/.min_vruntime.avg
   5968318 ± 14%     -23.6%    4561399 ±  0%  sched_debug.cfs_rq:/.min_vruntime.max
   5921026 ± 14%     -24.1%    4496045 ±  0%  sched_debug.cfs_rq:/.min_vruntime.min
      0.67 ± 26%     -69.0%       0.21 ± 34%  sched_debug.cfs_rq:/.nr_running.min
      0.10 ± 40%    +185.3%       0.28 ±  9%  sched_debug.cfs_rq:/.nr_running.stddev
      0.38 ± 15%     -39.2%       0.23 ± 17%  sched_debug.cfs_rq:/.nr_spread_over.avg
     11.22 ± 12%     -46.1%       6.04 ± 28%  sched_debug.cfs_rq:/.nr_spread_over.max
      1.97 ± 12%     -45.5%       1.07 ± 26%  sched_debug.cfs_rq:/.nr_spread_over.stddev
     18.97 ± 27%     -69.7%       5.75 ± 36%  sched_debug.cfs_rq:/.runnable_load_avg.min
      4.44 ± 90%    +157.1%      11.41 ± 37%  sched_debug.cfs_rq:/.runnable_load_avg.stddev
     13030 ±176%    -199.0%     -12895 ±-101%  sched_debug.cfs_rq:/.spread0.avg
    -13998 ±-100%    +247.4%     -48630 ±-36%  sched_debug.cfs_rq:/.spread0.min
      9880 ± 23%     +59.9%      15795 ± 23%  sched_debug.cfs_rq:/.spread0.stddev
     34.89 ±  8%    +120.4%      76.89 ± 13%  sched_debug.cfs_rq:/.util_avg.stddev
    196657 ± 16%     +36.9%     269230 ±  6%  sched_debug.cpu.avg_idle.stddev
     17.42 ± 64%     -79.4%       3.58 ± 11%  sched_debug.cpu.clock.stddev
     17.42 ± 64%     -79.4%       3.58 ± 11%  sched_debug.cpu.clock_task.stddev
     18.10 ± 30%     -51.7%       8.75 ± 27%  sched_debug.cpu.cpu_load[0].min
     18.74 ± 27%     -44.9%      10.33 ± 18%  sched_debug.cpu.cpu_load[1].min
     19.22 ± 24%     -36.7%      12.17 ±  6%  sched_debug.cpu.cpu_load[2].min
      1912 ± 23%     -63.2%     704.04 ± 48%  sched_debug.cpu.curr->pid.min
     18.12 ± 27%     -67.6%       5.88 ± 35%  sched_debug.cpu.load.min
    176320 ± 14%     -15.5%     148922 ±  0%  sched_debug.cpu.nr_load_updates.avg
    178699 ± 14%     -15.3%     151407 ±  0%  sched_debug.cpu.nr_load_updates.max
    174865 ± 14%     -15.8%     147278 ±  0%  sched_debug.cpu.nr_load_updates.min
      0.67 ± 26%     -69.0%       0.21 ± 34%  sched_debug.cpu.nr_running.min
      0.27 ±  8%     +41.3%       0.39 ±  5%  sched_debug.cpu.nr_running.stddev
    152747 ± 14%     -34.7%      99735 ±  0%  sched_debug.cpu.nr_switches.avg
    183979 ± 12%     -31.3%     126457 ±  5%  sched_debug.cpu.nr_switches.max
    135220 ± 15%     -41.5%      79108 ±  3%  sched_debug.cpu.nr_switches.min
    154367 ± 14%     -34.5%     101165 ±  0%  sched_debug.cpu.sched_count.avg
    225427 ± 12%     -20.1%     180086 ±  4%  sched_debug.cpu.sched_count.max
    134418 ± 14%     -41.5%      78674 ±  3%  sched_debug.cpu.sched_count.min
      6588 ± 22%    +264.1%      23990 ±  2%  sched_debug.cpu.sched_goidle.avg
     20233 ± 23%     +73.8%      35174 ±  7%  sched_debug.cpu.sched_goidle.max
      3150 ± 22%    +492.7%      18675 ±  2%  sched_debug.cpu.sched_goidle.min
     86325 ± 15%     -36.0%      55212 ±  0%  sched_debug.cpu.ttwu_count.avg
    103976 ± 13%     -33.4%      69252 ±  4%  sched_debug.cpu.ttwu_count.max
     74872 ± 14%     -39.3%      45412 ±  3%  sched_debug.cpu.ttwu_count.min
     61351 ± 14%     -60.3%      24369 ±  1%  sched_debug.cpu.ttwu_local.avg
     68724 ± 13%     -56.3%      30060 ±  3%  sched_debug.cpu.ttwu_local.max
     54792 ± 14%     -65.3%      19018 ±  6%  sched_debug.cpu.ttwu_local.min
      9.86 ±  9%     -37.9%       6.13 ±  5%  perf-profile.cycles.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate
      5.67 ±  3%     -18.2%       4.64 ±  6%  perf-profile.cycles.__alloc_pages_nodemask.alloc_pages_current.pipe_write.__vfs_write.vfs_write
      0.60 ± 98%    +306.6%       2.46 ± 19%  perf-profile.cycles.__do_softirq.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
      2.69 ± 52%    +282.2%      10.26 ±  5%  perf-profile.cycles.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
      4.49 ±  9%     -36.3%       2.86 ±  9%  perf-profile.cycles.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency
      0.74 ± 23%     -40.4%       0.44 ± 20%  perf-profile.cycles.__list_del_entry.list_del.__rmqueue.get_page_from_freelist.__alloc_pages_nodemask
      0.88 ± 31%    +395.7%       4.34 ±  6%  perf-profile.cycles.__mutex_lock_slowpath.mutex_lock.pipe_read.__vfs_read.vfs_read
      0.41 ± 38%    +283.7%       1.59 ± 17%  perf-profile.cycles.__mutex_lock_slowpath.mutex_lock.pipe_wait.pipe_write.__vfs_write
      3.42 ± 11%     -32.5%       2.31 ± 16%  perf-profile.cycles.__put_page.anon_pipe_buf_release.pipe_read.__vfs_read.vfs_read
      1.01 ±  8%     -28.6%       0.72 ± 10%  perf-profile.cycles.__rmqueue.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.pipe_write
      1.05 ± 20%     -64.2%       0.38 ± 26%  perf-profile.cycles.__schedule.schedule.exit_to_usermode_loop.syscall_return_slowpath.entry_SYSCALL_64_fastpath
      2.86 ± 10%     -15.9%       2.40 ±  2%  perf-profile.cycles.__schedule.schedule.futex_wait_queue_me.futex_wait.do_futex
      0.92 ±  7%     -22.4%       0.72 ± 12%  perf-profile.cycles.__schedule.schedule.pipe_wait.pipe_write.__vfs_write
     26.84 ±  3%     -13.9%      23.11 ±  3%  perf-profile.cycles.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
      3.23 ± 11%     -33.6%       2.15 ±  7%  perf-profile.cycles.__wake_up_common.__wake_up_sync_key.pipe_read.__vfs_read.vfs_read
      3.27 ± 10%     -33.1%       2.19 ±  5%  perf-profile.cycles.__wake_up_sync_key.pipe_read.__vfs_read.vfs_read.sys_read
      0.18 ± 59%    +745.1%       1.50 ±  9%  perf-profile.cycles.activate_task.ttwu_do_activate.sched_ttwu_pending.cpu_startup_entry.start_secondary
      1.65 ± 10%     -65.3%       0.57 ± 13%  perf-profile.cycles.activate_task.ttwu_do_activate.sched_ttwu_pending.scheduler_ipi.smp_reschedule_interrupt
      2.33 ± 12%     -33.2%       1.56 ± 10%  perf-profile.cycles.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function.autoremove_wake_function
      5.23 ± 15%     -60.1%       2.09 ± 11%  perf-profile.cycles.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_q.futex_requeue
      3.28 ± 10%     -35.9%       2.10 ± 13%  perf-profile.cycles.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_q.futex_wake
      6.42 ±  3%     -15.8%       5.41 ±  4%  perf-profile.cycles.alloc_pages_current.pipe_write.__vfs_write.vfs_write.sys_write
      2.05 ± 13%     -37.9%       1.27 ±  4%  perf-profile.cycles.anon_pipe_buf_release.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
      3.51 ± 11%     -33.3%       2.35 ± 16%  perf-profile.cycles.anon_pipe_buf_release.pipe_read.__vfs_read.vfs_read.sys_read
      5.04 ± 49%    +239.8%      17.13 ±  9%  perf-profile.cycles.apic_timer_interrupt
      3.18 ± 12%     -34.0%       2.10 ±  7%  perf-profile.cycles.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.pipe_read.__vfs_read
      1.06 ± 77%    +158.9%       2.76 ±  5%  perf-profile.cycles.call_cpuidle.cpu_startup_entry.start_secondary
      0.27 ±133%    +230.6%       0.89 ± 24%  perf-profile.cycles.clockevents_program_event.tick_program_event.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt
     17.49 ±  3%     -21.5%      13.73 ±  1%  perf-profile.cycles.copy_page_from_iter.pipe_write.__vfs_write.vfs_write.sys_write
      1.45 ±  9%     -21.4%       1.14 ± 13%  perf-profile.cycles.copy_page_from_iter_iovec.copy_page_from_iter.pipe_write.__vfs_write.vfs_write
     24.42 ±  3%     -15.2%      20.70 ±  2%  perf-profile.cycles.copy_page_to_iter.pipe_read.__vfs_read.vfs_read.sys_read
     15.67 ±  3%     -22.7%      12.12 ±  1%  perf-profile.cycles.copy_user_generic_string.copy_page_from_iter.pipe_write.__vfs_write.vfs_write
     23.42 ±  3%     -15.8%      19.72 ±  2%  perf-profile.cycles.copy_user_generic_string.copy_page_to_iter.pipe_read.__vfs_read.vfs_read
      1.80 ± 54%    +329.2%       7.71 ±  5%  perf-profile.cycles.cpu_startup_entry.start_secondary
      1.06 ± 77%    +157.0%       2.74 ±  5%  perf-profile.cycles.cpuidle_enter.call_cpuidle.cpu_startup_entry.start_secondary
      1.46 ± 12%     -31.4%       1.00 ± 14%  perf-profile.cycles.deactivate_task.__schedule.schedule.futex_wait_queue_me.futex_wait
      3.17 ± 12%     -34.1%       2.09 ±  7%  perf-profile.cycles.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.pipe_read
     17.95 ±  7%     -42.3%      10.36 ±  2%  perf-profile.cycles.do_futex.sys_futex.entry_SYSCALL_64_fastpath
      8.78 ±  9%     -36.6%       5.57 ±  4%  perf-profile.cycles.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair
      9.98 ± 10%     -45.7%       5.42 ±  4%  perf-profile.cycles.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up
      0.16 ± 60%    +843.1%       1.53 ± 10%  perf-profile.cycles.enqueue_task_fair.activate_task.ttwu_do_activate.sched_ttwu_pending.cpu_startup_entry
      1.60 ± 13%     -65.3%       0.55 ± 15%  perf-profile.cycles.enqueue_task_fair.activate_task.ttwu_do_activate.sched_ttwu_pending.scheduler_ipi
      2.29 ± 11%     -34.1%       1.51 ±  9%  perf-profile.cycles.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function
      8.37 ± 14%     -50.9%       4.11 ±  5%  perf-profile.cycles.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_q
     83.20 ±  3%     -19.0%      67.36 ±  2%  perf-profile.cycles.entry_SYSCALL_64_fastpath
      1.17 ± 19%     -60.8%       0.46 ± 34%  perf-profile.cycles.exit_to_usermode_loop.syscall_return_slowpath.entry_SYSCALL_64_fastpath
      3.13 ± 12%     -33.5%       2.08 ± 17%  perf-profile.cycles.free_hot_cold_page.__put_page.anon_pipe_buf_release.pipe_read.__vfs_read
      1.68 ± 13%     -51.9%       0.81 ± 13%  perf-profile.cycles.free_pcppages_bulk.free_hot_cold_page.__put_page.anon_pipe_buf_release.pipe_read
      8.80 ±  7%     -58.6%       3.64 ±  6%  perf-profile.cycles.futex_requeue.do_futex.sys_futex.entry_SYSCALL_64_fastpath
      4.01 ± 12%     -21.3%       3.16 ±  3%  perf-profile.cycles.futex_wait.do_futex.sys_futex.entry_SYSCALL_64_fastpath
      3.38 ±  8%     -17.7%       2.78 ±  3%  perf-profile.cycles.futex_wait_queue_me.futex_wait.do_futex.sys_futex.entry_SYSCALL_64_fastpath
      4.59 ± 10%     -26.7%       3.37 ± 11%  perf-profile.cycles.futex_wake.do_futex.sys_futex.entry_SYSCALL_64_fastpath
      4.08 ±  3%     -21.1%       3.22 ±  5%  perf-profile.cycles.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.pipe_write.__vfs_write
      3.29 ± 59%    +274.5%      12.30 ±  8%  perf-profile.cycles.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
      0.33 ± 32%    +514.2%       2.06 ±  3%  perf-profile.cycles.intel_idle.cpuidle_enter_state.cpuidle_enter.call_cpuidle.cpu_startup_entry
      0.67 ± 99%    +290.4%       2.63 ± 17%  perf-profile.cycles.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
      1.07 ±  3%     -49.4%       0.54 ± 27%  perf-profile.cycles.is_ftrace_trampoline.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk
      1.74 ± 12%     -32.8%       1.17 ± 10%  perf-profile.cycles.is_module_text_address.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk
      0.76 ± 22%     -39.6%       0.46 ± 17%  perf-profile.cycles.list_del.__rmqueue.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current
      3.41 ± 59%    +268.5%      12.55 ±  8%  perf-profile.cycles.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
      0.93 ± 29%    +369.4%       4.37 ±  6%  perf-profile.cycles.mutex_lock.pipe_read.__vfs_read.vfs_read.sys_read
      0.44 ± 39%    +273.3%       1.64 ± 16%  perf-profile.cycles.mutex_lock.pipe_wait.pipe_write.__vfs_write.vfs_write
      0.88 ± 31%    +394.0%       4.32 ±  5%  perf-profile.cycles.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_read.__vfs_read
      0.41 ± 38%    +283.7%       1.59 ± 17%  perf-profile.cycles.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_wait.pipe_write
      0.85 ± 33%    +396.2%       4.24 ±  5%  perf-profile.cycles.mutex_spin_on_owner.isra.4.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_read
      0.41 ± 38%    +276.5%       1.56 ± 18%  perf-profile.cycles.mutex_spin_on_owner.isra.4.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_wait
      0.23 ± 98%    +298.9%       0.92 ± 12%  perf-profile.cycles.perf_event_task_tick.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer
      1.55 ± 11%     +60.2%       2.49 ± 12%  perf-profile.cycles.pipe_wait.pipe_write.__vfs_write.vfs_write.sys_write
     26.79 ±  3%     -14.0%      23.05 ±  3%  perf-profile.cycles.pipe_write.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
      0.67 ± 97%     -90.7%       0.06 ± 54%  perf-profile.cycles.poll_idle.cpuidle_enter_state.cpuidle_enter.call_cpuidle.cpu_startup_entry
      7.72 ± 10%     -36.3%       4.92 ±  5%  perf-profile.cycles.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity
      0.22 ± 27%    +448.9%       1.23 ± 18%  perf-profile.cycles.rcu_check_callbacks.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues
      1.72 ± 10%     -82.1%       0.31 ± 16%  perf-profile.cycles.reschedule_interrupt
      0.36 ± 82%    +222.2%       1.16 ± 22%  perf-profile.cycles.run_timer_softirq.__do_softirq.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
      8.82 ± 10%     -36.9%       5.57 ±  3%  perf-profile.cycles.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task
      0.23 ± 40%    +625.8%       1.69 ±  4%  perf-profile.cycles.sched_ttwu_pending.cpu_startup_entry.start_secondary
      1.37 ± 11%     -81.2%       0.26 ± 18%  perf-profile.cycles.sched_ttwu_pending.scheduler_ipi.smp_reschedule_interrupt.reschedule_interrupt
      1.09 ± 21%     -63.3%       0.40 ± 29%  perf-profile.cycles.schedule.exit_to_usermode_loop.syscall_return_slowpath.entry_SYSCALL_64_fastpath
      3.00 ±  8%     -14.5%       2.57 ±  4%  perf-profile.cycles.schedule.futex_wait_queue_me.futex_wait.do_futex.sys_futex
      0.96 ± 10%     -24.1%       0.72 ± 13%  perf-profile.cycles.schedule.pipe_wait.pipe_write.__vfs_write.vfs_write
      0.11 ± 44%    +675.6%       0.87 ± 19%  perf-profile.cycles.schedule.schedule_preempt_disabled.cpu_startup_entry.start_secondary
      0.11 ± 44%    +675.6%       0.87 ± 19%  perf-profile.cycles.schedule_preempt_disabled.cpu_startup_entry.start_secondary
      1.47 ± 14%     -82.0%       0.27 ± 21%  perf-profile.cycles.scheduler_ipi.smp_reschedule_interrupt.reschedule_interrupt
      1.65 ± 61%    +266.8%       6.04 ±  9%  perf-profile.cycles.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues
      1.38 ± 17%     -66.1%       0.47 ± 13%  perf-profile.cycles.select_task_rq_fair.try_to_wake_up.wake_up_q.futex_requeue.do_futex
      4.41 ± 62%    +258.0%      15.80 ±  9%  perf-profile.cycles.smp_apic_timer_interrupt.apic_timer_interrupt
      1.50 ± 14%     -80.8%       0.29 ± 17%  perf-profile.cycles.smp_reschedule_interrupt.reschedule_interrupt
      1.81 ± 54%    +336.0%       7.89 ±  4%  perf-profile.cycles.start_secondary
     18.17 ±  7%     -42.4%      10.46 ±  2%  perf-profile.cycles.sys_futex.entry_SYSCALL_64_fastpath
     35.99 ±  2%      -9.8%      32.45 ±  3%  perf-profile.cycles.sys_read.entry_SYSCALL_64_fastpath
     27.21 ±  3%     -14.1%      23.37 ±  3%  perf-profile.cycles.sys_write.entry_SYSCALL_64_fastpath
      1.21 ± 17%     -57.6%       0.52 ± 32%  perf-profile.cycles.syscall_return_slowpath.entry_SYSCALL_64_fastpath
      0.85 ± 52%    +261.2%       3.07 ±  7%  perf-profile.cycles.task_tick_fair.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer
      0.32 ±134%    +244.2%       1.11 ± 21%  perf-profile.cycles.tick_program_event.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
      2.31 ± 50%    +274.6%       8.66 ±  6%  perf-profile.cycles.tick_sched_handle.isra.19.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt
      2.44 ± 53%    +279.1%       9.23 ±  6%  perf-profile.cycles.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt
      3.17 ± 12%     -33.4%       2.11 ±  7%  perf-profile.cycles.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key
      7.72 ±  8%     -61.4%       2.98 ±  6%  perf-profile.cycles.try_to_wake_up.wake_up_q.futex_requeue.do_futex.sys_futex
      4.04 ±  8%     -27.5%       2.93 ± 11%  perf-profile.cycles.try_to_wake_up.wake_up_q.futex_wake.do_futex.sys_futex
      0.19 ± 51%    +728.6%       1.59 ±  7%  perf-profile.cycles.ttwu_do_activate.constprop.89.sched_ttwu_pending.cpu_startup_entry.start_secondary
      1.72 ± 11%     -61.5%       0.66 ± 15%  perf-profile.cycles.ttwu_do_activate.constprop.89.sched_ttwu_pending.scheduler_ipi.smp_reschedule_interrupt.reschedule_interrupt
      2.43 ± 13%     -31.6%       1.66 ±  7%  perf-profile.cycles.ttwu_do_activate.constprop.89.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common
      5.57 ± 16%     -60.2%       2.21 ± 11%  perf-profile.cycles.ttwu_do_activate.constprop.89.try_to_wake_up.wake_up_q.futex_requeue.do_futex
      3.43 ±  8%     -36.8%       2.17 ± 12%  perf-profile.cycles.ttwu_do_activate.constprop.89.try_to_wake_up.wake_up_q.futex_wake.do_futex
      0.31 ± 83%    +243.9%       1.06 ±  6%  perf-profile.cycles.update_cfs_shares.task_tick_fair.scheduler_tick.update_process_times.tick_sched_handle
      2.27 ± 52%    +274.1%       8.49 ±  5%  perf-profile.cycles.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt
     35.85 ±  2%      -9.7%      32.36 ±  3%  perf-profile.cycles.vfs_read.sys_read.entry_SYSCALL_64_fastpath
     27.07 ±  2%     -14.1%      23.25 ±  3%  perf-profile.cycles.vfs_write.sys_write.entry_SYSCALL_64_fastpath
      7.87 ±  7%     -60.7%       3.09 ±  5%  perf-profile.cycles.wake_up_q.futex_requeue.do_futex.sys_futex.entry_SYSCALL_64_fastpath
      4.09 ±  8%     -27.0%       2.98 ± 10%  perf-profile.cycles.wake_up_q.futex_wake.do_futex.sys_futex.entry_SYSCALL_64_fastpath



lkp-snb01: Sandy Bridge-EP
Memory: 32G


                                    pigz.throughput

  3.58e+08 ++---------------------------------------------------------------+
           *.         .*.*..*.         .*.*..      .*.              .*.     |
  3.56e+08 ++*..*.*..*        *..*.*.*.      *.*..*   *..*.*..*.*.*.   *..*.*
           |                                                                |
           |                                                                |
  3.54e+08 ++                                                               |
           |                                                                |
  3.52e+08 ++                                                               |
           |                                                                |
   3.5e+08 ++                                                               |
           |                                                                |
           |                                                                |
  3.48e+08 ++          O    O O  O   O                                      |
           O O  O    O   O                O  O    O                         |
  3.46e+08 ++-----O----------------O----O------O----------------------------+


                                pigz.time.user_time

  9450 ++-------------------------------*-----------------------------------+
  9400 *+.*.*..*.*..*.*..*.*..*..*.  .*    *.*..*.*..*..*.*..*.*..*.*..*.*..*
       |                           *.                                       |
  9350 ++                                                                   |
  9300 ++                                                                   |
       |                                                                    |
  9250 ++                                                                   |
  9200 ++                                                                   |
  9150 ++                                                                   |
       |                                                                    |
  9100 ++                                                                   |
  9050 ++                                                                   |
       |                      O    O                                        |
  9000 O+ O O       O O  O O            O                                   |
  8950 ++------O-O---------------O----O----O-O--O---------------------------+


                               pigz.time.system_time

  110 ++--------------------------------------------------------------------+
      *..         .*..      .*..         .*..    .*..         .*.. .*..    .*
  108 ++ *.*..*.*.    *.*..*    *..*.*..*    *.*.    *.*..*.*.    *    *.*. |
  106 ++                                                                    |
      |                                                                     |
  104 ++                                                                    |
  102 ++                                                                    |
      |                                                                     |
  100 ++                                                                    |
   98 ++                                                                    |
      |                                                                     |
   96 ++                                                                    |
   94 O+   O  O O     O         O    O    O  O O                            |
      |  O         O    O  O O     O    O                                   |
   92 ++--------------------------------------------------------------------+


                       pigz.time.percent_of_cpu_this_job_got

  3180 *+-----------*-*--*--------------*----*----*------------*--*-*----*--+
       |  *.*..*.*.        *..*..*.*..*    *    *    *..*.*..*         *    *
  3160 ++                                                                   |
  3140 ++                                                                   |
       |                                                                    |
  3120 ++                                                                   |
  3100 ++                                                                   |
       |                                                                    |
  3080 ++                                                                   |
  3060 ++                                                                   |
       |                                                                    |
  3040 ++                     O    O                                        |
  3020 ++ O         O O  O O            O                                   |
       O    O  O O               O    O    O O  O                           |
  3000 ++-------------------------------------------------------------------+


                         pigz.time.voluntary_context_switches

  4.6e+06 ++----------------------------------------------------------------+
          *.              .*..           .*.  .*.  .*.         .*.       .*.|
  4.4e+06 ++*..*.*..*.*..*    *.*.*..*.*.   *.   *.   *.*..*.*.   *..*.*.   *
          |                                                                 |
  4.2e+06 ++                                                                |
          |                                                                 |
    4e+06 ++                                                                |
          |                                                                 |
  3.8e+06 ++                                                                |
          |                                                                 |
  3.6e+06 ++                                                                |
          |                                                                 |
  3.4e+06 ++                                                                |
          O O  O O  O O  O O  O O O  O O  O O  O O                          |
  3.2e+06 ++----------------------------------------------------------------+


                        pigz.time.involuntary_context_switches

  3.5e+06 ++----------------------------------------------------------------+
          |                              .*.                                |
          *.*..*.*..*.*..*.*..*.*.*..*.*.   *..*.*..*.*.*..*.*..*.*..*.*..*.*
    3e+06 ++                                                                |
          |                                                                 |
          |                                                                 |
  2.5e+06 ++                                                                |
          |                                                                 |
    2e+06 ++                                                                |
          |                                                                 |
          |                                                                 |
  1.5e+06 ++                                                                |
          |                O    O    O                                      |
          O O  O O  O O  O    O   O    O  O O  O O                          |
    1e+06 ++----------------------------------------------------------------+


        [*] bisect-good sample
        [O] bisect-bad  sample

To reproduce:

        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/lkp install job.yaml  # job file is attached in this email
        bin/lkp run     job.yaml

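The job parameters above (blocksize: 128K, nr_threads: 100%) suggest pigz is run with a 128 KiB compression block size and one thread per CPU. If you only want a rough standalone approximation of the workload, without the lkp harness (the exact input data and options of the lkp pigz test are assumptions here), something like:

        # Hypothetical approximation, not the lkp test script itself:
        # compress ~1 GiB of data with one pigz thread per CPU and a
        # 128 KiB block size, discarding the compressed output.
        dd if=/dev/urandom of=/tmp/pigz-input bs=1M count=1024
        pigz -p "$(nproc)" -b 128 -c /tmp/pigz-input > /dev/null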

Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Xiaolong Ye
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: pigz
default-monitors:
  wait: activate-monitor
  kmsg: 
  uptime: 
  iostat: 
  heartbeat: 
  vmstat: 
  numa-numastat: 
  numa-vmstat: 
  numa-meminfo: 
  proc-vmstat: 
  proc-stat:
    interval: 10
  meminfo: 
  slabinfo: 
  interrupts: 
  lock_stat: 
  latency_stats: 
  softirqs: 
  bdi_dev_mapping: 
  diskstats: 
  nfsstat: 
  cpuidle: 
  cpufreq-stats: 
  turbostat: 
  pmeter: 
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer: 
  watchdog: 
commit: 3a47d5124a957358274e9ca7b115b2f3a914f56d
model: Sandy Bridge-EP
memory: 32G
hdd_partitions: "/dev/sda2"
swap_partitions: 
category: benchmark
perf-profile:
  freq: 800
nr_threads: 100%
pigz:
  blocksize: 128K
queue: bisect
testbox: lkp-snb01
tbox_group: lkp-snb01
kconfig: x86_64-rhel
enqueue_time: 2016-03-27 00:13:51.566934201 +08:00
compiler: gcc-4.9
rootfs: debian-x86_64-2015-02-07.cgz
id: c3b7127f7163951cf24dbf6b03c1e92367c8b4af
user: lkp
head_commit: 1701f680407c6cc05121566cef5293d33881fe71
base_commit: 10f1685fd59d4ded226123550bdaef176b99f65f
branch: linus/master
result_root: "/result/pigz/performance-100%-128K/lkp-snb01/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/3a47d5124a957358274e9ca7b115b2f3a914f56d/0"
job_file: "/lkp/scheduled/lkp-snb01/bisect_pigz-performance-100%-128K-debian-x86_64-2015-02-07.cgz-x86_64-rhel-3a47d5124a957358274e9ca7b115b2f3a914f56d-20160327-91511-18rmyet-0.yaml"
nr_cpu: "$(nproc)"
max_uptime: 1272.94
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-snb01/bisect_pigz-performance-100%-128K-debian-x86_64-2015-02-07.cgz-x86_64-rhel-3a47d5124a957358274e9ca7b115b2f3a914f56d-20160327-91511-18rmyet-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linus/master
- commit=3a47d5124a957358274e9ca7b115b2f3a914f56d
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/3a47d5124a957358274e9ca7b115b2f3a914f56d/vmlinuz-4.5.0-00764-g3a47d51
- max_uptime=1272
- RESULT_ROOT=/result/pigz/performance-100%-128K/lkp-snb01/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/3a47d5124a957358274e9ca7b115b2f3a914f56d/0
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/3a47d5124a957358274e9ca7b115b2f3a914f56d/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/pigz.cgz"
linux_headers_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/3a47d5124a957358274e9ca7b115b2f3a914f56d/linux-headers.cgz"
repeat_to: 2
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/3a47d5124a957358274e9ca7b115b2f3a914f56d/vmlinuz-4.5.0-00764-g3a47d51"
dequeue_time: 2016-03-27 00:22:19.978224551 +08:00
job_state: finished
loadavg: 28.55 19.54 8.59 1/393 8796
start_time: '1459009383'
end_time: '1459009683'
version: "/lkp/lkp/.src-20160325-205817"
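
The perf-profile monitor in the job file above is configured with freq: 800; a hand-run approximation of that sampling (the monitor's exact perf invocation is an assumption here) would be:

  # Assumed rough equivalent of the perf-profile monitor: sample all CPUs
  # at 800 Hz with call graphs for the 300 s benchmark window, then summarize.
  perf record -a -g -F 800 -- sleep 300
  perf report --stdio
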
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu16/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu17/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu18/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu19/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu20/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu21/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu22/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu23/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu24/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu25/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu26/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu27/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu28/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu29/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu30/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu31/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
2016-03-27 00:23:02 echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
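
The per-CPU governor setup logged above is equivalent to a simple loop over the cpufreq sysfs nodes; a minimal sketch (assuming all CPUs are online and expose cpufreq):

        # Set the performance governor on every CPU that exposes cpufreq.
        for g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
                echo performance > "$g"
        done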
