FYI, we noticed a +1.5% improvement of will-it-scale.per_process_ops on

git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit a08a8cd375db9769588257e7782f6b6b68561b88 ("NFS: Add attribute update barriers to NFS writebacks")
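To read the comparison table below: each row gives the mean ± %stddev of a metric on the base commit (f5062003465c20cf, left column) and on the commit under test (a08a8cd375db9769588257e778, right column), and %change is the relative delta between the two means. As a worked check on the headline metric:

	(2.042e+08 - 2.012e+08) / 2.012e+08  ~=  +1.5%    (will-it-scale.per_process_ops)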
testbox/testcase/testparams: ivb42/will-it-scale/performance-pthread_mutex2

f5062003465c20cf  a08a8cd375db9769588257e778
----------------  --------------------------
         %stddev     %change         %stddev
             \          |                \
 2.012e+08 ±  0%      +1.5%  2.042e+08 ±  0%  will-it-scale.per_process_ops
      3.81 ±  0%      +1.6%       3.88 ±  0%  turbostat.RAMWatt
       387 ±  7%     +13.7%        441 ±  6%  numa-vmstat.node0.nr_page_table_pages
      5639 ± 15%     -15.1%       4788 ±  0%  meminfo.AnonHugePages
      1552 ±  7%     +13.7%       1765 ±  6%  numa-meminfo.node0.PageTables
     29.40 ± 21%     +57.6%      46.32 ±  3%  perf-profile.cpu-cycles.start_secondary
     10.72 ± 30%    -100.0%       0.00 ±  0%  perf-profile.cpu-cycles.rest_init.start_kernel.x86_64_start_reservations.x86_64_start_kernel
     10.72 ± 30%    -100.0%       0.00 ±  0%  perf-profile.cpu-cycles.cpu_startup_entry.rest_init.start_kernel.x86_64_start_reservations.x86_64_start_kernel
      1.23 ± 17%     -29.1%       0.87 ± 19%  perf-profile.cpu-cycles.cmd_record._start.main.__libc_start_main
      1.27 ± 17%     -29.4%       0.89 ± 16%  perf-profile.cpu-cycles.main.__libc_start_main
      1.27 ± 17%     -31.2%       0.87 ± 19%  perf-profile.cpu-cycles._start.main.__libc_start_main
     25.76 ± 19%     +66.1%      42.79 ±  5%  perf-profile.cpu-cycles.cpuidle_enter.cpu_startup_entry.start_secondary
      1.39 ± 16%     -30.3%       0.97 ± 11%  perf-profile.cpu-cycles.__libc_start_main
      1.62 ± 20%     +52.2%       2.47 ± 18%  perf-profile.cpu-cycles.ktime_get_update_offsets_now.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
     28.79 ± 21%     +60.5%      46.22 ±  3%  perf-profile.cpu-cycles.cpu_startup_entry.start_secondary
      0.00 ±  0%      +Inf%       0.81 ± 36%  perf-profile.cpu-cycles._raw_spin_unlock_irqrestore.update_blocked_averages.rebalance_domains.run_rebalance_domains.__do_softirq
     10.72 ± 30%    -100.0%       0.00 ±  0%  perf-profile.cpu-cycles.x86_64_start_kernel
     18.38 ± 16%     +43.4%      26.35 ±  3%  perf-profile.cpu-cycles.intel_idle.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
      1.64 ± 30%     -59.1%       0.67 ± 46%  perf-profile.cpu-cycles.filemap_map_pages.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault
     18.60 ± 17%     +42.4%      26.49 ±  4%  perf-profile.cpu-cycles.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
     10.42 ± 30%    -100.0%       0.00 ±  0%  perf-profile.cpu-cycles.cpuidle_enter.cpu_startup_entry.rest_init.start_kernel.x86_64_start_reservations
      9.45 ± 34%    -100.0%       0.00 ±  0%  perf-profile.cpu-cycles.smp_reschedule_interrupt.reschedule_interrupt.cpuidle_enter.cpu_startup_entry.rest_init
     10.72 ± 30%    -100.0%       0.00 ±  0%  perf-profile.cpu-cycles.x86_64_start_reservations.x86_64_start_kernel
     13.90 ±  7%     -12.0%      12.22 ±  4%  perf-profile.cpu-cycles.update_process_times.tick_sched_handle.tick_sched_timer.__run_hrtimer.hrtimer_interrupt
     13.92 ±  2%     -22.7%      10.77 ± 18%  perf-profile.cpu-cycles.__do_softirq.irq_exit.scheduler_ipi.smp_reschedule_interrupt.reschedule_interrupt
     10.72 ± 30%    -100.0%       0.00 ±  0%  perf-profile.cpu-cycles.start_kernel.x86_64_start_reservations.x86_64_start_kernel
      9.45 ± 34%    -100.0%       0.00 ±  0%  perf-profile.cpu-cycles.reschedule_interrupt.cpuidle_enter.cpu_startup_entry.rest_init.start_kernel
   1438772 ±  8%     +21.2%    1743708 ±  2%  sched_debug.cfs_rq[0]:/.min_vruntime
     25049 ±  3%     +26.4%      31672 ±  1%  sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
        17 ± 12%    +121.4%         38 ± 19%  sched_debug.cfs_rq[0]:/.runnable_load_avg
       545 ±  3%     +26.4%        689 ±  2%  sched_debug.cfs_rq[0]:/.tg_runnable_contrib
        48 ± 47%    +147.7%        120 ± 30%  sched_debug.cfs_rq[0]:/.tg_load_contrib
     53474 ±  7%     +63.0%      87186 ±  2%  sched_debug.cfs_rq[0]:/.exec_clock
        61 ±  4%     -43.0%         34 ±  9%  sched_debug.cfs_rq[24]:/.runnable_load_avg
   2241502 ±  4%     -17.8%    1843317 ±  1%  sched_debug.cfs_rq[24]:/.min_vruntime
    802666 ± 24%     -87.6%      99533 ± 40%  sched_debug.cfs_rq[24]:/.spread0
        99 ± 21%     +51.9%        151 ± 31%  sched_debug.cfs_rq[24]:/.tg_load_contrib
        62 ±  5%     -44.6%         34 ± 10%  sched_debug.cfs_rq[24]:/.load
       855 ±  2%     -17.4%        706 ±  2%  sched_debug.cfs_rq[24]:/.tg_runnable_contrib
     39223 ±  2%     -17.4%      32411 ±  2%  sched_debug.cfs_rq[24]:/.avg->runnable_avg_sum
    129807 ±  3%     -26.0%      96066 ±  2%  sched_debug.cfs_rq[24]:/.exec_clock
         1 ±  0%    +300.0%          4 ± 50%  sched_debug.cfs_rq[39]:/.nr_spread_over
         3 ± 33%     -66.7%          1 ±  0%  sched_debug.cfs_rq[3]:/.nr_spread_over
        17 ± 12%    +122.9%         39 ± 18%  sched_debug.cpu#0.cpu_load[0]
     56862 ±  7%     +58.5%      90118 ±  2%  sched_debug.cpu#0.nr_load_updates
      3783 ±  6%     +13.9%       4310 ±  0%  sched_debug.cpu#0.curr->pid
     16708 ± 18%     -18.4%      13637 ±  3%  sched_debug.cpu#0.sched_count
      3907 ± 10%     -24.6%       2945 ± 11%  sched_debug.cpu#0.ttwu_local
      5540 ±  8%     -15.4%       4688 ±  6%  sched_debug.cpu#0.ttwu_count
        19 ± 17%    +182.1%         55 ± 33%  sched_debug.cpu#0.cpu_load[2]
        18 ± 14%    +159.5%         48 ± 29%  sched_debug.cpu#0.cpu_load[1]
        22 ± 27%    +181.8%         62 ± 35%  sched_debug.cpu#0.cpu_load[3]
        24 ± 35%    +175.8%         68 ± 35%  sched_debug.cpu#0.cpu_load[4]
      2253 ± 11%     -23.0%       1734 ±  9%  sched_debug.cpu#0.sched_goidle
      3720 ±  8%     -16.5%       3108 ± 13%  sched_debug.cpu#13.curr->pid
      6862 ± 10%     -32.1%       4659 ± 15%  sched_debug.cpu#15.nr_switches
      3135 ± 11%     -36.6%       1987 ± 15%  sched_debug.cpu#15.sched_goidle
        10 ±  4%     +14.6%         11 ±  7%  sched_debug.cpu#22.cpu_load[4]
        84 ± 16%     -55.2%         38 ± 21%  sched_debug.cpu#24.cpu_load[3]
        94 ± 18%     -55.9%         41 ± 22%  sched_debug.cpu#24.cpu_load[4]
    136876 ±  2%     -20.1%     109402 ±  2%  sched_debug.cpu#24.nr_load_updates
        61 ±  4%     -43.0%         34 ±  9%  sched_debug.cpu#24.cpu_load[0]
        62 ±  5%     -44.6%         34 ± 10%  sched_debug.cpu#24.load
      4589 ±  3%     -10.6%       4101 ±  0%  sched_debug.cpu#24.curr->pid
        65 ±  6%     -45.8%         35 ± 15%  sched_debug.cpu#24.cpu_load[1]
        75 ± 13%     -51.2%         37 ± 20%  sched_debug.cpu#24.cpu_load[2]
       411 ± 29%     +82.3%        749 ± 46%  sched_debug.cpu#26.ttwu_count
       207 ± 26%    +110.2%        436 ± 23%  sched_debug.cpu#32.ttwu_count
      1027 ± 37%     -52.7%        486 ±  2%  sched_debug.cpu#37.ttwu_local
      1550 ± 23%     -41.5%        906 ± 39%  sched_debug.cpu#42.ttwu_count
       773 ± 21%    +152.1%       1950 ± 46%  sched_debug.cpu#45.ttwu_count
      1664 ± 10%    +109.2%       3482 ± 27%  sched_debug.cpu#45.nr_switches
       386 ± 24%    +199.9%       1159 ± 40%  sched_debug.cpu#45.ttwu_local
        10 ±  8%     +59.5%         16 ± 36%  sched_debug.cpu#45.cpu_load[1]
       699 ± 10%     +59.6%       1116 ± 30%  sched_debug.cpu#45.sched_goidle
      1678 ± 10%    +158.7%       4341 ± 48%  sched_debug.cpu#45.sched_count
      2954 ±  4%     +15.7%       3417 ±  6%  sched_debug.cpu#47.curr->pid

ivb42: Ivytown Ivy Bridge-EP
Memory: 64G

[figure: will-it-scale.per_process_ops per sample; y-axis spans 1.995e+08 to
 2.045e+08; bisect-good (*) samples cluster near 2.0e+08..2.01e+08, bisect-bad
 (O) samples near 2.02e+08..2.045e+08]

	[*] bisect-good sample
	[O] bisect-bad sample

To reproduce:

	apt-get install ruby
	git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
	cd lkp-tests
	bin/setup-local job.yaml  # the job file attached in this email
	bin/run-local   job.yaml

Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.

Thanks,
Ying Huang
---
testcase: will-it-scale
default-monitors:
  wait: pre-test
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 10
default_watchdogs:
  watch-oom:
  watchdog:
cpufreq_governor: performance
commit: 09dd29592b9de9921d5451d18735ddf7b7c41c89
model: Ivytown Ivy Bridge-EP
nr_cpu: 48
memory: 64G
rootfs: debian-x86_64-2015-02-07.cgz
perf-profile:
  freq: 800
will-it-scale:
  test: pthread_mutex2
testbox: ivb42
tbox_group: ivb42
kconfig: x86_64-rhel
enqueue_time: 2015-03-08 22:23:51.602528045 +08:00
head_commit: 09dd29592b9de9921d5451d18735ddf7b7c41c89
base_commit: 9eccca0843205f87c00404b663188b88eb248051
branch: linux-devel/devel-hourly-2015030909
kernel: "/kernel/x86_64-rhel/09dd29592b9de9921d5451d18735ddf7b7c41c89/vmlinuz-4.0.0-rc3-01030-g09dd295"
user: lkp
queue: cyclic
result_root: "/result/ivb42/will-it-scale/performance-pthread_mutex2/debian-x86_64-2015-02-07.cgz/x86_64-rhel/09dd29592b9de9921d5451d18735ddf7b7c41c89/0"
job_file: "/lkp/scheduled/ivb42/cyclic_will-it-scale-performance-pthread_mutex2-debian-x86_64.cgz-x86_64-rhel-HEAD-09dd29592b9de9921d5451d18735ddf7b7c41c89-0-20150308-40925-iwbdjc.yaml"
dequeue_time: 2015-03-09 13:24:54.233793554 +08:00
job_state: finished
loadavg: 25.26 17.11 7.20 1/422 10086
start_time: '1425878742'
end_time: '1425879052'
version: "/lkp/lkp/.src-20150309-125440"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu16/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu17/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu18/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu19/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu20/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu21/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu22/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu23/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu24/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu25/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu26/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu27/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu28/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu29/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu30/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu31/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu32/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu33/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu34/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu35/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu36/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu37/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu38/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu39/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu40/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu41/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu42/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu43/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu44/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu45/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu46/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu47/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
./runtest.py pthread_mutex2 25 both 1 12 24 36 48
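For context: the runtest.py arguments above appear to be the test name, run
time in seconds, mode (process/thread/both), and the task counts to sweep.
The pthread_mutex2 test body in will-it-scale is essentially a tight pthread
mutex lock/unlock loop whose iteration rate becomes per_process_ops. Below is
a minimal, illustrative C sketch of that kind of loop; it assumes a single
worker, a task-private (uncontended) mutex, and a 5-second sample window, and
is not the actual will-it-scale source (build with: gcc -O2 -pthread sketch.c):

	/*
	 * Illustrative sketch only -- not the actual will-it-scale source.
	 * One worker hammers lock/unlock on a task-private pthread mutex;
	 * main() samples the shared counter to estimate operations per
	 * second, analogous to will-it-scale.per_process_ops.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static unsigned long long iterations;	/* updated relaxed-atomically below */

	static void *worker(void *arg)
	{
		pthread_mutex_t mutex;		/* private: exercises the uncontended fast path */

		pthread_mutex_init(&mutex, NULL);
		for (;;) {
			pthread_mutex_lock(&mutex);
			pthread_mutex_unlock(&mutex);
			__atomic_fetch_add(&iterations, 1, __ATOMIC_RELAXED);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;
		unsigned long long before, after;

		pthread_create(&tid, NULL, worker, NULL);
		before = __atomic_load_n(&iterations, __ATOMIC_RELAXED);
		sleep(5);			/* sample window */
		after = __atomic_load_n(&iterations, __ATOMIC_RELAXED);
		printf("ops/sec ~= %llu\n", (after - before) / 5);
		return 0;
	}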
_______________________________________________
LKP mailing list
l...@linux.intel.com