FYI, we noticed a -8.5% regression of will-it-scale.per_thread_ops due to the commit below on

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit 6cdb18ad98a49f7e9b95d538a0614cde827404b8 ("mm/vmstat: fix overflow in mod_zone_page_state()")

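For context: the commit widens the delta argument of the vmstat accounting helpers from int to long, so large page-state updates can no longer overflow on 64-bit kernels. A minimal sketch of the interface change (paraphrased from include/linux/vmstat.h, not the verbatim diff):

        /* before the commit: delta outside the int range could
         * be truncated or overflow */
        void mod_zone_page_state(struct zone *zone,
                                 enum zone_stat_item item, int delta);

        /* after the commit: delta is carried as long end to end */
        void mod_zone_page_state(struct zone *zone,
                                 enum zone_stat_item item, long delta);

The change is semantically neutral for small deltas, so the differences below presumably reflect changed code generation in this hot accounting path rather than changed behavior.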

=========================================================================================
compiler/cpufreq_governor/kconfig/rootfs/tbox_group/test/testcase:
  gcc-4.9/performance/x86_64-rhel/debian-x86_64-2015-02-07.cgz/ivb42/pread1/will-it-scale

commit: 
  cc28d6d80f6ab494b10f0e2ec949eacd610f66e3
  6cdb18ad98a49f7e9b95d538a0614cde827404b8

cc28d6d80f6ab494 6cdb18ad98a49f7e9b95d538a0 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
   2733943 ±  0%      -8.5%    2502129 ±  0%  will-it-scale.per_thread_ops
      3410 ±  0%      -2.0%       3343 ±  0%  will-it-scale.time.system_time
    340.08 ±  0%     +19.7%     406.99 ±  0%  will-it-scale.time.user_time
  69882822 ±  2%     -24.3%   52926191 ±  5%  cpuidle.C1-IVT.time
    340.08 ±  0%     +19.7%     406.99 ±  0%  time.user_time
    491.25 ±  6%     -17.7%     404.25 ±  7%  numa-vmstat.node0.nr_alloc_batch
      2799 ± 20%     -36.6%       1776 ±  0%  numa-vmstat.node0.nr_mapped
    630.00 ±140%    +244.4%       2169 ±  1%  numa-vmstat.node1.nr_inactive_anon
      6440 ± 11%     -15.5%       5440 ± 16%  numa-vmstat.node1.nr_slab_reclaimable
     11204 ± 20%     -36.6%       7106 ±  0%  numa-meminfo.node0.Mapped
      1017 ±173%    +450.3%       5598 ± 15%  numa-meminfo.node1.AnonHugePages
      2521 ±140%    +244.1%       8678 ±  1%  numa-meminfo.node1.Inactive(anon)
     25762 ± 11%     -15.5%      21764 ± 16%  numa-meminfo.node1.SReclaimable
     70103 ±  9%      -9.8%      63218 ±  9%  numa-meminfo.node1.Slab
      2.29 ±  3%     +32.8%       3.04 ±  4%  perf-profile.cycles-pp.atime_needs_update.touch_atime.shmem_file_read_iter.__vfs_read.vfs_read
      1.10 ±  3%     -27.4%       0.80 ±  5%  perf-profile.cycles-pp.current_fs_time.atime_needs_update.touch_atime.shmem_file_read_iter.__vfs_read
      2.33 ±  2%     -13.0%       2.02 ±  3%  perf-profile.cycles-pp.fput.entry_SYSCALL_64_fastpath
      0.89 ±  2%     +29.6%       1.15 ±  7%  perf-profile.cycles-pp.fsnotify.vfs_read.sys_pread64.entry_SYSCALL_64_fastpath
      2.85 ±  2%     +45.4%       4.14 ±  5%  perf-profile.cycles-pp.touch_atime.shmem_file_read_iter.__vfs_read.vfs_read.sys_pread64
     63939 ±  0%     +17.9%      75370 ± 15%  sched_debug.cfs_rq:/.exec_clock.25
     72.50 ± 73%     -63.1%      26.75 ± 19%  sched_debug.cfs_rq:/.load_avg.1
     34.00 ± 62%     -61.8%      13.00 ± 12%  sched_debug.cfs_rq:/.load_avg.14
     18.00 ± 11%     -11.1%      16.00 ± 10%  sched_debug.cfs_rq:/.load_avg.20
     14.75 ± 41%    +122.0%      32.75 ± 26%  sched_debug.cfs_rq:/.load_avg.25
    278.88 ± 11%     +18.8%     331.25 ±  7%  sched_debug.cfs_rq:/.load_avg.max
     51.89 ± 11%     +13.6%      58.97 ±  4%  sched_debug.cfs_rq:/.load_avg.stddev
      7.25 ±  5%    +255.2%      25.75 ± 53%  sched_debug.cfs_rq:/.runnable_load_avg.25
     28.50 ±  1%     +55.3%      44.25 ± 46%  sched_debug.cfs_rq:/.runnable_load_avg.7
     72.50 ± 73%     -63.1%      26.75 ± 19%  sched_debug.cfs_rq:/.tg_load_avg_contrib.1
     34.00 ± 62%     -61.8%      13.00 ± 12%  sched_debug.cfs_rq:/.tg_load_avg_contrib.14
     18.00 ± 11%     -11.1%      16.00 ± 10%  sched_debug.cfs_rq:/.tg_load_avg_contrib.20
     14.75 ± 41%    +122.0%      32.75 ± 25%  sched_debug.cfs_rq:/.tg_load_avg_contrib.25
    279.29 ± 11%     +19.1%     332.67 ±  7%  sched_debug.cfs_rq:/.tg_load_avg_contrib.max
     52.01 ± 11%     +13.8%      59.18 ±  4%  sched_debug.cfs_rq:/.tg_load_avg_contrib.stddev
    359.50 ±  6%     +41.5%     508.75 ± 22%  sched_debug.cfs_rq:/.util_avg.25
    206.25 ± 16%     -13.1%     179.25 ± 11%  sched_debug.cfs_rq:/.util_avg.40
    688.75 ±  1%     +18.5%     816.00 ±  1%  sched_debug.cfs_rq:/.util_avg.7
    953467 ±  1%     -17.9%     782518 ± 10%  sched_debug.cpu.avg_idle.5
      9177 ± 43%     +73.9%      15957 ± 29%  sched_debug.cpu.nr_switches.13
      7365 ± 19%     -35.4%       4755 ± 11%  sched_debug.cpu.nr_switches.20
     12203 ± 28%     -62.2%       4608 ±  9%  sched_debug.cpu.nr_switches.22
      1868 ± 49%     -51.1%     913.50 ± 27%  sched_debug.cpu.nr_switches.27
      2546 ± 56%     -70.0%     763.00 ± 18%  sched_debug.cpu.nr_switches.28
      3003 ± 78%     -77.9%     663.00 ± 18%  sched_debug.cpu.nr_switches.33
      1820 ± 19%     +68.0%       3058 ± 33%  sched_debug.cpu.nr_switches.8
     -4.00 ±-35%    -156.2%       2.25 ± 85%  sched_debug.cpu.nr_uninterruptible.11
      4.00 ±133%    -187.5%      -3.50 ±-24%  sched_debug.cpu.nr_uninterruptible.17
      1.75 ± 74%    -214.3%      -2.00 ±-127%  sched_debug.cpu.nr_uninterruptible.25
      0.00 ±  2%      +Inf%       4.00 ± 39%  sched_debug.cpu.nr_uninterruptible.26
      2.50 ± 44%    -110.0%      -0.25 ±-591%  sched_debug.cpu.nr_uninterruptible.27
      1.33 ±154%    -287.5%      -2.50 ±-72%  sched_debug.cpu.nr_uninterruptible.32
     -1.00 ±-244%    -250.0%       1.50 ±251%  sched_debug.cpu.nr_uninterruptible.45
      3.50 ± 82%    -135.7%      -1.25 ±-66%  sched_debug.cpu.nr_uninterruptible.46
     -4.50 ±-40%    -133.3%       1.50 ±242%  sched_debug.cpu.nr_uninterruptible.6
     -3.00 ±-78%    -433.3%      10.00 ±150%  sched_debug.cpu.nr_uninterruptible.7
     10124 ± 39%     +65.8%      16783 ± 23%  sched_debug.cpu.sched_count.13
     12833 ± 23%     -54.6%       5823 ± 32%  sched_debug.cpu.sched_count.22
      1934 ± 48%     -49.8%     971.00 ± 26%  sched_debug.cpu.sched_count.27
      3065 ± 76%     -76.2%     728.25 ± 16%  sched_debug.cpu.sched_count.33
      2098 ± 24%    +664.1%      16030 ±126%  sched_debug.cpu.sched_count.5
      4653 ± 33%     +83.4%       8536 ± 25%  sched_debug.cpu.sched_goidle.15
      5061 ± 41%     -61.1%       1968 ± 13%  sched_debug.cpu.sched_goidle.22
    834.75 ± 57%     -60.2%     332.00 ± 35%  sched_debug.cpu.sched_goidle.27
    719.00 ± 71%     -63.3%     264.00 ± 19%  sched_debug.cpu.sched_goidle.28
    943.25 ±115%     -76.3%     223.25 ± 21%  sched_debug.cpu.sched_goidle.33
      2520 ± 26%    +112.4%       5353 ± 19%  sched_debug.cpu.ttwu_count.13
      5324 ± 22%     -49.7%       2679 ± 45%  sched_debug.cpu.ttwu_count.22
      2926 ± 38%    +231.1%       9690 ± 37%  sched_debug.cpu.ttwu_count.23
    277.25 ± 18%    +166.7%     739.50 ± 83%  sched_debug.cpu.ttwu_count.27
      1247 ± 61%     -76.6%     292.25 ± 11%  sched_debug.cpu.ttwu_count.28
    751.75 ± 22%    +183.9%       2134 ±  9%  sched_debug.cpu.ttwu_count.3
      6405 ± 97%     -75.9%       1542 ± 48%  sched_debug.cpu.ttwu_count.41
      5582 ±104%     -76.2%       1327 ± 55%  sched_debug.cpu.ttwu_count.43
      3201 ± 26%     -75.1%     796.75 ± 18%  sched_debug.cpu.ttwu_local.22


ivb42: Ivytown Ivy Bridge-EP
Memory: 64G

To reproduce:

        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/lkp install job.yaml  # job file is attached in this email
        bin/lkp run     job.yaml
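For profile data comparable to the perf-profile.cycles-pp numbers above, the benchmark can also be run directly under perf. A sketch, assuming a local will-it-scale checkout (the build produces one _processes and one _threads binary per test, so the pread1_threads name is an assumption about that layout):

        git clone https://github.com/antonblanchard/will-it-scale.git
        cd will-it-scale && make

        # sample call graphs at 800 Hz, matching this job's perf-profile freq
        perf record -g -F 800 -- ./pread1_threads
        perf report --sort symbol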


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: will-it-scale
default-monitors:
  wait: activate-monitor
  kmsg: 
  uptime: 
  iostat: 
  vmstat: 
  numa-numastat: 
  numa-vmstat: 
  numa-meminfo: 
  proc-vmstat: 
  proc-stat:
    interval: 10
  meminfo: 
  slabinfo: 
  interrupts: 
  lock_stat: 
  latency_stats: 
  softirqs: 
  bdi_dev_mapping: 
  diskstats: 
  nfsstat: 
  cpuidle: 
  cpufreq-stats: 
  turbostat: 
  pmeter: 
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer: 
  watchdog: 
commit: 6cdb18ad98a49f7e9b95d538a0614cde827404b8
model: Ivytown Ivy Bridge-EP
nr_cpu: 48
memory: 64G
swap_partitions: LABEL=SWAP
rootfs_partition: LABEL=LKP-ROOTFS
category: benchmark
perf-profile:
  freq: 800
will-it-scale:
  test: pread1
queue: bisect
testbox: ivb42
tbox_group: ivb42
kconfig: x86_64-rhel
enqueue_time: 2016-01-05 05:00:40.511744641 +08:00
id: 374e605d3cbf102941031de6640b9edf424e5409
user: lkp
compiler: gcc-4.9
head_commit: f4366aad18b531cf15057f70e3cea09fef88c310
base_commit: 168309855a7d1e16db751e9c647119fe2d2dc878
branch: internal-eywa/master
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/will-it-scale/performance-pread1/ivb42/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/6cdb18ad98a49f7e9b95d538a0614cde827404b8/0"
job_file: "/lkp/scheduled/ivb42/bisect_will-it-scale-performance-pread1-debian-x86_64-2015-02-07.cgz-x86_64-rhel-6cdb18ad98a49f7e9b95d538a0614cde827404b8-20160105-44334-2cdw3i-0.yaml"
max_uptime: 1500
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/ivb42/bisect_will-it-scale-performance-pread1-debian-x86_64-2015-02-07.cgz-x86_64-rhel-6cdb18ad98a49f7e9b95d538a0614cde827404b8-20160105-44334-2cdw3i-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=internal-eywa/master
- commit=6cdb18ad98a49f7e9b95d538a0614cde827404b8
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/6cdb18ad98a49f7e9b95d538a0614cde827404b8/vmlinuz-4.4.0-rc7-00013-g6cdb18a
- max_uptime=1500
- RESULT_ROOT=/result/will-it-scale/performance-pread1/ivb42/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/6cdb18ad98a49f7e9b95d538a0614cde827404b8/0
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/6cdb18ad98a49f7e9b95d538a0614cde827404b8/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/will-it-scale.cgz"
linux_headers_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/6cdb18ad98a49f7e9b95d538a0614cde827404b8/linux-headers.cgz"
repeat_to: 2
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/6cdb18ad98a49f7e9b95d538a0614cde827404b8/vmlinuz-4.4.0-rc7-00013-g6cdb18a"
dequeue_time: 2016-01-05 05:06:46.434314862 +08:00
job_state: finished
loadavg: 38.55 18.46 7.28 1/506 9298
start_time: '1451941650'
end_time: '1451941960'
version: "/lkp/lkp/.src-20160104-165204"

Attachment: reproduce.sh
Description: Bourne shell script
