=========================================================================================
compiler/cpufreq_governor/ipc/iterations/kconfig/mode/nr_threads/rootfs/tbox_group/testcase:
  gcc-12/performance/socket/4/x86_64-rhel-8.3/process/100%/debian-11.1-x86_64-20220510.cgz/lkp-icl-2sp6/hackbench

7bc162d5cc4de5c3 a0fd217e6d6fbd23e91f8796787 6ba02860484315665e300d9f415
---------------- --------------------------- ---------------------------
         %stddev     %change         %stddev     %change         %stddev
             \          |                \          |                \

4.883e+08 ± 5%   -5.4%   4.621e+08 ± 3%   -7.2%   4.531e+08 ± 2%   cpuidle..time
4168912 ± 4%   -7.0%   3877247 ± 7%   -10.3%   3737609 ± 4%   cpuidle..usage
182.32 ± 3%   +6.7%   194.55 ± 8%   +14.1%   208.03 ± 11%   uptime.boot
5404 ± 13%   +8.4%   5859 ± 29%   +34.0%   7241 ± 34%   uptime.idle
43.13 ± 15%   +10.9%   47.82 ± 31%   +41.0%   60.81 ± 38%   boot-time.boot
29.24 ± 21%   +15.9%   33.89 ± 44%   +60.7%   46.98 ± 49%   boot-time.dhcp
4718 ± 15%   +11.2%   5247 ± 32%   +40.9%   6646 ± 37%   boot-time.idle
3.23 ± 41%   +29.7%   4.18 ± 75%   +114.7%   6.93 ± 69%   boot-time.smp_boot
12760 ± 70%   -100.0%   0.00   -100.0%   0.00   perf-c2c.DRAM.local
2596 ± 70%   -100.0%   0.00   -100.0%   0.00   perf-c2c.DRAM.remote
25909 ± 70%   -100.0%   0.00   -100.0%   0.00   perf-c2c.HITM.local
256.83 ± 72%   -100.0%   0.00   -100.0%   0.00   perf-c2c.HITM.remote
26166 ± 70%   -100.0%   0.00   -100.0%   0.00   perf-c2c.HITM.total
2.55 ± 11%   -0.2   2.33 ± 12%   -0.2   2.32 ± 9%   mpstat.cpu.all.idle%
0.00 ±223%   -0.0   0.00   -0.0   0.00   mpstat.cpu.all.iowait%
1.45   -0.3   1.19   -0.3   1.19   mpstat.cpu.all.irq%
0.04   -0.0   0.04 ± 2%   -0.0   0.04 ± 3%   mpstat.cpu.all.soft%
91.38   +1.0   92.36   +1.0   92.37   mpstat.cpu.all.sys%
4.57   -0.5   4.09   -0.5   4.09   mpstat.cpu.all.usr%
0.00   -100.0%   0.00   -100.0%   0.00   numa-numastat.node0.interleave_hit
1085826 ± 28%   -30.4%   755552 ± 18%   -31.8%   740726 ± 22%   numa-numastat.node0.local_node
1147403 ± 27%   -29.2%   811989 ± 19%   -27.8%   828444 ± 17%   numa-numastat.node0.numa_hit
61576 ± 72%   -8.3%   56440 ± 50%   +42.5%   87718 ± 41%   numa-numastat.node0.other_node
0.00   -100.0%   0.00   -100.0%   0.00   numa-numastat.node1.interleave_hit
1340399 ± 21%   +2.6%   1375519 ± 13%   +4.1%   1395446 ± 16%   numa-numastat.node1.local_node
1413908 ± 22%   +2.8%   1454068 ± 13%   +2.0%   1442762 ± 14%   numa-numastat.node1.numa_hit
73509 ± 60%   +6.9%   78548 ± 36%   -35.6%   47315 ± 76%   numa-numastat.node1.other_node
3.17 ± 11%   -5.3%   3.00   -5.3%   3.00   vmstat.cpu.id
90.83   +1.1%   91.83   +1.3%   92.00   vmstat.cpu.sy
4.00   -4.2%   3.83 ± 9%   -10.0%   3.60 ± 13%   vmstat.cpu.us
0.00   -100.0%   0.00   -100.0%   0.00   vmstat.io.bi
4.00   +0.0%   4.00   +0.0%   4.00   vmstat.memory.buff
5537857 ± 17%   -13.9%   4769628 ± 7%   -13.4%   4794497 ± 12%   vmstat.memory.cache
1.229e+08   +0.8%   1.24e+08   +0.8%   1.24e+08   vmstat.memory.free
1703   +4.6%   1781   +4.2%   1774   vmstat.procs.r
3116201   -8.8%   2842098   -8.9%   2837431   vmstat.system.cs
545942   -10.1%   490601   -10.2%   490063   vmstat.system.in
135.84   +5.9%   143.80   +6.3%   144.38   time.elapsed_time
135.84   +5.9%   143.80   +6.3%   144.38   time.elapsed_time.max
96130402   -6.0%   90340310   -5.7%   90612319   time.involuntary_context_switches
9128 ± 3%   -6.5%   8538 ± 5%   -7.4%   8455 ± 5%   time.major_page_faults
2048   +0.0%   2048   +0.0%   2048   time.maximum_resident_set_size
533978   +1.6%   542323   +1.0%   539552   time.minor_page_faults
4096   +0.0%   4096   +0.0%   4096   time.page_size
12350   +0.6%   12424   +0.6%   12429   time.percent_of_cpu_this_job_got
16030   +7.0%   17152   +7.5%   17226   time.system_time
747.92   -4.3%   715.77   -3.7%   720.55   time.user_time
3.329e+08   -3.1%   3.227e+08   -2.7%   3.24e+08   time.voluntary_context_switches
455347   -5.9%   428458   -6.4%   426221   hackbench.throughput
447699   -5.7%   422187   -6.1%   420499   hackbench.throughput_avg
455347   -5.9%   428458   -6.4%   426221   hackbench.throughput_best
442776   -6.2%   415425   -6.5%   414117   hackbench.throughput_worst
135.84   +5.9%   143.80   +6.3%   144.38   hackbench.time.elapsed_time
135.84   +5.9%   143.80   +6.3%   144.38   hackbench.time.elapsed_time.max
96130402   -6.0%   90340310   -5.7%   90612319   hackbench.time.involuntary_context_switches
9128 ± 3%   -6.5%   8538 ± 5%   -7.4%   8455 ± 5%   hackbench.time.major_page_faults
2048   +0.0%   2048   +0.0%   2048   hackbench.time.maximum_resident_set_size
533978   +1.6%   542323   +1.0%   539552   hackbench.time.minor_page_faults
4096   +0.0%   4096   +0.0%   4096   hackbench.time.page_size
12350   +0.6%   12424   +0.6%   12429   hackbench.time.percent_of_cpu_this_job_got
16030   +7.0%   17152   +7.5%   17226   hackbench.time.system_time
747.92   -4.3%   715.77   -3.7%   720.55   hackbench.time.user_time
3.329e+08   -3.1%   3.227e+08   -2.7%   3.24e+08   hackbench.time.voluntary_context_switches
3145   -1.2%   3106   -1.2%   3108   turbostat.Avg_MHz
97.44   +0.3   97.79   +0.4   97.89   turbostat.Busy%
3233   -1.7%   3178   -1.7%   3178   turbostat.Bzy_MHz
1505999 ± 7%   +1.9%   1534366 ± 6%   +7.8%   1624128 ± 5%   turbostat.C1
0.06 ± 8%   -0.0   0.05 ± 7%   -0.0   0.06 ± 8%   turbostat.C1%
2100474 ± 9%   -16.9%   1746544 ± 17%   -26.8%   1537922 ± 7%   turbostat.C1E
0.44 ± 9%   -0.1   0.37 ± 13%   -0.1   0.33 ± 5%   turbostat.C1E%
367921 ± 8%   -3.5%   354919 ± 3%   -6.6%   343515 ± 3%   turbostat.C6
2.10 ± 10%   -0.3   1.84 ± 2%   -0.3   1.76 ± 3%   turbostat.C6%
0.68 ± 8%   -16.5%   0.56 ± 8%   -22.4%   0.52 ± 5%   turbostat.CPU%c1
1.88 ± 11%   -12.4%   1.65 ± 2%   -15.7%   1.59 ± 3%   turbostat.CPU%c6
77.00 ± 2%   -1.7%   75.67 ± 2%   -1.8%   75.60 ± 2%   turbostat.CoreTmp
0.20   -4.2%   0.19   -5.0%   0.19   turbostat.IPC
75882286   -5.2%   71943143   -4.8%   72233496   turbostat.IRQ
113.11   +12.9   125.98   +12.3   125.45   turbostat.PKG_%
135641 ± 21%   +30.5%   177014 ± 5%   +21.0%   164086 ± 17%   turbostat.POLL
77.17   -1.9%   75.67   -1.5%   76.00 ± 2%   turbostat.PkgTmp
494.12   +0.2%   495.33   +0.3%   495.45   turbostat.PkgWatt
190.13   -1.3%   187.64   +10.1%   209.34   turbostat.RAMWatt
0.00   -100.0%   0.00   +7.7e+104%   768.00   turbostat.SMI
2595   +0.1%   2598   +0.1%   2598   turbostat.TSC_MHz
203822 ± 60%   +198.6%   608701 ± 59%   +159.4%   528680 ± 65%   meminfo.Active
203699 ± 60%   +198.8%   608573 ± 59%   +159.5%   528552 ± 65%   meminfo.Active(anon)
122.67 ± 6%   +4.3%   128.00   +4.3%   128.00   meminfo.Active(file)
129988 ± 4%   +1.1%   131399 ± 7%   +1.7%   132207 ± 5%   meminfo.AnonHugePages
732895 ± 6%   -13.2%   636185 ± 7%   -12.6%   640678 ± 8%   meminfo.AnonPages
4.00   +0.0%   4.00   +0.0%   4.00   meminfo.Buffers
5381820 ± 17%   -14.1%   4624610 ± 8%   -13.7%   4646965 ± 12%   meminfo.Cached
65831196   +0.0%   65831196   +0.0%   65831196   meminfo.CommitLimit
5362198 ± 18%   -16.1%   4499421 ± 9%   -15.5%   4530443 ± 14%   meminfo.Committed_AS
1.183e+09   +0.1%   1.184e+09   +0.1%   1.183e+09   meminfo.DirectMap1G
9787415 ± 8%   -11.3%   8685935 ± 16%   -7.2%   9087064 ± 9%   meminfo.DirectMap2M
548191 ± 27%   -22.2%   426331 ± 18%   -18.9%   444633 ± 11%   meminfo.DirectMap4k
2048   +0.0%   2048   +0.0%   2048   meminfo.Hugepagesize
3239938 ± 32%   -38.8%   1981395 ± 38%   -35.5%   2088664 ± 46%   meminfo.Inactive
3239758 ± 32%   -38.8%   1981215 ± 38%   -35.5%   2088484 ± 46%   meminfo.Inactive(anon)
179.83   -0.3%   179.33   -0.2%   179.40   meminfo.Inactive(file)
144522   -3.0%   140240   -3.0%   140145   meminfo.KReclaimable
100281   +0.8%   101035   +1.2%   101437   meminfo.KernelStack
1431490 ± 19%   -37.0%   902508 ± 44%   -33.2%   956103 ± 53%   meminfo.Mapped
1.224e+08   +0.8%   1.234e+08   +0.8%   1.234e+08   meminfo.MemAvailable
1.23e+08   +0.8%   1.24e+08   +0.8%   1.24e+08   meminfo.MemFree
1.317e+08   +0.0%   1.317e+08   +0.0%   1.317e+08   meminfo.MemTotal
8711265 ± 11%   -11.9%   7677995 ± 6%   -11.5%   7709567 ± 9%   meminfo.Memused
163279   +3.0%   168167 ± 3%   +0.4%   163900 ± 2%   meminfo.PageTables
90680   -0.6%   90152   -0.4%   90300   meminfo.Percpu
144522 -3.0% 140240 -3.0% 140145 meminfo.SReclaimable 631442 -0.3% 629626 -0.3% 629408 meminfo.SUnreclaim 2711151 ± 35% -27.9% 1953938 ± 19% -27.1% 1976247 ± 29% meminfo.Shmem 775965 -0.8% 769867 -0.8% 769554 meminfo.Slab 2670369 -0.0% 2670368 +0.0% 2670412 meminfo.Unevictable 1.374e+13 +0.0% 1.374e+13 +0.0% 1.374e+13 meminfo.VmallocTotal 240469 +0.3% 241248 +0.5% 241622 meminfo.VmallocUsed 8868864 ± 11% -9.8% 8003021 ± 4% -9.4% 8031013 ± 7% meminfo.max_used_kB 60623 ± 25% -2.1% 59353 ±125% +2.7% 62287 ±113% numa-meminfo.node0.Active 60540 ± 25% -2.1% 59289 ±125% +2.7% 62191 ±113% numa-meminfo.node0.Active(anon) 82.67 ± 71% -22.6% 64.00 ±100% +16.1% 96.00 ± 51% numa-meminfo.node0.Active(file) 45512 ± 55% +35.2% 61514 ± 63% +10.9% 50486 ± 64% numa-meminfo.node0.AnonHugePages 347594 ± 18% -3.2% 336335 ± 20% -10.0% 312751 ± 27% numa-meminfo.node0.AnonPages 562165 ± 18% -2.8% 546504 ± 14% -7.8% 518572 ± 11% numa-meminfo.node0.AnonPages.max 2860089 ± 57% -35.3% 1851652 ± 58% -21.4% 2247443 ± 37% numa-meminfo.node0.FilePages 1360379 ± 72% -71.3% 389808 ± 23% -71.3% 389917 ± 31% numa-meminfo.node0.Inactive 1360266 ± 72% -71.3% 389718 ± 23% -71.3% 389802 ± 31% numa-meminfo.node0.Inactive(anon) 113.33 ± 71% -21.0% 89.50 ±100% +0.8% 114.20 ± 66% numa-meminfo.node0.Inactive(file) 73362 ± 31% -11.2% 65115 ± 38% +3.0% 75576 ± 26% numa-meminfo.node0.KReclaimable 56758 ± 24% -10.3% 50908 ± 49% -3.7% 54664 ± 57% numa-meminfo.node0.KernelStack 402969 ± 74% -57.7% 170527 ± 31% -55.8% 177978 ± 31% numa-meminfo.node0.Mapped 61175514 ± 2% +1.9% 62343890 ± 2% +1.3% 61957100 numa-meminfo.node0.MemFree 65658096 +0.0% 65658096 +0.0% 65658096 numa-meminfo.node0.MemTotal 4482580 ± 36% -26.1% 3314204 ± 39% -17.4% 3700994 ± 25% numa-meminfo.node0.MemUsed 94097 ± 30% -12.6% 82238 ± 61% -6.9% 87559 ± 70% numa-meminfo.node0.PageTables 73362 ± 31% -11.2% 65115 ± 38% +3.0% 75576 ± 26% numa-meminfo.node0.SReclaimable 335026 ± 9% -6.1% 314466 ± 23% -12.5% 293093 ± 25% numa-meminfo.node0.SUnreclaim 1073509 ± 95% -89.5% 113005 ±102% -87.0% 139594 ±116% numa-meminfo.node0.Shmem 408389 ± 8% -7.1% 379582 ± 24% -9.7% 368670 ± 23% numa-meminfo.node0.Slab 1786383 ± 65% -2.7% 1738492 ± 66% +18.0% 2107638 ± 47% numa-meminfo.node0.Unevictable 140001 ± 92% +293.9% 551466 ± 63% +232.9% 466032 ± 69% numa-meminfo.node1.Active 139961 ± 92% +294.0% 551402 ± 63% +232.9% 466000 ± 69% numa-meminfo.node1.Active(anon) 40.00 ±141% +60.0% 64.00 ±100% -20.0% 32.00 ±154% numa-meminfo.node1.Active(file) 84393 ± 31% -17.1% 69966 ± 52% -3.2% 81728 ± 42% numa-meminfo.node1.AnonHugePages 385861 ± 17% -22.2% 300225 ± 18% -15.1% 327615 ± 15% numa-meminfo.node1.AnonPages 602132 ± 20% -26.7% 441431 ± 14% -24.1% 457230 ± 21% numa-meminfo.node1.AnonPages.max 2518083 ± 53% +10.2% 2774346 ± 32% -4.8% 2397835 ± 41% numa-meminfo.node1.FilePages 1879643 ± 44% -15.3% 1591222 ± 46% -9.7% 1697090 ± 54% numa-meminfo.node1.Inactive 1879576 ± 44% -15.3% 1591132 ± 46% -9.7% 1697025 ± 54% numa-meminfo.node1.Inactive(anon) 66.50 ±121% +35.1% 89.83 ±100% -2.0% 65.20 ±115% numa-meminfo.node1.Inactive(file) 71159 ± 31% +5.6% 75179 ± 32% -9.2% 64617 ± 30% numa-meminfo.node1.KReclaimable 43384 ± 32% +15.6% 50135 ± 50% +8.2% 46938 ± 66% numa-meminfo.node1.KernelStack 1030705 ± 33% -29.1% 730755 ± 47% -24.6% 777182 ± 58% numa-meminfo.node1.Mapped 61778303 ± 2% -0.2% 61639504 +0.4% 61997485 numa-meminfo.node1.MemFree 66004296 +0.0% 66004296 +0.0% 66004296 numa-meminfo.node1.MemTotal 4225992 ± 31% +3.3% 4364790 ± 24% -5.2% 4006809 ± 26% numa-meminfo.node1.MemUsed 68727 ± 43% +24.9% 
85871 ± 62% +11.5% 76658 ± 83% numa-meminfo.node1.PageTables 71159 ± 31% +5.6% 75179 ± 32% -9.2% 64617 ± 30% numa-meminfo.node1.SReclaimable 295876 ± 11% +6.2% 314174 ± 23% +13.8% 336703 ± 21% numa-meminfo.node1.SUnreclaim 1633990 ± 51% +12.7% 1842316 ± 24% +12.3% 1834963 ± 34% numa-meminfo.node1.Shmem 367037 ± 10% +6.1% 389355 ± 23% +9.3% 401321 ± 21% numa-meminfo.node1.Slab 883984 ±133% +5.4% 931875 ±123% -36.3% 562774 ±177% numa-meminfo.node1.Unevictable 15178 ± 25% -1.6% 14941 ±126% +2.9% 15623 ±113% numa-vmstat.node0.nr_active_anon 20.67 ± 71% -22.6% 16.00 ±100% +16.1% 24.00 ± 51% numa-vmstat.node0.nr_active_file 86797 ± 18% -3.2% 84015 ± 20% -10.0% 78094 ± 27% numa-vmstat.node0.nr_anon_pages 21.67 ± 56% +36.2% 29.50 ± 64% +11.7% 24.20 ± 65% numa-vmstat.node0.nr_anon_transparent_hugepages 715313 ± 57% -35.3% 463017 ± 58% -21.4% 562039 ± 37% numa-vmstat.node0.nr_file_pages 15293765 ± 2% +1.9% 15585702 ± 2% +1.3% 15489544 numa-vmstat.node0.nr_free_pages 340214 ± 72% -71.4% 97344 ± 23% -71.4% 97460 ± 31% numa-vmstat.node0.nr_inactive_anon 28.33 ± 71% -22.4% 22.00 ±100% +0.2% 28.40 ± 65% numa-vmstat.node0.nr_inactive_file 0.00 -100.0% 0.00 -100.0% 0.00 numa-vmstat.node0.nr_isolated_anon 56711 ± 24% -9.9% 51083 ± 49% -4.0% 54431 ± 57% numa-vmstat.node0.nr_kernel_stack 101165 ± 74% -57.9% 42574 ± 31% -55.9% 44622 ± 31% numa-vmstat.node0.nr_mapped 23535 ± 30% -12.3% 20638 ± 60% -7.5% 21771 ± 70% numa-vmstat.node0.nr_page_table_pages 268668 ± 95% -89.4% 28355 ±102% -86.9% 35077 ±116% numa-vmstat.node0.nr_shmem 18343 ± 31% -11.2% 16281 ± 38% +3.0% 18888 ± 26% numa-vmstat.node0.nr_slab_reclaimable 83852 ± 9% -6.1% 78700 ± 23% -12.9% 73007 ± 25% numa-vmstat.node0.nr_slab_unreclaimable 446595 ± 65% -2.7% 434622 ± 66% +18.0% 526908 ± 47% numa-vmstat.node0.nr_unevictable 15178 ± 25% -1.6% 14941 ±126% +2.9% 15623 ±113% numa-vmstat.node0.nr_zone_active_anon 20.67 ± 71% -22.6% 16.00 ±100% +16.1% 24.00 ± 51% numa-vmstat.node0.nr_zone_active_file 340213 ± 72% -71.4% 97343 ± 23% -71.4% 97460 ± 31% numa-vmstat.node0.nr_zone_inactive_anon 28.33 ± 71% -22.4% 22.00 ±100% +0.2% 28.40 ± 65% numa-vmstat.node0.nr_zone_inactive_file 446595 ± 65% -2.7% 434622 ± 66% +18.0% 526908 ± 47% numa-vmstat.node0.nr_zone_unevictable 1146748 ± 27% -29.2% 812051 ± 19% -27.8% 828190 ± 17% numa-vmstat.node0.numa_hit 0.00 -100.0% 0.00 -100.0% 0.00 numa-vmstat.node0.numa_interleave 1085171 ± 28% -30.4% 755614 ± 18% -31.8% 740472 ± 22% numa-vmstat.node0.numa_local 61576 ± 72% -8.3% 56440 ± 50% +42.5% 87718 ± 41% numa-vmstat.node0.numa_other 35413 ± 93% +290.3% 138215 ± 63% +230.2% 116928 ± 69% numa-vmstat.node1.nr_active_anon 10.00 ±141% +60.0% 16.00 ±100% -20.0% 8.00 ±154% numa-vmstat.node1.nr_active_file 96435 ± 16% -22.3% 74970 ± 18% -15.1% 81903 ± 15% numa-vmstat.node1.nr_anon_pages 40.50 ± 32% -16.5% 33.83 ± 52% -2.7% 39.40 ± 43% numa-vmstat.node1.nr_anon_transparent_hugepages 629218 ± 53% +10.2% 693354 ± 32% -4.5% 600864 ± 41% numa-vmstat.node1.nr_file_pages 15444792 ± 2% -0.2% 15410531 +0.3% 15498432 numa-vmstat.node1.nr_free_pages 469139 ± 44% -15.4% 397102 ± 46% -9.4% 425233 ± 55% numa-vmstat.node1.nr_inactive_anon 16.50 ±122% +35.4% 22.33 ±100% -3.0% 16.00 ±117% numa-vmstat.node1.nr_inactive_file 0.00 -100.0% 0.00 +4e+101% 0.40 ±200% numa-vmstat.node1.nr_isolated_anon 43466 ± 32% +15.0% 49968 ± 50% +7.4% 46698 ± 66% numa-vmstat.node1.nr_kernel_stack 257002 ± 33% -29.0% 182356 ± 48% -24.3% 194531 ± 58% numa-vmstat.node1.nr_mapped 17235 ± 43% +24.1% 21392 ± 62% +10.5% 19038 ± 83% numa-vmstat.node1.nr_page_table_pages 408195 
± 51% +12.8% 460346 ± 24% +12.7% 460146 ± 34% numa-vmstat.node1.nr_shmem 17777 ± 31% +5.6% 18781 ± 32% -9.1% 16152 ± 30% numa-vmstat.node1.nr_slab_reclaimable 74091 ± 11% +5.9% 78480 ± 23% +13.2% 83885 ± 21% numa-vmstat.node1.nr_slab_unreclaimable 220995 ±133% +5.4% 232968 ±123% -36.3% 140693 ±177% numa-vmstat.node1.nr_unevictable 35413 ± 93% +290.3% 138214 ± 63% +230.2% 116928 ± 69% numa-vmstat.node1.nr_zone_active_anon 10.00 ±141% +60.0% 16.00 ±100% -20.0% 8.00 ±154% numa-vmstat.node1.nr_zone_active_file 469139 ± 44% -15.4% 397102 ± 46% -9.4% 425232 ± 55% numa-vmstat.node1.nr_zone_inactive_anon 16.50 ±122% +35.4% 22.33 ±100% -3.0% 16.00 ±117% numa-vmstat.node1.nr_zone_inactive_file 220995 ±133% +5.4% 232968 ±123% -36.3% 140693 ±177% numa-vmstat.node1.nr_zone_unevictable 1413178 ± 22% +2.9% 1454049 ± 13% +2.1% 1442346 ± 14% numa-vmstat.node1.numa_hit 0.00 -100.0% 0.00 -100.0% 0.00 numa-vmstat.node1.numa_interleave 1339669 ± 21% +2.7% 1375501 ± 13% +4.1% 1395031 ± 17% numa-vmstat.node1.numa_local 73509 ± 60% +6.9% 78548 ± 36% -35.6% 47315 ± 76% numa-vmstat.node1.numa_other 247.83 ± 30% -23.3% 190.17 ± 20% -20.8% 196.40 ± 12% proc-vmstat.direct_map_level2_splits 2.17 ± 31% +7.7% 2.33 ± 40% -7.7% 2.00 ± 31% proc-vmstat.direct_map_level3_splits 51157 ± 60% +197.2% 152043 ± 59% +159.9% 132968 ± 65% proc-vmstat.nr_active_anon 30.67 ± 6% +4.3% 32.00 +4.3% 32.00 proc-vmstat.nr_active_file 183216 ± 6% -13.1% 159176 ± 7% -12.7% 160025 ± 8% proc-vmstat.nr_anon_pages 63.17 ± 3% +0.5% 63.50 ± 7% +1.6% 64.20 ± 5% proc-vmstat.nr_anon_transparent_hugepages 3053894 +0.8% 3079629 +0.8% 3078887 proc-vmstat.nr_dirty_background_threshold 6115256 +0.8% 6166789 +0.8% 6165304 proc-vmstat.nr_dirty_threshold 1345673 ± 17% -14.1% 1156027 ± 8% -13.7% 1161982 ± 12% proc-vmstat.nr_file_pages 30737847 +0.8% 30995577 +0.8% 30988148 proc-vmstat.nr_free_pages 809915 ± 32% -38.8% 495403 ± 38% -35.6% 521385 ± 46% proc-vmstat.nr_inactive_anon 44.83 -1.1% 44.33 -1.0% 44.40 proc-vmstat.nr_inactive_file 0.67 ±141% +50.0% 1.00 ±141% -100.0% 0.00 proc-vmstat.nr_isolated_anon 100262 +0.8% 101078 +1.3% 101605 proc-vmstat.nr_kernel_stack 358287 ± 19% -36.9% 225932 ± 44% -33.5% 238169 ± 53% proc-vmstat.nr_mapped 40823 +3.0% 42029 ± 3% +0.5% 41046 proc-vmstat.nr_page_table_pages 678005 ± 35% -28.0% 488357 ± 19% -27.1% 494301 ± 30% proc-vmstat.nr_shmem 36123 -2.9% 35063 -3.0% 35046 proc-vmstat.nr_slab_reclaimable 157786 -0.4% 157232 -0.3% 157330 proc-vmstat.nr_slab_unreclaimable 667592 -0.0% 667591 +0.0% 667602 proc-vmstat.nr_unevictable 51157 ± 60% +197.2% 152043 ± 59% +159.9% 132968 ± 65% proc-vmstat.nr_zone_active_anon 30.67 ± 6% +4.3% 32.00 +4.3% 32.00 proc-vmstat.nr_zone_active_file 809915 ± 32% -38.8% 495403 ± 38% -35.6% 521385 ± 46% proc-vmstat.nr_zone_inactive_anon 44.83 -1.1% 44.33 -1.0% 44.40 proc-vmstat.nr_zone_inactive_file 667592 -0.0% 667591 +0.0% 667602 proc-vmstat.nr_zone_unevictable 245710 ± 20% -22.5% 190365 ± 20% -24.6% 185160 ± 14% proc-vmstat.numa_hint_faults 173866 ± 13% -24.8% 130734 ± 36% -21.2% 136965 ± 20% proc-vmstat.numa_hint_faults_local 2564578 ± 14% -11.5% 2268893 ± 4% -11.5% 2270676 ± 9% proc-vmstat.numa_hit 52.00 ±103% -57.4% 22.17 ± 35% +5.8% 55.00 ±122% proc-vmstat.numa_huge_pte_updates 0.00 -100.0% 0.00 -100.0% 0.00 proc-vmstat.numa_interleave 2429492 ± 14% -12.2% 2133272 ± 4% -12.1% 2135643 ± 9% proc-vmstat.numa_local 135086 -0.1% 134989 -0.0% 135033 proc-vmstat.numa_other 42910 ± 55% -41.8% 24988 ± 29% -46.9% 22803 ± 55% proc-vmstat.numa_pages_migrated 481291 ± 12% -15.2% 408307 ± 11% -11.5% 425774 
± 8% proc-vmstat.numa_pte_updates 168803 ± 84% +132.6% 392645 ± 59% +92.7% 325216 ± 67% proc-vmstat.pgactivate 3197394 ± 11% -10.5% 2860892 ± 4% -10.4% 2865154 ± 6% proc-vmstat.pgalloc_normal 1648445 ± 6% -7.0% 1533339 ± 2% -7.8% 1520590 ± 2% proc-vmstat.pgfault 2016126 ± 3% +2.2% 2059688 ± 5% +0.7% 2029790 ± 5% proc-vmstat.pgfree 42910 ± 55% -41.8% 24988 ± 29% -46.9% 22803 ± 55% proc-vmstat.pgmigrate_success 0.00 -100.0% 0.00 -100.0% 0.00 proc-vmstat.pgpgin 113635 ± 23% -16.4% 95027 ± 5% -18.4% 92750 ± 4% proc-vmstat.pgreuse 92.83 ± 3% -2.0% 91.00 ± 10% +0.6% 93.40 ± 6% proc-vmstat.thp_collapse_alloc 0.00 +1.7e+101% 0.17 ±223% +6e+101% 0.60 ±133% proc-vmstat.thp_deferred_split_page 24.00 +2.1% 24.50 ± 3% +2.5% 24.60 ± 3% proc-vmstat.thp_fault_alloc 11.17 ± 68% -32.8% 7.50 ± 62% -8.7% 10.20 ±122% proc-vmstat.thp_migration_success 0.00 +1.7e+101% 0.17 ±223% +6e+101% 0.60 ±133% proc-vmstat.thp_split_pmd 0.00 -100.0% 0.00 -100.0% 0.00 proc-vmstat.thp_zero_page_alloc 21.17 -0.8% 21.00 +0.2% 21.20 proc-vmstat.unevictable_pgs_culled 0.00 -100.0% 0.00 -100.0% 0.00 proc-vmstat.unevictable_pgs_rescued 1127680 +4.7% 1180672 +4.9% 1183027 proc-vmstat.unevictable_pgs_scanned 7.62 +0.2% 7.63 -0.1% 7.61 perf-stat.i.MPKI 4.48e+10 -3.4% 4.327e+10 -3.9% 4.305e+10 perf-stat.i.branch-instructions 0.45 +0.0 0.47 +0.0 0.47 perf-stat.i.branch-miss-rate% 1.988e+08 +1.5% 2.017e+08 +1.3% 2.013e+08 perf-stat.i.branch-misses 21.55 -1.2 20.32 -1.2 20.34 perf-stat.i.cache-miss-rate% 3.953e+08 -9.5% 3.578e+08 -10.1% 3.552e+08 perf-stat.i.cache-misses 1.815e+09 -3.8% 1.746e+09 -4.5% 1.733e+09 perf-stat.i.cache-references 3161372 -10.9% 2817844 -10.1% 2842314 perf-stat.i.context-switches 1.69 +2.7% 1.73 +3.2% 1.74 perf-stat.i.cpi 128264 -0.1% 128173 -0.1% 128139 perf-stat.i.cpu-clock 4.023e+11 -1.4% 3.967e+11 -1.4% 3.967e+11 perf-stat.i.cpu-cycles 365627 ± 2% -9.7% 330317 -9.7% 330336 perf-stat.i.cpu-migrations 1139 ± 2% +8.4% 1235 +11.2% 1267 ± 3% perf-stat.i.cycles-between-cache-misses 0.04 ± 16% +0.0 0.04 ± 11% +0.0 0.04 ± 5% perf-stat.i.dTLB-load-miss-rate% 24803278 ± 15% -2.3% 24226955 ± 11% +5.0% 26048000 ± 5% perf-stat.i.dTLB-load-misses 6.569e+10 -4.0% 6.305e+10 -4.5% 6.276e+10 perf-stat.i.dTLB-loads 0.01 ± 37% -0.0 0.01 ± 20% +0.0 0.01 ± 20% perf-stat.i.dTLB-store-miss-rate% 4003244 ± 37% -15.3% 3389687 ± 21% +4.3% 4176789 ± 20% perf-stat.i.dTLB-store-misses 4.057e+10 -5.3% 3.841e+10 -5.8% 3.822e+10 perf-stat.i.dTLB-stores 2.408e+11 -3.9% 2.314e+11 -4.3% 2.303e+11 perf-stat.i.instructions 0.60 -2.6% 0.58 -3.1% 0.58 perf-stat.i.ipc 78.56 ± 3% -14.6% 67.11 ± 5% -15.8% 66.16 ± 6% perf-stat.i.major-faults 3.14 -1.4% 3.10 -1.4% 3.10 perf-stat.i.metric.GHz 1598 -10.7% 1427 -10.6% 1429 perf-stat.i.metric.K/sec 1194 -4.2% 1144 -4.6% 1138 perf-stat.i.metric.M/sec 10973 ± 7% -15.5% 9275 ± 3% -16.4% 9178 ± 2% perf-stat.i.minor-faults 26.75 +0.0 26.78 -0.1 26.65 perf-stat.i.node-load-miss-rate% 30953814 -4.8% 29470176 -6.2% 29042619 perf-stat.i.node-load-misses 94854027 -8.2% 87086579 -8.6% 86690715 perf-stat.i.node-loads 10.12 +1.0 11.14 ± 2% +0.9 11.05 perf-stat.i.node-store-miss-rate% 6830990 -6.0% 6417970 -7.3% 6333380 perf-stat.i.node-store-misses 67140443 -17.8% 55222136 -18.4% 54762093 perf-stat.i.node-stores 11052 ± 7% -15.5% 9343 ± 3% -16.4% 9244 ± 2% perf-stat.i.page-faults 128264 -0.1% 128173 -0.1% 128139 perf-stat.i.task-clock 7.54 -0.1% 7.53 -0.4% 7.51 perf-stat.overall.MPKI 0.44 +0.0 0.47 +0.0 0.47 perf-stat.overall.branch-miss-rate% 21.83 -1.3 20.52 -1.3 20.52 perf-stat.overall.cache-miss-rate% 1.67 +2.5% 
1.71 +3.0% 1.72 perf-stat.overall.cpi 1015 +9.2% 1109 +10.1% 1117 perf-stat.overall.cycles-between-cache-misses 0.04 ± 16% +0.0 0.04 ± 11% +0.0 0.04 ± 5% perf-stat.overall.dTLB-load-miss-rate% 0.01 ± 38% -0.0 0.01 ± 20% +0.0 0.01 ± 19% perf-stat.overall.dTLB-store-miss-rate% 0.60 -2.5% 0.58 -2.9% 0.58 perf-stat.overall.ipc 24.35 +0.8 25.13 +0.6 24.97 perf-stat.overall.node-load-miss-rate% 9.09 +1.2 10.31 +1.2 10.28 perf-stat.overall.node-store-miss-rate% 4.443e+10 -3.3% 4.294e+10 -3.7% 4.276e+10 perf-stat.ps.branch-instructions 1.966e+08 +1.6% 1.998e+08 +1.5% 1.996e+08 perf-stat.ps.branch-misses 3.933e+08 -9.7% 3.55e+08 -10.3% 3.526e+08 perf-stat.ps.cache-misses 1.801e+09 -4.0% 1.73e+09 -4.6% 1.718e+09 perf-stat.ps.cache-references 3104212 -10.4% 2781030 -9.6% 2804668 perf-stat.ps.context-switches 127050 +0.0% 127068 +0.0% 127100 perf-stat.ps.cpu-clock 3.994e+11 -1.4% 3.939e+11 -1.3% 3.941e+11 perf-stat.ps.cpu-cycles 354970 -8.9% 323401 ± 2% -8.9% 323414 perf-stat.ps.cpu-migrations 24565631 ± 16% -1.9% 24093755 ± 11% +5.7% 25970968 ± 5% perf-stat.ps.dTLB-load-misses 6.521e+10 -4.0% 6.258e+10 -4.4% 6.234e+10 perf-stat.ps.dTLB-loads 4047965 ± 38% -16.3% 3389310 ± 20% +3.5% 4188164 ± 19% perf-stat.ps.dTLB-store-misses 4.029e+10 -5.4% 3.812e+10 -5.8% 3.796e+10 perf-stat.ps.dTLB-stores 2.389e+11 -3.8% 2.297e+11 -4.2% 2.288e+11 perf-stat.ps.instructions 66.62 ± 3% -12.0% 58.62 ± 5% -13.1% 57.88 ± 5% perf-stat.ps.major-faults 10118 ± 8% -13.6% 8745 ± 2% -14.4% 8664 ± 2% perf-stat.ps.minor-faults 30547504 -4.7% 29097293 -6.0% 28720714 perf-stat.ps.node-load-misses 94908109 -8.6% 86722788 -9.1% 86307398 perf-stat.ps.node-loads 6660116 -5.6% 6290369 -6.7% 6216850 perf-stat.ps.node-store-misses 66647480 -17.9% 54727405 -18.6% 54278164 perf-stat.ps.node-stores 10184 ± 8% -13.6% 8803 ± 2% -14.4% 8722 ± 2% perf-stat.ps.page-faults 127050 +0.0% 127068 +0.0% 127100 perf-stat.ps.task-clock 3.261e+13 +1.6% 3.312e+13 +1.7% 3.315e+13 perf-stat.total.instructions 18473 ±100% +71.2% 31632 ± 44% +103.5% 37589 ± 2% sched_debug.cfs_rq:/.MIN_vruntime.avg 2364639 ±100% +71.2% 4048954 ± 44% +103.5% 4811449 ± 2% sched_debug.cfs_rq:/.MIN_vruntime.max 0.00 +0.0% 0.00 +0.0% 0.00 sched_debug.cfs_rq:/.MIN_vruntime.min 208188 ±100% +71.2% 356479 ± 44% +103.5% 423611 ± 2% sched_debug.cfs_rq:/.MIN_vruntime.stddev 9.49 ± 4% +11.3% 10.57 ± 6% +8.8% 10.33 ± 5% sched_debug.cfs_rq:/.h_nr_running.avg 26.67 ± 5% +7.1% 28.56 ± 5% +4.0% 27.73 ± 2% sched_debug.cfs_rq:/.h_nr_running.max 0.28 ± 44% +80.0% 0.50 ± 50% +20.0% 0.33 ± 63% sched_debug.cfs_rq:/.h_nr_running.min 6.37 ± 4% +11.4% 7.10 ± 6% +7.4% 6.84 ± 3% sched_debug.cfs_rq:/.h_nr_running.stddev 10612 ± 17% +14.4% 12144 ± 10% +23.4% 13096 ± 11% sched_debug.cfs_rq:/.load.avg 367702 ± 52% +61.0% 591934 ± 27% +92.5% 707712 ± 30% sched_debug.cfs_rq:/.load.max 469.39 ±108% +114.0% 1004 ± 60% +16.4% 546.40 ± 69% sched_debug.cfs_rq:/.load.min 35751 ± 47% +47.6% 52755 ± 26% +75.8% 62847 ± 30% sched_debug.cfs_rq:/.load.stddev 69.32 ±127% +2.3% 70.92 ±121% +140.5% 166.72 ±157% sched_debug.cfs_rq:/.load_avg.avg 5328 ±188% +3.2% 5498 ±198% +29.2% 6882 ±170% sched_debug.cfs_rq:/.load_avg.max 1.17 ± 14% +0.0% 1.17 ± 27% +20.0% 1.40 ± 23% sched_debug.cfs_rq:/.load_avg.min 496.64 ±175% +5.3% 522.94 ±180% +94.5% 965.76 ±171% sched_debug.cfs_rq:/.load_avg.stddev 18473 ±100% +71.2% 31632 ± 44% +103.5% 37589 ± 2% sched_debug.cfs_rq:/.max_vruntime.avg 2364639 ±100% +71.2% 4048954 ± 44% +103.5% 4811450 ± 2% sched_debug.cfs_rq:/.max_vruntime.max 0.00 +0.0% 0.00 +0.0% 0.00 
sched_debug.cfs_rq:/.max_vruntime.min 208188 ±100% +71.2% 356479 ± 44% +103.5% 423611 ± 2% sched_debug.cfs_rq:/.max_vruntime.stddev 7226615 +0.5% 7260631 +0.5% 7260689 sched_debug.cfs_rq:/.min_vruntime.avg 9061493 ± 5% -1.7% 8910843 ± 4% -2.6% 8827149 ± 4% sched_debug.cfs_rq:/.min_vruntime.max 6914915 +0.8% 6970885 -0.0% 6912152 sched_debug.cfs_rq:/.min_vruntime.min 250377 ± 10% -6.8% 233268 ± 11% +1.0% 252865 ± 5% sched_debug.cfs_rq:/.min_vruntime.stddev 0.70 +0.8% 0.70 +0.2% 0.70 sched_debug.cfs_rq:/.nr_running.avg 1.06 ± 11% -5.3% 1.00 +13.7% 1.20 ± 13% sched_debug.cfs_rq:/.nr_running.max 0.28 ± 44% +80.0% 0.50 ± 50% +20.0% 0.33 ± 63% sched_debug.cfs_rq:/.nr_running.min 0.14 ± 9% -16.8% 0.12 ± 17% +0.9% 0.14 ± 15% sched_debug.cfs_rq:/.nr_running.stddev 9.71 ± 40% +48.9% 14.46 ± 34% +489.5% 57.24 ±165% sched_debug.cfs_rq:/.removed.load_avg.avg 341.33 +0.0% 341.33 +1713.6% 6190 ±188% sched_debug.cfs_rq:/.removed.load_avg.max 55.31 ± 20% +21.4% 67.14 ± 16% +922.3% 565.42 ±180% sched_debug.cfs_rq:/.removed.load_avg.stddev 3.90 ± 46% +71.3% 6.68 ± 42% +27.8% 4.98 ± 44% sched_debug.cfs_rq:/.removed.runnable_avg.avg 176.44 ± 5% +2.0% 180.06 ± 5% -5.7% 166.33 ± 7% sched_debug.cfs_rq:/.removed.runnable_avg.max 23.27 ± 22% +35.1% 31.44 ± 23% +12.9% 26.28 ± 27% sched_debug.cfs_rq:/.removed.runnable_avg.stddev 3.90 ± 46% +71.3% 6.68 ± 42% +27.8% 4.98 ± 44% sched_debug.cfs_rq:/.removed.util_avg.avg 176.44 ± 5% +2.0% 180.06 ± 5% -5.7% 166.33 ± 7% sched_debug.cfs_rq:/.removed.util_avg.max 23.27 ± 22% +35.0% 31.43 ± 23% +12.9% 26.28 ± 27% sched_debug.cfs_rq:/.removed.util_avg.stddev 9921 ± 3% +10.1% 10923 ± 5% +5.5% 10470 ± 2% sched_debug.cfs_rq:/.runnable_avg.avg 17354 ± 4% +7.5% 18652 ± 9% +10.0% 19087 ± 6% sched_debug.cfs_rq:/.runnable_avg.max 1205 ± 59% +38.8% 1673 ± 44% +52.3% 1836 ± 32% sched_debug.cfs_rq:/.runnable_avg.min 2720 ± 3% +12.9% 3072 ± 7% +10.7% 3012 ± 3% sched_debug.cfs_rq:/.runnable_avg.stddev 0.01 ±223% -100.0% 0.00 +140.0% 0.01 ±122% sched_debug.cfs_rq:/.spread.avg 0.67 ±223% -100.0% 0.00 +140.0% 1.60 ±122% sched_debug.cfs_rq:/.spread.max 0.06 ±223% -100.0% 0.00 +140.0% 0.14 ±122% sched_debug.cfs_rq:/.spread.stddev -802332 -13.3% -695269 -13.3% -695410 sched_debug.cfs_rq:/.spread0.avg 1029531 ± 40% -6.5% 963003 ± 51% -14.6% 879291 ± 33% sched_debug.cfs_rq:/.spread0.max -1116926 -11.3% -991037 -6.4% -1045976 sched_debug.cfs_rq:/.spread0.min 250004 ± 10% -6.2% 234600 ± 11% +0.8% 252106 ± 5% sched_debug.cfs_rq:/.spread0.stddev 746.59 +0.3% 748.85 +0.2% 748.19 sched_debug.cfs_rq:/.util_avg.avg 1526 ± 4% -1.8% 1498 ± 3% +4.0% 1588 ± 4% sched_debug.cfs_rq:/.util_avg.max 118.33 ± 37% +8.7% 128.67 ± 33% +58.1% 187.07 ± 24% sched_debug.cfs_rq:/.util_avg.min 257.79 ± 3% -1.4% 254.31 ± 4% +1.8% 262.31 ± 3% sched_debug.cfs_rq:/.util_avg.stddev 309.08 ± 5% +15.4% 356.69 ± 8% +11.5% 344.70 ± 6% sched_debug.cfs_rq:/.util_est_enqueued.avg 1200 ± 6% +12.4% 1349 ± 9% +11.2% 1334 ± 4% sched_debug.cfs_rq:/.util_est_enqueued.max 2.44 ±143% -52.3% 1.17 ±223% -50.9% 1.20 ±200% sched_debug.cfs_rq:/.util_est_enqueued.min 241.74 ± 5% +16.6% 281.91 ± 6% +13.2% 273.56 ± 3% sched_debug.cfs_rq:/.util_est_enqueued.stddev 428381 ± 3% +1.7% 435830 ± 3% +7.8% 461658 ± 8% sched_debug.cpu.avg_idle.avg 1035072 ± 19% +27.5% 1319661 ± 46% +86.7% 1932056 ± 49% sched_debug.cpu.avg_idle.max 21181 ± 47% +7.6% 22783 ± 58% -1.5% 20855 ± 39% sched_debug.cpu.avg_idle.min 154867 ± 15% +10.2% 170635 ± 28% +66.3% 257520 ± 46% sched_debug.cpu.avg_idle.stddev 105813 ± 6% +4.1% 110153 ± 13% +16.2% 123004 ± 18% 
sched_debug.cpu.clock.avg 106023 ± 6% +4.1% 110345 ± 13% +16.2% 123163 ± 18% sched_debug.cpu.clock.max 105604 ± 6% +4.1% 109916 ± 13% +16.3% 122816 ± 18% sched_debug.cpu.clock.min 121.61 ± 23% +2.5% 124.70 ± 40% -15.0% 103.41 ± 34% sched_debug.cpu.clock.stddev 104601 ± 6% +4.3% 109053 ± 13% +16.1% 121466 ± 18% sched_debug.cpu.clock_task.avg 105076 ± 6% +4.3% 109543 ± 13% +16.3% 122154 ± 18% sched_debug.cpu.clock_task.max 89692 -0.1% 89608 -0.4% 89303 sched_debug.cpu.clock_task.min 1342 ± 43% +30.1% 1745 ± 75% +114.0% 2871 ± 69% sched_debug.cpu.clock_task.stddev 13482 +0.4% 13530 +0.4% 13542 sched_debug.cpu.curr->pid.avg 16770 +0.2% 16805 -0.1% 16760 sched_debug.cpu.curr->pid.max 4947 ± 27% +3.2% 5104 ± 50% -11.2% 4393 ± 46% sched_debug.cpu.curr->pid.min 1805 ± 9% -4.5% 1724 ± 12% -0.1% 1804 ± 13% sched_debug.cpu.curr->pid.stddev 505781 +0.2% 506623 ± 2% +4.4% 528071 ± 5% sched_debug.cpu.max_idle_balance_cost.avg 874225 ± 46% -9.4% 792013 ± 59% +52.6% 1333820 ± 55% sched_debug.cpu.max_idle_balance_cost.max 500000 +0.0% 500000 +0.0% 500000 sched_debug.cpu.max_idle_balance_cost.min 37209 ±106% -3.1% 36056 ±172% +208.1% 114643 ± 99% sched_debug.cpu.max_idle_balance_cost.stddev 4294 +0.0% 4294 +0.0% 4294 sched_debug.cpu.next_balance.avg 4294 +0.0% 4294 +0.0% 4294 sched_debug.cpu.next_balance.max 4294 +0.0% 4294 +0.0% 4294 sched_debug.cpu.next_balance.min 0.00 ± 20% +3.8% 0.00 ± 28% -14.8% 0.00 ± 31% sched_debug.cpu.next_balance.stddev 9.50 ± 4% +11.2% 10.57 ± 5% +8.9% 10.34 ± 5% sched_debug.cpu.nr_running.avg 26.67 ± 5% +7.1% 28.56 ± 5% +4.0% 27.73 ± 2% sched_debug.cpu.nr_running.max 0.44 ± 35% +25.0% 0.56 ± 28% -10.0% 0.40 ± 62% sched_debug.cpu.nr_running.min 6.35 ± 4% +11.6% 7.09 ± 6% +7.7% 6.84 ± 3% sched_debug.cpu.nr_running.stddev 1394250 -6.7% 1300659 -6.6% 1301614 sched_debug.cpu.nr_switches.avg 1643137 ± 2% -7.8% 1515540 ± 2% -6.3% 1539074 sched_debug.cpu.nr_switches.max 1207910 -7.0% 1123538 -6.3% 1132376 sched_debug.cpu.nr_switches.min 87018 ± 17% -15.5% 73537 ± 10% -10.9% 77530 ± 4% sched_debug.cpu.nr_switches.stddev 2.134e+09 ± 6% -3.2% 2.065e+09 ± 3% +2.9% 2.197e+09 ± 7% sched_debug.cpu.nr_uninterruptible.avg 4.295e+09 +0.0% 4.295e+09 +0.0% 4.295e+09 sched_debug.cpu.nr_uninterruptible.max 2.14e+09 +0.0% 2.141e+09 -0.1% 2.138e+09 sched_debug.cpu.nr_uninterruptible.stddev 105600 ± 6% +4.1% 109910 ± 13% +16.3% 122811 ± 18% sched_debug.cpu_clk 996147 +0.0% 996147 +0.0% 996147 sched_debug.dl_rq:.dl_bw->bw.avg 996147 +0.0% 996147 +0.0% 996147 sched_debug.dl_rq:.dl_bw->bw.max 996147 +0.0% 996147 +0.0% 996147 sched_debug.dl_rq:.dl_bw->bw.min 4.295e+09 +0.0% 4.295e+09 +0.0% 4.295e+09 sched_debug.jiffies 104879 ± 6% +4.1% 109186 ± 14% +16.4% 122089 ± 19% sched_debug.ktime 0.00 +0.0% 0.00 +0.0% 0.00 sched_debug.rt_rq:.rt_nr_migratory.avg 0.33 +0.0% 0.33 +0.0% 0.33 sched_debug.rt_rq:.rt_nr_migratory.max 0.03 +0.0% 0.03 +0.0% 0.03 sched_debug.rt_rq:.rt_nr_migratory.stddev 0.00 +0.0% 0.00 +0.0% 0.00 sched_debug.rt_rq:.rt_nr_running.avg 0.33 +0.0% 0.33 +0.0% 0.33 sched_debug.rt_rq:.rt_nr_running.max 0.03 +0.0% 0.03 +0.0% 0.03 sched_debug.rt_rq:.rt_nr_running.stddev 950.00 +0.0% 950.00 +0.0% 950.00 sched_debug.rt_rq:.rt_runtime.avg 950.00 +0.0% 950.00 +0.0% 950.00 sched_debug.rt_rq:.rt_runtime.max 950.00 +0.0% 950.00 +0.0% 950.00 sched_debug.rt_rq:.rt_runtime.min 0.69 ± 98% +63.7% 1.13 ± 51% +88.4% 1.30 ± 61% sched_debug.rt_rq:.rt_time.avg 88.54 ± 98% +63.5% 144.74 ± 51% +88.1% 166.50 ± 62% sched_debug.rt_rq:.rt_time.max 0.00 +1.1e+99% 0.00 ±223% +1.5e+99% 0.00 ±200% sched_debug.rt_rq:.rt_time.min 
7.79 ± 98% +63.5% 12.74 ± 51% +88.1% 14.66 ± 62% sched_debug.rt_rq:.rt_time.stddev 98000 -0.3% 97695 -0.6% 97446 sched_debug.sched_clk 1.00 +0.0% 1.00 +0.0% 1.00 sched_debug.sched_clock_stable() 58611259 +0.0% 58611259 +0.0% 58611259 sched_debug.sysctl_sched.sysctl_sched_features 0.75 +0.0% 0.75 +0.0% 0.75 sched_debug.sysctl_sched.sysctl_sched_idle_min_granularity 24.00 +0.0% 24.00 +0.0% 24.00 sched_debug.sysctl_sched.sysctl_sched_latency 3.00 +0.0% 3.00 +0.0% 3.00 sched_debug.sysctl_sched.sysctl_sched_min_granularity 1.00 +0.0% 1.00 +0.0% 1.00 sched_debug.sysctl_sched.sysctl_sched_tunable_scaling 4.00 +0.0% 4.00 +0.0% 4.00 sched_debug.sysctl_sched.sysctl_sched_wakeup_granularity 2.00 ± 12% -1.9 0.09 ±223% -2.0 0.00 perf-profile.calltrace.cycles-pp.__unfreeze_partials.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter 1.78 ± 14% -1.8 0.00 -1.8 0.00 perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.__unfreeze_partials.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 1.66 ± 15% -1.7 0.00 -1.7 0.00 perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.__unfreeze_partials.unix_stream_read_generic.unix_stream_recvmsg 6.73 -1.6 5.16 ± 4% -1.6 5.09 ± 4% perf-profile.calltrace.cycles-pp.kmem_cache_free.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter 5.06 ± 3% -1.5 3.58 ± 2% -1.5 3.58 ± 2% perf-profile.calltrace.cycles-pp.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg 1.43 ± 12% -1.4 0.00 -1.4 0.00 perf-profile.calltrace.cycles-pp.get_partial_node.___slab_alloc.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags 53.78 -1.4 52.40 -1.4 52.39 perf-profile.calltrace.cycles-pp.__libc_read 5.11 ± 2% -1.3 3.80 ± 6% -1.3 3.76 ± 4% perf-profile.calltrace.cycles-pp._raw_spin_lock.unix_stream_sendmsg.sock_write_iter.vfs_write.ksys_write 9.82 ± 2% -1.2 8.62 ± 3% -1.2 8.62 ± 3% perf-profile.calltrace.cycles-pp.skb_copy_datagram_iter.unix_stream_read_actor.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 9.90 ± 2% -1.2 8.70 ± 3% -1.2 8.70 ± 3% perf-profile.calltrace.cycles-pp.unix_stream_read_actor.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter 9.70 ± 2% -1.2 8.50 ± 3% -1.2 8.50 ± 3% perf-profile.calltrace.cycles-pp.__skb_datagram_iter.skb_copy_datagram_iter.unix_stream_read_actor.unix_stream_read_generic.unix_stream_recvmsg 1.90 ± 9% -1.2 0.71 ± 8% -1.2 0.71 ± 6% perf-profile.calltrace.cycles-pp.___slab_alloc.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 51.59 -1.1 50.46 -1.1 50.47 perf-profile.calltrace.cycles-pp.entry_SYSCALL_64_after_hwframe.__libc_read 51.29 -1.1 50.17 -1.1 50.18 perf-profile.calltrace.cycles-pp.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_read 1.10 ± 15% -1.1 0.00 -1.1 0.00 perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.get_partial_node.___slab_alloc.kmem_cache_alloc_node.__alloc_skb 1.08 ± 16% -1.1 0.00 -1.1 0.00 perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.get_partial_node.___slab_alloc.kmem_cache_alloc_node 49.81 -1.0 48.85 -0.9 48.89 perf-profile.calltrace.cycles-pp.ksys_read.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_read 5.21 ± 2% -0.9 4.26 ± 3% -1.0 4.25 ± 3% perf-profile.calltrace.cycles-pp.skb_release_head_state.consume_skb.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 5.10 ± 2% -0.9 4.16 ± 4% -1.0 4.15 ± 3% 
perf-profile.calltrace.cycles-pp.unix_destruct_scm.skb_release_head_state.consume_skb.unix_stream_read_generic.unix_stream_recvmsg 49.06 -0.9 48.12 -0.9 48.16 perf-profile.calltrace.cycles-pp.vfs_read.ksys_read.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_read 4.93 ± 2% -0.9 3.99 ± 4% -1.0 3.98 ± 3% perf-profile.calltrace.cycles-pp.sock_wfree.unix_destruct_scm.skb_release_head_state.consume_skb.unix_stream_read_generic 5.34 ± 3% -0.9 4.41 ± 2% -0.9 4.45 ± 4% perf-profile.calltrace.cycles-pp._copy_to_iter.__skb_datagram_iter.skb_copy_datagram_iter.unix_stream_read_actor.unix_stream_read_generic 5.04 ± 3% -0.9 4.12 ± 2% -0.9 4.15 ± 4% perf-profile.calltrace.cycles-pp.copyout._copy_to_iter.__skb_datagram_iter.skb_copy_datagram_iter.unix_stream_read_actor 47.09 -0.9 46.20 -0.8 46.25 perf-profile.calltrace.cycles-pp.sock_read_iter.vfs_read.ksys_read.do_syscall_64.entry_SYSCALL_64_after_hwframe 46.41 -0.9 45.54 -0.8 45.59 perf-profile.calltrace.cycles-pp.sock_recvmsg.sock_read_iter.vfs_read.ksys_read.do_syscall_64 45.56 -0.9 44.71 -0.8 44.76 perf-profile.calltrace.cycles-pp.unix_stream_recvmsg.sock_recvmsg.sock_read_iter.vfs_read.ksys_read 45.26 -0.8 44.42 -0.8 44.47 perf-profile.calltrace.cycles-pp.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter.vfs_read 3.34 ± 2% -0.8 2.51 ± 6% -0.8 2.50 ± 4% perf-profile.calltrace.cycles-pp.skb_set_owner_w.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter.vfs_write 3.72 -0.6 3.09 ± 3% -0.6 3.09 ± 3% perf-profile.calltrace.cycles-pp.__kmem_cache_free.skb_release_data.consume_skb.unix_stream_read_generic.unix_stream_recvmsg 0.61 ± 2% -0.5 0.09 ±223% -0.5 0.10 ±200% perf-profile.calltrace.cycles-pp.try_to_wake_up.autoremove_wake_function.__wake_up_common.__wake_up_common_lock.unix_write_space 2.28 ± 3% -0.5 1.81 ± 6% -0.5 1.79 ± 6% perf-profile.calltrace.cycles-pp.skb_queue_tail.unix_stream_sendmsg.sock_write_iter.vfs_write.ksys_write 2.18 ± 3% -0.5 1.72 ± 6% -0.5 1.70 ± 6% perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.skb_queue_tail.unix_stream_sendmsg.sock_write_iter.vfs_write 0.55 ± 2% -0.4 0.19 ±141% -0.3 0.22 ±122% perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.__wake_up_common_lock.sock_def_readable.unix_stream_sendmsg.sock_write_iter 0.53 ± 2% -0.4 0.18 ±141% -0.4 0.11 ±200% perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.__wake_up_common_lock.sock_def_readable.unix_stream_sendmsg 0.52 -0.4 0.17 ±141% -0.4 0.10 ±200% perf-profile.calltrace.cycles-pp.obj_cgroup_charge.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 0.56 ± 3% -0.3 0.22 ±141% -0.3 0.26 ±124% perf-profile.calltrace.cycles-pp.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64.entry_SYSCALL_64_after_hwframe 0.34 ± 70% -0.3 0.00 -0.3 0.00 perf-profile.calltrace.cycles-pp.check_heap_object.__check_object_size.skb_copy_datagram_from_iter.unix_stream_sendmsg.sock_write_iter 0.68 ± 2% -0.3 0.36 ± 70% -0.3 0.42 ± 50% perf-profile.calltrace.cycles-pp.__wake_up_common.__wake_up_common_lock.unix_write_space.sock_wfree.unix_destruct_scm 0.68 ± 2% -0.3 0.36 ± 71% -0.3 0.42 ± 50% perf-profile.calltrace.cycles-pp.autoremove_wake_function.__wake_up_common.__wake_up_common_lock.unix_write_space.sock_wfree 3.98 -0.3 3.72 ± 4% -0.3 3.68 ± 3% perf-profile.calltrace.cycles-pp.__check_object_size.simple_copy_to_iter.__skb_datagram_iter.skb_copy_datagram_iter.unix_stream_read_actor 3.43 -0.3 3.17 ± 5% -0.3 3.14 ± 4% 
perf-profile.calltrace.cycles-pp.check_heap_object.__check_object_size.simple_copy_to_iter.__skb_datagram_iter.skb_copy_datagram_iter 4.15 -0.3 3.89 ± 4% -0.3 3.85 ± 3% perf-profile.calltrace.cycles-pp.simple_copy_to_iter.__skb_datagram_iter.skb_copy_datagram_iter.unix_stream_read_actor.unix_stream_read_generic 0.34 ± 70% -0.3 0.08 ±223% -0.3 0.00 perf-profile.calltrace.cycles-pp.mutex_lock.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter 0.68 ± 2% -0.2 0.45 ± 45% -0.2 0.53 ± 5% perf-profile.calltrace.cycles-pp.__wake_up_common_lock.unix_write_space.sock_wfree.unix_destruct_scm.skb_release_head_state 2.44 ± 3% -0.2 2.23 ± 3% -0.2 2.22 ± 3% perf-profile.calltrace.cycles-pp.skb_copy_datagram_from_iter.unix_stream_sendmsg.sock_write_iter.vfs_write.ksys_write 1.38 -0.2 1.18 ± 2% -0.2 1.18 ± 2% perf-profile.calltrace.cycles-pp.memcg_slab_post_alloc_hook.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 0.89 -0.2 0.70 ± 3% -0.2 0.69 ± 3% perf-profile.calltrace.cycles-pp.__build_skb_around.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg 0.63 ± 5% -0.2 0.44 ± 45% -0.2 0.43 ± 50% perf-profile.calltrace.cycles-pp.get_obj_cgroup_from_current.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 1.12 -0.2 0.94 ± 4% -0.2 0.93 ± 3% perf-profile.calltrace.cycles-pp.unix_write_space.sock_wfree.unix_destruct_scm.skb_release_head_state.consume_skb 0.18 ±141% -0.2 0.00 -0.2 0.00 perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock.unix_stream_sendmsg.sock_write_iter.vfs_write 0.59 -0.2 0.43 ± 44% -0.3 0.32 ± 81% perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.skb_unlink.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 1.81 -0.2 1.66 ± 2% -0.1 1.66 ± 2% perf-profile.calltrace.cycles-pp.__slab_free.skb_release_data.consume_skb.unix_stream_read_generic.unix_stream_recvmsg 1.12 -0.1 0.98 ± 5% -0.2 0.96 ± 4% perf-profile.calltrace.cycles-pp.syscall_exit_to_user_mode.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_read 0.79 -0.1 0.65 ± 3% -0.1 0.65 ± 7% perf-profile.calltrace.cycles-pp.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter.vfs_write 0.77 ± 2% -0.1 0.63 ± 3% -0.1 0.62 ± 7% perf-profile.calltrace.cycles-pp.schedule.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter 0.74 -0.1 0.60 ± 4% -0.1 0.60 ± 7% perf-profile.calltrace.cycles-pp.__schedule.schedule.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg 0.96 ± 2% -0.1 0.82 ± 6% -0.2 0.80 ± 5% perf-profile.calltrace.cycles-pp.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_read 1.22 ± 6% -0.1 1.09 ± 4% -0.1 1.08 ± 4% perf-profile.calltrace.cycles-pp._copy_from_iter.skb_copy_datagram_from_iter.unix_stream_sendmsg.sock_write_iter.vfs_write 0.96 ± 7% -0.1 0.83 ± 5% -0.1 0.82 ± 5% perf-profile.calltrace.cycles-pp.copyin._copy_from_iter.skb_copy_datagram_from_iter.unix_stream_sendmsg.sock_write_iter 1.34 -0.1 1.24 -0.1 1.23 ± 3% perf-profile.calltrace.cycles-pp.__slab_free.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter 0.73 ± 2% -0.1 0.63 ± 2% -0.1 0.64 ± 3% perf-profile.calltrace.cycles-pp.skb_unlink.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter 0.08 ±223% -0.1 0.00 -0.1 0.00 perf-profile.calltrace.cycles-pp.apparmor_file_permission.security_file_permission.vfs_read.ksys_read.do_syscall_64 0.08 ±223% -0.1 0.00 -0.1 0.00 
perf-profile.calltrace.cycles-pp.apparmor_file_permission.security_file_permission.vfs_write.ksys_write.do_syscall_64 0.89 -0.1 0.82 ± 3% -0.1 0.82 ± 3% perf-profile.calltrace.cycles-pp.security_socket_sendmsg.sock_write_iter.vfs_write.ksys_write.do_syscall_64 1.23 -0.1 1.16 ± 3% -0.1 1.16 ± 2% perf-profile.calltrace.cycles-pp.memcg_slab_post_alloc_hook.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 0.70 ± 2% -0.1 0.63 ± 4% -0.1 0.63 ± 4% perf-profile.calltrace.cycles-pp.aa_sk_perm.security_socket_sendmsg.sock_write_iter.vfs_write.ksys_write 1.39 -0.1 1.33 ± 2% -0.1 1.33 ± 3% perf-profile.calltrace.cycles-pp.__entry_text_start.__libc_write 0.90 -0.1 0.84 ± 2% -0.1 0.84 ± 2% perf-profile.calltrace.cycles-pp.__check_object_size.skb_copy_datagram_from_iter.unix_stream_sendmsg.sock_write_iter.vfs_write 1.31 -0.1 1.26 -0.0 1.27 ± 2% perf-profile.calltrace.cycles-pp.__entry_text_start.__libc_read 0.68 ± 2% -0.1 0.63 ± 3% -0.1 0.62 ± 2% perf-profile.calltrace.cycles-pp.__fdget_pos.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write 0.62 ± 2% -0.1 0.57 ± 4% -0.1 0.56 ± 2% perf-profile.calltrace.cycles-pp.__fget_light.__fdget_pos.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe 0.60 -0.0 0.57 ± 3% -0.0 0.56 ± 2% perf-profile.calltrace.cycles-pp.security_file_permission.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe 0.60 -0.0 0.57 ± 2% -0.0 0.57 ± 3% perf-profile.calltrace.cycles-pp.mod_objcg_state.kmem_cache_free.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 0.66 -0.0 0.63 -0.0 0.63 ± 2% perf-profile.calltrace.cycles-pp.security_socket_recvmsg.sock_recvmsg.sock_read_iter.vfs_read.ksys_read 0.64 -0.0 0.60 ± 2% -0.0 0.61 ± 2% perf-profile.calltrace.cycles-pp.mod_objcg_state.__kmem_cache_free.skb_release_data.consume_skb.unix_stream_read_generic 0.62 ± 2% -0.0 0.61 ± 3% -0.0 0.61 ± 2% perf-profile.calltrace.cycles-pp.security_file_permission.vfs_read.ksys_read.do_syscall_64.entry_SYSCALL_64_after_hwframe 0.00 +0.0 0.00 +0.1 0.11 ±200% perf-profile.calltrace.cycles-pp.__schedule.schedule.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 0.18 ±141% +0.1 0.25 ±100% -0.2 0.00 perf-profile.calltrace.cycles-pp.__fdget_pos.ksys_read.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_read 0.00 +0.1 0.08 ±223% +0.1 0.12 ±200% perf-profile.calltrace.cycles-pp.schedule.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64 0.61 +0.1 0.76 ± 30% +0.1 0.74 ± 33% perf-profile.calltrace.cycles-pp.syscall_exit_to_user_mode.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write 0.00 +0.2 0.18 ±141% +0.1 0.11 ±200% perf-profile.calltrace.cycles-pp.select_task_rq_fair.select_task_rq.try_to_wake_up.autoremove_wake_function.__wake_up_common 0.00 +0.2 0.18 ±141% +0.1 0.11 ±200% perf-profile.calltrace.cycles-pp.pick_next_task_fair.__schedule.schedule.schedule_timeout.unix_stream_data_wait 0.00 +0.2 0.18 ±141% +0.2 0.24 ±122% perf-profile.calltrace.cycles-pp.dequeue_entity.dequeue_task_fair.__schedule.schedule.schedule_timeout 0.00 +0.2 0.18 ±141% +0.3 0.33 ± 82% perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.prepare_to_wait.sock_alloc_send_pskb.unix_stream_sendmsg 0.00 +0.2 0.18 ±141% +0.2 0.24 ±124% perf-profile.calltrace.cycles-pp.cpu_startup_entry.start_secondary.secondary_startup_64_no_verify 0.00 +0.2 0.18 ±141% +0.2 0.24 ±124% perf-profile.calltrace.cycles-pp.do_idle.cpu_startup_entry.start_secondary.secondary_startup_64_no_verify 
0.00 +0.2 0.18 ±141% +0.2 0.24 ±124% perf-profile.calltrace.cycles-pp.start_secondary.secondary_startup_64_no_verify 0.00 +0.2 0.18 ±141% +0.3 0.34 ± 82% perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.prepare_to_wait.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter 0.00 +0.2 0.18 ±141% +0.2 0.24 ±124% perf-profile.calltrace.cycles-pp.secondary_startup_64_no_verify 0.00 +0.2 0.18 ±141% +0.1 0.12 ±200% perf-profile.calltrace.cycles-pp.select_task_rq.try_to_wake_up.autoremove_wake_function.__wake_up_common.__wake_up_common_lock 4.74 ± 2% +0.2 4.93 ± 27% +0.0 4.77 ± 31% perf-profile.calltrace.cycles-pp.sock_def_readable.unix_stream_sendmsg.sock_write_iter.vfs_write.ksys_write 0.00 +0.2 0.22 ±141% +0.2 0.25 ±123% perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock.try_to_wake_up.autoremove_wake_function.__wake_up_common 0.00 +0.2 0.24 ±141% +0.3 0.27 ±123% perf-profile.calltrace.cycles-pp._raw_spin_lock.try_to_wake_up.autoremove_wake_function.__wake_up_common.__wake_up_common_lock 0.00 +0.3 0.27 ±100% +0.4 0.36 ± 81% perf-profile.calltrace.cycles-pp.prepare_to_wait.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter.vfs_write 0.00 +0.3 0.32 ±101% +0.3 0.29 ±122% perf-profile.calltrace.cycles-pp.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up 0.00 +0.4 0.42 ±101% +0.4 0.36 ±123% perf-profile.calltrace.cycles-pp.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write 0.66 +0.4 1.11 ± 74% +0.3 0.96 ±103% perf-profile.calltrace.cycles-pp.dequeue_task_fair.__schedule.schedule.schedule_timeout.unix_stream_data_wait 0.74 +0.5 1.27 ± 59% +0.5 1.21 ± 67% perf-profile.calltrace.cycles-pp.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up.autoremove_wake_function 0.00 +0.5 0.54 ±105% +0.5 0.48 ±123% perf-profile.calltrace.cycles-pp.update_cfs_group.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up 0.77 ± 2% +0.5 1.31 ± 58% +0.5 1.25 ± 66% perf-profile.calltrace.cycles-pp.activate_task.ttwu_do_activate.try_to_wake_up.autoremove_wake_function.__wake_up_common 0.88 ± 2% +0.6 1.43 ± 56% +0.5 1.37 ± 63% perf-profile.calltrace.cycles-pp.ttwu_do_activate.try_to_wake_up.autoremove_wake_function.__wake_up_common.__wake_up_common_lock 0.00 +0.6 0.56 ±104% +0.5 0.51 ±123% perf-profile.calltrace.cycles-pp.update_cfs_group.dequeue_task_fair.__schedule.schedule.schedule_timeout 2.09 +0.6 2.73 ± 42% +0.6 2.65 ± 47% perf-profile.calltrace.cycles-pp.__schedule.schedule.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic 2.13 +0.7 2.78 ± 41% +0.6 2.69 ± 47% perf-profile.calltrace.cycles-pp.schedule.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg 2.22 +0.7 2.87 ± 41% +0.6 2.78 ± 47% perf-profile.calltrace.cycles-pp.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 2.59 ± 2% +0.7 3.27 ± 41% +0.6 3.15 ± 49% perf-profile.calltrace.cycles-pp.__wake_up_common_lock.sock_def_readable.unix_stream_sendmsg.sock_write_iter.vfs_write 2.65 +0.7 3.33 ± 39% +0.6 3.22 ± 44% perf-profile.calltrace.cycles-pp.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter 2.00 ± 2% +0.8 2.76 ± 46% +0.7 2.67 ± 54% perf-profile.calltrace.cycles-pp.__wake_up_common.__wake_up_common_lock.sock_def_readable.unix_stream_sendmsg.sock_write_iter 1.86 ± 2% +0.8 2.62 ± 48% +0.7 2.54 ± 56% 
perf-profile.calltrace.cycles-pp.try_to_wake_up.autoremove_wake_function.__wake_up_common.__wake_up_common_lock.sock_def_readable 1.88 ± 2% +0.8 2.65 ± 47% +0.7 2.56 ± 56% perf-profile.calltrace.cycles-pp.autoremove_wake_function.__wake_up_common.__wake_up_common_lock.sock_def_readable.unix_stream_sendmsg 45.79 +0.9 46.67 +0.8 46.57 perf-profile.calltrace.cycles-pp.__libc_write 42.50 +0.9 43.43 +1.0 43.45 perf-profile.calltrace.cycles-pp.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write 41.56 +1.0 42.56 +1.0 42.60 perf-profile.calltrace.cycles-pp.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write 43.75 +1.1 44.81 +1.1 44.82 perf-profile.calltrace.cycles-pp.entry_SYSCALL_64_after_hwframe.__libc_write 43.46 +1.1 44.52 +1.1 44.54 perf-profile.calltrace.cycles-pp.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write 39.64 +1.1 40.73 +1.1 40.78 perf-profile.calltrace.cycles-pp.sock_write_iter.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe 37.62 +1.2 38.84 +1.3 38.89 perf-profile.calltrace.cycles-pp.unix_stream_sendmsg.sock_write_iter.vfs_write.ksys_write.do_syscall_64 19.29 +3.7 22.98 ± 6% +4.0 23.26 ± 5% perf-profile.calltrace.cycles-pp.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter.vfs_write.ksys_write 15.36 ± 2% +4.0 19.39 ± 7% +4.3 19.63 ± 6% perf-profile.calltrace.cycles-pp.consume_skb.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter 14.47 ± 2% +4.5 18.97 ± 8% +4.7 19.18 ± 6% perf-profile.calltrace.cycles-pp.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter.vfs_write 14.22 ± 2% +4.5 18.72 ± 8% +4.7 18.93 ± 6% perf-profile.calltrace.cycles-pp.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter 9.71 ± 5% +5.0 14.71 ± 10% +5.3 14.97 ± 7% perf-profile.calltrace.cycles-pp.skb_release_data.consume_skb.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 2.50 ± 22% +5.8 8.29 ± 17% +6.0 8.53 ± 10% perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.__unfreeze_partials.skb_release_data.consume_skb.unix_stream_read_generic 2.37 ± 23% +5.8 8.17 ± 17% +6.0 8.41 ± 10% perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.__unfreeze_partials.skb_release_data.consume_skb 2.72 ± 21% +6.0 8.69 ± 17% +6.2 8.93 ± 10% perf-profile.calltrace.cycles-pp.__unfreeze_partials.skb_release_data.consume_skb.unix_stream_read_generic.unix_stream_recvmsg 7.16 ± 5% +6.3 13.43 ± 11% +6.5 13.65 ± 7% perf-profile.calltrace.cycles-pp.kmalloc_reserve.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg 6.78 ± 5% +6.3 13.06 ± 11% +6.5 13.29 ± 7% perf-profile.calltrace.cycles-pp.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 6.37 ± 6% +6.3 12.68 ± 11% +6.5 12.91 ± 7% perf-profile.calltrace.cycles-pp.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb.alloc_skb_with_frags 1.55 ± 24% +6.3 7.86 ± 17% +6.5 8.07 ± 10% perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.get_partial_node.___slab_alloc.__kmem_cache_alloc_node 1.57 ± 24% +6.3 7.92 ± 17% +6.6 8.13 ± 10% perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.get_partial_node.___slab_alloc.__kmem_cache_alloc_node.__kmalloc_node_track_caller 1.91 ± 20% +6.6 8.49 ± 17% +6.8 8.69 ± 10% perf-profile.calltrace.cycles-pp.get_partial_node.___slab_alloc.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve 2.39 ± 16% +6.8 
9.19 ± 16% +7.0 9.42 ± 10% perf-profile.calltrace.cycles-pp.___slab_alloc.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 6.78 -1.6 5.21 ± 4% -1.6 5.14 ± 4% perf-profile.children.cycles-pp.kmem_cache_free 5.16 ± 3% -1.5 3.68 ± 2% -1.5 3.69 ± 2% perf-profile.children.cycles-pp.kmem_cache_alloc_node 53.87 -1.2 52.63 -1.2 52.65 perf-profile.children.cycles-pp.__libc_read 9.85 ± 2% -1.2 8.65 ± 3% -1.2 8.65 ± 3% perf-profile.children.cycles-pp.skb_copy_datagram_iter 9.74 ± 2% -1.2 8.54 ± 3% -1.2 8.55 ± 3% perf-profile.children.cycles-pp.__skb_datagram_iter 9.92 ± 2% -1.2 8.73 ± 3% -1.2 8.73 ± 3% perf-profile.children.cycles-pp.unix_stream_read_actor 6.12 ± 2% -1.1 5.00 ± 8% -1.2 4.93 ± 6% perf-profile.children.cycles-pp._raw_spin_lock 49.86 -1.0 48.90 -0.9 48.93 perf-profile.children.cycles-pp.ksys_read 5.15 ± 2% -0.9 4.20 ± 4% -1.0 4.19 ± 3% perf-profile.children.cycles-pp.unix_destruct_scm 5.24 ± 2% -0.9 4.30 ± 3% -1.0 4.28 ± 3% perf-profile.children.cycles-pp.skb_release_head_state 4.96 ± 2% -0.9 4.02 ± 4% -1.0 4.01 ± 3% perf-profile.children.cycles-pp.sock_wfree 49.15 -0.9 48.21 -0.9 48.25 perf-profile.children.cycles-pp.vfs_read 5.37 ± 3% -0.9 4.44 ± 2% -0.9 4.48 ± 4% perf-profile.children.cycles-pp._copy_to_iter 5.14 ± 3% -0.9 4.21 ± 2% -0.9 4.25 ± 4% perf-profile.children.cycles-pp.copyout 47.12 -0.9 46.23 -0.8 46.28 perf-profile.children.cycles-pp.sock_read_iter 46.46 -0.9 45.58 -0.8 45.64 perf-profile.children.cycles-pp.sock_recvmsg 45.59 -0.8 44.74 -0.8 44.79 perf-profile.children.cycles-pp.unix_stream_recvmsg 45.41 -0.8 44.57 -0.8 44.62 perf-profile.children.cycles-pp.unix_stream_read_generic 3.36 ± 2% -0.8 2.53 ± 6% -0.8 2.52 ± 4% perf-profile.children.cycles-pp.skb_set_owner_w 3.77 -0.6 3.14 ± 3% -0.6 3.14 ± 3% perf-profile.children.cycles-pp.__kmem_cache_free 2.31 ± 3% -0.5 1.84 ± 6% -0.5 1.82 ± 6% perf-profile.children.cycles-pp.skb_queue_tail 5.12 -0.3 4.80 ± 3% -0.4 4.76 ± 3% perf-profile.children.cycles-pp.__check_object_size 4.04 -0.3 3.71 ± 4% -0.3 3.69 ± 3% perf-profile.children.cycles-pp.check_heap_object 2.68 -0.3 2.40 ± 2% -0.3 2.40 ± 2% perf-profile.children.cycles-pp.memcg_slab_post_alloc_hook 4.18 -0.3 3.92 ± 4% -0.3 3.89 ± 3% perf-profile.children.cycles-pp.simple_copy_to_iter 3.18 -0.3 2.93 ± 2% -0.3 2.93 ± 2% perf-profile.children.cycles-pp.__slab_free 2.50 ± 3% -0.2 2.29 ± 3% -0.2 2.28 ± 3% perf-profile.children.cycles-pp.skb_copy_datagram_from_iter 0.91 -0.2 0.71 ± 4% -0.2 0.71 ± 3% perf-profile.children.cycles-pp.__build_skb_around 1.13 -0.2 0.95 ± 4% -0.2 0.94 ± 3% perf-profile.children.cycles-pp.unix_write_space 0.76 ± 2% -0.2 0.58 ± 5% -0.2 0.58 ± 2% perf-profile.children.cycles-pp.asm_sysvec_apic_timer_interrupt 0.70 ± 3% -0.2 0.54 ± 4% -0.2 0.54 perf-profile.children.cycles-pp.sysvec_apic_timer_interrupt 0.65 ± 3% -0.2 0.48 ± 5% -0.2 0.48 perf-profile.children.cycles-pp.__sysvec_apic_timer_interrupt 0.64 ± 3% -0.2 0.48 ± 5% -0.2 0.48 perf-profile.children.cycles-pp.hrtimer_interrupt 1.18 ± 4% -0.2 1.02 ± 3% -0.2 1.01 ± 3% perf-profile.children.cycles-pp.get_obj_cgroup_from_current 0.59 ± 3% -0.2 0.44 ± 5% -0.2 0.44 perf-profile.children.cycles-pp.__hrtimer_run_queues 0.54 ± 3% -0.2 0.40 ± 5% -0.1 0.40 perf-profile.children.cycles-pp.tick_sched_timer 0.50 ± 3% -0.1 0.36 ± 5% -0.1 0.36 ± 2% perf-profile.children.cycles-pp.update_process_times 1.28 ± 5% -0.1 1.14 ± 3% -0.1 1.13 ± 4% perf-profile.children.cycles-pp._copy_from_iter 0.51 ± 3% -0.1 0.38 ± 5% -0.1 0.37 ± 2% perf-profile.children.cycles-pp.tick_sched_handle 1.05 ± 7% 
-0.1 0.92 ± 4% -0.1 0.91 ± 4% perf-profile.children.cycles-pp.copyin 0.64 ± 7% -0.1 0.52 ± 5% -0.1 0.52 ± 5% perf-profile.children.cycles-pp.__get_obj_cgroup_from_memcg 0.40 ± 3% -0.1 0.29 ± 5% -0.1 0.29 perf-profile.children.cycles-pp.scheduler_tick 2.18 -0.1 2.07 ± 2% -0.1 2.09 ± 2% perf-profile.children.cycles-pp.mod_objcg_state 0.34 ± 4% -0.1 0.23 ± 5% -0.1 0.24 ± 4% perf-profile.children.cycles-pp.task_tick_fair 0.17 ± 44% -0.1 0.06 ±141% -0.1 0.09 ±125% perf-profile.children.cycles-pp.perf_trace_sched_wakeup_template 0.76 ± 2% -0.1 0.66 ± 2% -0.1 0.66 ± 3% perf-profile.children.cycles-pp.skb_unlink 1.22 -0.1 1.13 ± 2% -0.1 1.13 ± 2% perf-profile.children.cycles-pp.aa_sk_perm 0.46 ± 2% -0.1 0.37 -0.1 0.37 ± 3% perf-profile.children.cycles-pp.task_work_run 0.45 ± 2% -0.1 0.37 ± 2% -0.1 0.37 ± 2% perf-profile.children.cycles-pp.task_mm_cid_work 0.12 ± 44% -0.1 0.04 ±141% -0.1 0.06 ±125% perf-profile.children.cycles-pp.perf_tp_event 1.59 -0.1 1.51 ± 2% -0.1 1.52 ± 2% perf-profile.children.cycles-pp.__entry_text_start 1.26 ± 3% -0.1 1.18 ± 4% -0.1 1.16 ± 2% perf-profile.children.cycles-pp.__fdget_pos 1.10 ± 3% -0.1 1.03 ± 4% -0.1 1.01 ± 2% perf-profile.children.cycles-pp.__fget_light 0.92 ± 2% -0.1 0.85 ± 3% -0.1 0.85 ± 3% perf-profile.children.cycles-pp.security_socket_sendmsg 0.52 ± 4% -0.1 0.45 ± 6% -0.1 0.46 perf-profile.children.cycles-pp.__virt_addr_valid 1.05 -0.1 0.99 ± 2% -0.1 0.99 ± 2% perf-profile.children.cycles-pp.apparmor_file_permission 0.15 ± 48% -0.1 0.09 ±144% -0.1 0.08 ±122% perf-profile.children.cycles-pp.reader__read_event 1.32 -0.1 1.27 ± 3% -0.1 1.26 perf-profile.children.cycles-pp.security_file_permission 0.27 ± 3% -0.1 0.22 ± 4% -0.1 0.21 ± 4% perf-profile.children.cycles-pp.load_balance 0.27 ± 3% -0.1 0.22 ± 5% -0.1 0.21 ± 4% perf-profile.children.cycles-pp.newidle_balance 0.43 -0.1 0.38 ± 2% -0.1 0.37 ± 2% perf-profile.children.cycles-pp.mutex_unlock 0.94 -0.1 0.89 ± 3% -0.0 0.89 ± 2% perf-profile.children.cycles-pp.obj_cgroup_charge 1.18 -0.0 1.14 ± 2% -0.0 1.13 ± 3% perf-profile.children.cycles-pp.entry_SYSRETQ_unsafe_stack 0.14 ± 57% -0.0 0.10 ±144% -0.1 0.05 ±200% perf-profile.children.cycles-pp.__cmd_record 0.06 ± 45% -0.0 0.02 ±141% -0.0 0.03 ±136% perf-profile.children.cycles-pp.perf_trace_sched_stat_runtime 0.87 -0.0 0.84 ± 2% -0.0 0.83 ± 2% perf-profile.children.cycles-pp.__cond_resched 0.41 ± 4% -0.0 0.37 -0.0 0.37 ± 2% perf-profile.children.cycles-pp.syscall_return_via_sysret 0.32 ± 4% -0.0 0.28 ± 2% -0.0 0.28 perf-profile.children.cycles-pp.obj_cgroup_uncharge_pages 0.12 ± 60% -0.0 0.09 ±144% -0.1 0.04 ±200% perf-profile.children.cycles-pp.record__finish_output 0.12 ± 60% -0.0 0.09 ±144% -0.0 0.08 ±122% perf-profile.children.cycles-pp.perf_session__process_events 0.69 -0.0 0.66 -0.0 0.66 ± 2% perf-profile.children.cycles-pp.security_socket_recvmsg 0.10 ± 69% -0.0 0.07 ±141% -0.1 0.05 ±122% perf-profile.children.cycles-pp.ordered_events__queue 0.33 -0.0 0.30 -0.0 0.30 ± 3% perf-profile.children.cycles-pp.syscall_enter_from_user_mode 0.10 ± 69% -0.0 0.07 ±141% -0.1 0.05 ±122% perf-profile.children.cycles-pp.process_simple 0.10 ± 69% -0.0 0.07 ±141% -0.1 0.05 ±122% perf-profile.children.cycles-pp.queue_event 0.13 ± 2% -0.0 0.10 ± 6% -0.0 0.09 ± 5% perf-profile.children.cycles-pp.detach_tasks 0.22 ± 3% -0.0 0.19 ± 10% -0.0 0.18 ± 10% perf-profile.children.cycles-pp.wake_affine 0.36 ± 2% -0.0 0.33 -0.0 0.32 ± 2% perf-profile.children.cycles-pp.aa_file_perm 0.26 ± 6% -0.0 0.24 ± 8% -0.0 0.23 ± 5% perf-profile.children.cycles-pp.memcg_account_kmem 0.50 ± 4% 
-0.0 0.47 ± 16% -0.0 0.47 ± 26% perf-profile.children.cycles-pp.update_curr 0.16 -0.0 0.13 ± 11% -0.0 0.13 ± 8% perf-profile.children.cycles-pp.__list_add_valid 0.16 ± 3% -0.0 0.13 ± 9% -0.0 0.13 ± 6% perf-profile.children.cycles-pp.task_h_load 0.18 ± 8% -0.0 0.15 ± 4% -0.0 0.16 ± 7% perf-profile.children.cycles-pp.__mod_memcg_lruvec_state 0.54 -0.0 0.51 ± 2% -0.0 0.52 perf-profile.children.cycles-pp.mutex_lock 0.05 -0.0 0.03 ±100% -0.0 0.04 ± 50% perf-profile.children.cycles-pp.__irq_exit_rcu 0.14 ± 3% -0.0 0.12 ± 3% -0.0 0.12 ± 3% perf-profile.children.cycles-pp.try_charge_memcg 0.73 ± 2% -0.0 0.71 ± 18% -0.0 0.69 ± 20% perf-profile.children.cycles-pp.pick_next_task_fair 0.39 -0.0 0.37 -0.0 0.37 perf-profile.children.cycles-pp.__get_task_ioprio 0.48 -0.0 0.46 ± 16% -0.0 0.45 ± 17% perf-profile.children.cycles-pp.switch_fpu_return 0.02 ±141% -0.0 0.00 -0.0 0.00 perf-profile.children.cycles-pp.page_counter_try_charge 0.13 ± 3% -0.0 0.12 ± 12% -0.0 0.11 ± 15% perf-profile.children.cycles-pp.update_rq_clock_task 0.33 ± 2% -0.0 0.32 ± 2% -0.0 0.31 ± 3% perf-profile.children.cycles-pp.rcu_all_qs 0.03 ±142% -0.0 0.02 ±223% -0.0 0.02 ±125% perf-profile.children.cycles-pp.perf_session__process_user_event 0.03 ±142% -0.0 0.02 ±223% -0.0 0.02 ±125% perf-profile.children.cycles-pp.__ordered_events__flush 0.20 ± 2% -0.0 0.19 ± 3% -0.0 0.19 ± 2% perf-profile.children.cycles-pp.syscall_exit_to_user_mode_prepare 0.16 -0.0 0.15 ± 3% -0.0 0.15 ± 2% perf-profile.children.cycles-pp.kfree 0.03 ±141% -0.0 0.02 ±223% -0.0 0.02 ±123% perf-profile.children.cycles-pp.perf_session__deliver_event 0.36 -0.0 0.35 ± 14% -0.0 0.34 ± 15% perf-profile.children.cycles-pp.restore_fpregs_from_fpstate 0.16 ± 3% -0.0 0.15 ± 2% -0.0 0.15 ± 2% perf-profile.children.cycles-pp.check_stack_object 0.24 ± 2% -0.0 0.23 -0.0 0.23 ± 2% perf-profile.children.cycles-pp.wait_for_unix_gc 1.86 -0.0 1.86 ± 15% -0.1 1.81 ± 16% perf-profile.children.cycles-pp.syscall_exit_to_user_mode 0.13 -0.0 0.12 -0.0 0.12 ± 5% perf-profile.children.cycles-pp.refill_stock 0.10 ± 4% -0.0 0.09 ± 5% -0.0 0.09 perf-profile.children.cycles-pp.unix_passcred_enabled 0.02 ±142% -0.0 0.01 ±223% -0.0 0.00 perf-profile.children.cycles-pp.evlist__parse_sample 0.16 -0.0 0.15 ± 2% -0.0 0.15 ± 5% perf-profile.children.cycles-pp.security_socket_getpeersec_dgram 0.05 -0.0 0.04 ± 44% -0.0 0.04 ± 50% perf-profile.children.cycles-pp.apparmor_socket_sendmsg 0.01 ±223% -0.0 0.00 -0.0 0.00 perf-profile.children.cycles-pp.sched_mm_cid_remote_clear 0.08 ± 4% -0.0 0.07 ± 11% -0.0 0.07 ± 17% perf-profile.children.cycles-pp.asm_sysvec_reschedule_ipi 0.53 ± 2% -0.0 0.52 ± 2% -0.0 0.52 ± 2% perf-profile.children.cycles-pp.refill_obj_stock 0.20 -0.0 0.19 ± 2% -0.0 0.20 ± 2% perf-profile.children.cycles-pp.scm_recv 0.09 ± 7% -0.0 0.08 ± 21% -0.0 0.08 ± 20% perf-profile.children.cycles-pp.update_min_vruntime 94.98 -0.0 94.97 +0.0 94.98 perf-profile.children.cycles-pp.do_syscall_64 0.28 ± 2% -0.0 0.28 ± 2% -0.0 0.27 ± 2% perf-profile.children.cycles-pp.kmalloc_slab 95.44 -0.0 95.44 +0.0 95.45 perf-profile.children.cycles-pp.entry_SYSCALL_64_after_hwframe 0.16 ± 3% -0.0 0.16 ± 3% -0.0 0.16 ± 4% perf-profile.children.cycles-pp.unix_scm_to_skb 0.08 ± 8% -0.0 0.08 ± 13% -0.0 0.08 ± 23% perf-profile.children.cycles-pp.cpuacct_charge 0.07 ± 6% -0.0 0.07 -0.0 0.07 perf-profile.children.cycles-pp.should_failslab 0.06 ± 7% -0.0 0.06 ± 6% -0.0 0.06 ± 7% perf-profile.children.cycles-pp.obj_cgroup_uncharge 0.16 ± 2% -0.0 0.16 ± 3% -0.0 0.15 ± 3% perf-profile.children.cycles-pp.rw_verify_area 0.06 ± 6% 
-0.0 0.06 -0.0 0.06 perf-profile.children.cycles-pp.__x64_sys_read 0.14 ± 2% -0.0 0.14 ± 3% -0.0 0.13 ± 3% perf-profile.children.cycles-pp.put_pid 0.07 ± 6% -0.0 0.07 ± 15% -0.0 0.07 ± 18% perf-profile.children.cycles-pp.sched_mm_cid_migrate_to 0.05 +0.0 0.05 -0.0 0.04 ± 50% perf-profile.children.cycles-pp.apparmor_socket_recvmsg 0.12 ± 4% +0.0 0.12 ± 6% -0.0 0.12 ± 5% perf-profile.children.cycles-pp.fsnotify_perm 0.08 +0.0 0.08 +0.0 0.08 perf-profile.children.cycles-pp.skb_put 0.06 +0.0 0.06 +0.0 0.06 perf-profile.children.cycles-pp.kfree_skbmem 0.24 ± 2% +0.0 0.24 ± 4% +0.0 0.24 ± 2% perf-profile.children.cycles-pp._raw_spin_unlock_irqrestore 0.06 ± 7% +0.0 0.06 ± 7% +0.0 0.07 ± 12% perf-profile.children.cycles-pp.__x64_sys_write 0.15 +0.0 0.15 ± 5% +0.0 0.15 ± 2% perf-profile.children.cycles-pp.is_vmalloc_addr 0.00 +0.0 0.00 +0.0 0.01 ±200% perf-profile.children.cycles-pp.wait_consider_task 0.00 +0.0 0.00 +0.0 0.01 ±200% perf-profile.children.cycles-pp.asm_exc_page_fault 0.08 ± 6% +0.0 0.08 ± 6% -0.0 0.07 ± 5% perf-profile.children.cycles-pp.skb_free_head 0.07 +0.0 0.07 ± 34% -0.0 0.07 ± 36% perf-profile.children.cycles-pp.put_prev_entity 0.24 ± 6% +0.0 0.24 ± 17% +0.0 0.25 ± 16% perf-profile.children.cycles-pp.__switch_to_asm 0.22 +0.0 0.22 ± 3% -0.0 0.22 ± 3% perf-profile.children.cycles-pp.kmalloc_size_roundup 0.40 ± 2% +0.0 0.41 ± 3% -0.0 0.40 ± 2% perf-profile.children.cycles-pp.__list_del_entry_valid 0.11 +0.0 0.12 ± 6% -0.0 0.10 ± 4% perf-profile.children.cycles-pp.entry_SYSCALL_64_safe_stack 0.12 ± 17% +0.0 0.12 ± 20% +0.0 0.12 ± 17% perf-profile.children.cycles-pp.cgroup_rstat_updated 0.05 +0.0 0.06 ± 9% +0.0 0.06 ± 14% perf-profile.children.cycles-pp.rb_erase 0.18 ± 2% +0.0 0.18 ± 23% +0.0 0.18 ± 23% perf-profile.children.cycles-pp.__switch_to 0.06 ± 8% +0.0 0.06 ± 50% +0.0 0.07 ± 29% perf-profile.children.cycles-pp.set_task_cpu 0.10 ± 4% +0.0 0.11 ± 28% +0.0 0.11 ± 32% perf-profile.children.cycles-pp.check_preempt_curr 1.50 +0.0 1.51 ± 18% -0.0 1.47 ± 20% perf-profile.children.cycles-pp.exit_to_user_mode_prepare 0.09 +0.0 0.10 ± 18% +0.0 0.10 ± 21% perf-profile.children.cycles-pp.os_xsave 0.00 +0.0 0.01 ±223% +0.0 0.00 perf-profile.children.cycles-pp.rcu_note_context_switch 0.00 +0.0 0.01 ±223% +0.0 0.00 perf-profile.children.cycles-pp.__do_softirq 0.00 +0.0 0.01 ±223% +0.0 0.01 ±200% perf-profile.children.cycles-pp.set_next_buddy 0.00 +0.0 0.01 ±223% +0.0 0.01 ±200% perf-profile.children.cycles-pp.__do_sys_wait4 0.00 +0.0 0.01 ±223% +0.0 0.01 ±200% perf-profile.children.cycles-pp.kernel_wait4 0.00 +0.0 0.01 ±223% +0.0 0.01 ±200% perf-profile.children.cycles-pp.do_wait 0.00 +0.0 0.01 ±223% +0.0 0.01 ±200% perf-profile.children.cycles-pp.wait4 0.00 +0.0 0.01 ±223% +0.0 0.02 ±122% perf-profile.children.cycles-pp.select_idle_core 0.00 +0.0 0.01 ±223% +0.0 0.03 ± 82% perf-profile.children.cycles-pp.get_any_partial 0.06 ± 7% +0.0 0.08 ± 35% +0.0 0.07 ± 39% perf-profile.children.cycles-pp.check_preempt_wakeup 0.19 +0.0 0.21 ± 29% +0.0 0.20 ± 30% perf-profile.children.cycles-pp.prepare_task_switch 0.09 ± 7% +0.0 0.10 ± 32% +0.0 0.10 ± 33% perf-profile.children.cycles-pp.finish_task_switch 0.13 ± 3% +0.0 0.14 ± 7% +0.0 0.15 ± 6% perf-profile.children.cycles-pp.put_cpu_partial 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% perf-profile.children.cycles-pp.__x64_sys_exit_group 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% perf-profile.children.cycles-pp.do_group_exit 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% perf-profile.children.cycles-pp.do_exit 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% 
perf-profile.children.cycles-pp.__calc_delta 0.00 +0.0 0.02 ±141% +0.0 0.02 ±122% perf-profile.children.cycles-pp.native_irq_return_iret 0.12 ± 3% +0.0 0.14 ± 28% +0.0 0.13 ± 35% perf-profile.children.cycles-pp.__update_load_avg_cfs_rq 0.50 ± 3% +0.0 0.51 ± 24% +0.0 0.51 ± 33% perf-profile.children.cycles-pp.dequeue_entity 0.00 +0.0 0.02 ±142% +0.0 0.02 ±123% perf-profile.children.cycles-pp.__wrgsbase_inactive 0.16 ± 2% +0.0 0.18 ± 22% +0.0 0.17 ± 26% perf-profile.children.cycles-pp.__update_load_avg_se 0.09 ± 4% +0.0 0.11 ± 27% +0.0 0.11 ± 27% perf-profile.children.cycles-pp.update_rq_clock 0.00 +0.0 0.02 ±141% +0.0 0.02 ±123% perf-profile.children.cycles-pp.pick_next_entity 0.02 ± 99% +0.0 0.05 ± 73% +0.0 0.03 ±124% perf-profile.children.cycles-pp.sched_clock_cpu 0.01 ±223% +0.0 0.03 ±101% +0.0 0.01 ±200% perf-profile.children.cycles-pp.__cgroup_account_cputime 0.48 ± 2% +0.0 0.51 ± 2% +0.0 0.50 ± 4% perf-profile.children.cycles-pp.__check_heap_object 0.11 ± 3% +0.0 0.13 ± 30% +0.0 0.12 ± 35% perf-profile.children.cycles-pp.reweight_entity 0.13 ± 8% +0.0 0.15 ± 32% +0.0 0.15 ± 26% perf-profile.children.cycles-pp.___perf_sw_event 0.82 ± 2% +0.0 0.85 ± 23% +0.0 0.82 ± 26% perf-profile.children.cycles-pp.exit_to_user_mode_loop 0.00 +0.0 0.03 ±100% +0.0 0.02 ±122% perf-profile.children.cycles-pp.ttwu_queue_wakelist 0.00 +0.0 0.03 ±100% +0.0 0.03 ±122% perf-profile.children.cycles-pp.migrate_task_rq_fair 0.00 +0.0 0.03 ±100% +0.0 0.03 ±124% perf-profile.children.cycles-pp.native_sched_clock 0.00 +0.0 0.03 ±100% +0.0 0.03 ±124% perf-profile.children.cycles-pp.schedule_idle 0.05 ± 7% +0.0 0.09 ± 78% +0.0 0.09 ± 65% perf-profile.children.cycles-pp.available_idle_cpu 0.00 +0.0 0.04 ±104% +0.0 0.04 ±124% perf-profile.children.cycles-pp.__sysvec_call_function_single 0.00 +0.0 0.04 ±104% +0.0 0.04 ±126% perf-profile.children.cycles-pp.sysvec_call_function_single 0.00 +0.0 0.04 ±100% +0.0 0.03 ±122% perf-profile.children.cycles-pp.intel_idle 0.20 ± 2% +0.0 0.24 ± 28% +0.0 0.23 ± 33% perf-profile.children.cycles-pp.set_next_entity 0.00 +0.0 0.04 ±105% +0.0 0.04 ±125% perf-profile.children.cycles-pp.asm_sysvec_call_function_single 0.00 +0.0 0.04 ±100% +0.0 0.04 ±123% perf-profile.children.cycles-pp.intel_idle_irq 0.45 +0.1 0.50 ± 26% +0.0 0.48 ± 28% perf-profile.children.cycles-pp.switch_mm_irqs_off 0.00 +0.1 0.07 ± 71% +0.1 0.08 ± 54% perf-profile.children.cycles-pp.finish_wait 0.36 ± 3% +0.1 0.46 ± 33% +0.1 0.44 ± 33% perf-profile.children.cycles-pp.select_task_rq 0.00 +0.1 0.10 ±100% +0.1 0.09 ±122% perf-profile.children.cycles-pp.cpuidle_enter 0.00 +0.1 0.10 ±100% +0.1 0.09 ±122% perf-profile.children.cycles-pp.cpuidle_enter_state 0.00 +0.1 0.10 ±101% +0.1 0.11 ±129% perf-profile.children.cycles-pp.flush_smp_call_function_queue 0.00 +0.1 0.11 ±100% +0.1 0.10 ±122% perf-profile.children.cycles-pp.cpuidle_idle_call 0.59 ± 2% +0.1 0.71 ± 34% +0.1 0.71 ± 41% perf-profile.children.cycles-pp.enqueue_entity 0.30 ± 3% +0.1 0.42 ± 36% +0.1 0.41 ± 34% perf-profile.children.cycles-pp.select_task_rq_fair 0.00 +0.1 0.12 ±102% +0.1 0.13 ±128% perf-profile.children.cycles-pp.sched_ttwu_pending 0.00 +0.1 0.13 ± 81% +0.1 0.13 ± 69% perf-profile.children.cycles-pp.select_idle_cpu 0.06 ± 9% +0.1 0.20 ± 64% +0.1 0.19 ± 58% perf-profile.children.cycles-pp.select_idle_sibling 0.68 +0.2 0.84 ± 36% +0.2 0.83 ± 43% perf-profile.children.cycles-pp.update_load_avg 0.45 ± 3% +0.2 0.64 ± 30% +0.3 0.71 ± 26% perf-profile.children.cycles-pp.prepare_to_wait 4.75 ± 2% +0.2 4.94 ± 26% +0.0 4.79 ± 30% 
perf-profile.children.cycles-pp.sock_def_readable
0.00 +0.3 0.27 ± 94% +0.2 0.25 ±116% perf-profile.children.cycles-pp.start_secondary
0.00 +0.3 0.27 ± 94% +0.3 0.25 ±116% perf-profile.children.cycles-pp.secondary_startup_64_no_verify
0.00 +0.3 0.27 ± 94% +0.3 0.25 ±116% perf-profile.children.cycles-pp.cpu_startup_entry
0.00 +0.3 0.27 ± 94% +0.3 0.25 ±116% perf-profile.children.cycles-pp.do_idle
0.96 +0.5 1.42 ± 51% +0.4 1.37 ± 59% perf-profile.children.cycles-pp.dequeue_task_fair
3.01 +0.5 3.52 ± 33% +0.4 3.43 ± 36% perf-profile.children.cycles-pp.schedule_timeout
3.28 ± 2% +0.5 3.81 ± 35% +0.4 3.69 ± 42% perf-profile.children.cycles-pp.__wake_up_common_lock
1.14 ± 2% +0.6 1.70 ± 50% +0.5 1.65 ± 57% perf-profile.children.cycles-pp.activate_task
1.05 +0.6 1.61 ± 52% +0.5 1.56 ± 59% perf-profile.children.cycles-pp.enqueue_task_fair
1.20 ± 2% +0.6 1.80 ± 50% +0.5 1.74 ± 57% perf-profile.children.cycles-pp.ttwu_do_activate
2.69 ± 2% +0.6 3.29 ± 38% +0.5 3.19 ± 45% perf-profile.children.cycles-pp.__wake_up_common
2.56 ± 2% +0.6 3.17 ± 39% +0.5 3.08 ± 46% perf-profile.children.cycles-pp.autoremove_wake_function
3.25 +0.6 3.87 ± 34% +0.5 3.76 ± 39% perf-profile.children.cycles-pp.schedule
2.48 ± 2% +0.6 3.10 ± 39% +0.5 3.01 ± 47% perf-profile.children.cycles-pp.try_to_wake_up
3.20 +0.6 3.85 ± 35% +0.5 3.74 ± 39% perf-profile.children.cycles-pp.__schedule
2.66 +0.7 3.34 ± 38% +0.6 3.24 ± 44% perf-profile.children.cycles-pp.unix_stream_data_wait
0.71 ± 3% +0.8 1.52 ± 74% +0.7 1.45 ± 84% perf-profile.children.cycles-pp.update_cfs_group
42.56 +0.9 43.50 +1.0 43.52 perf-profile.children.cycles-pp.ksys_write
45.89 +1.0 46.85 +1.0 46.86 perf-profile.children.cycles-pp.__libc_write
41.66 +1.0 42.66 +1.0 42.69 perf-profile.children.cycles-pp.vfs_write
39.70 +1.1 40.80 +1.1 40.84 perf-profile.children.cycles-pp.sock_write_iter
37.85 +1.2 39.07 +1.3 39.12 perf-profile.children.cycles-pp.unix_stream_sendmsg
19.34 +3.7 23.02 ± 6% +4.0 23.31 ± 5% perf-profile.children.cycles-pp.sock_alloc_send_pskb
15.43 ± 2% +4.0 19.46 ± 7% +4.3 19.70 ± 6% perf-profile.children.cycles-pp.consume_skb
4.72 ± 13% +4.4 9.16 ± 16% +4.7 9.39 ± 10% perf-profile.children.cycles-pp.__unfreeze_partials
14.51 ± 2% +4.5 19.00 ± 8% +4.7 19.21 ± 6% perf-profile.children.cycles-pp.alloc_skb_with_frags
14.30 ± 2% +4.5 18.80 ± 8% +4.7 19.01 ± 6% perf-profile.children.cycles-pp.__alloc_skb
9.74 ± 5% +5.0 14.74 ± 10% +5.3 15.00 ± 7% perf-profile.children.cycles-pp.skb_release_data
3.36 ± 12% +5.6 8.91 ± 16% +5.8 9.12 ± 10% perf-profile.children.cycles-pp.get_partial_node
4.29 ± 10% +5.6 9.91 ± 15% +5.8 10.13 ± 9% perf-profile.children.cycles-pp.___slab_alloc
6.85 ± 5% +6.3 13.12 ± 11% +6.5 13.35 ± 7% perf-profile.children.cycles-pp.__kmalloc_node_track_caller
7.23 ± 5% +6.3 13.51 ± 11% +6.5 13.72 ± 7% perf-profile.children.cycles-pp.kmalloc_reserve
6.49 ± 5% +6.3 12.80 ± 11% +6.5 13.03 ± 7% perf-profile.children.cycles-pp.__kmem_cache_alloc_node
10.87 ± 8% +9.4 20.22 ± 13% +9.8 20.71 ± 8% perf-profile.children.cycles-pp._raw_spin_lock_irqsave
8.27 ± 11% +10.0 18.23 ± 13% +10.4 18.70 ± 7% perf-profile.children.cycles-pp.native_queued_spin_lock_slowpath
5.78 -1.5 4.27 ± 5% -1.6 4.21 ± 4% perf-profile.self.cycles-pp.kmem_cache_free
5.20 ± 2% -1.1 4.08 ± 4% -1.2 4.04 ± 2% perf-profile.self.cycles-pp._raw_spin_lock
5.08 ± 3% -0.9 4.17 ± 2% -0.9 4.21 ± 4% perf-profile.self.cycles-pp.copyout
4.80 -0.9 3.92 ± 3% -0.9 3.91 ± 3% perf-profile.self.cycles-pp.unix_stream_read_generic
3.31 ± 2% -0.8 2.49 ± 5% -0.8 2.49 ± 4% perf-profile.self.cycles-pp.skb_set_owner_w
3.80 ± 2% -0.7 3.06 ± 4% -0.8 3.05 ± 4% perf-profile.self.cycles-pp.sock_wfree
3.10 ± 2% -0.6 2.48 ± 5% -0.6 2.45 ± 5% perf-profile.self.cycles-pp.unix_stream_sendmsg
3.42 ± 2% -0.6 2.83 ± 4% -0.6 2.80 ± 3% perf-profile.self.cycles-pp._raw_spin_lock_irqsave
2.65 ± 2% -0.6 2.09 ± 5% -0.6 2.08 ± 3% perf-profile.self.cycles-pp.__kmem_cache_free
2.12 ± 4% -0.5 1.62 ± 7% -0.5 1.60 ± 6% perf-profile.self.cycles-pp.sock_def_readable
3.35 -0.3 3.09 ± 5% -0.3 3.05 ± 4% perf-profile.self.cycles-pp.check_heap_object
3.13 -0.2 2.88 -0.3 2.88 ± 2% perf-profile.self.cycles-pp.__slab_free
1.75 -0.2 1.52 ± 2% -0.2 1.53 perf-profile.self.cycles-pp.memcg_slab_post_alloc_hook
0.87 -0.2 0.68 ± 4% -0.2 0.67 ± 3% perf-profile.self.cycles-pp.__build_skb_around
1.48 -0.2 1.29 ± 3% -0.2 1.30 ± 4% perf-profile.self.cycles-pp.__kmem_cache_alloc_node
1.16 -0.2 0.98 ± 2% -0.2 0.97 ± 4% perf-profile.self.cycles-pp.skb_release_data
1.01 ± 7% -0.1 0.88 ± 4% -0.1 0.88 ± 4% perf-profile.self.cycles-pp.copyin
0.59 ± 7% -0.1 0.48 ± 5% -0.1 0.48 ± 5% perf-profile.self.cycles-pp.__get_obj_cgroup_from_memcg
1.03 -0.1 0.94 ± 2% -0.1 0.94 ± 3% perf-profile.self.cycles-pp.__alloc_skb
0.96 -0.1 0.88 ± 3% -0.1 0.87 ± 2% perf-profile.self.cycles-pp.aa_sk_perm
0.42 ± 3% -0.1 0.34 ± 4% -0.1 0.34 ± 4% perf-profile.self.cycles-pp.task_mm_cid_work
1.93 -0.1 1.86 ± 2% -0.1 1.86 ± 2% perf-profile.self.cycles-pp.mod_objcg_state
0.82 -0.1 0.75 ± 2% -0.1 0.75 ± 2% perf-profile.self.cycles-pp.kmem_cache_alloc_node
1.06 ± 3% -0.1 0.99 ± 4% -0.1 0.97 ± 2% perf-profile.self.cycles-pp.__fget_light
0.10 ± 44% -0.1 0.04 ±141% -0.1 0.05 ±123% perf-profile.self.cycles-pp.perf_tp_event
0.48 ± 4% -0.1 0.42 ± 6% -0.0 0.43 perf-profile.self.cycles-pp.__virt_addr_valid
0.82 ± 3% -0.1 0.76 ± 3% -0.0 0.77 ± 2% perf-profile.self.cycles-pp.__libc_read
1.09 -0.1 1.03 ± 2% -0.1 1.03 ± 2% perf-profile.self.cycles-pp.vfs_write
0.41 -0.1 0.36 ± 2% -0.1 0.36 ± 2% perf-profile.self.cycles-pp.mutex_unlock
0.54 ± 2% -0.0 0.50 ± 3% -0.1 0.48 ± 3% perf-profile.self.cycles-pp.get_obj_cgroup_from_current
1.15 -0.0 1.10 ± 2% -0.0 1.10 ± 3% perf-profile.self.cycles-pp.entry_SYSRETQ_unsafe_stack
0.93 -0.0 0.89 -0.0 0.88 ± 2% perf-profile.self.cycles-pp.sock_write_iter
0.79 ± 2% -0.0 0.75 ± 6% -0.0 0.74 perf-profile.self.cycles-pp.__libc_write
0.06 ± 45% -0.0 0.02 ±141% -0.0 0.03 ±136% perf-profile.self.cycles-pp.perf_trace_sched_stat_runtime
0.41 ± 4% -0.0 0.37 ± 2% -0.0 0.37 ± 2% perf-profile.self.cycles-pp.syscall_return_via_sysret
0.42 -0.0 0.39 ± 2% -0.0 0.38 ± 3% perf-profile.self.cycles-pp.sock_alloc_send_pskb
0.67 ± 2% -0.0 0.64 ± 4% -0.0 0.64 ± 3% perf-profile.self.cycles-pp.apparmor_file_permission
0.59 ± 2% -0.0 0.55 ± 16% -0.0 0.55 ± 18% perf-profile.self.cycles-pp.__schedule
0.47 -0.0 0.44 ± 3% -0.0 0.45 ± 2% perf-profile.self.cycles-pp.__entry_text_start
0.45 -0.0 0.42 -0.0 0.43 perf-profile.self.cycles-pp.consume_skb
0.04 ± 45% -0.0 0.02 ±141% -0.0 0.02 ±123% perf-profile.self.cycles-pp.select_task_rq
0.10 ± 69% -0.0 0.07 ±141% -0.0 0.05 ±122% perf-profile.self.cycles-pp.queue_event
0.22 -0.0 0.19 ± 4% -0.0 0.19 ± 2% perf-profile.self.cycles-pp.__kmalloc_node_track_caller
0.31 ± 2% -0.0 0.28 -0.0 0.28 perf-profile.self.cycles-pp.aa_file_perm
0.16 ± 3% -0.0 0.13 ± 9% -0.0 0.13 ± 6% perf-profile.self.cycles-pp.task_h_load
0.28 -0.0 0.25 -0.0 0.26 ± 2% perf-profile.self.cycles-pp.syscall_enter_from_user_mode
0.43 -0.0 0.40 ± 2% -0.0 0.40 perf-profile.self.cycles-pp.unix_write_space
0.15 ± 10% -0.0 0.13 ± 5% -0.0 0.13 ± 3%
perf-profile.self.cycles-pp.skb_unlink 0.15 -0.0 0.13 ± 9% -0.0 0.12 ± 8% perf-profile.self.cycles-pp.__list_add_valid 0.51 ± 2% -0.0 0.49 -0.0 0.49 ± 3% perf-profile.self.cycles-pp.__cond_resched 0.14 ± 3% -0.0 0.12 ± 4% -0.0 0.12 ± 5% perf-profile.self.cycles-pp.__mod_memcg_lruvec_state 0.20 ± 5% -0.0 0.18 ± 4% -0.0 0.17 ± 5% perf-profile.self.cycles-pp.memcg_account_kmem 0.34 ± 2% -0.0 0.32 -0.0 0.32 perf-profile.self.cycles-pp.__get_task_ioprio 1.02 -0.0 1.00 ± 2% -0.0 0.99 perf-profile.self.cycles-pp.vfs_read 0.02 ±141% -0.0 0.00 -0.0 0.00 perf-profile.self.cycles-pp.update_process_times 0.12 ± 3% -0.0 0.11 ± 13% -0.0 0.10 ± 16% perf-profile.self.cycles-pp.update_rq_clock_task 0.12 ± 3% -0.0 0.10 ± 18% -0.0 0.11 ± 22% perf-profile.self.cycles-pp.pick_next_task_fair 0.55 -0.0 0.54 ± 3% -0.0 0.54 ± 3% perf-profile.self.cycles-pp.obj_cgroup_charge 0.36 -0.0 0.35 ± 4% -0.0 0.35 ± 2% perf-profile.self.cycles-pp.mutex_lock 0.14 ± 4% -0.0 0.13 ± 3% -0.0 0.12 ± 3% perf-profile.self.cycles-pp.syscall_exit_to_user_mode_prepare 0.65 -0.0 0.63 -0.0 0.63 perf-profile.self.cycles-pp.sock_read_iter 0.20 ± 2% -0.0 0.19 ± 16% -0.0 0.19 ± 15% perf-profile.self.cycles-pp.enqueue_entity 0.23 -0.0 0.22 ± 2% -0.0 0.22 ± 4% perf-profile.self.cycles-pp.rcu_all_qs 0.13 ± 3% -0.0 0.12 ± 4% -0.0 0.12 ± 4% perf-profile.self.cycles-pp.check_stack_object 0.36 -0.0 0.35 ± 14% -0.0 0.34 ± 15% perf-profile.self.cycles-pp.restore_fpregs_from_fpstate 0.14 ± 2% -0.0 0.13 ± 2% -0.0 0.13 ± 3% perf-profile.self.cycles-pp.kfree 0.20 ± 3% -0.0 0.20 ± 3% -0.0 0.20 ± 2% perf-profile.self.cycles-pp.alloc_skb_with_frags 0.16 ± 3% -0.0 0.15 -0.0 0.15 ± 3% perf-profile.self.cycles-pp.security_socket_recvmsg 0.08 ± 12% -0.0 0.07 ± 7% -0.0 0.07 ± 5% perf-profile.self.cycles-pp.obj_cgroup_uncharge_pages 0.12 ± 4% -0.0 0.10 ± 22% -0.0 0.10 ± 25% perf-profile.self.cycles-pp.switch_fpu_return 0.13 ± 3% -0.0 0.12 ± 12% -0.0 0.11 ± 17% perf-profile.self.cycles-pp.__wake_up_common 0.05 -0.0 0.04 ± 72% -0.0 0.04 ± 83% perf-profile.self.cycles-pp.update_rq_clock 0.10 ± 5% -0.0 0.09 ± 5% -0.0 0.09 ± 5% perf-profile.self.cycles-pp.try_charge_memcg 0.08 ± 4% -0.0 0.07 -0.0 0.07 ± 5% perf-profile.self.cycles-pp.unix_passcred_enabled 0.24 -0.0 0.23 ± 2% -0.0 0.23 ± 3% perf-profile.self.cycles-pp._copy_from_iter 0.09 ± 6% -0.0 0.08 ± 19% -0.0 0.09 ± 34% perf-profile.self.cycles-pp.prepare_task_switch 0.05 ± 8% -0.0 0.05 ± 47% +0.0 0.05 ± 14% perf-profile.self.cycles-pp.ttwu_do_activate 0.49 -0.0 0.48 -0.0 0.48 ± 2% perf-profile.self.cycles-pp.__check_object_size 0.20 ± 2% -0.0 0.19 ± 3% -0.0 0.19 ± 3% perf-profile.self.cycles-pp.ksys_write 0.23 ± 2% -0.0 0.23 ± 2% -0.0 0.23 ± 3% perf-profile.self.cycles-pp._copy_to_iter 0.50 -0.0 0.49 ± 2% -0.0 0.49 ± 2% perf-profile.self.cycles-pp.refill_obj_stock 0.24 ± 3% -0.0 0.24 ± 3% -0.0 0.24 ± 3% perf-profile.self.cycles-pp.kmalloc_slab 0.16 ± 3% -0.0 0.15 ± 3% -0.0 0.15 ± 5% perf-profile.self.cycles-pp.unix_destruct_scm 0.15 ± 2% -0.0 0.14 ± 3% -0.0 0.14 ± 2% perf-profile.self.cycles-pp.security_socket_sendmsg 0.09 ± 4% -0.0 0.09 ± 5% -0.0 0.09 ± 5% perf-profile.self.cycles-pp.wait_for_unix_gc 0.16 ± 2% -0.0 0.16 ± 3% -0.0 0.16 ± 3% perf-profile.self.cycles-pp.__fdget_pos 0.24 ± 2% -0.0 0.24 ± 3% -0.0 0.24 ± 3% perf-profile.self.cycles-pp.skb_copy_datagram_from_iter 0.09 ± 4% -0.0 0.08 ± 5% -0.0 0.09 ± 5% perf-profile.self.cycles-pp.refill_stock 0.20 ± 2% -0.0 0.20 ± 3% -0.0 0.20 perf-profile.self.cycles-pp._raw_spin_unlock_irqrestore 0.16 ± 3% -0.0 0.15 ± 3% +0.0 0.16 ± 3% 
perf-profile.self.cycles-pp.scm_recv 0.04 ± 44% -0.0 0.04 ±101% -0.0 0.03 ±126% perf-profile.self.cycles-pp.reweight_entity 0.08 ± 8% -0.0 0.08 ± 12% -0.0 0.08 ± 24% perf-profile.self.cycles-pp.cpuacct_charge 0.34 ± 2% -0.0 0.33 ± 2% -0.0 0.33 ± 2% perf-profile.self.cycles-pp.do_syscall_64 0.12 -0.0 0.12 ± 18% -0.0 0.11 ± 19% perf-profile.self.cycles-pp.schedule_timeout 0.08 ± 4% -0.0 0.08 ± 6% -0.0 0.08 ± 6% perf-profile.self.cycles-pp.simple_copy_to_iter 0.20 -0.0 0.20 ± 3% -0.0 0.19 ± 2% perf-profile.self.cycles-pp.kmalloc_reserve 0.08 ± 5% -0.0 0.08 ± 19% -0.0 0.08 ± 27% perf-profile.self.cycles-pp.update_min_vruntime 0.18 ± 2% -0.0 0.18 ± 2% -0.0 0.17 ± 2% perf-profile.self.cycles-pp.unix_stream_recvmsg 0.12 ± 3% -0.0 0.12 ± 5% -0.0 0.12 ± 4% perf-profile.self.cycles-pp.security_socket_getpeersec_dgram 0.22 ± 2% -0.0 0.21 ± 3% -0.0 0.21 ± 3% perf-profile.self.cycles-pp.syscall_exit_to_user_mode 0.12 ± 4% -0.0 0.11 ± 4% -0.0 0.11 ± 3% perf-profile.self.cycles-pp.skb_queue_tail 0.09 ± 5% -0.0 0.09 -0.0 0.09 perf-profile.self.cycles-pp.put_pid 0.10 ± 4% -0.0 0.10 ± 3% -0.0 0.10 ± 4% perf-profile.self.cycles-pp.skb_copy_datagram_iter 0.22 ± 2% -0.0 0.21 -0.0 0.21 ± 3% perf-profile.self.cycles-pp.__skb_datagram_iter 0.08 ± 6% -0.0 0.08 ± 6% -0.0 0.07 ± 5% perf-profile.self.cycles-pp.skb_release_head_state 0.13 ± 4% -0.0 0.13 ± 2% -0.0 0.13 ± 6% perf-profile.self.cycles-pp.unix_scm_to_skb 0.12 ± 3% -0.0 0.12 ± 3% -0.0 0.12 ± 3% perf-profile.self.cycles-pp.rw_verify_area 0.22 ± 2% -0.0 0.22 ± 17% -0.0 0.22 ± 19% perf-profile.self.cycles-pp.update_curr 0.10 ± 18% -0.0 0.10 ± 22% -0.0 0.09 ± 19% perf-profile.self.cycles-pp.cgroup_rstat_updated 0.07 ± 6% -0.0 0.07 ± 15% -0.0 0.07 ± 18% perf-profile.self.cycles-pp.sched_mm_cid_migrate_to 0.11 +0.0 0.11 ± 10% -0.0 0.10 ± 3% perf-profile.self.cycles-pp.entry_SYSCALL_64_safe_stack 0.02 ±141% +0.0 0.02 ±141% -0.0 0.01 ±200% perf-profile.self.cycles-pp.kfree_skbmem 0.14 ± 3% +0.0 0.14 ± 25% -0.0 0.14 ± 27% perf-profile.self.cycles-pp.try_to_wake_up 0.06 +0.0 0.06 +0.0 0.06 perf-profile.self.cycles-pp.skb_free_head 0.18 ± 2% +0.0 0.18 ± 2% +0.0 0.19 ± 2% perf-profile.self.cycles-pp.ksys_read 0.00 +0.0 0.00 +0.0 0.01 ±200% perf-profile.self.cycles-pp.perf_trace_sched_wakeup_template 0.00 +0.0 0.00 +0.0 0.01 ±200% perf-profile.self.cycles-pp.__x64_sys_write 0.00 +0.0 0.00 +0.0 0.01 ±200% perf-profile.self.cycles-pp.wait_consider_task 0.07 ± 5% +0.0 0.07 ± 16% -0.0 0.07 ± 21% perf-profile.self.cycles-pp.dequeue_entity 0.10 +0.0 0.10 ± 19% -0.0 0.10 ± 20% perf-profile.self.cycles-pp.unix_stream_data_wait 0.12 ± 3% +0.0 0.12 ± 6% +0.0 0.12 ± 5% perf-profile.self.cycles-pp.is_vmalloc_addr 0.17 ± 2% +0.0 0.18 ± 2% -0.0 0.17 ± 2% perf-profile.self.cycles-pp.exit_to_user_mode_prepare 0.18 ± 2% +0.0 0.18 ± 22% +0.0 0.18 ± 23% perf-profile.self.cycles-pp.__switch_to 0.40 ± 2% +0.0 0.40 ± 3% +0.0 0.40 ± 3% perf-profile.self.cycles-pp.__list_del_entry_valid 0.09 ± 5% +0.0 0.10 ± 7% -0.0 0.09 ± 4% perf-profile.self.cycles-pp.kmalloc_size_roundup 0.08 ± 6% +0.0 0.08 ± 10% -0.0 0.08 ± 10% perf-profile.self.cycles-pp.unix_stream_read_actor 0.08 +0.0 0.08 ± 25% +0.0 0.08 ± 31% perf-profile.self.cycles-pp.enqueue_task_fair 0.24 ± 6% +0.0 0.24 ± 17% +0.0 0.24 ± 16% perf-profile.self.cycles-pp.__switch_to_asm 0.06 ± 7% +0.0 0.07 ± 7% +0.0 0.06 ± 7% perf-profile.self.cycles-pp.skb_put 0.11 ± 4% +0.0 0.11 ± 9% +0.0 0.11 ± 6% perf-profile.self.cycles-pp.fsnotify_perm 0.06 ± 7% +0.0 0.07 ± 27% +0.0 0.07 ± 31% perf-profile.self.cycles-pp.dequeue_task_fair 0.04 ± 44% +0.0 0.04 
± 45% +0.0 0.04 ± 51% perf-profile.self.cycles-pp.rb_erase 0.52 +0.0 0.53 ± 4% +0.0 0.53 ± 3% perf-profile.self.cycles-pp.entry_SYSCALL_64_after_hwframe 0.19 ± 2% +0.0 0.20 ± 2% +0.0 0.20 ± 3% perf-profile.self.cycles-pp.sock_recvmsg 0.30 +0.0 0.31 ± 4% +0.0 0.30 ± 2% perf-profile.self.cycles-pp.security_file_permission 0.07 ± 5% +0.0 0.08 ± 26% +0.0 0.08 ± 31% perf-profile.self.cycles-pp.prepare_to_wait 0.00 +0.0 0.01 ±223% +0.0 0.00 perf-profile.self.cycles-pp.rcu_note_context_switch 0.09 ± 4% +0.0 0.10 ± 18% +0.0 0.10 ± 21% perf-profile.self.cycles-pp.os_xsave 0.01 ±223% +0.0 0.02 ±142% +0.0 0.02 ±122% perf-profile.self.cycles-pp.put_prev_entity 0.06 ± 6% +0.0 0.07 ± 18% +0.0 0.07 ± 21% perf-profile.self.cycles-pp.schedule 0.12 ± 4% +0.0 0.14 ± 9% +0.0 0.14 ± 6% perf-profile.self.cycles-pp.put_cpu_partial 0.15 ± 2% +0.0 0.16 ± 21% +0.0 0.16 ± 26% perf-profile.self.cycles-pp.__update_load_avg_se 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% perf-profile.self.cycles-pp.migrate_task_rq_fair 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% perf-profile.self.cycles-pp.set_next_entity 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% perf-profile.self.cycles-pp.check_preempt_wakeup 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% perf-profile.self.cycles-pp.__calc_delta 0.00 +0.0 0.02 ±141% +0.0 0.01 ±200% perf-profile.self.cycles-pp.select_task_rq_fair 0.00 +0.0 0.02 ±141% +0.0 0.02 ±122% perf-profile.self.cycles-pp.native_irq_return_iret 0.00 +0.0 0.02 ±141% +0.0 0.02 ±123% perf-profile.self.cycles-pp.pick_next_entity 0.46 ± 2% +0.0 0.47 ± 2% +0.0 0.46 ± 3% perf-profile.self.cycles-pp.__check_heap_object 0.12 ± 9% +0.0 0.13 ± 34% +0.0 0.13 ± 25% perf-profile.self.cycles-pp.___perf_sw_event 0.00 +0.0 0.02 ±142% +0.0 0.01 ±200% perf-profile.self.cycles-pp.__wrgsbase_inactive 0.00 +0.0 0.02 ±142% +0.0 0.02 ±123% perf-profile.self.cycles-pp.finish_task_switch 0.00 +0.0 0.02 ±141% +0.0 0.02 ±123% perf-profile.self.cycles-pp.select_idle_sibling 0.11 ± 6% +0.0 0.13 ± 28% +0.0 0.13 ± 34% perf-profile.self.cycles-pp.__update_load_avg_cfs_rq 0.00 +0.0 0.03 ±102% +0.0 0.03 ±123% perf-profile.self.cycles-pp.native_sched_clock 0.00 +0.0 0.04 ±101% +0.0 0.03 ±122% perf-profile.self.cycles-pp.intel_idle_irq 0.00 +0.0 0.04 ±100% +0.0 0.03 ±122% perf-profile.self.cycles-pp.intel_idle 0.03 ± 70% +0.0 0.08 ± 79% +0.1 0.09 ± 66% perf-profile.self.cycles-pp.available_idle_cpu 0.44 +0.1 0.50 ± 26% +0.0 0.48 ± 27% perf-profile.self.cycles-pp.switch_mm_irqs_off 0.36 ± 3% +0.1 0.42 ± 7% +0.1 0.43 ± 6% perf-profile.self.cycles-pp.get_partial_node 0.92 +0.1 0.98 ± 6% +0.1 0.98 ± 4% perf-profile.self.cycles-pp.___slab_alloc 0.00 +0.1 0.06 ± 79% +0.1 0.06 ± 62% perf-profile.self.cycles-pp.select_idle_cpu 0.31 ± 4% +0.1 0.43 ± 8% +0.1 0.44 ± 7% perf-profile.self.cycles-pp.__unfreeze_partials 0.40 ± 3% +0.1 0.53 ± 42% +0.1 0.52 ± 52% perf-profile.self.cycles-pp.update_load_avg 0.71 ± 3% +0.8 1.52 ± 74% +0.7 1.44 ± 84% perf-profile.self.cycles-pp.update_cfs_group 8.25 ± 11% +10.0 18.22 ± 13% +10.4 18.69 ± 7% perf-profile.self.cycles-pp.native_queued_spin_lock_slowpath 0.01 ±157% +7177.0% 0.74 ±222% -98.0% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.wp_page_copy 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.__alloc_pages.get_zeroed_page.__p4d_alloc.__handle_mm_fault 0.00 -100.0% 0.00 +4e+98% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.__get_user_pages.get_user_pages_remote.get_arg_page.copy_strings 0.07 ±223% +783.0% 0.64 ±222% -89.8% 0.01 ±200% 
perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab 0.10 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.memcg_slab_post_alloc_hook 1.12 ±102% +86.3% 2.09 ± 34% +122.9% 2.50 ± 48% perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 0.36 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.perf_event_mmap_event.perf_event_mmap 0.03 ± 99% -92.0% 0.00 ±143% +2e+05% 64.90 ±199% perf-sched.sch_delay.avg.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap 0.00 ±223% +800.0% 0.00 ±223% +140.0% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity 1.44 ±156% +136.1% 3.40 ± 5% +106.4% 2.97 ± 38% perf-sched.sch_delay.avg.ms.__cond_resched.aa_sk_perm.security_socket_recvmsg.sock_recvmsg.sock_read_iter 2.66 ± 97% +503.2% 16.06 ± 45% +287.0% 10.30 ± 40% perf-sched.sch_delay.avg.ms.__cond_resched.aa_sk_perm.security_socket_sendmsg.sock_write_iter.vfs_write 0.02 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.copy_pte_range.copy_p4d_range.copy_page_range.dup_mmap 0.01 ±223% +982.5% 0.07 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.copy_strings.isra.0.do_execveat_common 2.22 ±216% +744.1% 18.72 ±223% -46.5% 1.19 ±191% perf-sched.sch_delay.avg.ms.__cond_resched.dentry_kill.dput.proc_invalidate_siblings_dcache.release_task 0.06 ±223% +499.7% 0.36 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.[unknown] 0.00 ±223% +1.1e+06% 3.76 ±169% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.__put_user_4 0.00 +5e+101% 0.50 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault._copy_to_user 0.00 ±223% +1300.0% 0.00 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.down_read.acct_collect.do_exit.do_group_exit 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.down_read.do_user_addr_fault.exc_page_fault.asm_exc_page_fault 0.00 +1.5e+102% 1.53 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.down_write.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault 1.47 ±223% -57.3% 0.63 ±222% -99.8% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput 0.00 +5e+99% 0.01 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.down_write.mmap_region.do_mmap.vm_mmap_pgoff 0.45 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.down_write.unlink_anon_vmas.free_pgtables.exit_mmap 0.00 +1.6e+101% 0.16 ±223% +6.2e+100% 0.06 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap 0.01 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.down_write.userfaultfd_set_vm_flags.dup_userfaultfd.dup_mmap 0.82 ±223% +183.8% 2.32 ±142% +267.5% 3.01 ±124% perf-sched.sch_delay.avg.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary 0.00 -100.0% 0.00 +1.6e+102% 1.59 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_interp 0.00 ±223% -100.0% 0.00 -100.0% 0.00 
perf-sched.sch_delay.avg.ms.__cond_resched.dput.__fput.task_work_run.do_exit 0.00 -100.0% 0.00 +2e+98% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.dput.__fput.task_work_run.exit_to_user_mode_loop 0.00 -100.0% 0.00 +1.2e+99% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.dput.open_last_lookups.path_openat.do_filp_open 0.01 ±199% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.dput.path_put.exit_fs.do_exit 0.00 +1.8e+102% 1.77 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.dput.step_into.link_path_walk.part 0.00 +2.3e+102% 2.35 ±161% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open 0.01 ±174% -52.9% 0.00 ±223% -61.2% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit 0.16 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.exit_signals.do_exit.do_group_exit.__x64_sys_exit_group 0.00 ±223% -100.0% 0.00 +33812.0% 0.28 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.khugepaged.kthread.ret_from_fork 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.copy_sighand.copy_process.kernel_clone 0.00 -100.0% 0.00 +7.2e+102% 7.23 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.getname_flags.part.0 0.00 +4.2e+100% 0.04 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_link 0.00 +8e+100% 0.08 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.do_vmi_align_munmap 0.62 ±112% +252.4% 2.19 ± 50% +278.8% 2.36 ± 50% perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 0.01 ±223% +31471.2% 2.74 ±223% +14403.8% 1.26 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock.futex_exec_release.exec_mm_release.exec_mmap 0.00 -100.0% 0.00 +3e+99% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock.futex_exit_release.exit_mm_release.exit_mm 0.49 ±114% +284.9% 1.90 ± 31% +282.8% 1.89 ± 40% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 0.00 +6.5e+101% 0.65 ±223% +1.8e+101% 0.18 ±199% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.__percpu_counter_init.mm_init 0.00 ±223% -100.0% 0.00 -85.0% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.alloc_bprm 0.00 ±223% +17500.0% 0.09 ±223% +580.0% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.put_files_struct.do_exit.do_group_exit.__x64_sys_exit_group 0.00 +1.1e+104% 113.98 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.remove_vma.exit_mmap.__mmput.exit_mm 0.00 ±223% -100.0% 0.00 +140.0% 0.00 ±200% perf-sched.sch_delay.avg.ms.__cond_resched.rmap_walk_anon.migrate_pages_batch.migrate_pages.migrate_misplaced_page 15.56 ±150% +13.8% 17.70 ± 24% +351.5% 70.27 ±138% perf-sched.sch_delay.avg.ms.__cond_resched.shrink_dentry_list.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache 6.45 ±217% -32.5% 4.35 ±131% +69.9% 10.96 ±145% perf-sched.sch_delay.avg.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part 1.29 ±223% -65.1% 0.45 ± 93% +2827.9% 37.82 ±197% perf-sched.sch_delay.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode 0.32 ±132% +747.3% 2.67 ± 51% +267.2% 1.16 ± 89% 
perf-sched.sch_delay.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 3.05 ±223% -98.7% 0.04 ±182% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput 0.00 +7.2e+100% 0.07 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.unmap_vmas.exit_mmap.__mmput.exit_mm 0.37 ±113% +964.0% 3.89 ± 89% +122.1% 0.81 ± 71% perf-sched.sch_delay.avg.ms.__cond_resched.wait_for_unix_gc.unix_stream_sendmsg.sock_write_iter.vfs_write 0.02 ±156% +884.4% 0.15 ±223% +2332.0% 0.36 ±196% perf-sched.sch_delay.avg.ms.__cond_resched.wp_page_copy.__handle_mm_fault.handle_mm_fault.do_user_addr_fault 0.44 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work 17.52 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range 0.00 ±223% +8470.0% 0.14 ±200% -4.0% 0.00 ±145% perf-sched.sch_delay.avg.ms.__cond_resched.zap_pte_range.zap_pmd_range.isra.0 1.25 ±222% -84.4% 0.19 ±129% -61.3% 0.48 ±112% perf-sched.sch_delay.avg.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown] 69.72 ±222% -100.0% 0.03 ±223% -96.9% 2.13 ±117% perf-sched.sch_delay.avg.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64 0.16 ±211% +96.4% 0.31 ±164% -40.6% 0.09 ±182% perf-sched.sch_delay.avg.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep 1.39 ± 84% +1024.5% 15.63 ± 37% +1524.6% 22.58 ± 63% perf-sched.sch_delay.avg.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64 0.75 ± 76% +1937.6% 15.34 ± 79% +1363.8% 11.02 ± 71% perf-sched.sch_delay.avg.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64 0.01 ±125% +46.0% 0.01 ±223% -52.0% 0.00 ±176% perf-sched.sch_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault 2.85 ±172% +171.2% 7.72 ± 68% +469.9% 16.23 ± 89% perf-sched.sch_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt 0.16 ±123% +1748.4% 2.96 ±117% +238.3% 0.54 ±123% perf-sched.sch_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single 5.77 ± 85% +103.9% 11.77 ± 22% +104.9% 11.83 ± 11% perf-sched.sch_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi 0.54 ± 85% +244.9% 1.87 ± 16% +235.3% 1.81 ± 31% perf-sched.sch_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64 0.01 ±223% +9651.7% 1.45 ±223% -100.0% 0.00 perf-sched.sch_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.ret_from_fork 0.10 ± 78% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault 22.92 ±131% -71.5% 6.54 ±101% -12.2% 20.12 ±116% perf-sched.sch_delay.avg.ms.pipe_read.vfs_read.ksys_read.do_syscall_64 21.91 ±223% -6.7% 20.45 ±122% -79.7% 4.45 ± 68% perf-sched.sch_delay.avg.ms.rcu_gp_kthread.kthread.ret_from_fork 0.14 ± 74% +562.8% 0.94 ±183% +198.6% 0.42 ±188% perf-sched.sch_delay.avg.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll 0.29 ±185% -96.1% 0.01 ± 48% -94.6% 0.02 ± 26% perf-sched.sch_delay.avg.ms.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select 106.82 ±142% -76.0% 25.69 ±222% 
-80.4% 20.98 ±191% perf-sched.sch_delay.avg.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait 0.00 -100.0% 0.00 +2.6e+99% 0.00 ±200% perf-sched.sch_delay.avg.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.folio_lock_anon_vma_read 0.01 ±217% -100.0% 0.00 +27.0% 0.02 ±200% perf-sched.sch_delay.avg.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.rmap_walk_anon 0.00 ±115% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.__put_anon_vma 0.12 ±189% +10355.7% 12.08 ±222% +53447.2% 61.85 ±200% perf-sched.sch_delay.avg.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_anon_vmas 0.00 ±143% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.schedule_timeout.__wait_for_common.__flush_work.isra.0 0.03 ± 97% +152.4% 0.08 ±118% +12414.7% 3.90 ±199% perf-sched.sch_delay.avg.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone 13.98 ±223% -99.9% 0.01 ± 13% -99.9% 0.01 ± 10% perf-sched.sch_delay.avg.ms.schedule_timeout.kcompactd.kthread.ret_from_fork 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.avg.ms.schedule_timeout.khugepaged_wait_work.khugepaged.kthread 6.20 ± 75% +91.5% 11.87 ± 47% +309.9% 25.40 ± 96% perf-sched.sch_delay.avg.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread 2.44 ± 72% +256.3% 8.71 ± 25% +289.0% 9.50 ± 7% perf-sched.sch_delay.avg.ms.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter 1.10 ± 83% +184.7% 3.14 ± 19% +208.2% 3.40 ± 35% perf-sched.sch_delay.avg.ms.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg 0.33 ±208% +275.3% 1.24 ±221% +99.7% 0.66 ±196% perf-sched.sch_delay.avg.ms.smpboot_thread_fn.kthread.ret_from_fork 133.81 ±141% -99.8% 0.24 ±223% -98.7% 1.71 ±129% perf-sched.sch_delay.avg.ms.syslog_print.do_syslog.kmsg_read.vfs_read 0.54 ±214% +408.9% 2.77 ±201% -100.0% 0.00 perf-sched.sch_delay.avg.ms.wait_for_partner.fifo_open.do_dentry_open.do_open 3.79 ±117% +528.5% 23.83 ±105% +426.9% 19.98 ±144% perf-sched.sch_delay.avg.ms.worker_thread.kthread.ret_from_fork 0.14 ±158% +2667.4% 3.98 ±222% -98.2% 0.00 ±200% perf-sched.sch_delay.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.wp_page_copy 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.__alloc_pages.get_zeroed_page.__p4d_alloc.__handle_mm_fault 0.00 -100.0% 0.00 +8e+98% 0.00 ±200% perf-sched.sch_delay.max.ms.__cond_resched.__get_user_pages.get_user_pages_remote.get_arg_page.copy_strings 0.34 ±223% +87.5% 0.64 ±222% -93.5% 0.02 ±200% perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab 0.65 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.memcg_slab_post_alloc_hook 1228 ± 93% -7.4% 1137 ± 39% +60.1% 1967 ± 70% perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 1.08 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.perf_event_mmap_event.perf_event_mmap 0.65 ±110% -96.1% 0.03 ±160% +49565.1% 324.56 ±199% perf-sched.sch_delay.max.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap 0.10 ±145% +194.9% 0.29 ±214% -17.5% 0.08 ±171% 
perf-sched.sch_delay.max.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity 625.66 ±199% +64.6% 1029 ± 29% -8.4% 573.27 ± 52% perf-sched.sch_delay.max.ms.__cond_resched.aa_sk_perm.security_socket_recvmsg.sock_recvmsg.sock_read_iter 941.66 ± 78% +178.2% 2619 ± 21% +165.4% 2499 ± 38% perf-sched.sch_delay.max.ms.__cond_resched.aa_sk_perm.security_socket_sendmsg.sock_write_iter.vfs_write 0.02 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.copy_pte_range.copy_p4d_range.copy_page_range.dup_mmap 0.02 ±223% +260.8% 0.07 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.copy_strings.isra.0.do_execveat_common 70.12 ±212% +51.1% 105.92 ±223% -75.2% 17.36 ±195% perf-sched.sch_delay.max.ms.__cond_resched.dentry_kill.dput.proc_invalidate_siblings_dcache.release_task 0.59 ±223% +690.7% 4.65 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.[unknown] 0.00 ±223% +1.1e+06% 3.76 ±169% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.__put_user_4 0.00 +5e+101% 0.50 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault._copy_to_user 0.00 ±223% +685.7% 0.01 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.down_read.acct_collect.do_exit.do_group_exit 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.down_read.do_user_addr_fault.exc_page_fault.asm_exc_page_fault 0.00 +1.5e+102% 1.53 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.down_write.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault 30.86 ±223% -67.5% 10.03 ±223% -99.9% 0.02 ±200% perf-sched.sch_delay.max.ms.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput 0.00 +5.4e+100% 0.05 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.down_write.mmap_region.do_mmap.vm_mmap_pgoff 1.34 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.down_write.unlink_anon_vmas.free_pgtables.exit_mmap 0.00 +8e+101% 0.80 ±223% +6.2e+100% 0.06 ±200% perf-sched.sch_delay.max.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap 0.01 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.down_write.userfaultfd_set_vm_flags.dup_userfaultfd.dup_mmap 4.09 ±223% +387.7% 19.96 ±136% +571.1% 27.46 ±149% perf-sched.sch_delay.max.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary 0.00 -100.0% 0.00 +3.2e+102% 3.19 ±200% perf-sched.sch_delay.max.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_interp 0.02 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.dput.__fput.task_work_run.do_exit 0.00 -100.0% 0.00 +1.2e+99% 0.00 ±200% perf-sched.sch_delay.max.ms.__cond_resched.dput.__fput.task_work_run.exit_to_user_mode_loop 0.00 -100.0% 0.00 +1.2e+99% 0.00 ±200% perf-sched.sch_delay.max.ms.__cond_resched.dput.open_last_lookups.path_openat.do_filp_open 0.02 ±171% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.dput.path_put.exit_fs.do_exit 0.00 +5.3e+102% 5.32 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.dput.step_into.link_path_walk.part 0.00 +9.4e+102% 9.38 ±161% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open 0.15 ±196% -87.7% 0.02 ±223% -80.4% 0.03 ±200% perf-sched.sch_delay.max.ms.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit 0.16 
±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.exit_signals.do_exit.do_group_exit.__x64_sys_exit_group 0.00 ±223% -100.0% 0.00 +33812.0% 0.28 ±200% perf-sched.sch_delay.max.ms.__cond_resched.khugepaged.kthread.ret_from_fork 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.copy_sighand.copy_process.kernel_clone 0.00 -100.0% 0.00 +2.9e+103% 28.93 ±200% perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.getname_flags.part.0 0.00 +4.2e+100% 0.04 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_link 0.00 +4.8e+101% 0.48 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.do_vmi_align_munmap 624.75 ±118% +119.9% 1373 ± 75% +94.4% 1214 ± 78% perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 0.01 ±223% +31471.2% 2.74 ±223% +28905.4% 2.51 ±200% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock.futex_exec_release.exec_mm_release.exec_mmap 0.00 -100.0% 0.00 +1.2e+100% 0.01 ±200% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock.futex_exit_release.exit_mm_release.exit_mm 113.31 ±164% +511.1% 692.48 ± 32% +485.2% 663.04 ± 47% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 0.00 +6.5e+101% 0.65 ±223% +1.8e+101% 0.18 ±193% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.__percpu_counter_init.mm_init 0.00 ±223% -100.0% 0.00 -70.0% 0.00 ±200% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.alloc_bprm 0.02 ±223% +3059.8% 0.62 ±223% -21.0% 0.02 ±200% perf-sched.sch_delay.max.ms.__cond_resched.put_files_struct.do_exit.do_group_exit.__x64_sys_exit_group 0.00 +1.1e+104% 113.98 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.remove_vma.exit_mmap.__mmput.exit_mm 0.01 ±223% -100.0% 0.00 -6.3% 0.01 ±200% perf-sched.sch_delay.max.ms.__cond_resched.rmap_walk_anon.migrate_pages_batch.migrate_pages.migrate_misplaced_page 1312 ±104% -68.1% 419.35 ± 65% -25.8% 974.56 ± 57% perf-sched.sch_delay.max.ms.__cond_resched.shrink_dentry_list.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache 25.63 ±218% -25.1% 19.21 ±103% +64.0% 42.03 ± 97% perf-sched.sch_delay.max.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part 12.42 ±223% -70.2% 3.70 ± 86% +3850.4% 490.54 ±197% perf-sched.sch_delay.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode 36.52 ±137% +1666.2% 645.04 ± 66% +495.3% 217.43 ± 98% perf-sched.sch_delay.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 45.78 ±223% -99.3% 0.33 ±182% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput 0.00 +2.1e+101% 0.21 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.unmap_vmas.exit_mmap.__mmput.exit_mm 49.98 ±111% +1263.8% 681.67 ±102% +37.1% 68.52 ± 67% perf-sched.sch_delay.max.ms.__cond_resched.wait_for_unix_gc.unix_stream_sendmsg.sock_write_iter.vfs_write 0.23 ±178% +1143.8% 2.80 ±223% +1742.3% 4.15 ±197% perf-sched.sch_delay.max.ms.__cond_resched.wp_page_copy.__handle_mm_fault.handle_mm_fault.do_user_addr_fault 0.45 ±223% -100.0% 0.00 -100.0% 0.00 
perf-sched.sch_delay.max.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work 17.52 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range 0.09 ±210% +5466.4% 4.89 ±206% -65.4% 0.03 ±124% perf-sched.sch_delay.max.ms.__cond_resched.zap_pte_range.zap_pmd_range.isra.0 1.25 ±222% -77.2% 0.28 ±143% -59.9% 0.50 ±111% perf-sched.sch_delay.max.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown] 696.86 ±222% -100.0% 0.06 ±223% -99.7% 2.18 ±112% perf-sched.sch_delay.max.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64 0.31 ±216% +28.1% 0.40 ±135% -69.5% 0.09 ±182% perf-sched.sch_delay.max.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep 698.60 ±110% +108.6% 1457 ± 61% +175.7% 1925 ± 32% perf-sched.sch_delay.max.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64 95.16 ± 93% +995.8% 1042 ± 83% +416.4% 491.38 ± 65% perf-sched.sch_delay.max.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64 0.29 ±130% +14.5% 0.33 ±223% -84.1% 0.05 ±123% perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault 178.31 ±206% +188.7% 514.72 ± 81% +624.5% 1291 ±106% perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt 19.65 ±139% +414.3% 101.08 ±145% -72.5% 5.40 ±119% perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single 1070 ± 94% +51.5% 1621 ± 22% +41.1% 1511 ± 44% perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi 2308 ± 72% +70.2% 3930 ± 11% +63.0% 3762 ± 39% perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64 0.01 ±223% +9651.7% 1.45 ±223% -100.0% 0.00 perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.ret_from_fork 2.56 ±166% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault 1723 ± 99% -10.9% 1535 ±134% -9.1% 1565 ± 97% perf-sched.sch_delay.max.ms.pipe_read.vfs_read.ksys_read.do_syscall_64 87.59 ±223% +542.6% 562.84 ±137% +8.7% 95.22 ± 83% perf-sched.sch_delay.max.ms.rcu_gp_kthread.kthread.ret_from_fork 14.77 ±165% -86.6% 1.97 ±162% -96.9% 0.46 ±171% perf-sched.sch_delay.max.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll 3.23 ±213% -99.6% 0.01 ± 58% -99.5% 0.02 ± 23% perf-sched.sch_delay.max.ms.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select 1330 ±141% -96.1% 51.64 ±221% -95.3% 63.01 ±191% perf-sched.sch_delay.max.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait 0.00 -100.0% 0.00 +2.6e+99% 0.00 ±200% perf-sched.sch_delay.max.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.folio_lock_anon_vma_read 0.03 ±214% -100.0% 0.00 +40.7% 0.04 ±200% perf-sched.sch_delay.max.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.rmap_walk_anon 0.00 ±118% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.__put_anon_vma 2.97 ±159% +5136.6% 155.69 ±223% +24861.7% 742.15 ±200% perf-sched.sch_delay.max.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_anon_vmas 0.00 ±147% -100.0% 0.00 
-100.0% 0.00 perf-sched.sch_delay.max.ms.schedule_timeout.__wait_for_common.__flush_work.isra.0 0.25 ±139% +178.8% 0.70 ±125% +4544.2% 11.70 ±199% perf-sched.sch_delay.max.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone 83.85 ±223% -100.0% 0.02 ± 33% -100.0% 0.01 ± 28% perf-sched.sch_delay.max.ms.schedule_timeout.kcompactd.kthread.ret_from_fork 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.sch_delay.max.ms.schedule_timeout.khugepaged_wait_work.khugepaged.kthread 1152 ± 86% +17.1% 1350 ± 60% +67.1% 1925 ± 52% perf-sched.sch_delay.max.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread 810.39 ± 72% +119.9% 1781 ± 21% +164.8% 2145 ± 38% perf-sched.sch_delay.max.ms.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter 2251 ± 73% +60.1% 3605 ± 15% +54.6% 3479 ± 44% perf-sched.sch_delay.max.ms.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg 193.53 ±220% +54.1% 298.18 ±223% +6.3% 205.81 ±199% perf-sched.sch_delay.max.ms.smpboot_thread_fn.kthread.ret_from_fork 1329 ±141% -100.0% 0.48 ±223% -99.8% 2.19 ±140% perf-sched.sch_delay.max.ms.syslog_print.do_syslog.kmsg_read.vfs_read 3.71 ±219% +173.1% 10.14 ±199% -100.0% 0.00 perf-sched.sch_delay.max.ms.wait_for_partner.fifo_open.do_dentry_open.do_open 1485 ±118% +75.0% 2599 ± 52% -25.2% 1111 ±122% perf-sched.sch_delay.max.ms.worker_thread.kthread.ret_from_fork 1.03 ± 84% +246.0% 3.55 ± 18% +264.8% 3.74 ± 27% perf-sched.total_sch_delay.average.ms 2527 ± 72% +69.8% 4291 ± 18% +61.9% 4093 ± 27% perf-sched.total_sch_delay.max.ms 4.29 ± 80% +195.8% 12.70 ± 16% +208.8% 13.26 ± 26% perf-sched.total_wait_and_delay.average.ms 2044832 ± 85% +15.6% 2363513 ± 19% +18.8% 2430279 ± 33% perf-sched.total_wait_and_delay.count.ms 4763 ± 73% +72.4% 8212 ± 11% +72.2% 8202 ± 27% perf-sched.total_wait_and_delay.max.ms 3.27 ± 80% +180.0% 9.15 ± 16% +191.3% 9.51 ± 26% perf-sched.total_wait_time.average.ms 3235 ± 70% +66.9% 5398 ± 17% +69.8% 5492 ± 13% perf-sched.total_wait_time.max.ms 1.43 ±223% +187.5% 4.11 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.shmem_alloc_folio 12.32 ±223% -100.0% 0.00 -77.9% 2.72 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab 13.18 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.memcg_slab_post_alloc_hook 2.47 ±141% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 1.58 ±223% +1540.6% 25.86 ±223% +8137.9% 129.83 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap 10.78 ± 71% +87.5% 20.21 ± 28% +70.9% 18.42 ± 65% perf-sched.wait_and_delay.avg.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity 2.62 ±223% +222.3% 8.45 ± 71% +97.0% 5.16 ±124% perf-sched.wait_and_delay.avg.ms.__cond_resched.aa_sk_perm.security_socket_recvmsg.sock_recvmsg.sock_read_iter 11.90 ± 83% +319.8% 49.94 ± 34% +284.5% 45.74 ± 33% perf-sched.wait_and_delay.avg.ms.__cond_resched.aa_sk_perm.security_socket_sendmsg.sock_write_iter.vfs_write 4.50 ±223% +739.6% 37.74 ±223% -47.6% 2.35 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.dentry_kill.dput.proc_invalidate_siblings_dcache.release_task 0.00 +2.9e+102% 2.90 ±223% -100.0% 0.00 
perf-sched.wait_and_delay.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.__put_user_4 3.03 ±223% +626.2% 22.04 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput 0.00 -100.0% 0.00 +2e+104% 203.87 ±199% perf-sched.wait_and_delay.avg.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap 0.00 -100.0% 0.00 +1.8e+102% 1.76 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary 4.88 ±187% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.dput.__fput.task_work_run.exit_to_user_mode_loop 0.00 +1.8e+102% 1.78 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.dput.step_into.link_path_walk.part 0.00 +1.7e+102% 1.73 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open 0.00 +4.6e+103% 45.91 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit 37.98 ±177% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.vfs_write 0.00 -100.0% 0.00 +7.2e+102% 7.23 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.kmem_cache_alloc.getname_flags.part.0 1.07 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.mprotect_fixup 4.34 ±120% -47.4% 2.28 ±223% +2.2% 4.44 ±130% perf-sched.wait_and_delay.avg.ms.__cond_resched.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 1.16 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.mutex_lock.__fdget_pos.ksys_write.do_syscall_64 0.00 +2.7e+102% 2.74 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.mutex_lock.futex_exec_release.exec_mm_release.exec_mmap 3.09 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.mutex_lock.perf_poll.do_poll.constprop 1.74 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.mutex_lock.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 0.00 -100.0% 0.00 +2.5e+104% 253.89 ±199% perf-sched.wait_and_delay.avg.ms.__cond_resched.put_files_struct.do_exit.do_group_exit.__x64_sys_exit_group 0.00 +2.3e+104% 230.51 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.remove_vma.exit_mmap.__mmput.exit_mm 183.84 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.rmap_walk_anon.try_to_migrate.migrate_folio_unmap.migrate_pages_batch 34.77 ±134% +41.6% 49.22 ± 22% +364.8% 161.59 ±144% perf-sched.wait_and_delay.avg.ms.__cond_resched.shrink_dentry_list.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache 2.84 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.stop_one_cpu.migrate_task_to.task_numa_migrate.isra 6.29 ±223% -54.7% 2.85 ±223% +35.7% 8.54 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part 2.88 ±223% +146.4% 7.10 ±223% +2507.2% 75.17 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode 2.91 ±223% -17.2% 2.41 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 8.23 ±164% -100.0% 0.00 -100.0% 0.00 
perf-sched.wait_and_delay.avg.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput 0.00 +1.2e+103% 11.69 ±101% +3.2e+102% 3.17 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.wait_for_unix_gc.unix_stream_sendmsg.sock_write_iter.vfs_write 337.44 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work 17.52 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range 19.32 ±212% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.zap_pte_range.zap_pmd_range.isra.0 417.87 ±222% +19.8% 500.43 ±152% -28.1% 300.29 ±133% perf-sched.wait_and_delay.avg.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown] 135.47 ±140% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64 750.58 ±142% -37.1% 472.40 ±116% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep 10.22 ± 85% +445.2% 55.73 ± 31% +947.7% 107.10 ±111% perf-sched.wait_and_delay.avg.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64 5.42 ±119% +921.9% 55.34 ± 46% +806.6% 49.09 ± 84% perf-sched.wait_and_delay.avg.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64 6.16 ±118% +480.3% 35.75 ± 25% +707.3% 49.73 ± 60% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt 0.00 +1e+103% 10.48 ±103% +1e+103% 9.97 ± 91% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single 20.62 ± 82% +109.6% 43.23 ± 23% +108.7% 43.05 ± 11% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi 2.07 ± 80% +221.0% 6.66 ± 14% +218.8% 6.61 ± 28% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64 1.33 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault 140.85 ±118% -64.8% 49.63 ± 64% -29.5% 99.31 ±101% perf-sched.wait_and_delay.avg.ms.pipe_read.vfs_read.ksys_read.do_syscall_64 53.08 ±223% -18.2% 43.42 ±125% -84.0% 8.51 ±122% perf-sched.wait_and_delay.avg.ms.rcu_gp_kthread.kthread.ret_from_fork 3.97 ±141% +1790.8% 75.14 ±142% +16275.4% 650.73 ±181% perf-sched.wait_and_delay.avg.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll 193.74 ± 71% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select 309.14 ±105% +27.3% 393.45 ± 80% +103.4% 628.77 ± 68% perf-sched.wait_and_delay.avg.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait 15.08 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.rmap_walk_anon 1.06 ±223% +2310.1% 25.50 ±223% +12976.7% 138.37 ±200% perf-sched.wait_and_delay.avg.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_anon_vmas 0.00 +7.8e+103% 78.37 ±216% +5.7e+103% 56.51 ± 85% perf-sched.wait_and_delay.avg.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone 901.76 ± 73% -20.5% 716.69 ± 61% +19.6% 1078 ± 38% 
perf-sched.wait_and_delay.avg.ms.schedule_timeout.kcompactd.kthread.ret_from_fork 16.69 ± 73% +75.5% 29.30 ± 51% +262.9% 60.57 ± 88% perf-sched.wait_and_delay.avg.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread 16.73 ± 72% +88.0% 31.45 ± 22% +101.0% 33.63 ± 7% perf-sched.wait_and_delay.avg.ms.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter 3.93 ± 79% +177.4% 10.90 ± 18% +199.8% 11.77 ± 35% perf-sched.wait_and_delay.avg.ms.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg 633.44 ± 79% -6.2% 594.26 ± 18% -11.4% 561.37 ± 13% perf-sched.wait_and_delay.avg.ms.smpboot_thread_fn.kthread.ret_from_fork 199.31 ±148% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.syslog_print.do_syslog.kmsg_read.vfs_read 0.00 +2.5e+102% 2.54 ±223% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.wait_for_partner.fifo_open.do_dentry_open.do_open 306.11 ± 71% +109.5% 641.23 ± 17% +89.5% 579.99 ± 15% perf-sched.wait_and_delay.avg.ms.worker_thread.kthread.ret_from_fork 0.67 ±223% +775.0% 5.83 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.shmem_alloc_folio 1.67 ±223% -100.0% 0.00 -88.0% 0.20 ±200% perf-sched.wait_and_delay.count.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab 2.67 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.memcg_slab_post_alloc_hook 933.50 ±147% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 5.17 ±223% -58.1% 2.17 ±223% -80.6% 1.00 ±200% perf-sched.wait_and_delay.count.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap 174.33 ± 73% +54.2% 268.83 ± 26% -2.8% 169.40 ± 56% perf-sched.wait_and_delay.count.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity 209.50 ±223% +567.4% 1398 ± 77% +306.3% 851.20 ±125% perf-sched.wait_and_delay.count.__cond_resched.aa_sk_perm.security_socket_recvmsg.sock_recvmsg.sock_read_iter 533.17 ± 91% +19.4% 636.67 ± 28% +12.7% 601.00 ± 37% perf-sched.wait_and_delay.count.__cond_resched.aa_sk_perm.security_socket_sendmsg.sock_write_iter.vfs_write 10.00 ±223% -90.0% 1.00 ±223% -66.0% 3.40 ±200% perf-sched.wait_and_delay.count.__cond_resched.dentry_kill.dput.proc_invalidate_siblings_dcache.release_task 0.00 +1.7e+101% 0.17 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.__put_user_4 3.50 ±223% -47.6% 1.83 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput 0.00 -100.0% 0.00 +2e+101% 0.20 ±200% perf-sched.wait_and_delay.count.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap 0.00 -100.0% 0.00 +2.8e+102% 2.80 ±200% perf-sched.wait_and_delay.count.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary 0.50 ±152% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.dput.__fput.task_work_run.exit_to_user_mode_loop 0.00 +5e+101% 0.50 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.dput.step_into.link_path_walk.part 0.00 +6.7e+101% 0.67 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.dput.terminate_walk.path_openat.do_filp_open 0.00 +1.5e+102% 1.50 ±223% -100.0% 0.00 
perf-sched.wait_and_delay.count.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit 5.17 ±150% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.vfs_write 0.00 -100.0% 0.00 +8e+101% 0.80 ±200% perf-sched.wait_and_delay.count.__cond_resched.kmem_cache_alloc.getname_flags.part.0 0.33 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.mprotect_fixup 541.83 ±112% -54.8% 244.67 ±223% +46.3% 792.80 ±125% perf-sched.wait_and_delay.count.__cond_resched.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 0.17 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.mutex_lock.__fdget_pos.ksys_write.do_syscall_64 0.00 +1.7e+101% 0.17 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.mutex_lock.futex_exec_release.exec_mm_release.exec_mmap 1.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.mutex_lock.perf_poll.do_poll.constprop 77.83 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.mutex_lock.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 0.00 -100.0% 0.00 +8e+101% 0.80 ±200% perf-sched.wait_and_delay.count.__cond_resched.put_files_struct.do_exit.do_group_exit.__x64_sys_exit_group 0.00 +1.7e+101% 0.17 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.remove_vma.exit_mmap.__mmput.exit_mm 0.50 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.rmap_walk_anon.try_to_migrate.migrate_folio_unmap.migrate_pages_batch 112.83 ±100% -63.2% 41.50 ± 37% -48.2% 58.40 ± 88% perf-sched.wait_and_delay.count.__cond_resched.shrink_dentry_list.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache 0.17 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.stop_one_cpu.migrate_task_to.task_numa_migrate.isra 0.67 ±223% +100.0% 1.33 ±223% +50.0% 1.00 ±200% perf-sched.wait_and_delay.count.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part 1.67 ±223% +80.0% 3.00 ±223% +56.0% 2.60 ±200% perf-sched.wait_and_delay.count.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode 41.17 ±223% +82.6% 75.17 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 5.83 ±143% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput 0.00 +1.1e+104% 108.50 ±100% +3.5e+103% 35.20 ±200% perf-sched.wait_and_delay.count.__cond_resched.wait_for_unix_gc.unix_stream_sendmsg.sock_write_iter.vfs_write 0.33 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work 0.17 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.zap_pmd_range.isra.0.unmap_page_range 13.67 ±150% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.zap_pte_range.zap_pmd_range.isra.0 0.50 ±152% +33.3% 0.67 ±141% +100.0% 1.00 ±126% perf-sched.wait_and_delay.count.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown] 4.83 ±100% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.devkmsg_read.vfs_read.ksys_read.do_syscall_64 0.67 ±141% +100.0% 1.33 ±103% -100.0% 0.00 
perf-sched.wait_and_delay.count.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep 738.17 ± 75% -66.2% 249.83 ± 36% -59.8% 296.60 ± 51% perf-sched.wait_and_delay.count.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64 169.67 ±102% -44.4% 94.33 ± 25% -69.5% 51.80 ± 60% perf-sched.wait_and_delay.count.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64 52.50 ±101% +207.0% 161.17 ± 25% +212.4% 164.00 ± 38% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt 0.00 +1.5e+103% 14.67 ±142% +2e+103% 19.80 ±146% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single 610.50 ± 71% +282.4% 2334 ± 24% +233.6% 2036 ± 24% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi 528932 ±100% -6.8% 492884 ± 16% +1.2% 535264 ± 31% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64 24.67 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault 132.50 ±118% +186.8% 380.00 ± 39% +262.0% 479.60 ±136% perf-sched.wait_and_delay.count.pipe_read.vfs_read.ksys_read.do_syscall_64 0.67 ±223% +1850.0% 13.00 ±109% +1460.0% 10.40 ±122% perf-sched.wait_and_delay.count.rcu_gp_kthread.kthread.ret_from_fork 477.00 ±145% -99.7% 1.50 ±142% -99.7% 1.20 ±133% perf-sched.wait_and_delay.count.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll 9.33 ± 71% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select 6.00 ±100% -47.2% 3.17 ± 64% -50.0% 3.00 ± 51% perf-sched.wait_and_delay.count.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait 0.33 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.rmap_walk_anon 6.83 ±223% -68.3% 2.17 ±223% -64.9% 2.40 ±200% perf-sched.wait_and_delay.count.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_anon_vmas 0.00 +4.7e+102% 4.67 ±149% +8.8e+102% 8.80 ± 77% perf-sched.wait_and_delay.count.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone 3.50 ± 73% +4.8% 3.67 ± 56% +20.0% 4.20 ± 27% perf-sched.wait_and_delay.count.schedule_timeout.kcompactd.kthread.ret_from_fork 208.00 ± 73% +8.7% 226.00 ± 54% +11.8% 232.60 ± 53% perf-sched.wait_and_delay.count.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread 77040 ± 84% +290.0% 300458 ± 28% +247.7% 267893 ± 27% perf-sched.wait_and_delay.count.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter 1428129 ± 83% +8.9% 1554630 ± 21% +13.0% 1613199 ± 35% perf-sched.wait_and_delay.count.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg 377.83 ± 71% -10.5% 338.17 ± 26% -26.5% 277.80 ± 15% perf-sched.wait_and_delay.count.smpboot_thread_fn.kthread.ret_from_fork 4.83 ±100% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.count.syslog_print.do_syslog.kmsg_read.vfs_read 0.00 +1e+102% 1.00 ±223% -100.0% 0.00 perf-sched.wait_and_delay.count.wait_for_partner.fifo_open.do_dentry_open.do_open 660.33 ± 71% -46.6% 352.33 ± 26% -47.5% 346.60 ± 55% perf-sched.wait_and_delay.count.worker_thread.kthread.ret_from_fork 5.69 ±223% +1786.2% 107.37 ±223% -100.0% 0.00 
perf-sched.wait_and_delay.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.shmem_alloc_folio 38.51 ±223% -100.0% 0.00 -92.9% 2.72 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab 38.99 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.memcg_slab_post_alloc_hook 1688 ±153% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 42.49 ±223% +355.4% 193.51 ±223% +1427.3% 648.91 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap 833.48 ± 71% +114.3% 1785 ± 39% +21.4% 1011 ± 50% perf-sched.wait_and_delay.max.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity 1137 ±223% +45.7% 1657 ± 72% -54.9% 512.98 ±126% perf-sched.wait_and_delay.max.ms.__cond_resched.aa_sk_perm.security_socket_recvmsg.sock_recvmsg.sock_read_iter 2109 ± 72% +149.7% 5266 ± 21% +137.5% 5008 ± 38% perf-sched.wait_and_delay.max.ms.__cond_resched.aa_sk_perm.security_socket_sendmsg.sock_write_iter.vfs_write 140.82 ±223% +50.7% 212.22 ±223% -75.8% 34.10 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.dentry_kill.dput.proc_invalidate_siblings_dcache.release_task 0.00 +2.9e+102% 2.90 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.__put_user_4 63.53 ±223% +281.0% 242.03 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput 0.00 -100.0% 0.00 +2e+104% 203.87 ±199% perf-sched.wait_and_delay.max.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap 0.00 -100.0% 0.00 +2.1e+103% 21.12 ±199% perf-sched.wait_and_delay.max.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary 5.59 ±165% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.dput.__fput.task_work_run.exit_to_user_mode_loop 0.00 +5.3e+102% 5.32 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.dput.step_into.link_path_walk.part 0.00 +6.8e+102% 6.81 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open 0.00 +4.1e+104% 412.86 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit 469.71 ±159% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.vfs_write 0.00 -100.0% 0.00 +2.9e+103% 28.93 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.kmem_cache_alloc.getname_flags.part.0 2.14 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.mprotect_fixup 1257 ±115% -61.7% 481.03 ±223% -32.8% 845.01 ±150% perf-sched.wait_and_delay.max.ms.__cond_resched.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 1.16 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.mutex_lock.__fdget_pos.ksys_write.do_syscall_64 0.00 +2.7e+102% 2.74 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.mutex_lock.futex_exec_release.exec_mm_release.exec_mmap 15.68 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.mutex_lock.perf_poll.do_poll.constprop 610.19 
±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.mutex_lock.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 0.00 -100.0% 0.00 +1e+105% 1015 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.put_files_struct.do_exit.do_group_exit.__x64_sys_exit_group 0.00 +2.3e+104% 230.51 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.remove_vma.exit_mmap.__mmput.exit_mm 551.38 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.rmap_walk_anon.try_to_migrate.migrate_folio_unmap.migrate_pages_batch 2935 ± 91% -67.2% 962.97 ± 55% -28.4% 2101 ± 75% perf-sched.wait_and_delay.max.ms.__cond_resched.shrink_dentry_list.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache 2.84 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.stop_one_cpu.migrate_task_to.task_numa_migrate.isra 25.17 ±223% -65.0% 8.80 ±223% -3.0% 24.41 ±199% perf-sched.wait_and_delay.max.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part 26.19 ±223% +367.4% 122.42 ±223% +3611.4% 972.07 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode 534.68 ±223% -29.1% 379.11 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 133.51 ±155% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput 0.00 +1.4e+105% 1442 ±105% +2.7e+104% 272.96 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.wait_for_unix_gc.unix_stream_sendmsg.sock_write_iter.vfs_write 672.17 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work 17.52 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range 578.84 ±204% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.zap_pte_range.zap_pmd_range.isra.0 834.50 ±223% +19.9% 1000 ±152% -28.0% 600.43 ±133% perf-sched.wait_and_delay.max.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown] 1329 ±141% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64 1500 ±142% -44.5% 833.60 ±128% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep 2325 ± 74% +33.1% 3094 ± 55% +90.5% 4428 ± 49% perf-sched.wait_and_delay.max.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64 1211 ±127% +95.7% 2369 ± 65% +2.6% 1242 ± 65% perf-sched.wait_and_delay.max.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64 266.22 ±135% +472.7% 1524 ± 37% +917.1% 2707 ± 99% perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt 0.00 +1.6e+104% 164.50 ±123% +2e+104% 197.56 ±158% perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single 2440 ± 76% +43.1% 3491 ± 26% +33.0% 3246 ± 39% perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi 4626 ± 72% +71.9% 7951 ± 11% +63.3% 7553 ± 38% 
perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64 53.25 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault 2977 ± 71% +40.5% 4183 ± 32% +21.8% 3625 ± 23% perf-sched.wait_and_delay.max.ms.pipe_read.vfs_read.ksys_read.do_syscall_64 203.83 ±223% +462.6% 1146 ±144% -28.3% 146.14 ±133% perf-sched.wait_and_delay.max.ms.rcu_gp_kthread.kthread.ret_from_fork 720.60 ±204% -53.7% 333.35 ±141% +94.5% 1401 ±166% perf-sched.wait_and_delay.max.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll 2651 ± 70% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select 2249 ±100% -45.9% 1217 ±107% -6.7% 2099 ± 78% perf-sched.wait_and_delay.max.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait 30.16 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.rmap_walk_anon 39.33 ±223% +708.9% 318.16 ±223% +4097.3% 1650 ±200% perf-sched.wait_and_delay.max.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_anon_vmas 0.00 +7.6e+104% 757.56 ±219% +4.9e+104% 492.18 ±106% perf-sched.wait_and_delay.max.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone 2940 ± 72% -40.0% 1763 ± 51% -0.6% 2923 ± 36% perf-sched.wait_and_delay.max.ms.schedule_timeout.kcompactd.kthread.ret_from_fork 2407 ± 85% +15.5% 2779 ± 62% +61.9% 3897 ± 52% perf-sched.wait_and_delay.max.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread 1636 ± 72% +119.1% 3585 ± 21% +165.8% 4348 ± 37% perf-sched.wait_and_delay.max.ms.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter 4470 ± 73% +62.2% 7252 ± 15% +57.9% 7059 ± 43% perf-sched.wait_and_delay.max.ms.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg 3296 ± 72% +54.2% 5084 ± 25% +57.9% 5205 ± 21% perf-sched.wait_and_delay.max.ms.smpboot_thread_fn.kthread.ret_from_fork 2025 ±155% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_and_delay.max.ms.syslog_print.do_syslog.kmsg_read.vfs_read 0.00 +9.2e+102% 9.20 ±223% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.wait_for_partner.fifo_open.do_dentry_open.do_open 3621 ± 75% +57.4% 5702 ± 29% +54.9% 5611 ± 9% perf-sched.wait_and_delay.max.ms.worker_thread.kthread.ret_from_fork 0.00 +1.3e+102% 1.32 ±217% +1.2e+100% 0.01 ± 85% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.do_anonymous_page 0.00 +8.2e+99% 0.01 ±192% +7.4e+100% 0.07 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.do_cow_fault 1.75 ±176% +197.3% 5.20 ±168% -60.9% 0.68 ± 53% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.shmem_alloc_folio 0.10 ± 88% +1050.8% 1.19 ±128% +682.3% 0.81 ±126% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.wp_page_copy 0.00 +2.8e+99% 0.00 ±150% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__get_free_pages.pgd_alloc.mm_init 0.00 +5.1e+100% 0.05 ±211% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__pmd_alloc.__handle_mm_fault.handle_mm_fault 0.00 +1.5e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__pmd_alloc.move_page_tables.shift_arg_pages 0.03 ±223% -100.0% 0.00 -100.0% 0.00 
perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.allocate_slab.___slab_alloc.constprop 0.00 -100.0% 0.00 +2.2e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.get_zeroed_page.__p4d_alloc.__handle_mm_fault 0.00 +1.4e+100% 0.01 ±187% +7.4e+99% 0.01 ±142% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.get_zeroed_page.__pud_alloc.__handle_mm_fault 0.00 +7.2e+99% 0.01 ±145% +9.8e+99% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.get_zeroed_page.__pud_alloc.alloc_new_pud 0.00 -100.0% 0.00 +1.1e+100% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.pipe_write.vfs_write.ksys_write 0.00 ±223% +241.7% 0.01 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.pte_alloc_one.__do_fault.do_cow_fault 0.00 +2.7e+99% 0.00 ±223% +1.5e+100% 0.02 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.pte_alloc_one.__do_fault.do_read_fault 0.01 ±205% +139.4% 0.03 ±159% -20.6% 0.01 ±127% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.pte_alloc_one.__pte_alloc.do_anonymous_page 0.00 ±223% +1533.3% 0.02 ±141% +3080.0% 0.03 ±124% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.pte_alloc_one.__pte_alloc.move_page_tables 0.00 +9.2e+100% 0.09 ±220% +8.2e+100% 0.08 ±166% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.pte_alloc_one.do_read_fault.do_fault 0.00 +7.2e+100% 0.07 ±144% +8.4e+99% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault.handle_mm_fault 0.01 ±159% +216.3% 0.03 ±160% -46.1% 0.00 ±129% perf-sched.wait_time.avg.ms.__cond_resched.__anon_vma_prepare.do_cow_fault.do_fault.__handle_mm_fault 0.31 ±203% +107.2% 0.63 ± 61% +981.0% 3.31 ± 89% perf-sched.wait_time.avg.ms.__cond_resched.__do_fault.do_read_fault.do_fault.__handle_mm_fault 0.00 -100.0% 0.00 +1.1e+101% 0.11 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__fput.task_work_run.do_exit.do_group_exit 0.01 ±223% +435.7% 0.04 ±130% -17.1% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__get_user_pages.get_user_pages_remote.get_arg_page.copy_string_kernel 0.00 +4.8e+100% 0.05 ±110% +4.7e+100% 0.05 ±117% perf-sched.wait_time.avg.ms.__cond_resched.__get_user_pages.get_user_pages_remote.get_arg_page.copy_strings 0.03 ±150% +4450.9% 1.28 ±142% +1767.5% 0.53 ±167% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.inotify_handle_inode_event.send_to_group 0.00 +7.5e+101% 0.75 ±222% +1.4e+101% 0.14 ±181% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.load_elf_phdrs.load_elf_binary 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.perf_read.vfs_read 0.00 +5.8e+99% 0.01 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.security_prepare_creds.prepare_creds 0.01 ±155% -85.4% 0.00 ±223% +62.5% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.alloc_cpumask_var_node.__sched_setaffinity 0.00 +8.3e+99% 0.01 ±179% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.alloc_cpumask_var_node.__x64_sys_sched_setaffinity 12.25 ±223% -90.9% 1.12 ±164% -77.1% 2.80 ±192% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab 13.08 ±223% -100.0% 0.00 -100.0% 0.00 ±200% 
perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.memcg_slab_post_alloc_hook 0.01 ± 90% +591.6% 0.10 ±198% -16.1% 0.01 ±123% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.sched_setaffinity.__x64_sys_sched_setaffinity 0.00 -100.0% 0.00 +5.9e+100% 0.06 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.seq_read_iter.vfs_read 3.12 ± 83% +95.9% 6.11 ± 33% +113.4% 6.66 ± 44% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb 0.00 +1.5e+99% 0.00 ±223% +2.6e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.alloc_bprm.do_execveat_common 0.01 ±121% +4656.4% 0.31 ± 97% +16201.5% 1.06 ±188% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.perf_event_mmap_event.perf_event_mmap 0.00 ±223% -8.3% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.proc_pid_readlink.vfs_readlink 0.02 ±134% +44.2% 0.02 ±208% -65.9% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.task_numa_work.task_work_run 0.00 +1.7e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter 1.82 ±188% +1329.6% 26.06 ±221% +3479.0% 65.24 ±198% perf-sched.wait_time.avg.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap 0.00 -100.0% 0.00 +1.2e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.unmap_region 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__vmalloc_area_node.__vmalloc_node_range.alloc_thread_stack_node.dup_task_struct 10.78 ± 71% +87.5% 20.20 ± 28% +97.9% 21.33 ± 39% perf-sched.wait_time.avg.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity 2.51 ±126% +273.9% 9.37 ± 8% +215.7% 7.92 ± 27% perf-sched.wait_time.avg.ms.__cond_resched.aa_sk_perm.security_socket_recvmsg.sock_recvmsg.sock_read_iter 9.23 ± 80% +266.9% 33.88 ± 30% +283.8% 35.44 ± 37% perf-sched.wait_time.avg.ms.__cond_resched.aa_sk_perm.security_socket_sendmsg.sock_write_iter.vfs_write 0.00 -100.0% 0.00 +8.2e+100% 0.08 ±199% perf-sched.wait_time.avg.ms.__cond_resched.apparmor_file_alloc_security.security_file_alloc.__alloc_file.alloc_empty_file 0.01 ±113% -29.7% 0.01 ±185% +50.0% 0.02 ±143% perf-sched.wait_time.avg.ms.__cond_resched.change_pmd_range.change_p4d_range.change_protection_range.change_prot_numa 0.00 +8.8e+100% 0.09 ±201% +4.5e+101% 0.45 ±189% perf-sched.wait_time.avg.ms.__cond_resched.change_pmd_range.change_p4d_range.change_protection_range.mprotect_fixup 0.04 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.copy_page_range.dup_mmap.dup_mm.constprop 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.copy_pte_range.copy_p4d_range.copy_page_range.dup_mmap 0.03 ±146% +1393.6% 0.43 ±191% -73.5% 0.01 ± 94% perf-sched.wait_time.avg.ms.__cond_resched.copy_strings.isra.0.do_execveat_common 0.01 ±223% -31.3% 0.01 ±104% +2694.0% 0.31 ±135% perf-sched.wait_time.avg.ms.__cond_resched.count.constprop.0.isra 0.02 ±183% +0.0% 0.02 ±114% +237.0% 0.06 ± 89% perf-sched.wait_time.avg.ms.__cond_resched.dentry_kill.dput.__fput.task_work_run 2.48 ±208% +682.0% 19.39 ±218% -34.3% 1.63 ±136% 
perf-sched.wait_time.avg.ms.__cond_resched.dentry_kill.dput.proc_invalidate_siblings_dcache.release_task 0.00 +3.3e+100% 0.03 ±206% +1.3e+100% 0.01 ±103% perf-sched.wait_time.avg.ms.__cond_resched.dentry_kill.dput.step_into.link_path_walk 0.00 -100.0% 0.00 +3.9e+100% 0.04 ±200% perf-sched.wait_time.avg.ms.__cond_resched.dentry_kill.dput.step_into.open_last_lookups 0.00 +5.8e+99% 0.01 ±223% +8.6e+99% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.dentry_kill.dput.step_into.path_lookupat 0.00 ±223% +80.0% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.do_close_on_exec.begin_new_exec.load_elf_binary.search_binary_handler 0.00 -100.0% 0.00 +1.1e+100% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.do_page_mkwrite.do_wp_page.__handle_mm_fault.handle_mm_fault 0.76 ±136% -4.4% 0.73 ±152% -94.3% 0.04 ± 29% perf-sched.wait_time.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.[unknown] 0.00 +1.3e+100% 0.01 ±160% +3.8e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault._copy_to_user 0.01 ±148% +2426.7% 0.13 ±123% +208.0% 0.02 ±200% perf-sched.wait_time.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.copyout 0.81 ±216% +113.9% 1.74 ±119% -14.7% 0.69 ± 69% perf-sched.wait_time.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.fault_in_readable 0.00 -100.0% 0.00 +1.6e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.strncpy_from_user 0.08 ±102% +344.9% 0.36 ±139% -23.6% 0.06 ±165% perf-sched.wait_time.avg.ms.__cond_resched.down_read.acct_collect.do_exit.do_group_exit 0.05 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_read.do_user_addr_fault.exc_page_fault.asm_exc_page_fault 0.00 +1.1e+102% 1.08 ±222% +2.6e+100% 0.03 ±164% perf-sched.wait_time.avg.ms.__cond_resched.down_read.exit_mm.do_exit.do_group_exit 0.00 +1.3e+100% 0.01 ±194% +1.6e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_read.exit_mmap.__mmput.exit_mm 0.00 -100.0% 0.00 +4.2e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_read.get_arg_page.copy_string_kernel.do_execveat_common 0.00 +1.3e+100% 0.01 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_read.kernfs_dop_revalidate.lookup_fast.walk_component 0.00 +9.5e+101% 0.95 ±215% +1.6e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_read.kernfs_iop_permission.inode_permission.link_path_walk 0.00 +5.3e+99% 0.01 ±141% +2.8e+100% 0.03 ±163% perf-sched.wait_time.avg.ms.__cond_resched.down_read.open_last_lookups.path_openat.do_filp_open 0.02 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_read.rmap_walk_anon.migrate_pages_batch.migrate_pages 0.00 ±141% +505.3% 0.02 ±139% +790.5% 0.03 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_read.walk_component.link_path_walk.part 0.00 ±223% +742.9% 0.01 ±142% +54.3% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_read.walk_component.path_lookupat.filename_lookup 0.00 +4.5e+99% 0.00 ±158% +5.7e+100% 0.06 ±164% perf-sched.wait_time.avg.ms.__cond_resched.down_read_killable.create_elf_tables.load_elf_binary.search_binary_handler 0.00 -100.0% 0.00 +7.4e+99% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_write.__anon_vma_prepare.do_cow_fault.do_fault 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write.__sock_release.sock_close.__fput 0.00 
+1.3e+100% 0.01 ±206% +6.1e+100% 0.06 ±182% perf-sched.wait_time.avg.ms.__cond_resched.down_write.anon_vma_clone.__split_vma.mprotect_fixup 0.00 +1.7e+99% 0.00 ±223% +5.1e+100% 0.05 ±179% perf-sched.wait_time.avg.ms.__cond_resched.down_write.do_brk_flags.__do_sys_brk.do_syscall_64 0.00 +2.3e+99% 0.00 ±223% +6.6e+100% 0.07 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_write.do_brk_flags.vm_brk_flags.load_elf_interp 0.00 +1e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write.do_brk_flags.vm_brk_flags.set_brk 0.00 +2.7e+99% 0.00 ±223% +6.2e+100% 0.06 ±184% perf-sched.wait_time.avg.ms.__cond_resched.down_write.do_vmi_align_munmap.do_vmi_munmap.__vm_munmap 0.00 +1.7e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write.do_vmi_align_munmap.do_vmi_munmap.mmap_region 0.00 ±223% +1214.3% 0.02 ±223% +1940.0% 0.02 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_write.exit_mmap.__mmput.exit_mm 1.65 ±210% +1257.8% 22.37 ±219% -85.4% 0.24 ±112% perf-sched.wait_time.avg.ms.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput 0.00 +4.1e+101% 0.41 ±223% +9.4e+99% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_write.generic_file_write_iter.vfs_write.ksys_write 0.01 ±127% +8574.0% 1.11 ±204% +1396.1% 0.19 ± 98% perf-sched.wait_time.avg.ms.__cond_resched.down_write.mmap_region.do_mmap.vm_mmap_pgoff 0.62 ±204% -76.7% 0.14 ± 50% -71.1% 0.18 ±112% perf-sched.wait_time.avg.ms.__cond_resched.down_write.unlink_anon_vmas.free_pgtables.exit_mmap 0.04 ±109% +847.7% 0.37 ±125% +5.2e+05% 203.84 ±199% perf-sched.wait_time.avg.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap 0.00 ±223% +75.0% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.unmap_region 0.00 +2.7e+100% 0.03 ±201% +2.6e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_write.vma_prepare.__split_vma.do_vmi_align_munmap 0.00 +1.3e+100% 0.01 ±199% +6.8e+100% 0.07 ±184% perf-sched.wait_time.avg.ms.__cond_resched.down_write.vma_prepare.__split_vma.mprotect_fixup 0.00 +1.5e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.__do_sys_brk.do_syscall_64.entry_SYSCALL_64_after_hwframe 0.00 -100.0% 0.00 +1.1e+101% 0.11 ±199% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.__vm_munmap.__x64_sys_munmap.do_syscall_64 0.00 +1.3e+100% 0.01 ±141% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.do_mprotect_pkey.__x64_sys_mprotect.do_syscall_64 0.04 ±170% +1780.0% 0.69 ± 98% +1064.0% 0.43 ±160% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary 0.00 +9.8e+99% 0.01 ±178% +1.1e+100% 0.01 ± 95% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.map_vdso.load_elf_binary.search_binary_handler 0.00 ±150% +1575.0% 0.06 ±100% +4934.0% 0.17 ±131% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.setup_arg_pages.load_elf_binary.search_binary_handler 0.00 +1e+100% 0.01 ±197% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.do_syscall_64.entry_SYSCALL_64_after_hwframe 0.00 +5.8e+99% 0.01 ±159% +5.3e+101% 0.53 ±162% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_binary 0.00 +1.3e+100% 0.01 ±141% +1.3e+101% 0.13 ±200% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_interp 0.00 +2.6e+100% 0.03 ±145% -100.0% 0.00 
perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.ksys_mmap_pgoff.do_syscall_64 0.34 ±174% -87.1% 0.04 ±148% -89.7% 0.03 ±165% perf-sched.wait_time.avg.ms.__cond_resched.dput.__fput.task_work_run.do_exit 4.88 ±186% -88.0% 0.59 ±159% -54.6% 2.22 ±107% perf-sched.wait_time.avg.ms.__cond_resched.dput.__fput.task_work_run.exit_to_user_mode_loop 0.00 +1.2e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.dput.dcache_dir_close.__fput.task_work_run 0.00 +6.7e+99% 0.01 ±223% +3.4e+100% 0.03 ±123% perf-sched.wait_time.avg.ms.__cond_resched.dput.nd_jump_root.pick_link.step_into 0.01 ±159% -11.4% 0.01 ±121% +266.9% 0.02 ±123% perf-sched.wait_time.avg.ms.__cond_resched.dput.open_last_lookups.path_openat.do_filp_open 0.19 ±121% -65.4% 0.06 ± 96% -55.4% 0.08 ±147% perf-sched.wait_time.avg.ms.__cond_resched.dput.path_put.exit_fs.do_exit 0.00 +4.1e+100% 0.04 ±145% +7e+99% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.dput.path_put.vfs_statx.vfs_fstatat 0.06 ±160% -100.0% 0.00 +376.1% 0.27 ±151% perf-sched.wait_time.avg.ms.__cond_resched.dput.proc_invalidate_siblings_dcache.release_task.wait_task_zombie 0.00 ±141% +790.5% 0.03 ± 82% +42437.1% 1.49 ±190% perf-sched.wait_time.avg.ms.__cond_resched.dput.step_into.link_path_walk.part 0.00 ±223% +1133.3% 0.02 ±175% +2153.3% 0.03 ±154% perf-sched.wait_time.avg.ms.__cond_resched.dput.step_into.open_last_lookups.path_openat 0.00 ±223% +3008.3% 0.06 ±137% +180.0% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.dput.step_into.path_lookupat.filename_lookup 0.01 ±159% +923.7% 0.06 ±103% +2571.6% 0.17 ±140% perf-sched.wait_time.avg.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open 0.00 -100.0% 0.00 +4e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.dput.walk_component.link_path_walk.part 0.00 -100.0% 0.00 +1.4e+102% 1.38 ±200% perf-sched.wait_time.avg.ms.__cond_resched.exit_mmap.__mmput.exec_mmap.begin_new_exec 0.27 ±130% +16951.2% 46.10 ±222% -4.7% 0.26 ± 71% perf-sched.wait_time.avg.ms.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit 0.18 ±200% -69.8% 0.06 ±175% -99.6% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.exit_signals.do_exit.do_group_exit.__x64_sys_exit_group 0.00 -100.0% 0.00 +1.3e+100% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.filemap_read.__kernel_read.load_elf_binary.search_binary_handler 0.00 +1.7e+100% 0.02 ±165% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.filemap_read.__kernel_read.load_elf_phdrs.load_elf_binary 0.00 ±223% +2635.7% 0.06 ±162% +32505.7% 0.76 ±168% perf-sched.wait_time.avg.ms.__cond_resched.filemap_read.__kernel_read.search_binary_handler.exec_binprm 0.00 +1.3e+99% 0.00 ±223% +2.3e+101% 0.23 ±123% perf-sched.wait_time.avg.ms.__cond_resched.filemap_read.vfs_read.ksys_read.do_syscall_64 38.16 ±176% -95.0% 1.90 ± 93% -92.4% 2.88 ± 72% perf-sched.wait_time.avg.ms.__cond_resched.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.vfs_write 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.khugepaged.kthread.ret_from_fork 0.01 ±142% +438.7% 0.07 ± 72% +503.2% 0.08 ± 86% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.__alloc_file.alloc_empty_file.path_openat 0.00 ±223% +1166.7% 0.03 ±206% +1960.0% 0.04 ±176% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault 0.00 +9.5e+99% 0.01 ±223% +6.6e+99% 0.01 ±142% 
perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.do_cow_fault.do_fault 0.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.alloc_pid.copy_process.kernel_clone 0.00 +7.3e+101% 0.73 ±218% +2.4e+101% 0.24 ±197% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.getname_flags.part.0 0.00 -100.0% 0.00 +2.3e+100% 0.02 ±122% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.__split_vma 0.00 +1.1e+100% 0.01 ±127% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.expand_downwards 0.00 ±223% +528.0% 0.03 ± 80% +850.4% 0.04 ±118% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.mmap_region 0.00 +2.8e+100% 0.03 ±142% +4.8e+100% 0.05 ±161% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_expand 0.00 +8.8e+99% 0.01 ±194% +9.6e+99% 0.01 ±133% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_link 0.00 +3.5e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mm_alloc.alloc_bprm.do_execveat_common 0.05 ±223% -98.0% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.prepare_creds.prepare_exec_creds.bprm_execve 0.01 ±147% +500.0% 0.03 ±178% +117.5% 0.01 ± 91% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.security_file_alloc.__alloc_file.alloc_empty_file 0.00 ±223% +1525.0% 0.03 ±110% +230.0% 0.01 ±158% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.__install_special_mapping.map_vdso 0.05 ±188% +129.7% 0.12 ±204% -94.9% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.alloc_bprm.do_execveat_common 0.00 +3.7e+100% 0.04 ±178% +7.2e+99% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.do_brk_flags.__do_sys_brk 0.01 ±223% +8138.7% 0.43 ±198% +658.7% 0.04 ± 99% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.mmap_region.do_mmap 0.03 ±205% +136.9% 0.06 ±104% +517.6% 0.16 ±115% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.do_vmi_align_munmap 1.07 ±223% -99.4% 0.01 ±190% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.mprotect_fixup 0.00 +8.5e+99% 0.01 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc_bulk.mas_alloc_nodes.mas_preallocate.__split_vma 0.00 +1e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc_bulk.mas_alloc_nodes.mas_preallocate.mmap_region 4.68 ± 97% +60.4% 7.51 ± 29% +74.4% 8.17 ± 24% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb 0.08 ±198% -96.3% 0.00 ±223% -72.1% 0.02 ±171% perf-sched.wait_time.avg.ms.__cond_resched.migrate_pages_batch.migrate_pages.migrate_misplaced_page.do_numa_page 0.00 +1.8e+100% 0.02 ±131% +9.4e+100% 0.09 ± 93% perf-sched.wait_time.avg.ms.__cond_resched.mmput.exec_mmap.begin_new_exec.load_elf_binary 0.01 ±182% -18.5% 0.01 ±158% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mmput.exit_mm.do_exit.do_group_exit 0.00 +2.5e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mnt_want_write.filename_create.do_mkdirat.__x64_sys_mkdir 0.02 ±223% -61.2% 0.01 ±223% -100.0% 0.00 
perf-sched.wait_time.avg.ms.__cond_resched.move_page_tables.shift_arg_pages.setup_arg_pages.load_elf_binary 1.19 ±215% -88.4% 0.14 ±223% -94.1% 0.07 ±200% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.__fdget_pos.ksys_write.do_syscall_64 0.01 ±170% +12.2% 0.01 ±179% +142.4% 0.02 ±122% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.futex_exec_release.exec_mm_release.exec_mmap 0.04 ±106% -55.4% 0.02 ±136% +1637.3% 0.65 ±141% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.futex_exit_release.exit_mm_release.exit_mm 0.00 +1.8e+99% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.kernfs_fop_open.do_dentry_open.do_open 0.00 +3.4e+101% 0.34 ±222% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.kernfs_seq_start.seq_read_iter.vfs_read 0.15 ±180% +384.4% 0.71 ±101% +174.6% 0.40 ±158% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.perf_event_ctx_lock_nested.constprop.0 0.00 +4.3e+100% 0.04 ±183% +1.4e+99% 0.00 ±199% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.perf_event_exit_task.do_exit.do_group_exit 0.02 ±194% -88.6% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.perf_event_for_each_child._perf_ioctl.perf_ioctl 3.86 ±175% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.perf_poll.do_poll.constprop 0.00 +3e+99% 0.00 ±142% +3.4e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.perf_read.vfs_read.ksys_read 0.00 +8.3e+98% 0.00 ±223% +1.4e+100% 0.01 ±179% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.pipe_write.vfs_write.ksys_write 0.00 +4e+100% 0.04 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.seq_read_iter.vfs_read.ksys_read 2.07 ±148% +130.3% 4.76 ± 15% +122.0% 4.58 ± 37% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg 0.00 ±143% +6.3% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.uprobe_clear_state.__mmput.exit_mm 0.01 ±151% +402.7% 0.06 ±155% +225.5% 0.04 ±129% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.__percpu_counter_init.mm_init 0.01 ±150% +73.0% 0.01 ±117% -54.6% 0.00 ±124% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.alloc_bprm 0.51 ±198% +57.8% 0.81 ±189% +49565.5% 255.20 ±198% perf-sched.wait_time.avg.ms.__cond_resched.put_files_struct.do_exit.do_group_exit.__x64_sys_exit_group 0.00 ±223% -100.0% 0.00 +2300.0% 0.04 ±200% perf-sched.wait_time.avg.ms.__cond_resched.remove_vma.do_vmi_align_munmap.do_vmi_munmap.mmap_region 0.03 ±136% +3.5e+05% 116.55 ±223% +1374.5% 0.50 ±191% perf-sched.wait_time.avg.ms.__cond_resched.remove_vma.exit_mmap.__mmput.exit_mm 0.14 ±127% -98.8% 0.00 ±223% -48.6% 0.07 ±195% perf-sched.wait_time.avg.ms.__cond_resched.rmap_walk_anon.migrate_pages_batch.migrate_pages.migrate_misplaced_page 183.86 ±223% -100.0% 0.01 ±173% -100.0% 0.08 ±176% perf-sched.wait_time.avg.ms.__cond_resched.rmap_walk_anon.try_to_migrate.migrate_folio_unmap.migrate_pages_batch 0.14 ± 98% -25.4% 0.10 ±130% -5.9% 0.13 ±146% perf-sched.wait_time.avg.ms.__cond_resched.shmem_get_folio_gfp.shmem_write_begin.generic_perform_write.__generic_file_write_iter 0.01 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache.release_task 19.20 ±123% +64.1% 31.52 ± 26% +386.4% 93.40 ±143% 
perf-sched.wait_time.avg.ms.__cond_resched.shrink_dentry_list.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache 0.00 +4.5e+100% 0.04 ± 93% +2e+100% 0.02 ± 79% perf-sched.wait_time.avg.ms.__cond_resched.slab_pre_alloc_hook.constprop.0.kmem_cache_alloc_lru 2.85 ±222% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.stop_one_cpu.migrate_task_to.task_numa_migrate.isra 0.00 ±179% +52754.5% 0.97 ±153% +25645.5% 0.47 ±120% perf-sched.wait_time.avg.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part 0.04 ±206% -63.1% 0.01 ±223% +47.5% 0.05 ±200% perf-sched.wait_time.avg.ms.__cond_resched.switch_task_namespaces.do_exit.do_group_exit.__x64_sys_exit_group 0.00 ±223% +72.7% 0.00 ±223% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.task_numa_work.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare 0.02 ±144% +4.3% 0.02 ±184% -54.0% 0.01 ±200% perf-sched.wait_time.avg.ms.__cond_resched.task_work_run.do_exit.do_group_exit.__x64_sys_exit_group 1.74 ±200% +460.9% 9.78 ±148% +2183.2% 39.80 ±187% perf-sched.wait_time.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode 3.51 ±165% +69.9% 5.96 ± 27% +67.0% 5.86 ± 69% perf-sched.wait_time.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 5.37 ±137% -81.3% 1.01 ±143% -95.3% 0.25 ± 74% perf-sched.wait_time.avg.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput 0.00 +7.7e+99% 0.01 ±163% +1.9e+100% 0.02 ±135% perf-sched.wait_time.avg.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.shift_arg_pages.setup_arg_pages 0.00 +5.8e+99% 0.01 ±175% +2.6e+100% 0.03 ±179% perf-sched.wait_time.avg.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.unmap_region.do_vmi_align_munmap 0.02 ±223% -86.1% 0.00 ±223% +420.7% 0.10 ±200% perf-sched.wait_time.avg.ms.__cond_resched.try_to_migrate_one.rmap_walk_anon.try_to_migrate.migrate_folio_unmap 0.03 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.unmap_page_range.unmap_vmas.exit_mmap.__mmput 0.00 -100.0% 0.00 +1.7e+101% 0.17 ±200% perf-sched.wait_time.avg.ms.__cond_resched.unmap_vmas.exit_mmap.__mmput.exec_mmap 0.02 ±112% +65.5% 0.04 ±134% +293.9% 0.10 ±190% perf-sched.wait_time.avg.ms.__cond_resched.unmap_vmas.exit_mmap.__mmput.exit_mm 0.02 ±156% +1404.1% 0.24 ±150% +323.1% 0.07 ±116% perf-sched.wait_time.avg.ms.__cond_resched.unmap_vmas.unmap_region.do_vmi_align_munmap.do_vmi_munmap 0.00 +1.7e+100% 0.02 ±202% +5e+99% 0.00 ±140% perf-sched.wait_time.avg.ms.__cond_resched.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe 2.90 ± 92% +271.3% 10.76 ± 65% +107.4% 6.01 ± 84% perf-sched.wait_time.avg.ms.__cond_resched.wait_for_unix_gc.unix_stream_sendmsg.sock_write_iter.vfs_write 0.15 ±143% +248.7% 0.51 ±175% +344.6% 0.65 ± 79% perf-sched.wait_time.avg.ms.__cond_resched.wp_page_copy.__handle_mm_fault.handle_mm_fault.do_user_addr_fault 337.00 ±223% -100.0% 0.00 -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work 0.03 ±142% +116.0% 0.07 ±100% +57.1% 0.05 ±139% perf-sched.wait_time.avg.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range 19.42 ±210% -91.2% 1.71 ±201% -98.6% 0.28 ± 61% perf-sched.wait_time.avg.ms.__cond_resched.zap_pte_range.zap_pmd_range.isra.0 416.63 ±223% +20.1% 500.33 ±152% -28.0% 300.05 ±133% 
perf-sched.wait_time.avg.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown]
66.63 ±213% -99.2% 0.50 ±141% -99.4% 0.43 ±200%  perf-sched.wait_time.avg.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64
750.42 ±142% -37.1% 472.35 ±116% -100.0% 0.00  perf-sched.wait_time.avg.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep
8.83 ± 87% +354.0% 40.10 ± 36% +857.0% 84.51 ±124%  perf-sched.wait_time.avg.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64
5.34 ±109% +649.1% 39.99 ± 39% +659.4% 40.54 ± 77%  perf-sched.wait_time.avg.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64
0.32 ±109% -65.3% 0.11 ± 73% -21.2% 0.25 ± 89%  perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault
4.83 ± 71% +480.5% 28.03 ± 21% +593.9% 33.50 ± 52%  perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt
0.78 ±104% +1138.9% 9.62 ± 77% +1310.0% 10.95 ± 70%  perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single
14.85 ± 82% +111.8% 31.46 ± 24% +110.2% 31.22 ± 11%  perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi
1.53 ± 79% +212.5% 4.79 ± 14% +213.0% 4.80 ± 27%  perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64
0.00 +1.3e+100% 0.01 ±145% +1.1e+100% 0.01 ±200%  perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.ret_from_fork
3.26 ± 86% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.avg.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault
117.93 ±116% -63.5% 43.09 ± 66% -32.8% 79.19 ±116%  perf-sched.wait_time.avg.ms.pipe_read.vfs_read.ksys_read.do_syscall_64
0.00 -100.0% 0.00 +9e+101% 0.90 ±200%  perf-sched.wait_time.avg.ms.pipe_write.vfs_write.ksys_write.do_syscall_64
32.77 ±210% -18.3% 26.76 ± 99% -72.1% 9.15 ± 37%  perf-sched.wait_time.avg.ms.rcu_gp_kthread.kthread.ret_from_fork
5.48 ± 89% +1270.2% 75.07 ±142% +11777.8% 650.73 ±181%  perf-sched.wait_time.avg.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll
193.46 ± 71% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.avg.ms.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select
203.12 ± 99% +81.1% 367.80 ± 92% +199.2% 607.80 ± 68%  perf-sched.wait_time.avg.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait
0.01 ±223% +4810.7% 0.61 ±223% +119.2% 0.03 ±200%  perf-sched.wait_time.avg.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.folio_lock_anon_vma_read
15.30 ±219% -98.8% 0.18 ±223% -95.5% 0.69 ±199%  perf-sched.wait_time.avg.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.rmap_walk_anon
0.13 ±133% +93.3% 0.25 ±223% -100.0% 0.00  perf-sched.wait_time.avg.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.__put_anon_vma
2.14 ±127% +540.9% 13.74 ±218% +3472.5% 76.61 ±199%  perf-sched.wait_time.avg.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_anon_vmas
0.12 ±145% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.avg.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_file_vma
0.01 ±142% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.avg.ms.schedule_timeout.__wait_for_common.__flush_work.isra.0
0.87 ±131% +9438.8% 82.89 ±201% +6172.5% 54.51 ± 86%  perf-sched.wait_time.avg.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone
887.78 ± 73% -19.3% 716.68 ± 61% +21.5% 1078 ± 38%  perf-sched.wait_time.avg.ms.schedule_timeout.kcompactd.kthread.ret_from_fork
0.76 ±219% -100.0% 0.00 -48.8% 0.39 ±200%  perf-sched.wait_time.avg.ms.schedule_timeout.khugepaged_wait_work.khugepaged.kthread
10.49 ± 72% +77.2% 18.60 ± 38% +235.1% 35.17 ± 83%  perf-sched.wait_time.avg.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread
14.29 ± 72% +59.2% 22.75 ± 21% +68.8% 24.12 ± 7%  perf-sched.wait_time.avg.ms.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter
2.82 ± 78% +174.5% 7.75 ± 18% +196.4% 8.37 ± 36%  perf-sched.wait_time.avg.ms.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg
633.11 ± 79% -6.3% 593.03 ± 18% -11.4% 560.71 ± 13%  perf-sched.wait_time.avg.ms.smpboot_thread_fn.kthread.ret_from_fork
66.39 ±213% -99.7% 0.20 ±188% -98.7% 0.86 ±200%  perf-sched.wait_time.avg.ms.syslog_print.do_syslog.kmsg_read.vfs_read
0.01 ±144% +1518.9% 0.14 ±165% +5934.0% 0.53 ±126%  perf-sched.wait_time.avg.ms.wait_for_partner.fifo_open.do_dentry_open.do_open
302.32 ± 71% +104.2% 617.40 ± 17% +85.2% 560.01 ± 13%  perf-sched.wait_time.avg.ms.worker_thread.kthread.ret_from_fork
0.00 +2.6e+102% 2.63 ±217% +1.9e+100% 0.02 ± 92%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.do_anonymous_page
0.00 +8.2e+99% 0.01 ±192% +7.4e+100% 0.07 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.do_cow_fault
16.86 ±104% +999.7% 185.45 ±115% +630.8% 123.24 ±137%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.shmem_alloc_folio
0.82 ±110% +649.0% 6.11 ±122% +334.0% 3.54 ±123%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.wp_page_copy
0.00 +2.8e+99% 0.00 ±150% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__get_free_pages.pgd_alloc.mm_init
0.00 +5.1e+100% 0.05 ±211% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__pmd_alloc.__handle_mm_fault.handle_mm_fault
0.00 +1.7e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__pmd_alloc.move_page_tables.shift_arg_pages
0.03 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.allocate_slab.___slab_alloc.constprop
0.00 -100.0% 0.00 +2.2e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.get_zeroed_page.__p4d_alloc.__handle_mm_fault
0.00 +1.4e+100% 0.01 ±187% +7.4e+99% 0.01 ±142%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.get_zeroed_page.__pud_alloc.__handle_mm_fault
0.00 +7.2e+99% 0.01 ±145% +9.8e+99% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.get_zeroed_page.__pud_alloc.alloc_new_pud
0.00 -100.0% 0.00 +1.1e+100% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.pipe_write.vfs_write.ksys_write
0.00 ±223% +241.7% 0.01 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.pte_alloc_one.__do_fault.do_cow_fault
0.00 +2.7e+99% 0.00 ±223% +1.9e+100% 0.02 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.pte_alloc_one.__do_fault.do_read_fault
0.01 ±205% +163.4% 0.03 ±147% -20.6% 0.01 ±127%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.pte_alloc_one.__pte_alloc.do_anonymous_page
0.00 ±223% +1533.3% 0.02 ±141% +3760.0% 0.04 ±122%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.pte_alloc_one.__pte_alloc.move_page_tables
0.00 +1.8e+101% 0.18 ±221% +8.2e+100% 0.08 ±166%  perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.pte_alloc_one.do_read_fault.do_fault
0.00 +7.2e+100% 0.07 ±144% +1.5e+100% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault.handle_mm_fault
0.01 ±159% +226.5% 0.03 ±154% -46.1% 0.00 ±129%  perf-sched.wait_time.max.ms.__cond_resched.__anon_vma_prepare.do_cow_fault.do_fault.__handle_mm_fault
9.21 ±222% -18.6% 7.50 ± 73% +247.4% 31.98 ± 71%  perf-sched.wait_time.max.ms.__cond_resched.__do_fault.do_read_fault.do_fault.__handle_mm_fault
0.00 -100.0% 0.00 +1.1e+101% 0.11 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__fput.task_work_run.do_exit.do_group_exit
0.01 ±223% +540.5% 0.04 ±118% -17.1% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__get_user_pages.get_user_pages_remote.get_arg_page.copy_string_kernel
0.00 +5.3e+100% 0.05 ±101% +9.6e+100% 0.10 ±113%  perf-sched.wait_time.max.ms.__cond_resched.__get_user_pages.get_user_pages_remote.get_arg_page.copy_strings
0.05 ±156% +3296.0% 1.82 ±144% +1035.9% 0.61 ±143%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.inotify_handle_inode_event.send_to_group
0.00 +1.5e+102% 1.45 ±222% +1.4e+101% 0.14 ±175%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.load_elf_phdrs.load_elf_binary
0.00 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.perf_read.vfs_read
0.00 +5.8e+99% 0.01 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.security_prepare_creds.prepare_creds
0.01 ±155% -85.4% 0.00 ±223% +62.5% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.alloc_cpumask_var_node.__sched_setaffinity
0.00 +1.3e+100% 0.01 ±194% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.alloc_cpumask_var_node.__x64_sys_sched_setaffinity
38.36 ±223% -97.0% 1.13 ±161% -92.4% 2.92 ±183%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab
38.82 ±223% -100.0% 0.00 -100.0% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.memcg_slab_post_alloc_hook
0.02 ±106% +1879.8% 0.34 ±216% +15.4% 0.02 ±123%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.sched_setaffinity.__x64_sys_sched_setaffinity
0.00 -100.0% 0.00 +5.9e+100% 0.06 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.seq_read_iter.vfs_read
1525 ± 80% +38.2% 2108 ± 28% +44.4% 2201 ± 67%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node_track_caller.kmalloc_reserve.__alloc_skb
0.00 +1.5e+99% 0.00 ±223% +2.6e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.alloc_bprm.do_execveat_common
0.01 ±137% +14876.5% 1.70 ±134% +37523.5% 4.26 ±186%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.perf_event_mmap_event.perf_event_mmap
0.00 ±223% -8.3% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.proc_pid_readlink.vfs_readlink
0.02 ±132% +111.5% 0.03 ±213% -66.2% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.task_numa_work.task_work_run
0.00 +1.7e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter
47.35 ±196% +311.6% 194.92 ±221% +588.3% 325.96 ±198%  perf-sched.wait_time.max.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap
0.00 -100.0% 0.00 +1.2e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.unmap_region
0.00 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.__vmalloc_area_node.__vmalloc_node_range.alloc_thread_stack_node.dup_task_struct
833.48 ± 71% +114.3% 1785 ± 39% +52.4% 1270 ± 6%  perf-sched.wait_time.max.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity
697.80 ±174% +103.1% 1417 ± 27% +13.6% 792.76 ± 27%  perf-sched.wait_time.max.ms.__cond_resched.aa_sk_perm.security_socket_recvmsg.sock_recvmsg.sock_read_iter
1558 ± 73% +75.1% 2728 ± 20% +90.5% 2968 ± 46%  perf-sched.wait_time.max.ms.__cond_resched.aa_sk_perm.security_socket_sendmsg.sock_write_iter.vfs_write
0.00 -100.0% 0.00 +8.2e+100% 0.08 ±199%  perf-sched.wait_time.max.ms.__cond_resched.apparmor_file_alloc_security.security_file_alloc.__alloc_file.alloc_empty_file
0.02 ±141% -41.8% 0.01 ±198% +34.8% 0.03 ±158%  perf-sched.wait_time.max.ms.__cond_resched.change_pmd_range.change_p4d_range.change_protection_range.change_prot_numa
0.00 +9.2e+100% 0.09 ±192% +8.6e+101% 0.86 ±186%  perf-sched.wait_time.max.ms.__cond_resched.change_pmd_range.change_p4d_range.change_protection_range.mprotect_fixup
0.04 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.copy_page_range.dup_mmap.dup_mm.constprop
0.00 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.copy_pte_range.copy_p4d_range.copy_page_range.dup_mmap
0.07 ±187% +2290.9% 1.57 ±210% -82.7% 0.01 ±119%  perf-sched.wait_time.max.ms.__cond_resched.copy_strings.isra.0.do_execveat_common
0.01 ±223% +17.9% 0.01 ±124% +2694.0% 0.31 ±135%  perf-sched.wait_time.max.ms.__cond_resched.count.constprop.0.isra
0.05 ±194% -48.2% 0.03 ±126% +36.1% 0.07 ±107%  perf-sched.wait_time.max.ms.__cond_resched.dentry_kill.dput.__fput.task_work_run
76.96 ±212% +40.5% 108.11 ±219% -75.1% 19.19 ±172%  perf-sched.wait_time.max.ms.__cond_resched.dentry_kill.dput.proc_invalidate_siblings_dcache.release_task
0.00 +3.5e+100% 0.04 ±194% +2e+100% 0.02 ±131%  perf-sched.wait_time.max.ms.__cond_resched.dentry_kill.dput.step_into.link_path_walk
0.00 -100.0% 0.00 +3.9e+100% 0.04 ±200%  perf-sched.wait_time.max.ms.__cond_resched.dentry_kill.dput.step_into.open_last_lookups
0.00 +5.8e+99% 0.01 ±223% +8.6e+99% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.dentry_kill.dput.step_into.path_lookupat
0.00 ±223% +80.0% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.do_close_on_exec.begin_new_exec.load_elf_binary.search_binary_handler
0.00 -100.0% 0.00 +1.1e+100% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.do_page_mkwrite.do_wp_page.__handle_mm_fault.handle_mm_fault
9.34 ±153% -48.1% 4.84 ±129% -98.5% 0.14 ± 60%  perf-sched.wait_time.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.[unknown]
0.00 +1.3e+100% 0.01 ±160% +3.8e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault._copy_to_user
0.01 ±160% +2059.0% 0.14 ±110% +136.9% 0.02 ±200%  perf-sched.wait_time.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.copyout
1.16 ±201% +5273.2% 62.29 ±137% +1990.6% 24.24 ± 64%  perf-sched.wait_time.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.fault_in_readable
0.00 -100.0% 0.00 +1.6e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.strncpy_from_user
0.25 ±111% +386.7% 1.24 ±170% -54.9% 0.11 ±169%  perf-sched.wait_time.max.ms.__cond_resched.down_read.acct_collect.do_exit.do_group_exit
0.05 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_read.do_user_addr_fault.exc_page_fault.asm_exc_page_fault
0.00 +1.1e+102% 1.08 ±222% +2.6e+100% 0.03 ±164%  perf-sched.wait_time.max.ms.__cond_resched.down_read.exit_mm.do_exit.do_group_exit
0.00 +2.4e+100% 0.02 ±206% +1.6e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_read.exit_mmap.__mmput.exit_mm
0.00 -100.0% 0.00 +4.2e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_read.get_arg_page.copy_string_kernel.do_execveat_common
0.00 +2.4e+100% 0.02 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_read.kernfs_dop_revalidate.lookup_fast.walk_component
0.00 +1.1e+102% 1.15 ±216% +1.6e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_read.kernfs_iop_permission.inode_permission.link_path_walk
0.00 +5.3e+99% 0.01 ±141% +3.6e+100% 0.04 ±171%  perf-sched.wait_time.max.ms.__cond_resched.down_read.open_last_lookups.path_openat.do_filp_open
0.02 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_read.rmap_walk_anon.migrate_pages_batch.migrate_pages
0.00 ±141% +1205.3% 0.04 ±123% +4662.1% 0.15 ±199%  perf-sched.wait_time.max.ms.__cond_resched.down_read.walk_component.link_path_walk.part
0.00 ±223% +1028.6% 0.01 ±141% +54.3% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_read.walk_component.path_lookupat.filename_lookup
0.00 +4.5e+99% 0.00 ±158% +5.8e+100% 0.06 ±163%  perf-sched.wait_time.max.ms.__cond_resched.down_read_killable.create_elf_tables.load_elf_binary.search_binary_handler
0.00 -100.0% 0.00 +7.4e+99% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_write.__anon_vma_prepare.do_cow_fault.do_fault
0.00 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_write.__sock_release.sock_close.__fput
0.00 +1.8e+100% 0.02 ±211% +1.2e+101% 0.12 ±185%  perf-sched.wait_time.max.ms.__cond_resched.down_write.anon_vma_clone.__split_vma.mprotect_fixup
0.00 +2e+99% 0.00 ±223% +5.1e+100% 0.05 ±179%  perf-sched.wait_time.max.ms.__cond_resched.down_write.do_brk_flags.__do_sys_brk.do_syscall_64
0.00 +2.3e+99% 0.00 ±223% +1.1e+101% 0.11 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_write.do_brk_flags.vm_brk_flags.load_elf_interp
0.00 +1e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_write.do_brk_flags.vm_brk_flags.set_brk
0.00 +2.7e+99% 0.00 ±223% +6.4e+100% 0.06 ±184%  perf-sched.wait_time.max.ms.__cond_resched.down_write.do_vmi_align_munmap.do_vmi_munmap.__vm_munmap
0.00 +1.7e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_write.do_vmi_align_munmap.do_vmi_munmap.mmap_region
0.00 ±223% +1214.3% 0.02 ±223% +1940.0% 0.02 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_write.exit_mmap.__mmput.exit_mm
33.89 ±214% +622.5% 244.81 ±220% -97.1% 0.98 ±128%  perf-sched.wait_time.max.ms.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput
0.00 +4.1e+101% 0.41 ±223% +9.4e+99% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_write.generic_file_write_iter.vfs_write.ksys_write
0.01 ±116% +22596.5% 3.25 ±208% +7119.5% 1.03 ±126%  perf-sched.wait_time.max.ms.__cond_resched.down_write.mmap_region.do_mmap.vm_mmap_pgoff
1.93 ±197% -81.5% 0.36 ± 58% -89.1% 0.21 ± 94%  perf-sched.wait_time.max.ms.__cond_resched.down_write.unlink_anon_vmas.free_pgtables.exit_mmap
0.18 ±110% +665.9% 1.40 ±145% +1.1e+05% 203.86 ±199%  perf-sched.wait_time.max.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap
0.00 ±223% +75.0% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.unmap_region
0.00 +2.7e+100% 0.03 ±201% +2.6e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_write.vma_prepare.__split_vma.do_vmi_align_munmap
0.00 +2.3e+100% 0.02 ±208% +1.1e+101% 0.11 ±190%  perf-sched.wait_time.max.ms.__cond_resched.down_write.vma_prepare.__split_vma.mprotect_fixup
0.00 +1.5e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.__do_sys_brk.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.00 -100.0% 0.00 +1.1e+101% 0.11 ±199%  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.__vm_munmap.__x64_sys_munmap.do_syscall_64
0.00 +1.3e+100% 0.01 ±141% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.do_mprotect_pkey.__x64_sys_mprotect.do_syscall_64
0.07 ±178% +6706.2% 4.76 ± 85% +1669.4% 1.24 ±124%  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary
0.00 +9.8e+99% 0.01 ±178% +1.2e+100% 0.01 ± 91%  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.map_vdso.load_elf_binary.search_binary_handler
0.00 ±150% +2935.0% 0.10 ±117% +5348.0% 0.18 ±118%  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.setup_arg_pages.load_elf_binary.search_binary_handler
0.00 +1.1e+100% 0.01 ±200% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.00 +9e+99% 0.01 ±178% +1e+102% 1.04 ±163%  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_binary
0.00 +1.4e+100% 0.01 ±141% +3.9e+101% 0.39 ±200%  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_interp
0.00 +2.6e+100% 0.03 ±145% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.ksys_mmap_pgoff.do_syscall_64
0.63 ±136% -89.2% 0.07 ±128% -84.1% 0.10 ±182%  perf-sched.wait_time.max.ms.__cond_resched.dput.__fput.task_work_run.do_exit
5.59 ±164% -74.8% 1.41 ±130% +91.7% 10.72 ± 88%  perf-sched.wait_time.max.ms.__cond_resched.dput.__fput.task_work_run.exit_to_user_mode_loop
0.00 +1.2e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.dput.dcache_dir_close.__fput.task_work_run
0.00 +6.7e+99% 0.01 ±223% +3.7e+100% 0.04 ±118%  perf-sched.wait_time.max.ms.__cond_resched.dput.nd_jump_root.pick_link.step_into
0.01 ±159% -11.4% 0.01 ±121% +815.4% 0.05 ±167%  perf-sched.wait_time.max.ms.__cond_resched.dput.open_last_lookups.path_openat.do_filp_open
0.52 ±136% -74.8% 0.13 ±141% -83.9% 0.08 ±147%  perf-sched.wait_time.max.ms.__cond_resched.dput.path_put.exit_fs.do_exit
0.00 +4.1e+100% 0.04 ±145% +7e+99% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.dput.path_put.vfs_statx.vfs_fstatat
0.07 ±171% -100.0% 0.00 +558.9% 0.48 ±171%  perf-sched.wait_time.max.ms.__cond_resched.dput.proc_invalidate_siblings_dcache.release_task.wait_task_zombie
0.00 ±141% +1354.2% 0.06 ± 92% +39905.0% 1.60 ±174%  perf-sched.wait_time.max.ms.__cond_resched.dput.step_into.link_path_walk.part
0.00 ±223% +1133.3% 0.02 ±175% +2153.3% 0.03 ±154%  perf-sched.wait_time.max.ms.__cond_resched.dput.step_into.open_last_lookups.path_openat
0.00 ±223% +3100.0% 0.06 ±132% +180.0% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.dput.step_into.path_lookupat.filename_lookup
0.01 ±185% +1522.5% 0.19 ±130% +7943.4% 0.95 ±176%  perf-sched.wait_time.max.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open
0.00 -100.0% 0.00 +4e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.dput.walk_component.link_path_walk.part
0.00 -100.0% 0.00 +1.4e+102% 1.38 ±200%  perf-sched.wait_time.max.ms.__cond_resched.exit_mmap.__mmput.exec_mmap.begin_new_exec
4.88 ±170% +8376.3% 413.99 ±222% -68.8% 1.52 ±104%  perf-sched.wait_time.max.ms.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit
0.19 ±193% -58.5% 0.08 ±172% -99.6% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.exit_signals.do_exit.do_group_exit.__x64_sys_exit_group
0.00 -100.0% 0.00 +1.3e+100% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.filemap_read.__kernel_read.load_elf_binary.search_binary_handler
0.00 +1.7e+100% 0.02 ±165% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.filemap_read.__kernel_read.load_elf_phdrs.load_elf_binary
0.00 ±223% +4121.4% 0.10 ±161% +66508.6% 1.55 ±164%  perf-sched.wait_time.max.ms.__cond_resched.filemap_read.__kernel_read.search_binary_handler.exec_binprm
0.00 +1.3e+99% 0.00 ±223% +2.3e+101% 0.23 ±123%  perf-sched.wait_time.max.ms.__cond_resched.filemap_read.vfs_read.ksys_read.do_syscall_64
493.62 ±149% -68.4% 155.79 ± 62% -44.2% 275.58 ± 48%  perf-sched.wait_time.max.ms.__cond_resched.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.vfs_write
0.00 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.khugepaged.kthread.ret_from_fork
0.02 ±155% +791.4% 0.16 ± 89% +974.3% 0.19 ± 84%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.__alloc_file.alloc_empty_file.path_openat
0.00 ±223% +1166.7% 0.03 ±206% +1960.0% 0.04 ±176%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault
0.00 +9.5e+99% 0.01 ±223% +8.6e+99% 0.01 ±153%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.do_cow_fault.do_fault
0.00 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.alloc_pid.copy_process.kernel_clone
0.00 +7.3e+101% 0.73 ±217% +6.7e+101% 0.67 ±198%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.getname_flags.part.0
0.00 -100.0% 0.00 +3.4e+100% 0.03 ±133%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.__split_vma
0.00 +1.1e+100% 0.01 ±127% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.expand_downwards
0.00 ±223% +892.0% 0.04 ± 93% +2112.8% 0.09 ±119%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.mmap_region
0.00 +2.8e+100% 0.03 ±142% +4.8e+100% 0.05 ±161%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_expand
0.00 +8.8e+99% 0.01 ±194% +9.6e+99% 0.01 ±133%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_link
0.00 +3.5e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mm_alloc.alloc_bprm.do_execveat_common
0.05 ±223% -98.0% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.prepare_creds.prepare_exec_creds.bprm_execve
0.01 ±147% +578.1% 0.04 ±158% +290.0% 0.02 ±110%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.security_file_alloc.__alloc_file.alloc_empty_file
0.00 ±223% +2125.0% 0.04 ±127% +390.0% 0.01 ±171%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.__install_special_mapping.map_vdso
0.06 ±170% +106.5% 0.12 ±204% -95.4% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.alloc_bprm.do_execveat_common
0.00 +3.7e+100% 0.04 ±178% +1.3e+100% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.do_brk_flags.__do_sys_brk
0.01 ±223% +22196.8% 1.15 ±211% +1785.2% 0.10 ±106%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.mmap_region.do_mmap
0.03 ±205% +261.8% 0.09 ± 93% +825.6% 0.24 ±128%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.do_vmi_align_munmap
2.14 ±223% -99.7% 0.01 ±190% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.mprotect_fixup
0.00 +8.5e+99% 0.01 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc_bulk.mas_alloc_nodes.mas_preallocate.__split_vma
0.00 +1e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc_bulk.mas_alloc_nodes.mas_preallocate.mmap_region
1396 ± 83% +28.7% 1796 ± 49% +37.0% 1912 ± 35%  perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb
0.15 ±208% -97.4% 0.00 ±223% -85.3% 0.02 ±171%  perf-sched.wait_time.max.ms.__cond_resched.migrate_pages_batch.migrate_pages.migrate_misplaced_page.do_numa_page
0.00 +1.8e+100% 0.02 ±131% +9.4e+100% 0.09 ± 93%  perf-sched.wait_time.max.ms.__cond_resched.mmput.exec_mmap.begin_new_exec.load_elf_binary
0.01 ±182% -5.6% 0.01 ±146% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.mmput.exit_mm.do_exit.do_group_exit
0.00 +2.5e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.mnt_want_write.filename_create.do_mkdirat.__x64_sys_mkdir
0.02 ±223% -61.2% 0.01 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.move_page_tables.shift_arg_pages.setup_arg_pages.load_elf_binary
1.19 ±215% -66.4% 0.40 ±223% -94.1% 0.07 ±200%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.__fdget_pos.ksys_write.do_syscall_64
0.01 ±170% +12.2% 0.01 ±179% +289.4% 0.03 ±128%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.futex_exec_release.exec_mm_release.exec_mmap
0.06 ±118% -38.4% 0.04 ±155% +2198.2% 1.34 ±165%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.futex_exit_release.exit_mm_release.exit_mm
0.00 +1.8e+99% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.kernfs_fop_open.do_dentry_open.do_open
0.00 +3.4e+101% 0.34 ±222% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.kernfs_seq_start.seq_read_iter.vfs_read
0.80 ±188% +318.9% 3.36 ± 90% +211.1% 2.49 ±151%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.perf_event_ctx_lock_nested.constprop.0
0.00 +4.3e+100% 0.04 ±183% +1.4e+99% 0.00 ±199%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.perf_event_exit_task.do_exit.do_group_exit
0.02 ±194% -87.9% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.perf_event_for_each_child._perf_ioctl.perf_ioctl
18.35 ±187% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.perf_poll.do_poll.constprop
0.00 +3e+99% 0.00 ±142% +3.4e+99% 0.00 ±200%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.perf_read.vfs_read.ksys_read
0.00 +8.3e+98% 0.00 ±223% +1.4e+100% 0.01 ±176%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.pipe_write.vfs_write.ksys_write
0.00 +4e+100% 0.04 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.seq_read_iter.vfs_read.ksys_read
591.36 ±193% +93.2% 1142 ± 25% +46.8% 867.98 ± 31%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg
0.00 ±143% +6.3% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.uprobe_clear_state.__mmput.exit_mm
0.03 ±183% +353.7% 0.13 ±139% +279.0% 0.11 ±127%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.__percpu_counter_init.mm_init
0.01 ±150% +154.1% 0.02 ±123% -38.4% 0.00 ±124%  perf-sched.wait_time.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.alloc_bprm
16.54 ±219% -68.0% 5.30 ±200% +6086.1% 1023 ±198%  perf-sched.wait_time.max.ms.__cond_resched.put_files_struct.do_exit.do_group_exit.__x64_sys_exit_group
0.00 ±223% -100.0% 0.00 +2300.0% 0.04 ±200%  perf-sched.wait_time.max.ms.__cond_resched.remove_vma.do_vmi_align_munmap.do_vmi_munmap.mmap_region
0.06 ±139% +1.9e+05% 116.57 ±223% +690.0% 0.50 ±191%  perf-sched.wait_time.max.ms.__cond_resched.remove_vma.exit_mmap.__mmput.exit_mm
0.80 ±149% -99.6% 0.00 ±223% -70.2% 0.24 ±198%  perf-sched.wait_time.max.ms.__cond_resched.rmap_walk_anon.migrate_pages_batch.migrate_pages.migrate_misplaced_page
551.45 ±223% -100.0% 0.04 ±213% -100.0% 0.16 ±188%  perf-sched.wait_time.max.ms.__cond_resched.rmap_walk_anon.try_to_migrate.migrate_folio_unmap.migrate_pages_batch
0.16 ±112% +85.8% 0.30 ±187% -22.2% 0.13 ±146%  perf-sched.wait_time.max.ms.__cond_resched.shmem_get_folio_gfp.shmem_write_begin.generic_perform_write.__generic_file_write_iter
0.02 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache.release_task
1623 ± 83% -65.9% 554.21 ± 45% -25.6% 1207 ± 76%  perf-sched.wait_time.max.ms.__cond_resched.shrink_dentry_list.shrink_dcache_parent.d_invalidate.proc_invalidate_siblings_dcache
0.00 +4.7e+100% 0.05 ± 87% +2.9e+100% 0.03 ±105%  perf-sched.wait_time.max.ms.__cond_resched.slab_pre_alloc_hook.constprop.0.kmem_cache_alloc_lru
2.85 ±222% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.stop_one_cpu.migrate_task_to.task_numa_migrate.isra
0.00 ±141% +3.3e+05% 9.32 ±159% +1.1e+05% 3.24 ±118%  perf-sched.wait_time.max.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part
0.07 ±214% -53.3% 0.03 ±223% -22.1% 0.05 ±200%  perf-sched.wait_time.max.ms.__cond_resched.switch_task_namespaces.do_exit.do_group_exit.__x64_sys_exit_group
0.00 ±223% +118.2% 0.00 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.task_numa_work.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare
0.03 ±132% -13.0% 0.02 ±184% -61.7% 0.01 ±200%  perf-sched.wait_time.max.ms.__cond_resched.task_work_run.do_exit.do_group_exit.__x64_sys_exit_group
14.66 ±207% +943.0% 152.95 ±170% +3331.1% 503.14 ±191%  perf-sched.wait_time.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode
603.73 ±193% +28.1% 773.60 ± 48% +38.0% 832.85 ± 96%  perf-sched.wait_time.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode
89.38 ±137% -94.5% 4.90 ±114% -99.3% 0.65 ± 79%  perf-sched.wait_time.max.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput
0.00 +7.7e+99% 0.01 ±163% +1.9e+100% 0.02 ±136%  perf-sched.wait_time.max.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.shift_arg_pages.setup_arg_pages
0.00 +9.2e+99% 0.01 ±191% +2.6e+100% 0.03 ±179%  perf-sched.wait_time.max.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.unmap_region.do_vmi_align_munmap
0.02 ±223% -76.5% 0.00 ±223% +420.7% 0.10 ±200%  perf-sched.wait_time.max.ms.__cond_resched.try_to_migrate_one.rmap_walk_anon.try_to_migrate.migrate_folio_unmap
0.03 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.unmap_page_range.unmap_vmas.exit_mmap.__mmput
0.00 -100.0% 0.00 +1.7e+101% 0.17 ±200%  perf-sched.wait_time.max.ms.__cond_resched.unmap_vmas.exit_mmap.__mmput.exec_mmap
0.04 ±128% +27.6% 0.05 ±130% +137.1% 0.10 ±185%  perf-sched.wait_time.max.ms.__cond_resched.unmap_vmas.exit_mmap.__mmput.exit_mm
0.02 ±156% +2047.4% 0.35 ±100% +1197.7% 0.21 ±112%  perf-sched.wait_time.max.ms.__cond_resched.unmap_vmas.unmap_region.do_vmi_align_munmap.do_vmi_munmap
0.00 +1.7e+100% 0.02 ±202% +5e+99% 0.00 ±140%  perf-sched.wait_time.max.ms.__cond_resched.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe
341.18 ± 98% +235.3% 1143 ± 61% +7.1% 365.27 ±137%  perf-sched.wait_time.max.ms.__cond_resched.wait_for_unix_gc.unix_stream_sendmsg.sock_write_iter.vfs_write
1.55 ±187% +214.0% 4.87 ±204% +116.7% 3.36 ± 99%  perf-sched.wait_time.max.ms.__cond_resched.wp_page_copy.__handle_mm_fault.handle_mm_fault.do_user_addr_fault
671.72 ±223% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work
0.05 ±132% +162.4% 0.13 ± 94% +57.2% 0.08 ±124%  perf-sched.wait_time.max.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range
583.27 ±202% -92.4% 44.37 ±206% -99.5% 3.10 ± 89%  perf-sched.wait_time.max.ms.__cond_resched.zap_pte_range.zap_pmd_range.isra.0
833.26 ±223% +20.1% 1000 ±152% -28.0% 600.18 ±133%  perf-sched.wait_time.max.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown]
639.01 ±221% -99.8% 1.01 ±141% -99.9% 0.87 ±200%  perf-sched.wait_time.max.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64
1500 ±142% -44.5% 833.59 ±128% -100.0% 0.00  perf-sched.wait_time.max.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep
1760 ± 84% +14.5% 2015 ± 65% +47.0% 2588 ± 60%  perf-sched.wait_time.max.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64
1182 ±132% +18.8% 1405 ± 47% -24.5% 892.74 ± 55%  perf-sched.wait_time.max.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64
8.72 ±128% -85.1% 1.30 ± 99% -66.7% 2.90 ± 75%  perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault
137.80 ±100% +730.4% 1144 ± 29% +1091.6% 1642 ± 80%  perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt
168.53 ±196% +41.5% 238.45 ± 85% +23.4% 207.96 ±147%  perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single
1459 ± 71% +67.2% 2440 ± 22% +33.7% 1951 ± 29%  perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi
2351 ± 72% +72.3% 4052 ± 12% +64.6% 3871 ± 35%  perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64
0.00 +1.5e+100% 0.02 ±142% +1.1e+100% 0.01 ±200%  perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.ret_from_fork
165.52 ± 92% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault
2976 ± 71% +18.5% 3528 ± 36% -6.7% 2777 ± 38%  perf-sched.wait_time.max.ms.pipe_read.vfs_read.ksys_read.do_syscall_64
0.00 -100.0% 0.00 +9e+101% 0.90 ±200%  perf-sched.wait_time.max.ms.pipe_write.vfs_write.ksys_write.do_syscall_64
118.33 ±219% +440.4% 639.48 ±131% -4.7% 112.74 ± 64%  perf-sched.wait_time.max.ms.rcu_gp_kthread.kthread.ret_from_fork
1554 ±113% -78.6% 333.42 ±141% -9.9% 1401 ±166%  perf-sched.wait_time.max.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll
2651 ± 70% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select
2251 ± 99% -48.2% 1166 ±115% -6.8% 2099 ± 78%  perf-sched.wait_time.max.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait
0.02 ±223% +4403.5% 1.07 ±223% +15.8% 0.03 ±200%  perf-sched.wait_time.max.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.folio_lock_anon_vma_read
30.58 ±219% -99.4% 0.18 ±223% -93.8% 1.90 ±200%  perf-sched.wait_time.max.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read.rmap_walk_anon
0.22 ±152% +16.8% 0.25 ±223% -100.0% 0.00  perf-sched.wait_time.max.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.__put_anon_vma
54.85 ±156% +197.4% 163.12 ±222% +1556.8% 908.82 ±199%  perf-sched.wait_time.max.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_anon_vmas
0.21 ±152% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.schedule_preempt_disabled.rwsem_down_write_slowpath.down_write.unlink_file_vma
0.03 ±144% -100.0% 0.00 -100.0% 0.00  perf-sched.wait_time.max.ms.schedule_timeout.__wait_for_common.__flush_work.isra.0
9.24 ±183% +8372.4% 782.92 ±211% +5187.3% 488.59 ±107%  perf-sched.wait_time.max.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone
2856 ± 71% -38.3% 1763 ± 51% +2.4% 2923 ± 36%  perf-sched.wait_time.max.ms.schedule_timeout.kcompactd.kthread.ret_from_fork
0.76 ±219% -100.0% 0.00 -48.8% 0.39 ±200%  perf-sched.wait_time.max.ms.schedule_timeout.khugepaged_wait_work.khugepaged.kthread
1254 ± 85% +18.4% 1485 ± 56% +57.2% 1972 ± 51%  perf-sched.wait_time.max.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread
909.09 ± 75% +117.4% 1975 ± 18% +143.6% 2214 ± 36%  perf-sched.wait_time.max.ms.schedule_timeout.sock_alloc_send_pskb.unix_stream_sendmsg.sock_write_iter
2263 ± 73% +61.6% 3656 ± 15% +58.2% 3581 ± 43%  perf-sched.wait_time.max.ms.schedule_timeout.unix_stream_data_wait.unix_stream_read_generic.unix_stream_recvmsg
3154 ± 71% +61.2% 5084 ± 25% +65.0% 5205 ± 21%  perf-sched.wait_time.max.ms.smpboot_thread_fn.kthread.ret_from_fork
699.58 ±221% -99.9% 0.41 ±188% -99.8% 1.73 ±200%  perf-sched.wait_time.max.ms.syslog_print.do_syslog.kmsg_read.vfs_read
0.05 ±150% +990.2% 0.54 ±174% +3810.0% 1.92 ±120%  perf-sched.wait_time.max.ms.wait_for_partner.fifo_open.do_dentry_open.do_open
3056 ± 70% +63.8% 5007 ± 21% +68.1% 5139 ± 19%  perf-sched.wait_time.max.ms.worker_thread.kthread.ret_from_fork
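The wait-time rows above follow the same layout as the rest of the comparison table: base value with optional ±stddev, then a %change / value pair (each with optional ±stddev) per compared branch, and the metric name last. As an illustrative aid only (not part of the robot's output; the function name, script name, and input file are made up), a minimal Python sketch that splits such rows into the metric name and its value/%change columns:

    import sys

    def parse_row(line: str):
        """Split one comparison row into (metric, [base, %chg1, val1, %chg2, val2, ...]).

        Rows look like:
          66.63 ±213% -99.2% 0.50 ±141% -99.4% 0.43 ±200%  perf-sched.wait_time.avg.ms...
        The ±NN% tokens are per-column stddev annotations and are dropped here.
        """
        toks = line.split()
        if len(toks) < 2 or not toks[-1][:1].isalpha():
            return None                      # header, separator, or blank line
        metric, nums = toks[-1], toks[:-1]
        vals, i = [], 0
        while i < len(nums):
            if nums[i] == "±":               # stddev written as "± 86%" (two tokens)
                i += 2
            elif nums[i].startswith("±"):    # stddev written as "±213%" (one token)
                i += 1
            else:
                vals.append(nums[i])
                i += 1
        return metric, vals

    if __name__ == "__main__":
        # Hypothetical usage: python3 parse_lkp.py < report.txt
        for line in sys.stdin:
            parsed = parse_row(line)
            if parsed:
                print(parsed[0], parsed[1])

Because the ±stddev annotations are optional per column, token-wise filtering as above is less fragile here than a fixed-width or single-regex parse.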