> Date: Tue, 1 Jul 2025 20:41:47 +0200
> From: Alexander Bluhm <[email protected]>
> 
> Hi
> 
> I see this crash on a vmd guest while running regress/sys/kern/sosplice.
> Note that it is a single CPU GENERIC kernel.  sysctl kern.splassert=2
> 
> panic: assertwaitok: non-zero mutex count: 2
> Stopped at      db_enter+0x14:  popq    %rbp
>     TID    PID    UID     PRFLAGS     PFLAGS  CPU  COMMAND
> *519542  91140      0         0x1          0    0  perl
> db_enter() at db_enter+0x14
> panic(ffffffff82595a39) at panic+0xc9
> assertwaitok() at assertwaitok+0x9e
> mi_switch() at mi_switch+0x19c
> pool_get(ffffffff82a28d28,1) at pool_get+0xe7
> uvm_mapent_alloc(ffffffff82b0eb60,8) at uvm_mapent_alloc+0x2b2
> uvm_map_mkentry(ffffffff82b0eb60,fffffd8006e6cbd0,fffffd8006e6cbd0,ffff80002a32
> 0000,1000,8,79bcd127adccfb5a,7) at uvm_map_mkentry+0x63
> uvm_mapent_clone(ffffffff82b0eb60,ffff80002a320000,1000,0,1,7,a33acdf397a7ed83,
> fffffd806c1f89e8,fffffd806e3beb40,c) at uvm_mapent_clone+0x92
> uvm_map_extract(fffffd806e3beb40,83d6d1f7000,1000,ffff80002a39f048,8) at 
> uvm_ma
> p_extract+0x309
> sys_kbind(ffff80002a294020,ffff80002a39f160,ffff80002a39f0d0) at 
> sys_kbind+0x3a
> 1
> syscall(ffff80002a39f160) at syscall+0x444
> Xsyscall() at Xsyscall+0x128
> end of kernel
> end trace frame: 0x783818799758, count: 3
> https://www.openbsd.org/ddb.html describes the minimum info required in bug
> reports.  Insufficient info makes it difficult to find and fix bugs.

I don't see anything in that codepath that would end up there with a
mutex held.  So my guess is you somehow returned to userland with a
mutex held because of a missing mtx_leave() call in an error path.  Or
maybe an interrupt handler forgot to unlock a mutex?

> ddb> show panic
> *cpu0: assertwaitok: non-zero mutex count: 2
> 
> ddb> trace
> db_enter() at db_enter+0x14
> panic(ffffffff82595a39) at panic+0xc9
> assertwaitok() at assertwaitok+0x9e
> mi_switch() at mi_switch+0x19c
> pool_get(ffffffff82a28d28,1) at pool_get+0xe7
> uvm_mapent_alloc(ffffffff82b0eb60,8) at uvm_mapent_alloc+0x2b2
> uvm_map_mkentry(ffffffff82b0eb60,fffffd8006e6cbd0,fffffd8006e6cbd0,ffff80002a32
> 0000,1000,8,79bcd127adccfb5a,7) at uvm_map_mkentry+0x63
> uvm_mapent_clone(ffffffff82b0eb60,ffff80002a320000,1000,0,1,7,a33acdf397a7ed83,
> fffffd806c1f89e8,fffffd806e3beb40,c) at uvm_mapent_clone+0x92
> uvm_map_extract(fffffd806e3beb40,83d6d1f7000,1000,ffff80002a39f048,8) at 
> uvm_ma
> p_extract+0x309
> sys_kbind(ffff80002a294020,ffff80002a39f160,ffff80002a39f0d0) at 
> sys_kbind+0x3a
> 1
> syscall(ffff80002a39f160) at syscall+0x444
> Xsyscall() at Xsyscall+0x128
> end of kernel
> end trace frame: 0x783818799758, count: -12
> 
> ddb> ps
>    PID     TID   PPID    UID  S       FLAGS  WAIT          COMMAND
>  50719  364359  30184      0  3        0x81  nanoslp       perl
>  89315  397256  30184      0  2         0x1                perl
> *91140  519542  30184      0  7         0x1                perl
>  30184  440958  16446      0  3        0x83  nanoslp       perl
>  16446  518682  98387      0  3    0x100083  wait          time
>  98387  323339  53126      0  3    0x10008b  sigsusp       make
>  53126  141318  82761      0  3    0x10008b  sigsusp       sh
>  82761  354971  73008      0  3    0x10008b  sigsusp       make
>  31594  235787      0      0  3     0x14200  bored         sosplice
>  73008  434575  63099      0  3    0x10008b  sigsusp       sh
>  58384  523054  59573      0  3    0x100083  piperd        tee
>  63099   14511  59573      0  3    0x10008b  sigsusp       make
>  59573  466089  75154      0  3    0x10008b  sigsusp       ksh
>  75154  337295  48262      0  3        0x98  kqread        sshd-session
>  48262  159930  79222      0  3        0x92  kqread        sshd-session
>  45486  167975      1      0  3    0x100083  ttyin         getty
>  33276  425960      1      0  3    0x100098  kqread        cron
>  27681  270929      1     99  3   0x1100090  kqread        sndiod
>  11364  417661      1    110  3    0x100090  kqread        sndiod
>  40089  188665      1     62  3    0x100090  bpf           spamlogd
>  34557  125396      1      0  3    0x100080  kqread        spamd
>  99039  337448  85595     95  3   0x1100092  kqread        smtpd
>  24186  103575  85595    103  3   0x1100092  kqread        smtpd
>  14780  310785  85595     95  3   0x1100092  kqread        smtpd
>  17041  440957  85595     95  3    0x100092  kqread        smtpd
>  37498   70857      1      0  3    0x100090  kqread        inetd
>  35977  143013  85595     95  3   0x1100092  kqread        smtpd
>  70759  169462  85595     95  3   0x1100092  kqread        smtpd
>  85595  185578      1      0  3    0x100080  kqread        smtpd
>  45812  428879  30694     90  3   0x1100090  kqread        ospf6d
>  35226  149611  30694     90  3   0x1100090  kqread        ospf6d
>  30694  452548      1      0  3        0x80  kqread        ospf6d
>  95899  361430  72950     85  3   0x1100090  kqread        ospfd
>   4921  141423  72950     85  3   0x1100090  kqread        ospfd
>  72950  472267      1      0  3        0x80  kqread        ospfd
>  79222  404798      1      0  3        0x88  kqread        sshd
>  88688   35376      0      0  3     0x14200  acct          acct
>  78489  362341  69879     74  3   0x1100092  bpf           pflogd
>  69879  354812      1      0  3        0x80  sbwait        pflogd
>  74564  397593  41443     73  3   0x1100090  kqread        syslogd
>  41443  338970      1      0  3    0x100082  sbwait        syslogd
>  39889  104554      1      0  3    0x100080  kqread        resolvd
>  71873  424240  22448     77  3    0x100092  kqread        dhcpleased
>  52541  234292  22448     77  3    0x100092  kqread        dhcpleased
>  22448  366633      1      0  3        0x80  kqread        dhcpleased
>  65215  408302  48713    115  3    0x100092  kqread        slaacd
>  88844  259687  48713    115  3    0x100092  kqread        slaacd
>  48713  474175      1      0  3    0x100080  kqread        slaacd
>  10869  362036      0      0  3     0x14200  bored         smr
>  23377  242819      0      0  2     0x14200                zerothread
>  84206  406928      0      0  3     0x14200  aiodoned      aiodoned
>  97161  190994      0      0  3     0x14200  syncer        update
>  23617  305588      0      0  3     0x14200  cleaner       cleaner
>  60916  201903      0      0  3     0x14200  reaper        reaper
>  65745  105486      0      0  3     0x14200  pgdaemon      pagedaemon
>  19419  198748      0      0  3     0x14200  bored         softnet3
>  26877  279498      0      0  3     0x14200  bored         softnet2
>  17305  433961      0      0  3     0x14200  bored         softnet1
>  38737  347863      0      0  3     0x14200  bored         softnet0
>    559  457743      0      0  3     0x14200  bored         systqmp
>  90589  320713      0      0  3     0x14200  bored         systq
>  47461  409408      0      0  3  0x40014200  tmoslp        softclock
>  99978  479319      0      0  3  0x40014200                idle0
>      1   49942      0      0  3        0x82  wait          init
>      0       0     -1      0  3     0x10200  scheduler     swapper
> 
> ddb> show uvm
> Current UVM status:
>   pagesize=4096 (0x1000), pagemask=0xfff, pageshift=12
>   500108 VM pages: 12045 active, 1408 inactive, 1 wired, 455822 free (54893 
> zer
> o)
>   freemin=16670, free-target=22226, inactive-target=0, wired-max=166702
>   faults=1046055, traps=1053337, intrs=46537, ctxswitch=138527 fpuswitch=0
>   softint=35691, syscalls=854823, kmapent=8
>   fault counts:
>     noram=0, noanon=0, noamap=0, pgwait=0, pgrele=0
>     relocks=5110(0), upgrades=0(0) anget(retries)=473884(0), amapcopy=159122
>     neighbor anon/obj pg=444003/521471, gets(lock/unlock)=163634/5111
>     cases: anon=344989, anoncow=128895, obj=139013, prcopy=24620, 
> przero=408538
> 
>   daemon and swap counts:
>     woke=0, revs=0, scans=0, obscans=0, anscans=0
>     busy=0, freed=0, reactivate=0, deactivate=0
>     pageouts=0, pending=0, nswget=0
>     nswapdev=1
>     swpages=73590, swpginuse=0, swpgonly=0 paging=0
>   kernel pointers:
>     objs(kern)=0xffffffff82a854e0
> 
> 

Reply via email to