Hi,

ever since this commit: ...

dillon      2000/11/18 15:06:27 PST
 
   Modified files:
    sys/kern             vfs_bio.c vfs_cluster.c vfs_subr.c
                         vfs_vnops.c
    sys/sys              buf.h vnode.h
    sys/ufs/ffs          ffs_inode.c ffs_softdep.c
    sys/ufs/ufs          ufs_readwrite.c
    sys/vm               swap_pager.c vm_page.c vm_page.h
                         vm_pageout.c
   Log:
   Implement a low-memory deadlock solution.  


... I can very reliably reproduce this panic.
I have INN running here (inn-2.3.0 straight from /usr/ports) and feed
it articles with suck.  Actually suck is run twice in a row (for
different news servers) and as soon as the second run starts feeding
articles to innd, the panic occurs.  (In a few cases the panic occurred
a bit later, maybe 30 seconds.)

I've appended some output from gdb and put a crash dump (96 MB / 17MB
gzipped) and a debug kernel on http://ltilx150.etec.uni-karlsruhe.de/p/
(This is with sources from today.)

Some additional observations:
-The INN is compiled with mmap(). The history file has about 11 MBytes,
 the active file has just 23 lines :-).
-The output from "trace" in ddb has one line more than gdb's
 "backtrace":
 [...]
 sync_fsync(c76e1f7c) at sync_fsync + 0xcf
 sched_sync at sched_sync + 0x13a
 fork_trampoline at fork_trampoline + 0x1c

-Apart from this situation, I haven't seen this (or any other) panic.

Please let me know if I should provide additional information.

Bye, Philipp
-- 
http://www.uni-karlsruhe.de/~un1i/                          (,.)
                                                          \\\00 )
                                                            \= )
                                                            cc_|\_,^



gdb -k kernel.73.debug vmcore.73
GNU gdb 4.18
Copyright 1998 Free Software Foundation, Inc.
GDB is free software, covered by the GNU General Public License, and you are
welcome to change it and/or distribute copies of it under certain conditions.
Type "show copying" to see the conditions.
There is absolutely no warranty for GDB.  Type "show warranty" for details.
This GDB was configured as "i386-unknown-freebsd"...
IdlePTD 4055040
initial pcb at 32e6c0
panicstr: from debugger
panic messages:
---
panic: vm_pageout_flush page 0xc04eacd0 index 0/1: partially dirty page
panic: from debugger
Uptime: 4m2s

dumping to dev da0s1b, offset 26624
dump 96 95 94 93 92 91 90 89 88 87 86 85 84 83 82 81 80 79 78 77 76 75 74 73 72 71 70 
69 68 67 66 65 64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 
40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 
11 10 9 8 7 6 5 4 3 2 1 
---
#0  dumpsys () at /usr/src/sys/kern/kern_shutdown.c:477
477             if (dumping++) {
(kgdb) bt
#0  dumpsys () at /usr/src/sys/kern/kern_shutdown.c:477
#1  0xc0178080 in boot (howto=260) at /usr/src/sys/kern/kern_shutdown.c:320
#2  0xc01784d9 in panic (fmt=0xc02b70d4 "from debugger")
    at /usr/src/sys/kern/kern_shutdown.c:570
#3  0xc0132c01 in db_panic (addr=-1071064268, have_addr=0, count=-1, 
    modif=0xc70e1c4c "") at /usr/src/sys/ddb/db_command.c:433
#4  0xc0132ba1 in db_command (last_cmdp=0xc02f81d4, cmd_table=0xc02f8034, 
    aux_cmd_tablep=0xc031acb8) at /usr/src/sys/ddb/db_command.c:333
#5  0xc0132c66 in db_command_loop () at /usr/src/sys/ddb/db_command.c:455
#6  0xc0134e2b in db_trap (type=3, code=0) at /usr/src/sys/ddb/db_trap.c:71
#7  0xc028d8c6 in kdb_trap (type=3, code=0, regs=0xc70e1d4c)
    at /usr/src/sys/i386/i386/db_interface.c:163
#8  0xc029989c in trap (frame={tf_fs = 16, tf_es = 16, tf_ds = 16, tf_edi = 0, 
      tf_esi = 256, tf_ebp = -955376232, tf_isp = -955376264, 
      tf_ebx = 2097666, tf_edx = -1072980320, tf_ecx = 32, tf_eax = 18, 
      tf_trapno = 3, tf_err = 0, tf_eip = -1071064268, tf_cs = 8, 
      tf_eflags = 2097222, tf_esp = -1070695009, tf_ss = -1070848829})
    at /usr/src/sys/i386/i386/trap.c:589
#9  0xc028db34 in Debugger (msg=0xc02c24c3 "panic") at machine/cpufunc.h:60
#10 0xc01784d0 in panic (
    fmt=0xc02e2580 "vm_pageout_flush page %p index %d/%d: partially dirty page") at 
/usr/src/sys/kern/kern_shutdown.c:568
#11 0xc027798f in vm_pageout_flush (mc=0xc70e1df4, count=1, flags=0)
    at /usr/src/sys/vm/vm_pageout.c:378
#12 0xc0274902 in vm_object_page_clean (object=0xc78754e0, start=0, end=0, 
    flags=4) at /usr/src/sys/vm/vm_object.c:655
#13 0xc01ae696 in vfs_msync (mp=0xc0aa1200, flags=2)
    at /usr/src/sys/kern/vfs_subr.c:2597
#14 0xc01aea73 in sync_fsync (ap=0xc70e1f7c)
    at /usr/src/sys/kern/vfs_subr.c:2866
#15 0xc01ac9e6 in sched_sync () at vnode_if.h:423
(kgdb) frame 15
#15 0xc01ac9e6 in sched_sync () at vnode_if.h:423
423             rc = VCALL(vp, VOFFSET(vop_fsync), &a);
(kgdb) print a
$1 = {a_desc = 0xc02f2a40, a_vp = 0xc781bd00, a_cred = 0xc0699e00, 
  a_waitfor = 3, a_p = 0xc6a0cfe0}
(kgdb) print vp
$2 = (struct vnode *) 0xc781bd00
(kgdb) print *((struct vnode *) 0xc781bd00)
$3 = {v_flag = 2097152, v_usecount = 1, v_writecount = 0, v_holdcnt = 0, 
  v_id = 92, v_mount = 0xc0aa1200, v_op = 0xc0a56600, v_freelist = {
    tqe_next = 0x0, tqe_prev = 0x0}, v_mntvnodes = {le_next = 0x0, 
    le_prev = 0xc7819524}, v_cleanblkhd = {tqh_first = 0x0, 
    tqh_last = 0xc781bd2c}, v_dirtyblkhd = {tqh_first = 0x0, 
    tqh_last = 0xc781bd34}, v_synclist = {le_next = 0x0, 
    le_prev = 0xc0a4a798}, v_numoutput = 0, v_type = VNON, v_un = {
    vu_mountedhere = 0x0, vu_socket = 0x0, vu_spec = {vu_specinfo = 0x0, 
      vu_specnext = {sle_next = 0x0}}, vu_fifoinfo = 0x0}, v_lease = 0x0, 
  v_lastw = 0, v_cstart = 0, v_lasta = 0, v_clen = 0, v_object = 0x0, 
  v_interlock = {mtx_lock = 8, mtx_recurse = 0, mtx_saveintr = 0, 
    mtx_description = 0xc02c82e5 "vnode interlock", mtx_blocked = {
      tqh_first = 0x0, tqh_last = 0xc781bd7c}, mtx_contested = {le_next = 0x0, 
      le_prev = 0x0}, mtx_next = 0xc781bc6c, mtx_prev = 0xc781be6c}, v_lock = {
    lk_interlock = 0xc0692e28, lk_flags = 16777216, lk_sharecount = 0, 
    lk_waitcount = 0, lk_exclusivecount = 0, lk_prio = 20, 
    lk_wmesg = 0xc02c82f5 "vnlock", lk_timo = 0, lk_lockholder = -1}, 
  v_vnlock = 0x0, v_tag = VT_VFS, v_data = 0x0, v_cache_src = {
    lh_first = 0x0}, v_cache_dst = {tqh_first = 0x0, tqh_last = 0xc781bdc4}, 
  v_dd = 0xc781bd00, v_ddid = 0, v_pollinfo = {vpi_lock = {lock_data = 0}, 
    vpi_selinfo = {si_pid = 0, si_note = {slh_first = 0x0}, si_flags = 0}, 
    vpi_events = 0, vpi_revents = 0}, v_vxproc = 0x0}
(kgdb) down
#14 0xc01aea73 in sync_fsync (ap=0xc70e1f7c)
    at /usr/src/sys/kern/vfs_subr.c:2866
2866            vfs_msync(mp, MNT_NOWAIT);
(kgdb) print mp
$4 = (struct mount *) 0xc0aa1200
(kgdb) print *((struct mount *) 0xc0aa1200)
$5 = {mnt_list = {tqe_next = 0xc0ad9400, tqe_prev = 0xc0aa1a00}, 
  mnt_op = 0xc030df20, mnt_vfc = 0xc030df60, mnt_vnodecovered = 0xc70d8200, 
  mnt_syncer = 0xc781bd00, mnt_vnodelist = {lh_first = 0xc7828e00}, 
  mnt_lock = {lk_interlock = 0xc0692e00, lk_flags = 17825792, 
    lk_sharecount = 1, lk_waitcount = 0, lk_exclusivecount = 0, lk_prio = 20, 
    lk_wmesg = 0xc02c88a1 "vfslock", lk_timo = 0, lk_lockholder = -1}, 
  mnt_writeopcount = 1, mnt_flag = 4096, mnt_kern_flag = 0, 
  mnt_maxsymlinklen = 60, mnt_stat = {f_spare2 = 0, f_bsize = 1024, 
    f_iosize = 8192, f_blocks = 1817777, f_bfree = 201403, f_bavail = 55981, 
    f_files = 445438, f_ffree = 309929, f_fsid = {val = {901998103, 
        703157984}}, f_owner = 0, f_type = 6, f_flags = 4096, 
    f_syncwrites = 110, f_asyncwrites = 256, 
    f_fstypename = "ufs", '\000' <repeats 12 times>, 
    f_mntonname = "/usr", '\000' <repeats 75 times>, f_syncreads = 727, 
    f_asyncreads = 213, f_spares1 = 0, 
    f_mntfromname = "/dev/da0s1f", '\000' <repeats 68 times>, f_spares2 = 0, 
    f_spare = {0, 0}}, mnt_data = 0xc0aa1000, mnt_time = 0, 
  mnt_iosize_max = 65536}
(kgdb) down
#13 0xc01ae696 in vfs_msync (mp=0xc0aa1200, flags=2)
    at /usr/src/sys/kern/vfs_subr.c:2597
2597                                            vm_object_page_clean(obj, 0, 0, flags 
== MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
(kgdb) print obj
$6 = (struct vm_object *) 0xc78754e0
(kgdb) print *((struct vm_object *) 0xc78754e0)
$7 = {object_list = {tqe_next = 0xc787fa20, tqe_prev = 0xc7875420}, 
  shadow_head = {tqh_first = 0x0, tqh_last = 0xc78754e8}, shadow_list = {
    tqe_next = 0x0, tqe_prev = 0xc787fc10}, memq = {tqh_first = 0xc04eacd0, 
    tqh_last = 0xc04eacdc}, generation = 31, size = 1, ref_count = 1, 
  shadow_count = 0, hash_rand = -767679, type = 2 '\002', flags = 512, 
  pg_color = 13, paging_in_progress = 0, resident_page_count = 1, 
  backing_object = 0x0, backing_object_offset = 0, pager_object_list = {
    tqe_next = 0x0, tqe_prev = 0x0}, handle = 0xc70d6900, un_pager = {vnp = {
      vnp_size = 932}, devp = {devp_pglist = {tqh_first = 0x3a4, 
        tqh_last = 0x0}}, swp = {swp_bcount = 932}}}
(kgdb) down
#12 0xc0274902 in vm_object_page_clean (object=0xc78754e0, start=0, end=0, 
    flags=4) at /usr/src/sys/vm/vm_object.c:655
655                     vm_pageout_flush(ma, runlen, pagerflags);
(kgdb) print runlen
$8 = 1
(kgdb) print pagerflags
$9 = 0
(kgdb) print ma
$10 = (vm_page_t (*)[0]) 0xc70e1df4
(kgdb) print *(struct vm_page *)ma[0]
$14 = {pageq = {tqe_next = 0xc04eacd0, tqe_prev = 0xc70e1e28}, 
  hnext = 0xc01a4bc0, listq = {tqe_next = 0x0, tqe_prev = 0xc28590e4}, 
  object = 0xc70e1e28, pindex = 3222948849, phys_addr = 0, md = {
    pv_list_count = -1031434144, pv_list = {tqh_first = 0x1, tqh_last = 0x0}}, 
  queue = 37092, flags = 49797, pc = 0, wire_count = 0, hold_count = 7780, 
  act_count = 14 '\016', busy = 199 'Ç', valid = 97 'a', dirty = 64 '@'}
(kgdb) print *((struct vm_page *)ma[1])
$15 = {pageq = {tqe_next = 0x28c04eac, tqe_prev = 0xc0c70e1e}, 
  hnext = 0xc01a4b, listq = {tqe_next = 0xe4000000, tqe_prev = 0x28c28590}, 
  object = 0xf1c70e1e, pindex = 12589643, phys_addr = 1610612736, md = {
    pv_list_count = 29525392, pv_list = {tqh_first = 0x0, 
      tqh_last = 0xe4000000}}, queue = 34192, flags = 194, pc = 0, 
  wire_count = 25600, hold_count = 3614, act_count = 199 'Ç', busy = 97 'a', 
  valid = 64 '@', dirty = 26 '\032'}
(kgdb) down
#11 0xc027798f in vm_pageout_flush (mc=0xc70e1df4, count=1, flags=0)
    at /usr/src/sys/vm/vm_pageout.c:378
378                     KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL && mc[i]->dirty == 
VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially dirty page", 
mc[i], i, count));
(kgdb) print i
$16 = 0
(kgdb) print count
$17 = 1
(kgdb) print mc[0]
$18 = 0xc04eacd0
(kgdb) print *mc[0]
$19 = {pageq = {tqe_next = 0xc04baa6c, tqe_prev = 0xc0500af8}, hnext = 0x0, 
  listq = {tqe_next = 0x0, tqe_prev = 0xc78754f8}, object = 0xc78754e0, 
  pindex = 0, phys_addr = 39374848, md = {pv_list_count = 1, pv_list = {
      tqh_first = 0xc0635e40, tqh_last = 0xc0635e48}}, queue = 33, 
  flags = 160, pc = 13, wire_count = 0, hold_count = 0, act_count = 5 '\005', 
  busy = 0 '\000', valid = 255 'ÿ', dirty = 252 'ü'}
(kgdb) quit


To Unsubscribe: send mail to [EMAIL PROTECTED]
with "unsubscribe freebsd-current" in the body of the message

Reply via email to