The diff below prevents a future lock ordering problem between NFSnode locks (similar to Inode locks) and the NET_LOCK(). It ensures the NET_LOCK() is always taken *after* any NFSnode lock by fixing the UVM fault case. So we always have: VFS -> NFS -> NFSnode lock -> socket -> NET_LOCK().
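To make the rule concrete, here is a minimal userland sketch of the ordering, in the spirit of a witness(4)-style checker but much simpler; every name in it (lock_rank, rank_acquire, highest_held) is made up for illustration. It ranks the lock classes as above and asserts that a thread only acquires locks of strictly increasing rank:

#include <assert.h>

/* Ranks follow VFS -> NFS -> NFSnode lock -> socket -> NET_LOCK(). */
enum lock_rank {
	RANK_VFS = 1,
	RANK_NFS,
	RANK_NFSNODE,
	RANK_SOCKET,
	RANK_NETLOCK,
};

/*
 * Highest rank held by the current thread; 0 means none.  A real
 * checker would keep a per-thread stack of held locks, this sketch
 * only tracks the top to keep the idea visible.
 */
static __thread int highest_held;

static void
rank_acquire(int rank)
{
	/*
	 * Taking an NFSnode lock while already holding NET_LOCK()
	 * trips this assertion; exactly the inversion the diff
	 * prevents.
	 */
	assert(rank > highest_held);
	highest_held = rank;
}

int
main(void)
{
	rank_acquire(RANK_NFSNODE);	/* fine, nothing held yet */
	rank_acquire(RANK_NETLOCK);	/* fine, NET_LOCK() comes last */
	/* rank_acquire(RANK_NFSNODE) here would abort: wrong order. */
	return 0;
}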
Ok?

Index: uvm/uvm_vnode.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_vnode.c,v
retrieving revision 1.99
diff -u -p -r1.99 uvm_vnode.c
--- uvm/uvm_vnode.c	8 Mar 2018 22:04:18 -0000	1.99
+++ uvm/uvm_vnode.c	20 Mar 2018 12:58:17 -0000
@@ -1105,6 +1105,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t
 	off_t file_offset;
 	int waitf, result, mapinflags;
 	size_t got, wanted;
+	int netlocked = 0;
 
 	/* init values */
 	waitf = (flags & PGO_SYNCIO) ? M_WAITOK : M_NOWAIT;
@@ -1174,18 +1175,24 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t
 	 * Ideally, this kind of operation *should* work.
 	 */
 	result = 0;
-	if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
-		result = vn_lock(vn, LK_EXCLUSIVE | LK_RECURSEFAIL, curproc);
-
-	if (result == 0) {
-		int netlocked = (rw_status(&netlock) == RW_WRITE);
-
+	if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0) {
 		/*
 		 * This process may already have the NET_LOCK(), if we
 		 * faulted in copyin() or copyout() in the network stack.
 		 */
-		if (netlocked)
+		if (rw_status(&netlock) == RW_WRITE) {
+			netlocked = 1;
+			NET_UNLOCK();
+		}
+
+		result = vn_lock(vn, LK_EXCLUSIVE | LK_RECURSEFAIL, curproc);
+	}
+
+	if (result == 0) {
+		if (!netlocked && (rw_status(&netlock) == RW_WRITE)) {
+			netlocked = 1;
 			NET_UNLOCK();
+		}
 
 		/* NOTE: vnode now locked! */
 		if (rw == UIO_READ)
@@ -1195,11 +1202,12 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t
 			    (flags & PGO_PDFREECLUST) ? IO_NOCACHE : 0,
 			    curproc->p_ucred);
 
+		if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
+			VOP_UNLOCK(vn, curproc);
+
 		if (netlocked)
 			NET_LOCK();
 
-		if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
-			VOP_UNLOCK(vn, curproc);
 	}
 
 	/* NOTE: vnode now unlocked (unless vnislocked) */
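For readers without the kernel tree at hand, the same dance can be compiled and run in userland. A sketch with entirely made-up names: fake_netlock, fake_vnode_lock and the holds_netlock flag stand in for the netlock, the vnode lock and the rw_status(&netlock) == RW_WRITE check, since pthreads cannot ask whether the current thread holds a mutex.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fake_netlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fake_vnode_lock = PTHREAD_MUTEX_INITIALIZER;

/* Per-thread stand-in for rw_status(&netlock) == RW_WRITE. */
static __thread int holds_netlock;

static void
fake_uvn_io(void)
{
	int netlocked = 0;

	/*
	 * We may have faulted in copyin()/copyout() with the net lock
	 * held: drop it *before* sleeping on the vnode lock, so the
	 * net lock is only ever taken after the vnode lock.
	 */
	if (holds_netlock) {
		netlocked = 1;
		holds_netlock = 0;
		pthread_mutex_unlock(&fake_netlock);
	}

	pthread_mutex_lock(&fake_vnode_lock);
	printf("I/O runs with the vnode locked, net lock dropped\n");
	pthread_mutex_unlock(&fake_vnode_lock);

	/* Retake the net lock so the caller finds it as it left it. */
	if (netlocked) {
		pthread_mutex_lock(&fake_netlock);
		holds_netlock = 1;
	}
}

int
main(void)
{
	/* Simulate recv(2) holding the net lock and faulting in copyout(). */
	pthread_mutex_lock(&fake_netlock);
	holds_netlock = 1;
	fake_uvn_io();
	pthread_mutex_unlock(&fake_netlock);
	return 0;
}

Note that the old code already released the net lock around the VOP_READ()/VOP_WRITE() itself; the diff only moves the NET_UNLOCK() in front of vn_lock() and the matching NET_LOCK() behind VOP_UNLOCK(), so the thread never waits for the vnode lock while holding the net lock.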