The compiler is also smart enough to recognize that this is redundant. The resulting code on amd64 is basically equivalent (slightly different register allocation and instruction scheduling).
ok?

Index: uvm/uvm_amap.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_amap.c,v
retrieving revision 1.61
diff -u -p -r1.61 uvm_amap.c
--- uvm/uvm_amap.c	15 Mar 2016 18:16:21 -0000	1.61
+++ uvm/uvm_amap.c	16 Mar 2016 16:33:52 -0000
@@ -917,10 +917,6 @@ amap_swap_off(int startslot, int endslot
 	LIST_INSERT_BEFORE(am, &marker_prev, am_list);
 	LIST_INSERT_AFTER(am, &marker_next, am_list);
 
-	if (am->am_nused <= 0) {
-		goto next;
-	}
-
 	for (i = 0; i < am->am_nused; i++) {
 		int slot;
 		int swslot;
@@ -950,7 +946,6 @@ amap_swap_off(int startslot, int endslot
 		i = 0;
 	}
 
-next:
 	KASSERT(LIST_NEXT(&marker_prev, am_list) == &marker_next ||
	    LIST_NEXT(LIST_NEXT(&marker_prev, am_list), am_list) == &marker_next);