Module Name:    src
Committed By:   riastradh
Date:           Sun Apr  9 09:00:56 UTC 2023

Modified Files:
        src/sys/uvm: uvm_amap.c uvm_bio.c uvm_fault.c uvm_km.c uvm_page.c
            uvm_physseg.c uvm_swap.c uvm_vnode.c

Log Message:
uvm(9): KASSERT(A && B) -> KASSERT(A); KASSERT(B)

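The change mechanically splits compound assertions so that, when one fires, the panic message names the specific condition that failed rather than the whole conjunction. (The uvm_swap.c hunk additionally distributes || over &&: A || (B && C) becomes A || B together with A || C, which is logically equivalent.) A minimal userland sketch of the idea, using assert(3) as a stand-in for the kernel's KASSERT(9) and hypothetical fields that are not taken from the UVM sources:

	#include <assert.h>
	#include <stddef.h>

	struct obj {
		int ref;	/* reference count */
		void *owner;	/* owning object, if any */
	};

	static void
	obj_free(struct obj *o)
	{
		/*
		 * Before: a failure reports only the combined expression
		 * "ref == 0 && owner == NULL", leaving it unclear which
		 * half was violated:
		 *
		 *	assert(o->ref == 0 && o->owner == NULL);
		 *
		 * After: each condition is checked on its own, so the
		 * diagnostic pinpoints the offending conjunct.
		 */
		assert(o->ref == 0);
		assert(o->owner == NULL);
	}

	int
	main(void)
	{
		struct obj o = { .ref = 0, .owner = NULL };

		obj_free(&o);	/* both assertions hold */
		return 0;
	}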

To generate a diff of this commit:
cvs rdiff -u -r1.126 -r1.127 src/sys/uvm/uvm_amap.c
cvs rdiff -u -r1.127 -r1.128 src/sys/uvm/uvm_bio.c
cvs rdiff -u -r1.231 -r1.232 src/sys/uvm/uvm_fault.c
cvs rdiff -u -r1.164 -r1.165 src/sys/uvm/uvm_km.c
cvs rdiff -u -r1.251 -r1.252 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.17 -r1.18 src/sys/uvm/uvm_physseg.c
cvs rdiff -u -r1.207 -r1.208 src/sys/uvm/uvm_swap.c
cvs rdiff -u -r1.118 -r1.119 src/sys/uvm/uvm_vnode.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/uvm/uvm_amap.c
diff -u src/sys/uvm/uvm_amap.c:1.126 src/sys/uvm/uvm_amap.c:1.127
--- src/sys/uvm/uvm_amap.c:1.126	Sat Mar 13 15:29:55 2021
+++ src/sys/uvm/uvm_amap.c	Sun Apr  9 09:00:56 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_amap.c,v 1.126 2021/03/13 15:29:55 skrll Exp $	*/
+/*	$NetBSD: uvm_amap.c,v 1.127 2023/04/09 09:00:56 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.126 2021/03/13 15:29:55 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.127 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -323,7 +323,8 @@ amap_free(struct vm_amap *amap)
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
-	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
+	KASSERT(amap->am_ref == 0);
+	KASSERT(amap->am_nused == 0);
 	KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
 	slots = amap->am_maxslot;
 	kmem_free(amap->am_slots, slots * sizeof(*amap->am_slots));
@@ -774,7 +775,8 @@ amap_wipeout(struct vm_amap *amap)
 
 		slot = amap->am_slots[lcv];
 		anon = amap->am_anon[slot];
-		KASSERT(anon != NULL && anon->an_ref != 0);
+		KASSERT(anon != NULL);
+		KASSERT(anon->an_ref != 0);
 
 		KASSERT(anon->an_lock == amap->am_lock);
 		UVMHIST_LOG(maphist,"  processing anon %#jx, ref=%jd",
@@ -1069,7 +1071,8 @@ ReStart:
 		if (pg->loan_count != 0) {
 			continue;
 		}
-		KASSERT(pg->uanon == anon && pg->uobject == NULL);
+		KASSERT(pg->uanon == anon);
+		KASSERT(pg->uobject == NULL);
 
 		/*
 		 * If the page is busy, then we have to unlock, wait for

Index: src/sys/uvm/uvm_bio.c
diff -u src/sys/uvm/uvm_bio.c:1.127 src/sys/uvm/uvm_bio.c:1.128
--- src/sys/uvm/uvm_bio.c:1.127	Sun Feb 12 16:28:32 2023
+++ src/sys/uvm/uvm_bio.c	Sun Apr  9 09:00:56 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_bio.c,v 1.127 2023/02/12 16:28:32 andvar Exp $	*/
+/*	$NetBSD: uvm_bio.c,v 1.128 2023/04/09 09:00:56 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.127 2023/02/12 16:28:32 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.128 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
@@ -555,7 +555,9 @@ again:
 	}
 
 	if (flags & UBC_WRITE) {
-		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
+		KASSERTMSG(umap->writeoff == 0,
+		    "ubc_alloc: concurrent writes to uobj %p", uobj);
+		KASSERTMSG(umap->writelen == 0,
 		    "ubc_alloc: concurrent writes to uobj %p", uobj);
 		umap->writeoff = slot_offset;
 		umap->writelen = *lenp;

Index: src/sys/uvm/uvm_fault.c
diff -u src/sys/uvm/uvm_fault.c:1.231 src/sys/uvm/uvm_fault.c:1.232
--- src/sys/uvm/uvm_fault.c:1.231	Wed Oct 26 23:27:32 2022
+++ src/sys/uvm/uvm_fault.c	Sun Apr  9 09:00:56 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_fault.c,v 1.231 2022/10/26 23:27:32 riastradh Exp $	*/
+/*	$NetBSD: uvm_fault.c,v 1.232 2023/04/09 09:00:56 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.231 2022/10/26 23:27:32 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.232 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -2670,7 +2670,8 @@ uvm_fault_unwire_locked(struct vm_map *m
 	 * find the beginning map entry for the region.
 	 */
 
-	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
+	KASSERT(start >= vm_map_min(map));
+	KASSERT(end <= vm_map_max(map));
 	if (uvm_map_lookup_entry(map, start, &entry) == false)
 		panic("uvm_fault_unwire_locked: address not in map");
 
@@ -2683,8 +2684,8 @@ uvm_fault_unwire_locked(struct vm_map *m
 
 		KASSERT(va >= entry->start);
 		while (va >= entry->end) {
-			KASSERT(entry->next != &map->header &&
-				entry->next->start <= entry->end);
+			KASSERT(entry->next != &map->header);
+			KASSERT(entry->next->start <= entry->end);
 			entry = entry->next;
 		}
 

Index: src/sys/uvm/uvm_km.c
diff -u src/sys/uvm/uvm_km.c:1.164 src/sys/uvm/uvm_km.c:1.165
--- src/sys/uvm/uvm_km.c:1.164	Sun Feb 26 07:27:14 2023
+++ src/sys/uvm/uvm_km.c	Sun Apr  9 09:00:56 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_km.c,v 1.164 2023/02/26 07:27:14 skrll Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.165 2023/04/09 09:00:56 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -152,7 +152,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.164 2023/02/26 07:27:14 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.165 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -537,7 +537,8 @@ uvm_km_pgremove_intrsafe(struct vm_map *
 		for (i = 0; i < npgrm; i++) {
 			pg = PHYS_TO_VM_PAGE(pa[i]);
 			KASSERT(pg);
-			KASSERT(pg->uobject == NULL && pg->uanon == NULL);
+			KASSERT(pg->uobject == NULL);
+			KASSERT(pg->uanon == NULL);
 			KASSERT((pg->flags & PG_BUSY) == 0);
 			uvm_pagefree(pg);
 		}

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.251 src/sys/uvm/uvm_page.c:1.252
--- src/sys/uvm/uvm_page.c:1.251	Wed Oct 26 23:38:09 2022
+++ src/sys/uvm/uvm_page.c	Sun Apr  9 09:00:56 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.251 2022/10/26 23:38:09 riastradh Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.252 2023/04/09 09:00:56 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.251 2022/10/26 23:38:09 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.252 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -1215,7 +1215,8 @@ uvm_pagealloc_strat(struct uvm_object *o
 	case UVM_PGA_STRAT_ONLY:
 	case UVM_PGA_STRAT_FALLBACK:
 		/* Attempt to allocate from the specified free list. */
-		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
+		KASSERT(free_list >= 0);
+		KASSERT(free_list < VM_NFREELIST);
 		pg = uvm_pagealloc_pgfl(ucpu, free_list, &color, flags);
 		if (pg != NULL) {
 			goto gotit;
@@ -2100,7 +2101,8 @@ uvm_direct_process(struct vm_page **pgs,
 	voff_t pgoff = (off & PAGE_MASK);
 	struct vm_page *pg;
 
-	KASSERT(npages > 0 && len > 0);
+	KASSERT(npages > 0);
+	KASSERT(len > 0);
 
 	for (int i = 0; i < npages; i++) {
 		pg = pgs[i];
@@ -2111,7 +2113,8 @@ uvm_direct_process(struct vm_page **pgs,
 		 * Caller is responsible for ensuring all the pages are
 		 * available.
 		 */
-		KASSERT(pg != NULL && pg != PGO_DONTCARE);
+		KASSERT(pg != NULL);
+		KASSERT(pg != PGO_DONTCARE);
 
 		pa = VM_PAGE_TO_PHYS(pg);
 		todo = MIN(len, PAGE_SIZE - pgoff);

Index: src/sys/uvm/uvm_physseg.c
diff -u src/sys/uvm/uvm_physseg.c:1.17 src/sys/uvm/uvm_physseg.c:1.18
--- src/sys/uvm/uvm_physseg.c:1.17	Wed Jul 15 15:08:26 2020
+++ src/sys/uvm/uvm_physseg.c	Sun Apr  9 09:00:56 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_physseg.c,v 1.17 2020/07/15 15:08:26 rin Exp $ */
+/* $NetBSD: uvm_physseg.c,v 1.18 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -173,7 +173,8 @@ uvm_physseg_alloc(size_t sz)
 		size_t n = sz / sizeof(struct uvm_physseg);
 		nseg += n;
 
-		KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);
+		KASSERT(nseg > 0);
+		KASSERT(nseg <= VM_PHYSSEG_MAX);
 
 		return &uvm_physseg[nseg - n];
 	}
@@ -1012,7 +1013,8 @@ uvm_physseg_set_avail_start(uvm_physseg_
 	paddr_t avail_end;
 	avail_end = uvm_physseg_get_avail_end(upm);
 	KASSERT(uvm_physseg_valid_p(upm));
-	KASSERT(avail_start < avail_end && avail_start >= ps->start);
+	KASSERT(avail_start < avail_end);
+	KASSERT(avail_start >= ps->start);
 #endif
 
 	ps->avail_start = avail_start;
@@ -1027,7 +1029,8 @@ uvm_physseg_set_avail_end(uvm_physseg_t 
 	paddr_t avail_start;
 	avail_start = uvm_physseg_get_avail_start(upm);
 	KASSERT(uvm_physseg_valid_p(upm));
-	KASSERT(avail_end > avail_start && avail_end <= ps->end);
+	KASSERT(avail_end > avail_start);
+	KASSERT(avail_end <= ps->end);
 #endif
 
 	ps->avail_end = avail_end;
@@ -1093,7 +1096,8 @@ uvm_physseg_init_seg(uvm_physseg_t upm, 
 	struct uvm_physseg *seg;
 	struct vm_page *pg;
 
-	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);
+	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
+	KASSERT(pgs != NULL);
 
 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
 	KASSERT(seg != NULL);

Index: src/sys/uvm/uvm_swap.c
diff -u src/sys/uvm/uvm_swap.c:1.207 src/sys/uvm/uvm_swap.c:1.208
--- src/sys/uvm/uvm_swap.c:1.207	Wed Dec 21 02:28:06 2022
+++ src/sys/uvm/uvm_swap.c	Sun Apr  9 09:00:56 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_swap.c,v 1.207 2022/12/21 02:28:06 chs Exp $	*/
+/*	$NetBSD: uvm_swap.c,v 1.208 2023/04/09 09:00:56 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.207 2022/12/21 02:28:06 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.208 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_compat_netbsd.h"
@@ -1873,7 +1873,8 @@ uvm_swap_io(struct vm_page **pps, int st
 	 * allocate a buf for the i/o.
 	 */
 
-	KASSERT(curlwp != uvm.pagedaemon_lwp || (write && async));
+	KASSERT(curlwp != uvm.pagedaemon_lwp || write);
+	KASSERT(curlwp != uvm.pagedaemon_lwp || async);
 	bp = getiobuf(swapdev_vp, curlwp != uvm.pagedaemon_lwp);
 	if (bp == NULL) {
 		uvm_aio_aiodone_pages(pps, npages, true, ENOMEM);

Index: src/sys/uvm/uvm_vnode.c
diff -u src/sys/uvm/uvm_vnode.c:1.118 src/sys/uvm/uvm_vnode.c:1.119
--- src/sys/uvm/uvm_vnode.c:1.118	Sat Mar 13 15:29:55 2021
+++ src/sys/uvm/uvm_vnode.c	Sun Apr  9 09:00:56 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $	*/
+/*	$NetBSD: uvm_vnode.c,v 1.119 2023/04/09 09:00:56 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.119 2023/04/09 09:00:56 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
@@ -449,7 +449,8 @@ uvm_vnp_setsize(struct vnode *vp, voff_t
 	 * toss some pages...
 	 */
 
-	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
+	KASSERT(newsize != VSIZENOTSET);
+	KASSERT(newsize >= 0);
 	KASSERT(vp->v_size <= vp->v_writesize);
 	KASSERT(vp->v_size == vp->v_writesize ||
 	    newsize == vp->v_writesize || newsize <= vp->v_size);
@@ -476,7 +477,8 @@ uvm_vnp_setwritesize(struct vnode *vp, v
 {
 
 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
-	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
+	KASSERT(newsize != VSIZENOTSET);
+	KASSERT(newsize >= 0);
 	KASSERT(vp->v_size != VSIZENOTSET);
 	KASSERT(vp->v_writesize != VSIZENOTSET);
 	KASSERT(vp->v_size <= vp->v_writesize);
