Module Name:    src
Committed By:   rin
Date:           Wed Sep  8 07:13:18 UTC 2021

Modified Files:
        src/sys/arch/sh3/sh3: pmap.c

Log Message:
Revert rev 1.89:
http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/arch/sh3/sh3/pmap.c#rev1.89

I misunderstood the precedence of the ?: operator.
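
To illustrate (excerpted from the diff below): in C, bitwise | binds tighter
than ?:, so the expression introduced in rev 1.89 groups the whole OR chain
into the condition instead of selecting between the protection bits:

	/* rev 1.89 (reverted here): */
	entry |= PG_V | PG_SH |
	    (prot & VM_PROT_WRITE) ?
	    (PG_PR_KRW | PG_D) : PG_PR_KRO;

	/* parses as: */
	entry |= (PG_V | PG_SH | (prot & VM_PROT_WRITE)) ?
	    (PG_PR_KRW | PG_D) : PG_PR_KRO;

	/* intended (restored by this revert): */
	entry |= PG_V | PG_SH |
	    ((prot & VM_PROT_WRITE) ?
	    (PG_PR_KRW | PG_D) : PG_PR_KRO);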

I should have split the commit into two parts, i.e., one that changes the
generated binary and one that does not, in order to avoid such a serious
mistake.

Thanks so much to rillig@ for the careful check!


To generate a diff of this commit:
cvs rdiff -u -r1.89 -r1.90 src/sys/arch/sh3/sh3/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/sh3/sh3/pmap.c
diff -u src/sys/arch/sh3/sh3/pmap.c:1.89 src/sys/arch/sh3/sh3/pmap.c:1.90
--- src/sys/arch/sh3/sh3/pmap.c:1.89	Wed Sep  8 00:35:56 2021
+++ src/sys/arch/sh3/sh3/pmap.c	Wed Sep  8 07:13:18 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.89 2021/09/08 00:35:56 rin Exp $	*/
+/*	$NetBSD: pmap.c,v 1.90 2021/09/08 07:13:18 rin Exp $	*/
 
 /*-
  * Copyright (c) 2002 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.89 2021/09/08 00:35:56 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.90 2021/09/08 07:13:18 rin Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -153,7 +153,7 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 	va = SH3_PHYS_TO_P1SEG(pa);
 	memset((void *)va, 0, size);
 
-	return va;
+	return (va);
 }
 
 vaddr_t
@@ -162,7 +162,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
 	int i, n;
 
 	if (maxkvaddr <= __pmap_kve)
-		return __pmap_kve;
+		return (__pmap_kve);
 
 	i = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
 	__pmap_kve = __PMAP_PTP_TRUNC(maxkvaddr);
@@ -190,9 +190,9 @@ pmap_growkernel(vaddr_t maxkvaddr)
 		}
 	}
 
-	return __pmap_kve;
+	return (__pmap_kve);
  error:
-	panic("%s: out of memory", __func__);
+	panic("pmap_growkernel: out of memory.");
 	/* NOTREACHED */
 }
 
@@ -248,7 +248,7 @@ pmap_create(void)
 		    uvm_pagealloc(NULL, 0, NULL,
 			UVM_PGA_USERESERVE | UVM_PGA_ZERO)));
 
-	return pmap;
+	return (pmap);
 }
 
 void
@@ -271,7 +271,7 @@ pmap_destroy(pmap_t pmap)
 			for (j = 0; j < __PMAP_PTP_PG_N; j++, pte++)
 				KDASSERT(*pte == 0);
 		}
-#endif
+#endif /* DEBUG */
 		/* Purge cache entry for next use of this page. */
 		if (SH_HAS_VIRTUAL_ALIAS)
 			sh_dcache_inv_range(va, PAGE_SIZE);
@@ -323,7 +323,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 	struct vm_page *pg;
 	struct vm_page_md *pvh;
 	pt_entry_t entry, *pte;
-	bool kva = pmap == pmap_kernel();
+	bool kva = (pmap == pmap_kernel());
 
 	/* "flags" never exceed "prot" */
 	KDASSERT(prot != 0 && ((flags & VM_PROT_ALL) & ~prot) == 0);
@@ -362,23 +362,24 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 
 		/* Check for existing mapping */
 		if (__pmap_map_change(pmap, va, pa, prot, entry))
-			return 0;
+			return (0);
 
 		/* Add to physical-virtual map list of this page */
 		if (__pmap_pv_enter(pmap, pg, va)) {
 			if (flags & PMAP_CANFAIL)
 				return ENOMEM;
-			panic("%s: cannot allocate pv", __func__);
+			panic("%s: __pmap_pv_enter failed", __func__);
 		}
 	} else {	/* bus-space (always uncached map) */
-		if (kva)
+		if (kva) {
 			entry |= PG_V | PG_SH |
-			    (prot & VM_PROT_WRITE) ?
-			    (PG_PR_KRW | PG_D) : PG_PR_KRO;
-		else
+			    ((prot & VM_PROT_WRITE) ?
+			    (PG_PR_KRW | PG_D) : PG_PR_KRO);
+		} else {
 			entry |= PG_V |
-			    (prot & VM_PROT_WRITE) ?
-			    (PG_PR_URW | PG_D) : PG_PR_URO;
+			    ((prot & VM_PROT_WRITE) ?
+			    (PG_PR_URW | PG_D) : PG_PR_URO);
+		}
 	}
 
 	/* Register to page table */
@@ -392,7 +393,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 					__pmap_pv_remove(pmap, pg, va);
 				return ENOMEM;
 			}
-			panic("%s: cannot allocate pte", __func__);
+			panic("%s: __pmap_pte_alloc failed", __func__);
 		}
 	}
 
@@ -409,7 +410,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 		pmap->pm_stats.wired_count++;
 	pmap->pm_stats.resident_count++;
 
-	return 0;
+	return (0);
 }
 
 /*
@@ -427,13 +428,13 @@ __pmap_map_change(pmap_t pmap, vaddr_t v
 	vaddr_t eva = va + PAGE_SIZE;
 
 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
-	    (oentry = *pte) == 0)
-		return false;		/* no mapping exists. */
+	    ((oentry = *pte) == 0))
+		return (false);		/* no mapping exists. */
 
 	if (pa != (oentry & PG_PPN)) {
 		/* Enter a mapping at a mapping to another physical page. */
 		pmap_remove(pmap, va, eva);
-		return false;
+		return (false);
 	}
 
 	/* Pre-existing mapping */
@@ -453,10 +454,10 @@ __pmap_map_change(pmap_t pmap, vaddr_t v
 	} else if (entry & _PG_WIRED) {
 		/* unwired -> wired. make sure to reflect "flags" */
 		pmap_remove(pmap, va, eva);
-		return false;
+		return (false);
 	}
 
-	return true;	/* mapping was changed. */
+	return (true);	/* mapping was changed. */
 }
 
 /*
@@ -574,7 +575,7 @@ __pmap_pv_remove(pmap_t pmap, struct vm_
 #ifdef DEBUG
 	/* Check duplicated map. */
 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link)
-		KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
+	    KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
 #endif
 	splx(s);
 }
@@ -637,17 +638,17 @@ pmap_extract(pmap_t pmap, vaddr_t va, pa
 	if (pmap == pmap_kernel() && (va >> 30) == 2) {
 		if (pap != NULL)
 			*pap = va & SH3_PHYS_MASK;
-		return true;
+		return (true);
 	}
 
 	pte = __pmap_pte_lookup(pmap, va);
 	if (pte == NULL || *pte == 0)
-		return false;
+		return (false);
 
 	if (pap != NULL)
 		*pap = (*pte & PG_PPN) | (va & PGOFSET);
 
-	return true;
+	return (true);
 }
 
 void
@@ -666,20 +667,23 @@ pmap_protect(pmap_t pmap, vaddr_t sva, v
 
 	switch (prot) {
 	default:
-		panic("%s: invalid protection mode %x", __func__, prot);
+		panic("pmap_protect: invalid protection mode %x", prot);
 		/* NOTREACHED */
 	case VM_PROT_READ:
+		/* FALLTHROUGH */
 	case VM_PROT_READ | VM_PROT_EXECUTE:
 		protbits = kernel ? PG_PR_KRO : PG_PR_URO;
 		break;
 	case VM_PROT_READ | VM_PROT_WRITE:
+		/* FALLTHROUGH */
 	case VM_PROT_ALL:
 		protbits = kernel ? PG_PR_KRW : PG_PR_URW;
 		break;
 	}
 
 	for (va = sva; va < eva; va += PAGE_SIZE) {
-		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
+
+		if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
 		    (entry = *pte) == 0)
 			continue;
 
@@ -709,10 +713,12 @@ pmap_page_protect(struct vm_page *pg, vm
 
 	switch (prot) {
 	case VM_PROT_READ | VM_PROT_WRITE:
+		/* FALLTHROUGH */
 	case VM_PROT_ALL:
 		break;
 
 	case VM_PROT_READ:
+		/* FALLTHROUGH */
 	case VM_PROT_READ | VM_PROT_EXECUTE:
 		s = splvm();
 		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
@@ -775,8 +781,9 @@ pmap_zero_page(paddr_t phys)
 		/* sync cache since we access via P2. */
 		sh_dcache_wbinv_all();
 		memset((void *)SH3_PHYS_TO_P2SEG(phys), 0, PAGE_SIZE);
-	} else
+	} else {
 		memset((void *)SH3_PHYS_TO_P1SEG(phys), 0, PAGE_SIZE);
+	}
 }
 
 void
@@ -788,9 +795,10 @@ pmap_copy_page(paddr_t src, paddr_t dst)
 		sh_dcache_wbinv_all();
 		memcpy((void *)SH3_PHYS_TO_P2SEG(dst),
 		    (void *)SH3_PHYS_TO_P2SEG(src), PAGE_SIZE);
-	} else
+	} else {
 		memcpy((void *)SH3_PHYS_TO_P1SEG(dst),
 		    (void *)SH3_PHYS_TO_P1SEG(src), PAGE_SIZE);
+	}
 }
 
 bool
@@ -798,7 +806,7 @@ pmap_is_referenced(struct vm_page *pg)
 {
 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
 
-	return (pvh->pvh_flags & PVH_REFERENCED) ? true : false;
+	return ((pvh->pvh_flags & PVH_REFERENCED) ? true : false);
 }
 
 bool
@@ -833,7 +841,7 @@ pmap_clear_reference(struct vm_page *pg)
 	}
 	splx(s);
 
-	return true;
+	return (true);
 }
 
 bool
@@ -841,7 +849,7 @@ pmap_is_modified(struct vm_page *pg)
 {
 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
 
-	return (pvh->pvh_flags & PVH_MODIFIED) ? true : false;
+	return ((pvh->pvh_flags & PVH_MODIFIED) ? true : false);
 }
 
 bool
@@ -857,14 +865,14 @@ pmap_clear_modify(struct vm_page *pg)
 
 	modified = pvh->pvh_flags & PVH_MODIFIED;
 	if (!modified)
-		return false;
+		return (false);
 
 	pvh->pvh_flags &= ~PVH_MODIFIED;
 
 	s = splvm();
 	if (SLIST_EMPTY(&pvh->pvh_head)) {/* no map on this page */
 		splx(s);
-		return true;
+		return (true);
 	}
 
 	/* Write-back and invalidate TLB entry */
@@ -889,14 +897,14 @@ pmap_clear_modify(struct vm_page *pg)
 	}
 	splx(s);
 
-	return true;
+	return (true);
 }
 
 paddr_t
 pmap_phys_address(paddr_t cookie)
 {
 
-	return sh3_ptob(cookie);
+	return (sh3_ptob(cookie));
 }
 
 #ifdef SH4
@@ -937,9 +945,9 @@ __pmap_pv_page_alloc(struct pool *pool, 
 
 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
 	if (pg == NULL)
-		return NULL;
+		return (NULL);
 
-	return (void *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
+	return ((void *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg)));
 }
 
 void
@@ -965,7 +973,7 @@ __pmap_pte_alloc(pmap_t pmap, vaddr_t va
 	pt_entry_t *ptp, *pte;
 
 	if ((pte = __pmap_pte_lookup(pmap, va)) != NULL)
-		return pte;
+		return (pte);
 
 	/* Allocate page table (not managed page) */
 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
@@ -975,7 +983,7 @@ __pmap_pte_alloc(pmap_t pmap, vaddr_t va
 	ptp = (pt_entry_t *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
 	pmap->pm_ptp[__PMAP_PTP_INDEX(va)] = ptp;
 
-	return ptp + __PMAP_PTP_OFSET(va);
+	return (ptp + __PMAP_PTP_OFSET(va));
 }
 
 /*
@@ -988,14 +996,14 @@ __pmap_pte_lookup(pmap_t pmap, vaddr_t v
 	pt_entry_t *ptp;
 
 	if (pmap == pmap_kernel())
-		return __pmap_kpte_lookup(va);
+		return (__pmap_kpte_lookup(va));
 
 	/* Lookup page table page */
 	ptp = pmap->pm_ptp[__PMAP_PTP_INDEX(va)];
 	if (ptp == NULL)
-		return NULL;
+		return (NULL);
 
-	return ptp + __PMAP_PTP_OFSET(va);
+	return (ptp + __PMAP_PTP_OFSET(va));
 }
 
 /*
@@ -1011,7 +1019,7 @@ __pmap_kpte_lookup(vaddr_t va)
 	if (ptp == NULL)
 		return NULL;
 
-	return ptp + __PMAP_PTP_OFSET(va);
+	return (ptp + __PMAP_PTP_OFSET(va));
 }
 
 /*
@@ -1026,13 +1034,13 @@ __pmap_pte_load(pmap_t pmap, vaddr_t va,
 	pt_entry_t *pte;
 	pt_entry_t entry;
 
-	KDASSERT(((intptr_t)va < 0 && pmap == pmap_kernel()) ||
-	    ((intptr_t)va >= 0 && pmap != pmap_kernel()));
+	KDASSERT((((int)va < 0) && (pmap == pmap_kernel())) ||
+	    (((int)va >= 0) && (pmap != pmap_kernel())));
 
 	/* Lookup page table entry */
-	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
-	    (entry = *pte) == 0)
-		return false;
+	if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
+	    ((entry = *pte) == 0))
+		return (false);
 
 	KDASSERT(va != 0);
 
@@ -1055,7 +1063,7 @@ __pmap_pte_load(pmap_t pmap, vaddr_t va,
 	if (pmap->pm_asid != -1)
 		sh_tlb_update(pmap->pm_asid, va, entry);
 
-	return true;
+	return (true);
 }
 
 /*
@@ -1078,7 +1086,7 @@ __pmap_asid_alloc(void)
 			if ((map & (1 << j)) == 0 && (k + j) != 0) {
 				__pmap_asid.map[k] |= (1 << j);
 				__pmap_asid.hint = (k << 5) + j;
-				return __pmap_asid.hint;
+				return (__pmap_asid.hint);
 			}
 		}
 	}
@@ -1092,11 +1100,11 @@ __pmap_asid_alloc(void)
 			/* Invalidate all old ASID entry */
 			sh_tlb_invalidate_asid(pmap->pm_asid);
 
-			return __pmap_asid.hint;
+			return (__pmap_asid.hint);
 		}
 	}
 
-	panic("%s: no ASID allocated", __func__);
+	panic("No ASID allocated.");
 	/* NOTREACHED */
 }
 
