Module Name: src Committed By: matt Date: Mon Jun 22 06:24:17 UTC 2015
Modified Files: src/sys/uvm: uvm_fault.c uvm_map.c Log Message: Use %p, %#lx etc. for pointers and addresses. To generate a diff of this commit: cvs rdiff -u -r1.196 -r1.197 src/sys/uvm/uvm_fault.c cvs rdiff -u -r1.333 -r1.334 src/sys/uvm/uvm_map.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/uvm/uvm_fault.c diff -u src/sys/uvm/uvm_fault.c:1.196 src/sys/uvm/uvm_fault.c:1.197 --- src/sys/uvm/uvm_fault.c:1.196 Sun Aug 10 16:44:37 2014 +++ src/sys/uvm/uvm_fault.c Mon Jun 22 06:24:17 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_fault.c,v 1.196 2014/08/10 16:44:37 tls Exp $ */ +/* $NetBSD: uvm_fault.c,v 1.197 2015/06/22 06:24:17 matt Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -32,7 +32,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.196 2014/08/10 16:44:37 tls Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.197 2015/06/22 06:24:17 matt Exp $"); #include "opt_uvmhist.h" @@ -806,7 +806,7 @@ uvm_fault_internal(struct vm_map *orig_m UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, vaddr=0x%x, at=%d, ff=%d)", + UVMHIST_LOG(maphist, "(map=%p, vaddr=%#lx, at=%d, ff=%d)", orig_map, vaddr, access_type, fault_flag); cd = &(curcpu()->ci_data); @@ -947,7 +947,7 @@ uvm_fault_check( ufi->entry->max_protection : ufi->entry->protection; if ((check_prot & flt->access_type) != flt->access_type) { UVMHIST_LOG(maphist, - "<- protection failure (prot=0x%x, access=0x%x)", + "<- protection failure (prot=%#x, access=%#x)", ufi->entry->protection, flt->access_type, 0, 0); uvmfault_unlockmaps(ufi, false); return EACCES; @@ -1060,9 +1060,9 @@ uvm_fault_check( const voff_t eoff = flt->startva - ufi->entry->start; /* locked: maps(read) */ - UVMHIST_LOG(maphist, " narrow=%d, back=%d, forw=%d, startva=0x%x", + UVMHIST_LOG(maphist, " narrow=%d, back=%d, forw=%d, startva=%#lx", flt->narrow, nback, nforw, flt->startva); - UVMHIST_LOG(maphist, " entry=0x%x, amap=0x%x, obj=0x%x", ufi->entry, + UVMHIST_LOG(maphist, " entry=%p, amap=%p, obj=%p", ufi->entry, amap, uobj, 0); /* @@ -1229,7 +1229,7 @@ uvm_fault_upper_neighbor( uvm_pageenqueue(pg); mutex_exit(&uvm_pageqlock); UVMHIST_LOG(maphist, - " MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x", + " MAPPING: n anon: 
pm=%p, va=%#lx, pg=%p", ufi->orig_map->pmap, currva, pg, 0); uvmexp.fltnamap++; @@ -1276,7 +1276,7 @@ uvm_fault_upper( * handle case 1: fault on an anon in our amap */ - UVMHIST_LOG(maphist, " case 1 fault: anon=0x%x", anon, 0,0,0); + UVMHIST_LOG(maphist, " case 1 fault: anon=%p", anon, 0,0,0); /* * no matter if we have case 1A or case 1B we are going to need to @@ -1506,7 +1506,7 @@ uvm_fault_upper_enter( */ UVMHIST_LOG(maphist, - " MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x, promote=%d", + " MAPPING: anon: pm=%p, va=%#lx, pg=%p, promote=%d", ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote); if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr, VM_PAGE_TO_PHYS(pg), @@ -1803,7 +1803,7 @@ uvm_fault_lower_neighbor( uvm_pageenqueue(pg); mutex_exit(&uvm_pageqlock); UVMHIST_LOG(maphist, - " MAPPING: n obj: pm=0x%x, va=0x%x, pg=0x%x", + " MAPPING: n obj: pm=%p, va=%#lx, pg=%p", ufi->orig_map->pmap, currva, pg, 0); uvmexp.fltnomap++; @@ -2186,7 +2186,7 @@ uvm_fault_lower_enter( */ UVMHIST_LOG(maphist, - " MAPPING: case2: pm=0x%x, va=0x%x, pg=0x%x, promote=%d", + " MAPPING: case2: pm=%p, va=%#lx, pg=%#x, promote=%d", ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote); KASSERT((flt->access_type & VM_PROT_WRITE) == 0 || (pg->flags & PG_RDONLY) == 0); Index: src/sys/uvm/uvm_map.c diff -u src/sys/uvm/uvm_map.c:1.333 src/sys/uvm/uvm_map.c:1.334 --- src/sys/uvm/uvm_map.c:1.333 Sun Feb 1 16:26:00 2015 +++ src/sys/uvm/uvm_map.c Mon Jun 22 06:24:17 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_map.c,v 1.333 2015/02/01 16:26:00 christos Exp $ */ +/* $NetBSD: uvm_map.c,v 1.334 2015/06/22 06:24:17 matt Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -66,7 +66,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.333 2015/02/01 16:26:00 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.334 2015/06/22 06:24:17 matt Exp $"); #include "opt_ddb.h" #include "opt_uvmhist.h" @@ -538,7 +538,7 @@ _uvm_tree_sanity(struct vm_map *map) for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) { if (tmp->gap != uvm_rb_gap(tmp)) { - printf("%d/%d gap %lx != %lx %s\n", + printf("%d/%d gap %#lx != %#lx %s\n", n + 1, map->nentries, (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp), tmp->next == &map->header ? "(last)" : ""); @@ -563,7 +563,7 @@ _uvm_tree_sanity(struct vm_map *map) trtmp = NULL; for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) { if (tmp->maxgap != uvm_rb_maxgap(tmp)) { - printf("maxgap %lx != %lx\n", + printf("maxgap %#lx != %#lx\n", (ulong)tmp->maxgap, (ulong)uvm_rb_maxgap(tmp)); goto error; @@ -763,7 +763,7 @@ uvm_mapent_alloc(struct vm_map *map, int } me->flags = 0; - UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me, + UVMHIST_LOG(maphist, "<- new entry=%p [kentry=%d]", me, (map == kernel_map), 0, 0); return me; } @@ -777,7 +777,7 @@ uvm_mapent_free(struct vm_map_entry *me) { UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]", + UVMHIST_LOG(maphist,"<- freeing map entry=%p [flags=%d]", me, me->flags, 0, 0); pool_cache_put(&uvm_map_entry_cache, me); } @@ -1105,9 +1105,9 @@ uvm_map_prepare(struct vm_map *map, vadd UVMHIST_FUNC("uvm_map_prepare"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)", + UVMHIST_LOG(maphist, "(map=%p, start=%#lx, size=%lu, flags=%#x)", map, start, size, flags); - UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); + UVMHIST_LOG(maphist, " uobj/offset %p/%ld", uobj, uoffset,0,0); /* * detect a popular device driver bug. 
@@ -1129,7 +1129,7 @@ uvm_map_prepare(struct vm_map *map, vadd */ if ((prot & maxprot) != prot) { - UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x", + UVMHIST_LOG(maphist, "<- prot. failure: prot=%#x, max=%#x", prot, maxprot,0,0); return EACCES; } @@ -1150,7 +1150,7 @@ retry: unsigned int timestamp; timestamp = map->timestamp; - UVMHIST_LOG(maphist,"waiting va timestamp=0x%x", + UVMHIST_LOG(maphist,"waiting va timestamp=%#x", timestamp,0,0,0); map->flags |= VM_MAP_WANTVA; vm_map_unlock(map); @@ -1259,9 +1259,9 @@ uvm_map_enter(struct vm_map *map, const UVMHIST_FUNC("uvm_map_enter"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)", + UVMHIST_LOG(maphist, "(map=%p, start=%#lx, size=%lu, flags=%#x)", map, start, size, flags); - UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); + UVMHIST_LOG(maphist, " uobj/offset %p/%ld", uobj, uoffset,0,0); KASSERT(map->hint == prev_entry); /* bimerge case assumes this */ KASSERT(vm_map_locked_p(map)); @@ -1609,7 +1609,7 @@ uvm_map_lookup_entry(struct vm_map *map, UVMHIST_FUNC("uvm_map_lookup_entry"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,addr=%#lx,ent=%p)", map, address, entry, 0); /* @@ -1640,7 +1640,7 @@ uvm_map_lookup_entry(struct vm_map *map, if (cur != &map->header && cur->end > address) { UVMMAP_EVCNT_INCR(mlk_hint); *entry = cur; - UVMHIST_LOG(maphist,"<- got it via hint (0x%x)", + UVMHIST_LOG(maphist,"<- got it via hint (%p)", cur, 0, 0, 0); uvm_mapent_check(*entry); return (true); @@ -1688,7 +1688,7 @@ uvm_map_lookup_entry(struct vm_map *map, *entry = cur; got: SAVE_HINT(map, map->hint, *entry); - UVMHIST_LOG(maphist,"<- search got it (0x%x)", + UVMHIST_LOG(maphist,"<- search got it (%p)", cur, 0, 0, 0); KDASSERT((*entry)->start <= address); KDASSERT(address < (*entry)->end); @@ -1803,7 +1803,7 @@ uvm_map_findspace(struct vm_map *map, va UVMHIST_FUNC("uvm_map_findspace"); 
UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)", + UVMHIST_LOG(maphist, "(map=%p, hint=%l#x, len=%lu, flags=%#x)", map, hint, length, flags); KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || (align & (align - 1)) == 0); KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors); @@ -1826,7 +1826,7 @@ uvm_map_findspace(struct vm_map *map, va hint = vm_map_min(map); } if (hint > vm_map_max(map)) { - UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]", + UVMHIST_LOG(maphist,"<- VA %#lx > range [%#lx->%#lx]", hint, vm_map_min(map), vm_map_max(map), 0); return (NULL); } @@ -2101,7 +2101,7 @@ nextgap: found: SAVE_HINT(map, map->hint, entry); *result = hint; - UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0); + UVMHIST_LOG(maphist,"<- got it! (result=%#lx)", hint, 0,0,0); KASSERT( topdown || hint >= orig_hint); KASSERT(!topdown || hint <= orig_hint); KASSERT(entry->end <= hint); @@ -2140,7 +2140,7 @@ uvm_unmap_remove(struct vm_map *map, vad vaddr_t len; UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)", + UVMHIST_LOG(maphist,"(map=%p, start=%#lx, end=%#lx)", map, start, end, 0); VM_MAP_RANGE_CHECK(map, start, end); @@ -2264,7 +2264,7 @@ uvm_unmap_remove(struct vm_map *map, vad * that we've nuked. then go to next entry. */ - UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0); + UVMHIST_LOG(maphist, " removed map entry %p", entry, 0, 0,0); /* critical! 
prevents stale hint */ SAVE_HINT(map, entry, entry->prev); @@ -2321,7 +2321,7 @@ uvm_unmap_detach(struct vm_map_entry *fi while (first_entry) { KASSERT(!VM_MAPENT_ISWIRED(first_entry)); UVMHIST_LOG(maphist, - " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d", + " detach %p: amap=%p, obj=%p, submap?=%d", first_entry, first_entry->aref.ar_amap, first_entry->object.uvm_obj, UVM_ET_ISSUBMAP(first_entry)); @@ -2373,7 +2373,7 @@ uvm_map_reserve(struct vm_map *map, vsiz { UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)", + UVMHIST_LOG(maphist, "(map=%p, size=%#lx, offset=%#lx, addr=%p)", map,size,offset,raddr); size = round_page(size); @@ -2389,7 +2389,7 @@ uvm_map_reserve(struct vm_map *map, vsiz return (false); } - UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0); + UVMHIST_LOG(maphist, "<- done (*raddr=%#lx)", *raddr,0,0,0); return (true); } @@ -2452,9 +2452,9 @@ uvm_map_replace(struct vm_map *map, vadd panic("uvm_map_replace1"); if (tmpent->start >= tmpent->end || tmpent->end > end) { panic("uvm_map_replace2: " - "tmpent->start=0x%"PRIxVADDR - ", tmpent->end=0x%"PRIxVADDR - ", end=0x%"PRIxVADDR, + "tmpent->start=%#"PRIxVADDR + ", tmpent->end=%#"PRIxVADDR + ", end=%#"PRIxVADDR, tmpent->start, tmpent->end, end); } cur = tmpent->end; @@ -2556,9 +2556,9 @@ uvm_map_extract(struct vm_map *srcmap, v vsize_t nsize; UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start, + UVMHIST_LOG(maphist,"(srcmap=%p,start=%#lx, len=%#lx", srcmap, start, len,0); - UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0); + UVMHIST_LOG(maphist," ...,dstmap=%p, flags=%#x)", dstmap,flags,0,0); /* * step 0: sanity check: start must be on a page boundary, length @@ -2582,7 +2582,7 @@ uvm_map_extract(struct vm_map *srcmap, v return (ENOMEM); KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0); *dstaddrp = dstaddr; 
/* pass address back to caller */ - UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0); + UVMHIST_LOG(maphist, " dstaddr=%#lx", dstaddr,0,0,0); } else { dstaddr = *dstaddrp; } @@ -2945,7 +2945,7 @@ uvm_map_protect(struct vm_map *map, vadd struct vm_map_entry *current, *entry; int error = 0; UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_prot=%#x)", map, start, end, new_prot); vm_map_lock(map); @@ -3090,7 +3090,7 @@ uvm_map_inherit(struct vm_map *map, vadd { struct vm_map_entry *entry, *temp_entry; UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_inh=%#x)", map, start, end, new_inheritance); switch (new_inheritance) { @@ -3133,7 +3133,7 @@ uvm_map_advice(struct vm_map *map, vaddr { struct vm_map_entry *entry, *temp_entry; UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_adv=%#x)", map, start, end, new_advice); vm_map_lock(map); @@ -3182,7 +3182,7 @@ uvm_map_willneed(struct vm_map *map, vad { struct vm_map_entry *entry; UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx)", map, start, end, 0); vm_map_lock_read(map); @@ -3249,7 +3249,7 @@ uvm_map_pageable(struct vm_map *map, vad u_int timestamp_save; #endif UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_pageable=%u)", map, start, end, new_pageable); KASSERT(map->flags & VM_MAP_PAGEABLE); @@ -3497,7 +3497,7 @@ uvm_map_pageable_all(struct vm_map *map, u_int 
timestamp_save; #endif UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0); + UVMHIST_LOG(maphist,"(map=%p,flags=%#x)", map, flags, 0, 0); KASSERT(map->flags & VM_MAP_PAGEABLE); @@ -3732,7 +3732,7 @@ uvm_map_clean(struct vm_map *map, vaddr_ int error, refs; UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,flags=%#x)", map, start, end, flags); KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) != (PGO_FREE|PGO_DEACTIVATE)); @@ -3938,7 +3938,7 @@ uvmspace_alloc(vaddr_t vmin, vaddr_t vma vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK); uvmspace_init(vm, NULL, vmin, vmax, topdown); - UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0); + UVMHIST_LOG(maphist,"<- done (vm=%p)", vm,0,0,0); return (vm); } @@ -3954,6 +3954,10 @@ uvmspace_init(struct vmspace *vm, struct { UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist); + UVMHIST_LOG(maphist, "(vm=%p, pmap=%p, vmin=%#lx, vmax=%#lx", + vm, pmap, vmin, vmax); + UVMHIST_LOG(maphist, " topdown=%u)", topdown, 0, 0, 0); + memset(vm, 0, sizeof(*vm)); uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE | (topdown ? 
VM_MAP_TOPDOWN : 0) @@ -4151,7 +4155,7 @@ uvmspace_free(struct vmspace *vm) UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0); + UVMHIST_LOG(maphist,"(vm=%p) ref=%d", vm, vm->vm_refcnt,0,0); mutex_enter(&map->misc_lock); n = --vm->vm_refcnt; mutex_exit(&map->misc_lock); @@ -4624,7 +4628,7 @@ uvm_unmap1(struct vm_map *map, vaddr_t s struct vm_map_entry *dead_entries; UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)", + UVMHIST_LOG(maphist, " (map=%p, start=%#lx, end=%#lx)", map, start, end, 0); if (map == kernel_map) { LOCKDEBUG_MEM_CHECK((void *)start, end - start); @@ -4708,9 +4712,9 @@ uvm_map_printit(struct vm_map *map, bool { struct vm_map_entry *entry; - (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map), + (*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map), vm_map_max(map)); - (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n", + (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n", map->nentries, map->size, map->ref_count, map->timestamp, map->flags); (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap, @@ -4719,7 +4723,7 @@ uvm_map_printit(struct vm_map *map, bool return; for (entry = map->header.next; entry != &map->header; entry = entry->next) { - (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n", + (*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n", entry, entry->start, entry->end, entry->object.uvm_obj, (long long)entry->offset, entry->aref.ar_amap, entry->aref.ar_pageoff);