Module Name:    src
Committed By:   rmind
Date:           Fri Sep 24 22:51:52 UTC 2010

Modified Files:
        src/common/lib/libc/gen: rb.c
        src/common/lib/libprop: prop_dictionary.c prop_number.c
        src/sys/fs/udf: udf.h udf_subr.c
        src/sys/kern: subr_lockdebug.c
        src/sys/net/npf: npf_session.c npf_tableset.c
        src/sys/nfs: nfs_node.c
        src/sys/rump/librump/rumpkern: vm.c
        src/sys/sys: rb.h
        src/sys/uvm: uvm_map.c uvm_object.h uvm_page.c

Log Message:
Fixes/improvements to the RB-tree implementation:
1. Fix the inverted node order, so that a negative value from the comparison
   operator represents the lower (left) node and a positive value the higher
   (right) node.
2. Add an argument (i.e. a "context") that is passed to the comparison
   operators.
3. Change rb_tree_insert_node() to return a node - either the newly inserted
   one or the already existing one.
4. Amend the interface to manipulate the actual object instead of the
   rb_node (in a similar way to the Patricia-tree interface).
5. Update all RB-tree users accordingly.

XXX: Perhaps rename rb.h to rbtree.h, since we are cleaning up anyway.

Items 1-3 address PR/43488 by Jeremy Huddleston.

Passes RB-tree regression tests.
Reviewed by: matt@, christos@
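
For illustration, a minimal sketch of what a tree user looks like after this
change follows.  The my_item structure, its fields and the helper names are
hypothetical; only the rb(3) entry points, the comparator signatures and the
rb_tree_ops_t fields are taken from this commit:

	#include <sys/rb.h>
	#include <stddef.h>	/* offsetof */

	struct my_item {
		struct rb_node	mi_rbnode;	/* embedded node; located via ops */
		int		mi_key;
	};

	static signed int
	my_compare_nodes(void *ctx, const void *n1, const void *n2)
	{
		const struct my_item *i1 = n1;
		const struct my_item *i2 = n2;

		/* Negative: i1 sorts left of i2; positive: i1 sorts right. */
		if (i1->mi_key < i2->mi_key)
			return -1;
		if (i1->mi_key > i2->mi_key)
			return 1;
		return 0;
	}

	static signed int
	my_compare_key(void *ctx, const void *n, const void *key)
	{
		const struct my_item *i = n;
		const int k = *(const int *)key;

		if (i->mi_key < k)
			return -1;
		if (i->mi_key > k)
			return 1;
		return 0;
	}

	static const rb_tree_ops_t my_tree_ops = {
		.rbto_compare_nodes = my_compare_nodes,
		.rbto_compare_key = my_compare_key,
		.rbto_node_offset = offsetof(struct my_item, mi_rbnode),
		.rbto_context = NULL
	};

	static void
	my_example(void)
	{
		rb_tree_t tree;
		struct my_item a = { .mi_key = 42 }, *item, *ret;
		int key = 42;

		rb_tree_init(&tree, &my_tree_ops);

		/*
		 * Insert takes the object and returns an object: either ours,
		 * or the already existing duplicate (no more bool return).
		 */
		ret = rb_tree_insert_node(&tree, &a);
		if (ret != &a) {
			/* Duplicate key: "ret" is the pre-existing item. */
		}

		/* Lookups return the object (or NULL), not a struct rb_node. */
		item = rb_tree_find_node(&tree, &key);
		if (item != NULL)
			rb_tree_remove_node(&tree, item);
	}

Note that the comparison functions now receive the opaque rbto_context as
their first argument and operate on the objects themselves; the embedded
rb_node is found through rbto_node_offset, so the old container-of macros in
each consumer go away.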


To generate a diff of this commit:
cvs rdiff -u -r1.6 -r1.7 src/common/lib/libc/gen/rb.c
cvs rdiff -u -r1.35 -r1.36 src/common/lib/libprop/prop_dictionary.c
cvs rdiff -u -r1.22 -r1.23 src/common/lib/libprop/prop_number.c
cvs rdiff -u -r1.41 -r1.42 src/sys/fs/udf/udf.h
cvs rdiff -u -r1.107 -r1.108 src/sys/fs/udf/udf_subr.c
cvs rdiff -u -r1.41 -r1.42 src/sys/kern/subr_lockdebug.c
cvs rdiff -u -r1.2 -r1.3 src/sys/net/npf/npf_session.c
cvs rdiff -u -r1.1 -r1.2 src/sys/net/npf/npf_tableset.c
cvs rdiff -u -r1.113 -r1.114 src/sys/nfs/nfs_node.c
cvs rdiff -u -r1.95 -r1.96 src/sys/rump/librump/rumpkern/vm.c
cvs rdiff -u -r1.13 -r1.14 src/sys/sys/rb.h
cvs rdiff -u -r1.292 -r1.293 src/sys/uvm/uvm_map.c
cvs rdiff -u -r1.26 -r1.27 src/sys/uvm/uvm_object.h
cvs rdiff -u -r1.155 -r1.156 src/sys/uvm/uvm_page.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/common/lib/libc/gen/rb.c
diff -u src/common/lib/libc/gen/rb.c:1.6 src/common/lib/libc/gen/rb.c:1.7
--- src/common/lib/libc/gen/rb.c:1.6	Fri Apr 30 13:58:09 2010
+++ src/common/lib/libc/gen/rb.c	Fri Sep 24 22:51:51 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: rb.c,v 1.6 2010/04/30 13:58:09 joerg Exp $ */
+/*	$NetBSD: rb.c,v 1.7 2010/09/24 22:51:51 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
@@ -87,11 +87,17 @@
 #define	rb_tree_check_node(a, b, c, d)	true
 #endif
 
+#define	RB_NODETOITEM(rbto, rbn)	\
+    ((void *)((uintptr_t)(rbn) - (rbto)->rbto_node_offset))
+#define	RB_ITEMTONODE(rbto, rbn)	\
+    ((rb_node_t *)((uintptr_t)(rbn) + (rbto)->rbto_node_offset))
+
 #define	RB_SENTINEL_NODE	NULL
 
 void
-rb_tree_init(struct rb_tree *rbt, const struct rb_tree_ops *ops)
+rb_tree_init(struct rb_tree *rbt, const rb_tree_ops_t *ops)
 {
+
 	rbt->rbt_ops = ops;
 	*((const struct rb_node **)&rbt->rbt_root) = RB_SENTINEL_NODE;
 	RB_TAILQ_INIT(&rbt->rbt_nodes);
@@ -110,65 +116,73 @@
 #endif
 }
 
-struct rb_node *
+void *
 rb_tree_find_node(struct rb_tree *rbt, const void *key)
 {
-	rbto_compare_key_fn compare_key = rbt->rbt_ops->rbto_compare_key;
+	const rb_tree_ops_t *rbto = rbt->rbt_ops;
+	rbto_compare_key_fn compare_key = rbto->rbto_compare_key;
 	struct rb_node *parent = rbt->rbt_root;
 
 	while (!RB_SENTINEL_P(parent)) {
-		const signed int diff = (*compare_key)(parent, key);
+		void *pobj = RB_NODETOITEM(rbto, parent);
+		const signed int diff = (*compare_key)(rbto->rbto_context,
+		    pobj, key);
 		if (diff == 0)
-			return parent;
-		parent = parent->rb_nodes[diff > 0];
+			return pobj;
+		parent = parent->rb_nodes[diff < 0];
 	}
 
 	return NULL;
 }
- 
-struct rb_node *
+
+void *
 rb_tree_find_node_geq(struct rb_tree *rbt, const void *key)
 {
-	rbto_compare_key_fn compare_key = rbt->rbt_ops->rbto_compare_key;
-	struct rb_node *parent = rbt->rbt_root;
-	struct rb_node *last = NULL;
+	const rb_tree_ops_t *rbto = rbt->rbt_ops;
+	rbto_compare_key_fn compare_key = rbto->rbto_compare_key;
+	struct rb_node *parent = rbt->rbt_root, *last = NULL;
 
 	while (!RB_SENTINEL_P(parent)) {
-		const signed int diff = (*compare_key)(parent, key);
+		void *pobj = RB_NODETOITEM(rbto, parent);
+		const signed int diff = (*compare_key)(rbto->rbto_context,
+		    pobj, key);
 		if (diff == 0)
-			return parent;
-		if (diff < 0)
+			return pobj;
+		if (diff > 0)
 			last = parent;
-		parent = parent->rb_nodes[diff > 0];
+		parent = parent->rb_nodes[diff < 0];
 	}
 
-	return last;
+	return RB_NODETOITEM(rbto, last);
 }
- 
-struct rb_node *
+
+void *
 rb_tree_find_node_leq(struct rb_tree *rbt, const void *key)
 {
-	rbto_compare_key_fn compare_key = rbt->rbt_ops->rbto_compare_key;
-	struct rb_node *parent = rbt->rbt_root;
-	struct rb_node *last = NULL;
+	const rb_tree_ops_t *rbto = rbt->rbt_ops;
+	rbto_compare_key_fn compare_key = rbto->rbto_compare_key;
+	struct rb_node *parent = rbt->rbt_root, *last = NULL;
 
 	while (!RB_SENTINEL_P(parent)) {
-		const signed int diff = (*compare_key)(parent, key);
+		void *pobj = RB_NODETOITEM(rbto, parent);
+		const signed int diff = (*compare_key)(rbto->rbto_context,
+		    pobj, key);
 		if (diff == 0)
-			return parent;
-		if (diff > 0)
+			return pobj;
+		if (diff < 0)
 			last = parent;
-		parent = parent->rb_nodes[diff > 0];
+		parent = parent->rb_nodes[diff < 0];
 	}
 
-	return last;
+	return RB_NODETOITEM(rbto, last);
 }
-
-bool
-rb_tree_insert_node(struct rb_tree *rbt, struct rb_node *self)
+
+void *
+rb_tree_insert_node(struct rb_tree *rbt, void *object)
 {
-	rbto_compare_nodes_fn compare_nodes = rbt->rbt_ops->rbto_compare_nodes;
-	struct rb_node *parent, *tmp;
+	const rb_tree_ops_t *rbto = rbt->rbt_ops;
+	rbto_compare_nodes_fn compare_nodes = rbto->rbto_compare_nodes;
+	struct rb_node *parent, *tmp, *self = RB_ITEMTONODE(rbto, object);
 	unsigned int position;
 	bool rebalance;
 
@@ -189,15 +203,17 @@
 	 * Find out where to place this new leaf.
 	 */
 	while (!RB_SENTINEL_P(tmp)) {
-		const signed int diff = (*compare_nodes)(tmp, self);
+		void *tobj = RB_NODETOITEM(rbto, tmp);
+		const signed int diff = (*compare_nodes)(rbto->rbto_context,
+		    tobj, object);
 		if (__predict_false(diff == 0)) {
 			/*
-			 * Node already exists; don't insert.
+			 * Node already exists; return it.
 			 */
-			return false;
+			return tobj;
 		}
 		parent = tmp;
-		position = (diff > 0);
+		position = (diff < 0);
 		tmp = parent->rb_nodes[position];
 	}
 
@@ -221,8 +237,10 @@
 			prev = TAILQ_PREV(next, rb_node_qh, rb_link);
 		KASSERT(prev == NULL || !RB_SENTINEL_P(prev));
 		KASSERT(next == NULL || !RB_SENTINEL_P(next));
-		KASSERT(prev == NULL || (*compare_nodes)(prev, self) > 0);
-		KASSERT(next == NULL || (*compare_nodes)(self, next) > 0);
+		KASSERT(prev == NULL || (*compare_nodes)(rbto->rbto_context,
+		    RB_NODETOITEM(rbto, prev), RB_NODETOITEM(rbto, self)) < 0);
+		KASSERT(next == NULL || (*compare_nodes)(rbto->rbto_context,
+		    RB_NODETOITEM(rbto, self), RB_NODETOITEM(rbto, next)) < 0);
 	}
 #endif
 
@@ -270,10 +288,14 @@
 	if (RB_ROOT_P(rbt, self)) {
 		RB_TAILQ_INSERT_HEAD(&rbt->rbt_nodes, self, rb_link);
 	} else if (position == RB_DIR_LEFT) {
-		KASSERT((*compare_nodes)(self, RB_FATHER(self)) > 0);
+		KASSERT((*compare_nodes)(rbto->rbto_context,
+		    RB_NODETOITEM(rbto, self),
+		    RB_NODETOITEM(rbto, RB_FATHER(self))) < 0);
 		RB_TAILQ_INSERT_BEFORE(RB_FATHER(self), self, rb_link);
 	} else {
-		KASSERT((*compare_nodes)(RB_FATHER(self), self) > 0);
+		KASSERT((*compare_nodes)(rbto->rbto_context,
+		    RB_NODETOITEM(rbto, RB_FATHER(self)),
+		    RB_NODETOITEM(rbto, self)) < 0);
 		RB_TAILQ_INSERT_AFTER(&rbt->rbt_nodes, RB_FATHER(self),
 		    self, rb_link);
 	}
@@ -288,9 +310,10 @@
 		KASSERT(rb_tree_check_node(rbt, self, NULL, true));
 	}
 
-	return true;
+	/* Succesfully inserted, return our node pointer. */
+	return object;
 }
-
+
 /*
  * Swap the location and colors of 'self' and its child @ which.  The child
  * can not be a sentinel node.  This is our rotation function.  However,
@@ -316,7 +339,8 @@
 
 	KASSERT(rb_tree_check_node(rbt, old_father, NULL, false));
 	KASSERT(rb_tree_check_node(rbt, old_child, NULL, false));
-	KASSERT(RB_ROOT_P(rbt, old_father) || rb_tree_check_node(rbt, grandpa, NULL, false));
+	KASSERT(RB_ROOT_P(rbt, old_father) ||
+	    rb_tree_check_node(rbt, grandpa, NULL, false));
 
 	/*
 	 * Exchange descendant linkages.
@@ -358,9 +382,10 @@
 
 	KASSERT(rb_tree_check_node(rbt, new_father, NULL, false));
 	KASSERT(rb_tree_check_node(rbt, new_child, NULL, false));
-	KASSERT(RB_ROOT_P(rbt, new_father) || rb_tree_check_node(rbt, grandpa, NULL, false));
+	KASSERT(RB_ROOT_P(rbt, new_father) ||
+	    rb_tree_check_node(rbt, grandpa, NULL, false));
 }
-
+
 static void
 rb_tree_insert_rebalance(struct rb_tree *rbt, struct rb_node *self)
 {
@@ -466,7 +491,7 @@
 	 */
 	RB_MARK_BLACK(rbt->rbt_root);
 }
-
+
 static void
 rb_tree_prune_node(struct rb_tree *rbt, struct rb_node *self, bool rebalance)
 {
@@ -515,7 +540,7 @@
 		rb_tree_removal_rebalance(rbt, father, which);
 	KASSERT(was_root || rb_tree_check_node(rbt, father, NULL, true));
 }
-
+
 /*
  * When deleting an interior node
  */
@@ -716,13 +741,12 @@
 	KASSERT(was_root || rb_tree_check_node(rbt, father, NULL, true));
 	KASSERT(rb_tree_check_node(rbt, son, NULL, true));
 }
-/*
- *
- */
+
 void
-rb_tree_remove_node(struct rb_tree *rbt, struct rb_node *self)
+rb_tree_remove_node(struct rb_tree *rbt, void *object)
 {
-	struct rb_node *standin;
+	const rb_tree_ops_t *rbto = rbt->rbt_ops;
+	struct rb_node *standin, *self = RB_ITEMTONODE(rbto, object);
 	unsigned int which;
 
 	KASSERT(!RB_SENTINEL_P(self));
@@ -779,7 +803,7 @@
 	 * Let's find the node closes to us opposite of our parent
 	 * Now swap it with ourself, "prune" it, and rebalance, if needed.
 	 */
-	standin = rb_tree_iterate(rbt, self, which);
+	standin = RB_ITEMTONODE(rbto, rb_tree_iterate(rbt, object, which));
 	rb_tree_swap_prune_and_rebalance(rbt, self, standin);
 }
 
@@ -934,27 +958,30 @@
 	KASSERT(rb_tree_check_node(rbt, parent, NULL, true));
 }
 
-struct rb_node *
-rb_tree_iterate(struct rb_tree *rbt, struct rb_node *self,
-	const unsigned int direction)
+void *
+rb_tree_iterate(struct rb_tree *rbt, void *object, const unsigned int direction)
 {
+	const rb_tree_ops_t *rbto = rbt->rbt_ops;
 	const unsigned int other = direction ^ RB_DIR_OTHER;
+	struct rb_node *self;
+
 	KASSERT(direction == RB_DIR_LEFT || direction == RB_DIR_RIGHT);
 
-	if (self == NULL) {
+	if (object == NULL) {
 #ifndef RBSMALL
 		if (RB_SENTINEL_P(rbt->rbt_root))
 			return NULL;
-		return rbt->rbt_minmax[direction];
+		return RB_NODETOITEM(rbto, rbt->rbt_minmax[direction]);
 #else
 		self = rbt->rbt_root;
 		if (RB_SENTINEL_P(self))
 			return NULL;
 		while (!RB_SENTINEL_P(self->rb_nodes[direction]))
 			self = self->rb_nodes[direction];
-		return self;
+		return RB_NODETOITEM(rbto, self);
 #endif /* !RBSMALL */
 	}
+	self = RB_ITEMTONODE(rbto, object);
 	KASSERT(!RB_SENTINEL_P(self));
 	/*
 	 * We can't go any further in this direction.  We proceed up in the
@@ -963,7 +990,7 @@
 	if (RB_SENTINEL_P(self->rb_nodes[direction])) {
 		while (!RB_ROOT_P(rbt, self)) {
 			if (other == RB_POSITION(self))
-				return RB_FATHER(self);
+				return RB_NODETOITEM(rbto, RB_FATHER(self));
 			self = RB_FATHER(self);
 		}
 		return NULL;
@@ -977,7 +1004,7 @@
 	KASSERT(!RB_SENTINEL_P(self));
 	while (!RB_SENTINEL_P(self->rb_nodes[other]))
 		self = self->rb_nodes[other];
-	return self;
+	return RB_NODETOITEM(rbto, self);
 }
 
 #ifdef RBDEBUG
@@ -1047,10 +1074,12 @@
 rb_tree_check_node(const struct rb_tree *rbt, const struct rb_node *self,
 	const struct rb_node *prev, bool red_check)
 {
-	rbto_compare_nodes_fn compare_nodes = rbt->rbt_ops->rbto_compare_nodes;
+	const rb_tree_ops_t *rbto = rbt->rbt_ops;
+	rbto_compare_nodes_fn compare_nodes = rbto->rbto_compare_nodes;
 
 	KASSERT(!RB_SENTINEL_P(self));
-	KASSERT(prev == NULL || (*compare_nodes)(prev, self) > 0);
+	KASSERT(prev == NULL || (*compare_nodes)(rbto->rbto_context,
+	    RB_NODETOITEM(rbto, prev), RB_NODETOITEM(rbto, self)) < 0);
 
 	/*
 	 * Verify our relationship to our parent.
@@ -1061,13 +1090,17 @@
 		KASSERT(RB_FATHER(self)->rb_nodes[RB_DIR_LEFT] == self);
 		KASSERT(RB_FATHER(self) == (const struct rb_node *) &rbt->rbt_root);
 	} else {
+		int diff = (*compare_nodes)(rbto->rbto_context,
+		    RB_NODETOITEM(rbto, self),
+		    RB_NODETOITEM(rbto, RB_FATHER(self)));
+
 		KASSERT(self != rbt->rbt_root);
 		KASSERT(!RB_FATHER_SENTINEL_P(self));
 		if (RB_POSITION(self) == RB_DIR_LEFT) {
-			KASSERT((*compare_nodes)(self, RB_FATHER(self)) > 0);
+			KASSERT(diff < 0);
 			KASSERT(RB_FATHER(self)->rb_nodes[RB_DIR_LEFT] == self);
 		} else {
-			KASSERT((*compare_nodes)(self, RB_FATHER(self)) < 0);
+			KASSERT(diff > 0);
 			KASSERT(RB_FATHER(self)->rb_nodes[RB_DIR_RIGHT] == self);
 		}
 	}

Index: src/common/lib/libprop/prop_dictionary.c
diff -u src/common/lib/libprop/prop_dictionary.c:1.35 src/common/lib/libprop/prop_dictionary.c:1.36
--- src/common/lib/libprop/prop_dictionary.c:1.35	Tue Apr 14 02:53:41 2009
+++ src/common/lib/libprop/prop_dictionary.c	Fri Sep 24 22:51:52 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: prop_dictionary.c,v 1.35 2009/04/14 02:53:41 haad Exp $	*/
+/*	$NetBSD: prop_dictionary.c,v 1.36 2010/09/24 22:51:52 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
@@ -66,10 +66,6 @@
 	/* actually variable length */
 };
 
-#define	RBNODE_TO_PDK(n)						\
-	((struct _prop_dictionary_keysym *)				\
-	 ((uintptr_t)n - offsetof(struct _prop_dictionary_keysym, pdk_link)))
-
 	/* pdk_key[1] takes care of the NUL */
 #define	PDK_SIZE_16		(sizeof(struct _prop_dictionary_keysym) + 16)
 #define	PDK_SIZE_32		(sizeof(struct _prop_dictionary_keysym) + 32)
@@ -176,28 +172,32 @@
  */
 
 static int
-_prop_dict_keysym_rb_compare_nodes(const struct rb_node *n1,
-				   const struct rb_node *n2)
+/*ARGSUSED*/
+_prop_dict_keysym_rb_compare_nodes(void *ctx __unused,
+				   const void *n1, const void *n2)
 {
-	const prop_dictionary_keysym_t pdk1 = RBNODE_TO_PDK(n1);
-	const prop_dictionary_keysym_t pdk2 = RBNODE_TO_PDK(n2);
+	const struct _prop_dictionary_keysym *pdk1 = n1;
+	const struct _prop_dictionary_keysym *pdk2 = n2;
 
-	return (strcmp(pdk1->pdk_key, pdk2->pdk_key));
+	return strcmp(pdk1->pdk_key, pdk2->pdk_key);
 }
 
 static int
-_prop_dict_keysym_rb_compare_key(const struct rb_node *n,
-				 const void *v)
+/*ARGSUSED*/
+_prop_dict_keysym_rb_compare_key(void *ctx __unused,
+				 const void *n, const void *v)
 {
-	const prop_dictionary_keysym_t pdk = RBNODE_TO_PDK(n);
+	const struct _prop_dictionary_keysym *pdk = n;
 	const char *cp = v;
 
-	return (strcmp(pdk->pdk_key, cp));
+	return strcmp(pdk->pdk_key, cp);
 }
 
-static const struct rb_tree_ops _prop_dict_keysym_rb_tree_ops = {
+static const rb_tree_ops_t _prop_dict_keysym_rb_tree_ops = {
 	.rbto_compare_nodes = _prop_dict_keysym_rb_compare_nodes,
-	.rbto_compare_key   = _prop_dict_keysym_rb_compare_key,
+	.rbto_compare_key = _prop_dict_keysym_rb_compare_key,
+	.rbto_node_offset = offsetof(struct _prop_dictionary_keysym, pdk_link),
+	.rbto_context = NULL
 };
 
 static struct rb_tree _prop_dict_keysym_tree;
@@ -235,7 +235,7 @@
 {
 	prop_dictionary_keysym_t pdk = *obj;
 
-	_prop_rb_tree_remove_node(&_prop_dict_keysym_tree, &pdk->pdk_link);
+	_prop_rb_tree_remove_node(&_prop_dict_keysym_tree, pdk);
 	_prop_dict_keysym_put(pdk);
 
 	return _PROP_OBJECT_FREE_DONE;
@@ -282,10 +282,8 @@
 static prop_dictionary_keysym_t
 _prop_dict_keysym_alloc(const char *key)
 {
-	prop_dictionary_keysym_t opdk, pdk;
-	const struct rb_node *n;
+	prop_dictionary_keysym_t opdk, pdk, rpdk;
 	size_t size;
-	bool rv;
 
 	_PROP_ONCE_RUN(_prop_dict_init_once, _prop_dict_init);
 
@@ -294,9 +292,8 @@
 	 * we just retain it and return it.
 	 */
 	_PROP_MUTEX_LOCK(_prop_dict_keysym_tree_mutex);
-	n = _prop_rb_tree_find(&_prop_dict_keysym_tree, key);
-	if (n != NULL) {
-		opdk = RBNODE_TO_PDK(n);
+	opdk = _prop_rb_tree_find(&_prop_dict_keysym_tree, key);
+	if (opdk != NULL) {
 		prop_object_retain(opdk);
 		_PROP_MUTEX_UNLOCK(_prop_dict_keysym_tree_mutex);
 		return (opdk);
@@ -331,16 +328,15 @@
 	 * we have to check again if it is in the tree.
 	 */
 	_PROP_MUTEX_LOCK(_prop_dict_keysym_tree_mutex);
-	n = _prop_rb_tree_find(&_prop_dict_keysym_tree, key);
-	if (n != NULL) {
-		opdk = RBNODE_TO_PDK(n);
+	opdk = _prop_rb_tree_find(&_prop_dict_keysym_tree, key);
+	if (opdk != NULL) {
 		prop_object_retain(opdk);
 		_PROP_MUTEX_UNLOCK(_prop_dict_keysym_tree_mutex);
 		_prop_dict_keysym_put(pdk);
 		return (opdk);
 	}
-	rv = _prop_rb_tree_insert_node(&_prop_dict_keysym_tree, &pdk->pdk_link);
-	_PROP_ASSERT(rv == true);
+	rpdk = _prop_rb_tree_insert_node(&_prop_dict_keysym_tree, pdk);
+	_PROP_ASSERT(rpdk == pdk);
 	_PROP_MUTEX_UNLOCK(_prop_dict_keysym_tree_mutex);
 	return (pdk);
 }

Index: src/common/lib/libprop/prop_number.c
diff -u src/common/lib/libprop/prop_number.c:1.22 src/common/lib/libprop/prop_number.c:1.23
--- src/common/lib/libprop/prop_number.c:1.22	Sun Mar 15 22:29:11 2009
+++ src/common/lib/libprop/prop_number.c	Fri Sep 24 22:51:52 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: prop_number.c,v 1.22 2009/03/15 22:29:11 cegger Exp $	*/
+/*	$NetBSD: prop_number.c,v 1.23 2010/09/24 22:51:52 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2006 The NetBSD Foundation, Inc.
@@ -58,10 +58,6 @@
 	} pn_value;
 };
 
-#define	RBNODE_TO_PN(n)							\
-	((struct _prop_number *)					\
-	 ((uintptr_t)n - offsetof(struct _prop_number, pn_link)))
-
 _PROP_POOL_INIT(_prop_number_pool, sizeof(struct _prop_number), "propnmbr")
 
 static _prop_object_free_rv_t
@@ -122,28 +118,31 @@
 }
 
 static int
-_prop_number_rb_compare_nodes(const struct rb_node *n1,
-			      const struct rb_node *n2)
+/*ARGSUSED*/
+_prop_number_rb_compare_nodes(void *ctx __unused,
+			      const void *n1, const void *n2)
 {
-	const prop_number_t pn1 = RBNODE_TO_PN(n1);
-	const prop_number_t pn2 = RBNODE_TO_PN(n2);
+	const struct _prop_number *pn1 = n1;
+	const struct _prop_number *pn2 = n2;
 
-	return (_prop_number_compare_values(&pn1->pn_value, &pn2->pn_value));
+	return _prop_number_compare_values(&pn1->pn_value, &pn2->pn_value);
 }
 
 static int
-_prop_number_rb_compare_key(const struct rb_node *n,
-			    const void *v)
+/*ARGSUSED*/
+_prop_number_rb_compare_key(void *ctx __unused, const void *n, const void *v)
 {
-	const prop_number_t pn = RBNODE_TO_PN(n);
+	const struct _prop_number *pn = n;
 	const struct _prop_number_value *pnv = v;
 
-	return (_prop_number_compare_values(&pn->pn_value, pnv));
+	return _prop_number_compare_values(&pn->pn_value, pnv);
 }
 
-static const struct rb_tree_ops _prop_number_rb_tree_ops = {
+static const rb_tree_ops_t _prop_number_rb_tree_ops = {
 	.rbto_compare_nodes = _prop_number_rb_compare_nodes,
-	.rbto_compare_key   = _prop_number_rb_compare_key,
+	.rbto_compare_key = _prop_number_rb_compare_key,
+	.rbto_node_offset = offsetof(struct _prop_number, pn_link),
+	.rbto_context = NULL
 };
 
 static struct rb_tree _prop_number_tree;
@@ -155,7 +154,7 @@
 {
 	prop_number_t pn = *obj;
 
-	_prop_rb_tree_remove_node(&_prop_number_tree, &pn->pn_link);
+	_prop_rb_tree_remove_node(&_prop_number_tree, pn);
 
 	_PROP_POOL_PUT(_prop_number_pool, pn);
 
@@ -169,8 +168,7 @@
 {
 
 	_PROP_MUTEX_INIT(_prop_number_tree_mutex);
-	_prop_rb_tree_init(&_prop_number_tree,
-	    &_prop_number_rb_tree_ops);
+	_prop_rb_tree_init(&_prop_number_tree, &_prop_number_rb_tree_ops);
 	return 0;
 }
 
@@ -271,9 +269,7 @@
 static prop_number_t
 _prop_number_alloc(const struct _prop_number_value *pnv)
 {
-	prop_number_t opn, pn;
-	struct rb_node *n;
-	bool rv;
+	prop_number_t opn, pn, rpn;
 
 	_PROP_ONCE_RUN(_prop_number_init_once, _prop_number_init);
 
@@ -282,9 +278,8 @@
 	 * we just retain it and return it.
 	 */
 	_PROP_MUTEX_LOCK(_prop_number_tree_mutex);
-	n = _prop_rb_tree_find(&_prop_number_tree, pnv);
-	if (n != NULL) {
-		opn = RBNODE_TO_PN(n);
+	opn = _prop_rb_tree_find(&_prop_number_tree, pnv);
+	if (opn != NULL) {
 		prop_object_retain(opn);
 		_PROP_MUTEX_UNLOCK(_prop_number_tree_mutex);
 		return (opn);
@@ -308,16 +303,15 @@
 	 * we have to check again if it is in the tree.
 	 */
 	_PROP_MUTEX_LOCK(_prop_number_tree_mutex);
-	n = _prop_rb_tree_find(&_prop_number_tree, pnv);
-	if (n != NULL) {
-		opn = RBNODE_TO_PN(n);
+	opn = _prop_rb_tree_find(&_prop_number_tree, pnv);
+	if (opn != NULL) {
 		prop_object_retain(opn);
 		_PROP_MUTEX_UNLOCK(_prop_number_tree_mutex);
 		_PROP_POOL_PUT(_prop_number_pool, pn);
 		return (opn);
 	}
-	rv = _prop_rb_tree_insert_node(&_prop_number_tree, &pn->pn_link);
-	_PROP_ASSERT(rv == true);
+	rpn = _prop_rb_tree_insert_node(&_prop_number_tree, pn);
+	_PROP_ASSERT(rpn == pn);
 	_PROP_MUTEX_UNLOCK(_prop_number_tree_mutex);
 	return (pn);
 }

Index: src/sys/fs/udf/udf.h
diff -u src/sys/fs/udf/udf.h:1.41 src/sys/fs/udf/udf.h:1.42
--- src/sys/fs/udf/udf.h:1.41	Thu Feb 25 16:15:57 2010
+++ src/sys/fs/udf/udf.h	Fri Sep 24 22:51:50 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: udf.h,v 1.41 2010/02/25 16:15:57 reinoud Exp $ */
+/* $NetBSD: udf.h,v 1.42 2010/09/24 22:51:50 rmind Exp $ */
 
 /*
  * Copyright (c) 2006, 2008 Reinoud Zandijk
@@ -357,12 +357,6 @@
 	void			*strategy_private;
 };
 
-
-#define RBTOUDFNODE(node) \
-	((node) ? \
-	 (void *)((uintptr_t)(node) - offsetof(struct udf_node, rbnode)) \
-	 : NULL)
-
 /*
  * UDF node describing a file/directory.
  *

Index: src/sys/fs/udf/udf_subr.c
diff -u src/sys/fs/udf/udf_subr.c:1.107 src/sys/fs/udf/udf_subr.c:1.108
--- src/sys/fs/udf/udf_subr.c:1.107	Wed Jul 21 17:52:11 2010
+++ src/sys/fs/udf/udf_subr.c	Fri Sep 24 22:51:50 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: udf_subr.c,v 1.107 2010/07/21 17:52:11 hannken Exp $ */
+/* $NetBSD: udf_subr.c,v 1.108 2010/09/24 22:51:50 rmind Exp $ */
 
 /*
  * Copyright (c) 2006, 2008 Reinoud Zandijk
@@ -29,7 +29,7 @@
 
 #include <sys/cdefs.h>
 #ifndef lint
-__KERNEL_RCSID(0, "$NetBSD: udf_subr.c,v 1.107 2010/07/21 17:52:11 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: udf_subr.c,v 1.108 2010/09/24 22:51:50 rmind Exp $");
 #endif /* not lint */
 
 
@@ -3396,34 +3396,37 @@
 
 
 static int
-udf_compare_rbnodes(const struct rb_node *a, const struct rb_node *b)
+udf_compare_rbnodes(void *ctx, const void *a, const void *b)
 {
-	struct udf_node *a_node = RBTOUDFNODE(a);
-	struct udf_node *b_node = RBTOUDFNODE(b);
+	const struct udf_node *a_node = a;
+	const struct udf_node *b_node = b;
 
 	return udf_compare_icb(&a_node->loc, &b_node->loc);
 }
 
 
 static int
-udf_compare_rbnode_icb(const struct rb_node *a, const void *key)
+udf_compare_rbnode_icb(void *ctx, const void *a, const void *key)
 {
-	struct udf_node *a_node = RBTOUDFNODE(a);
+	const struct udf_node *a_node = a;
 	const struct long_ad * const icb = key;
 
 	return udf_compare_icb(&a_node->loc, icb);
 }
 
 
-static const struct rb_tree_ops udf_node_rbtree_ops = {
+static const rb_tree_ops_t udf_node_rbtree_ops = {
 	.rbto_compare_nodes = udf_compare_rbnodes,
-	.rbto_compare_key   = udf_compare_rbnode_icb,
+	.rbto_compare_key = udf_compare_rbnode_icb,
+	.rbto_node_offset = offsetof(struct udf_node, rbnode),
+	.rbto_context = NULL
 };
 
 
 void
 udf_init_nodes_tree(struct udf_mount *ump)
 {
+
 	rb_tree_init(&ump->udf_node_tree, &udf_node_rbtree_ops);
 }
 
@@ -3431,16 +3434,14 @@
 static struct udf_node *
 udf_node_lookup(struct udf_mount *ump, struct long_ad *icbptr)
 {
-	struct rb_node  *rb_node;
 	struct udf_node *udf_node;
 	struct vnode *vp;
 
 loop:
 	mutex_enter(&ump->ihash_lock);
 
-	rb_node = rb_tree_find_node(&ump->udf_node_tree, icbptr);
-	if (rb_node) {
-		udf_node = RBTOUDFNODE(rb_node);
+	udf_node = rb_tree_find_node(&ump->udf_node_tree, icbptr);
+	if (udf_node) {
 		vp = udf_node->vnode;
 		assert(vp);
 		mutex_enter(&vp->v_interlock);
@@ -3462,7 +3463,7 @@
 
 	/* add node to the rb tree */
 	mutex_enter(&ump->ihash_lock);
-		rb_tree_insert_node(&ump->udf_node_tree, &udf_node->rbnode);
+	rb_tree_insert_node(&ump->udf_node_tree, udf_node);
 	mutex_exit(&ump->ihash_lock);
 }
 
@@ -3474,7 +3475,7 @@
 
 	/* remove node from the rb tree */
 	mutex_enter(&ump->ihash_lock);
-		rb_tree_remove_node(&ump->udf_node_tree, &udf_node->rbnode);
+	rb_tree_remove_node(&ump->udf_node_tree, udf_node);
 	mutex_exit(&ump->ihash_lock);
 }
 
@@ -6367,7 +6368,7 @@
 	KASSERT(mutex_owned(&mntvnode_lock));
 
 	DPRINTF(SYNC, ("sync_pass %d\n", pass));
-	udf_node = RBTOUDFNODE(RB_TREE_MIN(&ump->udf_node_tree));
+	udf_node = RB_TREE_MIN(&ump->udf_node_tree);
 	for (;udf_node; udf_node = n_udf_node) {
 		DPRINTF(SYNC, ("."));
 
@@ -6375,9 +6376,8 @@
 		vp = udf_node->vnode;
 
 		mutex_enter(&vp->v_interlock);
-		n_udf_node = RBTOUDFNODE(rb_tree_iterate(
-			&ump->udf_node_tree, &udf_node->rbnode,
-			RB_DIR_RIGHT));
+		n_udf_node = rb_tree_iterate(&ump->udf_node_tree,
+		    udf_node, RB_DIR_RIGHT);
 
 		if (n_udf_node)
 			n_udf_node->i_flags |= IN_SYNCED;

Index: src/sys/kern/subr_lockdebug.c
diff -u src/sys/kern/subr_lockdebug.c:1.41 src/sys/kern/subr_lockdebug.c:1.42
--- src/sys/kern/subr_lockdebug.c:1.41	Tue Nov  3 00:29:11 2009
+++ src/sys/kern/subr_lockdebug.c	Fri Sep 24 22:51:50 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_lockdebug.c,v 1.41 2009/11/03 00:29:11 dyoung Exp $	*/
+/*	$NetBSD: subr_lockdebug.c,v 1.42 2010/09/24 22:51:50 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.41 2009/11/03 00:29:11 dyoung Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.42 2010/09/24 22:51:50 rmind Exp $");
 
 #include "opt_ddb.h"
 
@@ -68,7 +68,7 @@
 #define	LD_WRITE_LOCK	0x80000000
 
 typedef struct lockdebug {
-	struct rb_node	ld_rb_node;	/* must be the first member */
+	struct rb_node	ld_rb_node;
 	__cpu_simple_lock_t ld_spinlock;
 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
@@ -103,39 +103,41 @@
 static void	lockdebug_init(void);
 
 static signed int
-ld_rbto_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
+ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
 {
-	const lockdebug_t *ld1 = (const void *)n1;
-	const lockdebug_t *ld2 = (const void *)n2;
+	const lockdebug_t *ld1 = n1;
+	const lockdebug_t *ld2 = n2;
 	const uintptr_t a = (uintptr_t)ld1->ld_lock;
 	const uintptr_t b = (uintptr_t)ld2->ld_lock;
 
 	if (a < b)
-		return 1;
-	if (a > b)
 		return -1;
+	if (a > b)
+		return 1;
 	return 0;
 }
 
 static signed int
-ld_rbto_compare_key(const struct rb_node *n, const void *key)
+ld_rbto_compare_key(void *ctx, const void *n, const void *key)
 {
-	const lockdebug_t *ld = (const void *)n;
+	const lockdebug_t *ld = n;
 	const uintptr_t a = (uintptr_t)ld->ld_lock;
 	const uintptr_t b = (uintptr_t)key;
 
 	if (a < b)
-		return 1;
-	if (a > b)
 		return -1;
+	if (a > b)
+		return 1;
 	return 0;
 }
 
-static struct rb_tree ld_rb_tree;
+static rb_tree_t ld_rb_tree;
 
-static const struct rb_tree_ops ld_rb_tree_ops = {
+static const rb_tree_ops_t ld_rb_tree_ops = {
 	.rbto_compare_nodes = ld_rbto_compare_nodes,
 	.rbto_compare_key = ld_rbto_compare_key,
+	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
+	.rbto_context = NULL
 };
 
 static inline lockdebug_t *
@@ -189,8 +191,10 @@
 	lockdebug_t *ld;
 
 	ld = lockdebug_lookup1(lock);
-	if (ld == NULL)
-		panic("lockdebug_lookup: uninitialized lock (lock=%p, from=%08"PRIxPTR")", lock, where);
+	if (ld == NULL) {
+		panic("lockdebug_lookup: uninitialized lock "
+		    "(lock=%p, from=%08"PRIxPTR")", lock, where);
+	}
 	return ld;
 }
 
@@ -292,7 +296,7 @@
 	ld->ld_initaddr = initaddr;
 	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
 	lockdebug_lock_cpus();
-	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
+	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
 	lockdebug_unlock_cpus();
 	__cpu_simple_unlock(&ld_mod_lk);
 
@@ -330,7 +334,7 @@
 		return;
 	}
 	lockdebug_lock_cpus();
-	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
+	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
 	lockdebug_unlock_cpus();
 	ld->ld_lock = NULL;
 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);

Index: src/sys/net/npf/npf_session.c
diff -u src/sys/net/npf/npf_session.c:1.2 src/sys/net/npf/npf_session.c:1.3
--- src/sys/net/npf/npf_session.c:1.2	Thu Sep 16 04:53:27 2010
+++ src/sys/net/npf/npf_session.c	Fri Sep 24 22:51:50 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: npf_session.c,v 1.2 2010/09/16 04:53:27 rmind Exp $	*/
+/*	$NetBSD: npf_session.c,v 1.3 2010/09/24 22:51:50 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -85,7 +85,7 @@
 
 #ifdef _KERNEL
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: npf_session.c,v 1.2 2010/09/16 04:53:27 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: npf_session.c,v 1.3 2010/09/24 22:51:50 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -139,17 +139,13 @@
 	struct timespec 		s_atime;
 };
 
-/* Return pointer to npf_session_t from RB-tree node. (XXX fix rb-tree) */
-#define	NPF_RBN2SESENT(n)		\
-    (npf_session_t *)((uintptr_t)n - offsetof(npf_session_t, se_entry.rbnode))
-
 LIST_HEAD(npf_sesslist, npf_session);
 
 #define	SESS_HASH_BUCKETS		1024	/* XXX tune + make tunable */
 #define	SESS_HASH_MASK			(SESS_HASH_BUCKETS - 1)
 
 typedef struct {
-	struct rb_tree			sh_tree;
+	rb_tree_t			sh_tree;
 	krwlock_t			sh_lock;
 	u_int				sh_count;
 } npf_sess_hash_t;
@@ -229,10 +225,10 @@
  */
 
 static signed int
-sess_rbtree_cmp_nodes(const struct rb_node *n1, const struct rb_node *n2)
+sess_rbtree_cmp_nodes(void *ctx, const void *n1, const void *n2)
 {
-	const npf_session_t * const se1 = NPF_RBN2SESENT(n1);
-	const npf_session_t * const se2 = NPF_RBN2SESENT(n2);
+	const npf_session_t * const se1 = n1;
+	const npf_session_t * const se2 = n2;
 
 	/*
 	 * Note: must compare equivalent streams.
@@ -269,9 +265,9 @@
 }
 
 static signed int
-sess_rbtree_cmp_key(const struct rb_node *n1, const void *key)
+sess_rbtree_cmp_key(void *ctx, const void *n1, const void *key)
 {
-	const npf_session_t * const se = NPF_RBN2SESENT(n1);
+	const npf_session_t * const se = n1;
 	const npf_cache_t * const npc = key;
 	in_port_t sport, dport;
 	in_addr_t src, dst;
@@ -302,9 +298,11 @@
 	return 0;
 }
 
-static const struct rb_tree_ops sess_rbtree_ops = {
+static const rb_tree_ops_t sess_rbtree_ops = {
 	.rbto_compare_nodes = sess_rbtree_cmp_nodes,
-	.rbto_compare_key = sess_rbtree_cmp_key
+	.rbto_compare_key = sess_rbtree_cmp_key,
+	.rbto_node_offset = offsetof(npf_session_t, se_entry.rbnode),
+	.rbto_context = NULL
 };
 
 static inline npf_sess_hash_t *
@@ -484,7 +482,6 @@
     struct ifnet *ifp, const int di)
 {
 	npf_sess_hash_t *sh;
-	struct rb_node *nd;
 	npf_session_t *se;
 
 	/* Attempt to fetch and cache all relevant IPv4 data. */
@@ -519,12 +516,11 @@
 
 	/* Lookup the tree for a state entry. */
 	rw_enter(&sh->sh_lock, RW_READER);
-	nd = rb_tree_find_node(&sh->sh_tree, key);
-	if (nd == NULL) {
+	se = rb_tree_find_node(&sh->sh_tree, key);
+	if (se == NULL) {
 		rw_exit(&sh->sh_lock);
 		return NULL;
 	}
-	se = NPF_RBN2SESENT(nd);
 
 	/* Inspect the protocol data and handle state changes. */
 	if (npf_session_pstate(npc, se, di)) {
@@ -606,7 +602,7 @@
 	/* Find the hash bucket and insert the state into the tree. */
 	sh = sess_hash_bucket(npc);
 	rw_enter(&sh->sh_lock, RW_WRITER);
-	ok = rb_tree_insert_node(&sh->sh_tree, &se->se_entry.rbnode);
+	ok = rb_tree_insert_node(&sh->sh_tree, se) == se;
 	if (__predict_true(ok)) {
 		sh->sh_count++;
 		DPRINTF(("NPF: new se %p (link %p, nat %p)\n",
@@ -727,7 +723,7 @@
 npf_session_gc(struct npf_sesslist *gc_list, bool flushall)
 {
 	struct timespec tsnow;
-	npf_session_t *se;
+	npf_session_t *se, *nse;
 	u_int i;
 
 	getnanouptime(&tsnow);
@@ -735,7 +731,6 @@
 	/* Scan each session in the hash table. */
 	for (i = 0; i < SESS_HASH_BUCKETS; i++) {
 		npf_sess_hash_t *sh;
-		struct rb_node *nd;
 
 		sh = &sess_hashtbl[i];
 		if (sh->sh_count == 0) {
@@ -743,17 +738,16 @@
 		}
 		rw_enter(&sh->sh_lock, RW_WRITER);
 		/* For each (left -> right) ... */
-		nd = rb_tree_iterate(&sh->sh_tree, NULL, RB_DIR_LEFT);
-		while (nd != NULL) {
+		se = rb_tree_iterate(&sh->sh_tree, NULL, RB_DIR_LEFT);
+		while (se != NULL) {
 			/* Get item, pre-iterate, skip if not expired. */
-			se = NPF_RBN2SESENT(nd);
-			nd = rb_tree_iterate(&sh->sh_tree, nd, RB_DIR_RIGHT);
+			nse = rb_tree_iterate(&sh->sh_tree, se, RB_DIR_RIGHT);
 			if (!npf_session_expired(se, &tsnow) && !flushall) {
 				continue;
 			}
 
 			/* Expired - move to G/C list. */
-			rb_tree_remove_node(&sh->sh_tree, &se->se_entry.rbnode);
+			rb_tree_remove_node(&sh->sh_tree, se);
 			LIST_INSERT_HEAD(gc_list, se, se_entry.gclist);
 			sh->sh_count--;
 
@@ -765,6 +759,7 @@
 				    se, se->s_nat_se));
 				se->s_nat_se = NULL;
 			}
+			se = nse;
 		}
 		KASSERT(!flushall || sh->sh_count == 0);
 		rw_exit(&sh->sh_lock);
@@ -845,7 +840,6 @@
 npf_sessions_dump(void)
 {
 	npf_sess_hash_t *sh;
-	struct rb_node *nd;
 	npf_session_t *se;
 	struct timespec tsnow;
 
@@ -862,13 +856,11 @@
 			continue;
 		}
 		printf("s_bucket %d (count = %d)\n", i, sh->sh_count);
-		RB_TREE_FOREACH(nd, &sh->sh_tree) {
+		RB_TREE_FOREACH(se, &sh->sh_tree) {
 			struct timespec tsdiff;
 			struct in_addr ip;
 			int etime;
 
-			se = NPF_RBN2SESENT(nd);
-
 			timespecsub(&tsnow, &se->s_atime, &tsdiff);
 			etime = (se->s_state == SE_ESTABLISHED) ?
 			    sess_expire_table[se->s_type] : 10;

Index: src/sys/net/npf/npf_tableset.c
diff -u src/sys/net/npf/npf_tableset.c:1.1 src/sys/net/npf/npf_tableset.c:1.2
--- src/sys/net/npf/npf_tableset.c:1.1	Sun Aug 22 18:56:23 2010
+++ src/sys/net/npf/npf_tableset.c	Fri Sep 24 22:51:50 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: npf_tableset.c,v 1.1 2010/08/22 18:56:23 rmind Exp $	*/
+/*	$NetBSD: npf_tableset.c,v 1.2 2010/09/24 22:51:50 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
@@ -43,7 +43,7 @@
 
 #ifdef _KERNEL
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.1 2010/08/22 18:56:23 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.2 2010/09/24 22:51:50 rmind Exp $");
 #endif
 
 #include <sys/param.h>
@@ -62,19 +62,16 @@
 
 /* Table entry structure. */
 struct npf_tblent {
-	/* IPv4 CIDR block. */
-	in_addr_t			te_addr;
-	in_addr_t			te_mask;
+	/* Hash/tree entry. */
 	union {
 		LIST_ENTRY(npf_tblent)	hashq;
 		struct rb_node		rbnode;
 	} te_entry;
+	/* IPv4 CIDR block. */
+	in_addr_t			te_addr;
+	in_addr_t			te_mask;
 };
 
-/* Return pointer to npf_tblent_t from RB-tree node. (XXX fix rb-tree) */
-#define	NPF_RBN2TBLENT(n)		\
-    (npf_tblent_t *)((uintptr_t)n - offsetof(npf_tblent_t, te_entry.rbnode))
-
 LIST_HEAD(npf_hashl, npf_tblent);
 
 /* Table structure. */
@@ -89,7 +86,7 @@
 	u_int				t_type;
 	struct npf_hashl *		t_hashl;
 	u_long				t_hashmask;
-	struct rb_tree			t_rbtree;
+	rb_tree_t			t_rbtree;
 };
 
 /* Global table array and its lock. */
@@ -201,37 +198,39 @@
  */
 
 static signed int
-table_rbtree_cmp_nodes(const struct rb_node *n1, const struct rb_node *n2)
+table_rbtree_cmp_nodes(void *ctx, const void *n1, const void *n2)
 {
-	const npf_tblent_t *te1 = NPF_RBN2TBLENT(n1);
-	const npf_tblent_t *te2 = NPF_RBN2TBLENT(n2);
+	const npf_tblent_t * const te1 = n1;
+	const npf_tblent_t * const te2 = n2;
 	const in_addr_t x = te1->te_addr & te1->te_mask;
 	const in_addr_t y = te2->te_addr & te2->te_mask;
 
 	if (x < y)
-		return 1;
-	if (x > y)
 		return -1;
+	if (x > y)
+		return 1;
 	return 0;
 }
 
 static signed int
-table_rbtree_cmp_key(const struct rb_node *n1, const void *key)
+table_rbtree_cmp_key(void *ctx, const void *n1, const void *key)
 {
-	const npf_tblent_t *te = NPF_RBN2TBLENT(n1);
+	const npf_tblent_t * const te = n1;
 	const in_addr_t x = te->te_addr & te->te_mask;
 	const in_addr_t y = *(const in_addr_t *)key;
 
 	if (x < y)
-		return 1;
-	if (x > y)
 		return -1;
+	if (x > y)
+		return 1;
 	return 0;
 }
 
-static const struct rb_tree_ops table_rbtree_ops = {
+static const rb_tree_ops_t table_rbtree_ops = {
 	.rbto_compare_nodes = table_rbtree_cmp_nodes,
-	.rbto_compare_key = table_rbtree_cmp_key
+	.rbto_compare_key = table_rbtree_cmp_key,
+	.rbto_node_offset = offsetof(npf_tblent_t, te_entry.rbnode),
+	.rbto_context = NULL
 };
 
 /*
@@ -285,7 +284,6 @@
 npf_table_destroy(npf_table_t *t)
 {
 	npf_tblent_t *e;
-	struct rb_node *nd;
 	u_int n;
 
 	switch (t->t_type) {
@@ -299,10 +297,9 @@
 		hashdone(t->t_hashl, HASH_LIST, t->t_hashmask);
 		break;
 	case NPF_TABLE_RBTREE:
-		while ((nd = rb_tree_iterate(&t->t_rbtree, NULL,
-		    RB_DIR_RIGHT)) != NULL) {
-			e = NPF_RBN2TBLENT(nd);
-			rb_tree_remove_node(&t->t_rbtree, &e->te_entry.rbnode);
+		while ((e = rb_tree_iterate(&t->t_rbtree, NULL,
+		    RB_DIR_LEFT)) != NULL) {
+			rb_tree_remove_node(&t->t_rbtree, e);
 			pool_cache_put(tblent_cache, e);
 		}
 		break;
@@ -442,7 +439,7 @@
 		break;
 	case NPF_TABLE_RBTREE:
 		/* Insert entry.  Returns false, if duplicate. */
-		if (!rb_tree_insert_node(&t->t_rbtree, &e->te_entry.rbnode)) {
+		if (rb_tree_insert_node(&t->t_rbtree, e) != e) {
 			error = EEXIST;
 		}
 		break;
@@ -465,7 +462,6 @@
     in_addr_t addr, in_addr_t mask)
 {
 	struct npf_hashl *htbl;
-	struct rb_node *nd;
 	npf_tblent_t *e;
 	npf_table_t *t;
 	in_addr_t val;
@@ -497,10 +493,9 @@
 	case NPF_TABLE_RBTREE:
 		/* Key: (address & mask). */
 		val = addr & mask;
-		nd = rb_tree_find_node(&t->t_rbtree, &val);
-		if (__predict_true(nd != NULL)) {
-			e = NPF_RBN2TBLENT(nd);
-			rb_tree_remove_node(&t->t_rbtree, &e->te_entry.rbnode);
+		e = rb_tree_find_node(&t->t_rbtree, &val);
+		if (__predict_true(e != NULL)) {
+			rb_tree_remove_node(&t->t_rbtree, e);
 		} else {
 			error = ESRCH;
 		}
@@ -525,7 +520,6 @@
 npf_table_match_v4addr(u_int tid, in_addr_t ip4addr)
 {
 	struct npf_hashl *htbl;
-	struct rb_node *nd;
 	npf_tblent_t *e;
 	npf_table_t *t;
 
@@ -546,8 +540,7 @@
 		}
 		break;
 	case NPF_TABLE_RBTREE:
-		nd = rb_tree_find_node(&t->t_rbtree, &ip4addr);
-		e = NPF_RBN2TBLENT(nd);
+		e = rb_tree_find_node(&t->t_rbtree, &ip4addr);
 		KASSERT((ip4addr & e->te_mask) == e->te_addr);
 		break;
 	default:

Index: src/sys/nfs/nfs_node.c
diff -u src/sys/nfs/nfs_node.c:1.113 src/sys/nfs/nfs_node.c:1.114
--- src/sys/nfs/nfs_node.c:1.113	Wed Jul 21 17:52:13 2010
+++ src/sys/nfs/nfs_node.c	Fri Sep 24 22:51:50 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: nfs_node.c,v 1.113 2010/07/21 17:52:13 hannken Exp $	*/
+/*	$NetBSD: nfs_node.c,v 1.114 2010/09/24 22:51:50 rmind Exp $	*/
 
 /*
  * Copyright (c) 1989, 1993
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.113 2010/07/21 17:52:13 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.114 2010/09/24 22:51:50 rmind Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_nfs.h"
@@ -106,9 +106,6 @@
 	workqueue_destroy(nfs_sillyworkq);
 }
 
-#define	RBTONFSNODE(node) \
-	(void *)((uintptr_t)(node) - offsetof(struct nfsnode, n_rbnode))
-
 struct fh_match {
 	nfsfh_t *fhm_fhp;
 	size_t fhm_fhsize;
@@ -116,10 +113,10 @@
 };
 
 static int
-nfs_compare_nodes(const struct rb_node *parent, const struct rb_node *node)
+nfs_compare_nodes(void *ctx, const void *parent, const void *node)
 {
-	const struct nfsnode * const pnp = RBTONFSNODE(parent);
-	const struct nfsnode * const np = RBTONFSNODE(node);
+	const struct nfsnode * const pnp = parent;
+	const struct nfsnode * const np = node;
 
 	if (pnp->n_fhsize != np->n_fhsize)
 		return np->n_fhsize - pnp->n_fhsize;
@@ -128,9 +125,9 @@
 }
 
 static int
-nfs_compare_node_fh(const struct rb_node *b, const void *key)
+nfs_compare_node_fh(void *ctx, const void *b, const void *key)
 {
-	const struct nfsnode * const pnp = RBTONFSNODE(b);
+	const struct nfsnode * const pnp = b;
 	const struct fh_match * const fhm = key;
 
 	if (pnp->n_fhsize != fhm->fhm_fhsize)
@@ -139,18 +136,20 @@
 	return memcmp(fhm->fhm_fhp, pnp->n_fhp, pnp->n_fhsize);
 }
 
-static const struct rb_tree_ops nfs_node_rbtree_ops = {
+static const rb_tree_ops_t nfs_node_rbtree_ops = {
 	.rbto_compare_nodes = nfs_compare_nodes,
 	.rbto_compare_key = nfs_compare_node_fh,
+	.rbto_node_offset = offsetof(struct nfsnode, n_rbnode),
+	.rbto_context = NULL
 };
 
 void
 nfs_rbtinit(struct nfsmount *nmp)
 {
+
 	rb_tree_init(&nmp->nm_rbtree, &nfs_node_rbtree_ops);
 }
 
-
 /*
  * Look up a vnode/nfsnode by file handle.
  * Callers must check for mount points!!
@@ -158,23 +157,22 @@
  * nfsnode structure is returned.
  */
 int
-nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int lkflags)
+nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
+    int lkflags)
 {
 	struct nfsnode *np;
 	struct vnode *vp;
 	struct nfsmount *nmp = VFSTONFS(mntp);
 	int error;
 	struct fh_match fhm;
-	struct rb_node *node;
 
 	fhm.fhm_fhp = fhp;
 	fhm.fhm_fhsize = fhsize;
 
 loop:
 	rw_enter(&nmp->nm_rbtlock, RW_READER);
-	node = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
-	if (node != NULL) {
-		np = RBTONFSNODE(node);
+	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
+	if (np != NULL) {
 		vp = NFSTOV(np);
 		mutex_enter(&vp->v_interlock);
 		rw_exit(&nmp->nm_rbtlock);
@@ -234,7 +232,7 @@
 	VOP_LOCK(vp, LK_EXCLUSIVE);
 	NFS_INVALIDATE_ATTRCACHE(np);
 	uvm_vnp_setsize(vp, 0);
-	rb_tree_insert_node(&nmp->nm_rbtree, &np->n_rbnode);
+	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
 	rw_exit(&nmp->nm_rbtlock);
 
 	*npp = np;
@@ -294,7 +292,7 @@
 		vprint("nfs_reclaim: pushing active", vp);
 
 	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
-	rb_tree_remove_node(&nmp->nm_rbtree, &np->n_rbnode);
+	rb_tree_remove_node(&nmp->nm_rbtree, np);
 	rw_exit(&nmp->nm_rbtlock);
 
 	/*

Index: src/sys/rump/librump/rumpkern/vm.c
diff -u src/sys/rump/librump/rumpkern/vm.c:1.95 src/sys/rump/librump/rumpkern/vm.c:1.96
--- src/sys/rump/librump/rumpkern/vm.c:1.95	Thu Sep  9 12:23:06 2010
+++ src/sys/rump/librump/rumpkern/vm.c	Fri Sep 24 22:51:51 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: vm.c,v 1.95 2010/09/09 12:23:06 pooka Exp $	*/
+/*	$NetBSD: vm.c,v 1.96 2010/09/24 22:51:51 rmind Exp $	*/
 
 /*
  * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.95 2010/09/09 12:23:06 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.96 2010/09/24 22:51:51 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -106,29 +106,31 @@
 static unsigned vmpage_onqueue;
 
 static int
-pg_compare_key(const struct rb_node *n, const void *key)
+pg_compare_key(void *ctx, const void *n, const void *key)
 {
 	voff_t a = ((const struct vm_page *)n)->offset;
 	voff_t b = *(const voff_t *)key;
 
 	if (a < b)
-		return 1;
-	else if (a > b)
 		return -1;
+	else if (a > b)
+		return 1;
 	else
 		return 0;
 }
 
 static int
-pg_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
+pg_compare_nodes(void *ctx, const void *n1, const void *n2)
 {
 
-	return pg_compare_key(n1, &((const struct vm_page *)n2)->offset);
+	return pg_compare_key(ctx, n1, &((const struct vm_page *)n2)->offset);
 }
 
-const struct rb_tree_ops uvm_page_tree_ops = {
+const rb_tree_ops_t uvm_page_tree_ops = {
 	.rbto_compare_nodes = pg_compare_nodes,
 	.rbto_compare_key = pg_compare_key,
+	.rbto_node_offset = offsetof(struct vm_page, rb_node),
+	.rbto_context = NULL
 };
 
 /*
@@ -177,7 +179,7 @@
 	}
 
 	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
-	rb_tree_insert_node(&uobj->rb_tree, &pg->rb_node);
+	(void)rb_tree_insert_node(&uobj->rb_tree, pg);
 
 	/*
 	 * Don't put anons on the LRU page queue.  We can't flush them
@@ -215,7 +217,7 @@
 	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
 
 	uobj->uo_npages--;
-	rb_tree_remove_node(&uobj->rb_tree, &pg->rb_node);
+	rb_tree_remove_node(&uobj->rb_tree, pg);
 
 	if (!UVM_OBJ_IS_AOBJ(uobj)) {
 		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
@@ -468,7 +470,7 @@
 {
 	struct vm_page *pg;
 
-	pg = (struct vm_page *)rb_tree_find_node(&uobj->rb_tree, &off);
+	pg = rb_tree_find_node(&uobj->rb_tree, &off);
 	if (pg && !UVM_OBJ_IS_AOBJ(pg->uobject)) {
 		mutex_enter(&uvm_pageqlock);
 		TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);

Index: src/sys/sys/rb.h
diff -u src/sys/sys/rb.h:1.13 src/sys/sys/rb.h:1.14
--- src/sys/sys/rb.h:1.13	Sun Aug 16 10:57:01 2009
+++ src/sys/sys/rb.h	Fri Sep 24 22:51:51 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: rb.h,v 1.13 2009/08/16 10:57:01 yamt Exp $ */
+/*	$NetBSD: rb.h,v 1.14 2010/09/24 22:51:51 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
@@ -28,6 +28,7 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
+
 #ifndef _SYS_RB_H_
 #define	_SYS_RB_H_
 
@@ -40,7 +41,9 @@
 #include <sys/queue.h>
 #include <sys/endian.h>
 
-struct rb_node {
+__BEGIN_DECLS
+
+typedef struct rb_node {
 	struct rb_node *rb_nodes[2];
 #define	RB_DIR_LEFT		0
 #define	RB_DIR_RIGHT		1
@@ -95,7 +98,7 @@
 #ifdef RBDEBUG
 	TAILQ_ENTRY(rb_node) rb_link;
 #endif
-};
+} rb_node_t;
 
 #define RB_TREE_MIN(T) rb_tree_iterate((T), NULL, RB_DIR_LEFT)
 #define RB_TREE_MAX(T) rb_tree_iterate((T), NULL, RB_DIR_RIGHT)
@@ -124,29 +127,31 @@
 
 /*
  * rbto_compare_nodes_fn:
- *	return a positive value if the first node < the second node.
- *	return a negative value if the first node > the second node.
+ *	return a positive value if the first node > the second node.
+ *	return a negative value if the first node < the second node.
  *	return 0 if they are considered same.
  *
  * rbto_compare_key_fn:
- *	return a positive value if the node < the key.
- *	return a negative value if the node > the key.
+ *	return a positive value if the node > the key.
+ *	return a negative value if the node < the key.
  *	return 0 if they are considered same.
  */
 
-typedef signed int (*const rbto_compare_nodes_fn)(const struct rb_node *,
-    const struct rb_node *);
-typedef signed int (*const rbto_compare_key_fn)(const struct rb_node *,
-    const void *);
+typedef signed int (*const rbto_compare_nodes_fn)(void *,
+    const void *, const void *);
+typedef signed int (*const rbto_compare_key_fn)(void *,
+    const void *, const void *);
 
-struct rb_tree_ops {
+typedef struct {
 	rbto_compare_nodes_fn rbto_compare_nodes;
 	rbto_compare_key_fn rbto_compare_key;
-};
+	size_t rbto_node_offset;
+	void *rbto_context;
+} rb_tree_ops_t;
 
-struct rb_tree {
+typedef struct rb_tree {
 	struct rb_node *rbt_root;
-	const struct rb_tree_ops *rbt_ops;
+	const rb_tree_ops_t *rbt_ops;
 	struct rb_node *rbt_minmax[2];
 #ifdef RBDEBUG
 	struct rb_node_qh rbt_nodes;
@@ -160,7 +165,7 @@
 	unsigned int rbt_removal_rebalance_calls;
 	unsigned int rbt_removal_rebalance_passes;
 #endif
-};
+} rb_tree_t;
 
 #ifdef RBSTATS
 #define	RBSTAT_INC(v)	((void)((v)++))
@@ -170,22 +175,20 @@
 #define	RBSTAT_DEC(v)	do { } while (/*CONSTCOND*/0)
 #endif
 
-void	rb_tree_init(struct rb_tree *, const struct rb_tree_ops *);
-bool	rb_tree_insert_node(struct rb_tree *, struct rb_node *);
-struct rb_node	*
-	rb_tree_find_node(struct rb_tree *, const void *);
-struct rb_node	*
-	rb_tree_find_node_geq(struct rb_tree *, const void *);
-struct rb_node	*
-	rb_tree_find_node_leq(struct rb_tree *, const void *);
-void	rb_tree_remove_node(struct rb_tree *, struct rb_node *);
-struct rb_node *
-	rb_tree_iterate(struct rb_tree *, struct rb_node *, const unsigned int);
+void	rb_tree_init(rb_tree_t *, const rb_tree_ops_t *);
+void *	rb_tree_insert_node(rb_tree_t *, void *);
+void *	rb_tree_find_node(rb_tree_t *, const void *);
+void *	rb_tree_find_node_geq(rb_tree_t *, const void *);
+void *	rb_tree_find_node_leq(rb_tree_t *, const void *);
+void	rb_tree_remove_node(rb_tree_t *, void *);
+void *	rb_tree_iterate(rb_tree_t *, void *, const unsigned int);
 #ifdef RBDEBUG
-void	rb_tree_check(const struct rb_tree *, bool);
+void	rb_tree_check(const rb_tree_t *, bool);
 #endif
 #ifdef RBSTATS
-void	rb_tree_depths(const struct rb_tree *, size_t *);
+void	rb_tree_depths(const rb_tree_t *, size_t *);
 #endif
 
+__END_DECLS
+
 #endif	/* _SYS_RB_H_*/

Index: src/sys/uvm/uvm_map.c
diff -u src/sys/uvm/uvm_map.c:1.292 src/sys/uvm/uvm_map.c:1.293
--- src/sys/uvm/uvm_map.c:1.292	Tue Jun 22 18:34:50 2010
+++ src/sys/uvm/uvm_map.c	Fri Sep 24 22:51:51 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.292 2010/06/22 18:34:50 rmind Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.293 2010/09/24 22:51:51 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.292 2010/06/22 18:34:50 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.293 2010/09/24 22:51:51 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -318,53 +318,56 @@
 int _uvm_tree_sanity(struct vm_map *);
 static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);
 
-CTASSERT(offsetof(struct vm_map_entry, rb_node) == 0);
 #define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
 #define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
 #define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
 #define	PARENT_ENTRY(map, entry) \
 	(ROOT_ENTRY(map) == (entry) \
-	    ? NULL \
-	    : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
+	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
 
 static int
-uvm_map_compare_nodes(const struct rb_node *nparent,
-	const struct rb_node *nkey)
+uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
 {
-	const struct vm_map_entry *eparent = (const void *) nparent;
-	const struct vm_map_entry *ekey = (const void *) nkey;
+	const struct vm_map_entry *eparent = nparent;
+	const struct vm_map_entry *ekey = nkey;
 
 	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
 	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);
 
-	if (ekey->start < eparent->start)
+	if (eparent->start < ekey->start)
 		return -1;
-	if (ekey->start >= eparent->end)
+	if (eparent->end >= ekey->start)
 		return 1;
 	return 0;
 }
 
 static int
-uvm_map_compare_key(const struct rb_node *nparent, const void *vkey)
+uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
 {
-	const struct vm_map_entry *eparent = (const void *) nparent;
+	const struct vm_map_entry *eparent = nparent;
 	const vaddr_t va = *(const vaddr_t *) vkey;
 
-	if (va < eparent->start)
+	if (eparent->start < va)
 		return -1;
-	if (va >= eparent->end)
+	if (eparent->end >= va)
 		return 1;
 	return 0;
 }
 
-static const struct rb_tree_ops uvm_map_tree_ops = {
+static const rb_tree_ops_t uvm_map_tree_ops = {
 	.rbto_compare_nodes = uvm_map_compare_nodes,
 	.rbto_compare_key = uvm_map_compare_key,
+	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
+	.rbto_context = NULL
 };
 
+/*
+ * uvm_rb_gap: return the gap size between our entry and next entry.
+ */
 static inline vsize_t
 uvm_rb_gap(const struct vm_map_entry *entry)
 {
+
 	KASSERT(entry->next != NULL);
 	return entry->next->start - entry->end;
 }
@@ -402,16 +405,18 @@
 	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
 		struct vm_map_entry *brother;
 		vsize_t maxgap = parent->gap;
+		unsigned int which;
 
 		KDASSERT(parent->gap == uvm_rb_gap(parent));
 		if (maxgap < entry->maxgap)
 			maxgap = entry->maxgap;
 		/*
-		 * Since we work our towards the root, we know entry's maxgap
-		 * value is ok but its brothers may now be out-of-date due
-		 * rebalancing.  So refresh it.
+		 * Since we work towards the root, we know entry's maxgap
+		 * value is OK, but its brothers may now be out-of-date due
+		 * to rebalancing.  So refresh it.
 		 */
-		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER];
+		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
+		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
 		if (brother != NULL) {
 			KDASSERT(brother->gap == uvm_rb_gap(brother));
 			brother->maxgap = uvm_rb_maxgap(brother);
@@ -427,12 +432,16 @@
 static void
 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
 {
+	struct vm_map_entry *ret;
+
 	entry->gap = entry->maxgap = uvm_rb_gap(entry);
 	if (entry->prev != &map->header)
 		entry->prev->gap = uvm_rb_gap(entry->prev);
 
-	if (!rb_tree_insert_node(&map->rb_tree, &entry->rb_node))
-		panic("uvm_rb_insert: map %p: duplicate entry?", map);
+	ret = rb_tree_insert_node(&map->rb_tree, entry);
+	KASSERTMSG(ret == entry,
+	    ("uvm_rb_insert: map %p: duplicate entry %p", map, ret)
+	);
 
 	/*
 	 * If the previous entry is not our immediate left child, then it's an
@@ -460,7 +469,7 @@
 	if (entry->next != &map->header)
 		next_parent = PARENT_ENTRY(map, entry->next);
 
-	rb_tree_remove_node(&map->rb_tree, &entry->rb_node);
+	rb_tree_remove_node(&map->rb_tree, entry);
 
 	/*
 	 * If the previous node has a new parent, fixup the tree starting
@@ -598,8 +607,7 @@
 
 	for (tmp = map->header.next; tmp != &map->header;
 	    tmp = tmp->next, i++) {
-		trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node,
-		    RB_DIR_LEFT);
+		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
 		if (trtmp == NULL)
 			trtmp = &map->header;
 		if (tmp->prev != trtmp) {
@@ -607,8 +615,7 @@
 			    i, tmp, tmp->prev, trtmp);
 			goto error;
 		}
-		trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node,
-		    RB_DIR_RIGHT);
+		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
 		if (trtmp == NULL)
 			trtmp = &map->header;
 		if (tmp->next != trtmp) {
@@ -616,7 +623,7 @@
 			    i, tmp, tmp->next, trtmp);
 			goto error;
 		}
-		trtmp = (void *)rb_tree_find_node(&map->rb_tree, &tmp->start);
+		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
 		if (trtmp != tmp) {
 			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
 			    PARENT_ENTRY(map, tmp));

Index: src/sys/uvm/uvm_object.h
diff -u src/sys/uvm/uvm_object.h:1.26 src/sys/uvm/uvm_object.h:1.27
--- src/sys/uvm/uvm_object.h:1.26	Wed Jun  4 15:06:04 2008
+++ src/sys/uvm/uvm_object.h	Fri Sep 24 22:51:51 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_object.h,v 1.26 2008/06/04 15:06:04 ad Exp $	*/
+/*	$NetBSD: uvm_object.h,v 1.27 2010/09/24 22:51:51 rmind Exp $	*/
 
 /*
  *
@@ -105,7 +105,7 @@
 #define	UVM_OBJ_IS_AOBJ(uobj)						\
 	((uobj)->pgops == &aobj_pager)
 
-extern const struct rb_tree_ops uvm_page_tree_ops;
+extern const rb_tree_ops_t uvm_page_tree_ops;
 
 #define	UVM_OBJ_INIT(uobj, ops, refs)					\
 	do {								\

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.155 src/sys/uvm/uvm_page.c:1.156
--- src/sys/uvm/uvm_page.c:1.155	Sun Apr 25 15:54:14 2010
+++ src/sys/uvm/uvm_page.c	Fri Sep 24 22:51:51 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.155 2010/04/25 15:54:14 ad Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.156 2010/09/24 22:51:51 rmind Exp $	*/
 
 /*
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.155 2010/04/25 15:54:14 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.156 2010/09/24 22:51:51 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -186,37 +186,39 @@
  */
 
 static signed int
-uvm_page_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
+uvm_page_compare_nodes(void *ctx, const void *n1, const void *n2)
 {
-	const struct vm_page *pg1 = (const void *)n1;
-	const struct vm_page *pg2 = (const void *)n2;
+	const struct vm_page *pg1 = n1;
+	const struct vm_page *pg2 = n2;
 	const voff_t a = pg1->offset;
 	const voff_t b = pg2->offset;
 
 	if (a < b)
-		return 1;
-	if (a > b)
 		return -1;
+	if (a > b)
+		return 1;
 	return 0;
 }
 
 static signed int
-uvm_page_compare_key(const struct rb_node *n, const void *key)
+uvm_page_compare_key(void *ctx, const void *n, const void *key)
 {
-	const struct vm_page *pg = (const void *)n;
+	const struct vm_page *pg = n;
 	const voff_t a = pg->offset;
 	const voff_t b = *(const voff_t *)key;
 
 	if (a < b)
-		return 1;
-	if (a > b)
 		return -1;
+	if (a > b)
+		return 1;
 	return 0;
 }
 
-const struct rb_tree_ops uvm_page_tree_ops = {
+const rb_tree_ops_t uvm_page_tree_ops = {
 	.rbto_compare_nodes = uvm_page_compare_nodes,
 	.rbto_compare_key = uvm_page_compare_key,
+	.rbto_node_offset = offsetof(struct vm_page, rb_node),
+	.rbto_context = NULL
 };
 
 /*
@@ -270,11 +272,11 @@
 static inline void
 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
 {
-	bool success;
+	struct vm_page *ret;
 
 	KASSERT(uobj == pg->uobject);
-	success = rb_tree_insert_node(&uobj->rb_tree, &pg->rb_node);
-	KASSERT(success);
+	ret = rb_tree_insert_node(&uobj->rb_tree, pg);
+	KASSERT(ret == pg);
 }
 
 static inline void
@@ -328,7 +330,7 @@
 {
 
 	KASSERT(uobj == pg->uobject);
-	rb_tree_remove_node(&uobj->rb_tree, &pg->rb_node);
+	rb_tree_remove_node(&uobj->rb_tree, pg);
 }
 
 static inline void
@@ -1726,12 +1728,12 @@
 
 	KASSERT(mutex_owned(&obj->vmobjlock));
 
-	pg = (struct vm_page *)rb_tree_find_node(&obj->rb_tree, &off);
+	pg = rb_tree_find_node(&obj->rb_tree, &off);
 
 	KASSERT(pg == NULL || obj->uo_npages != 0);
 	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
 		(pg->flags & PG_BUSY) != 0);
-	return(pg);
+	return pg;
 }
 
 /*
