It's not quite an even trade — we still lose some bytes, because the subr_tree code is a bit bigger than what RB_GENERATE produced. We'll be ahead after the next conversion, though.
ok? Index: conf/files =================================================================== RCS file: /cvs/src/sys/conf/files,v retrieving revision 1.624 diff -u -p -r1.624 files --- conf/files 13 Aug 2016 20:35:57 -0000 1.624 +++ conf/files 2 Sep 2016 13:36:22 -0000 @@ -694,6 +694,7 @@ file kern/subr_hibernate.c hibernate file kern/subr_log.c file kern/subr_poison.c diagnostic file kern/subr_pool.c +file kern/subr_tree.c file kern/dma_alloc.c file kern/subr_prf.c file kern/subr_prof.c Index: sys/tree.h =================================================================== RCS file: /cvs/src/sys/sys/tree.h,v retrieving revision 1.15 diff -u -p -r1.15 tree.h --- sys/tree.h 2 Sep 2016 11:17:14 -0000 1.15 +++ sys/tree.h 2 Sep 2016 13:36:22 -0000 @@ -745,7 +745,7 @@ name##_RB_MINMAX(struct name *head, int ((x) != NULL) && ((y) = name##_RB_PREV(x), 1); \ (x) = (y)) -#if 0 && defined(_KERNEL) +#ifdef _KERNEL /* * Copyright (c) 2016 David Gwynne <d...@openbsd.org> Index: sys/pool.h =================================================================== RCS file: /cvs/src/sys/sys/pool.h,v retrieving revision 1.59 diff -u -p -r1.59 pool.h --- sys/pool.h 21 Apr 2016 04:09:28 -0000 1.59 +++ sys/pool.h 2 Sep 2016 13:36:22 -0000 @@ -121,7 +121,7 @@ struct pool { int pr_ipl; - RB_HEAD(phtree, pool_item_header) + RBT_HEAD(phtree, pool_item_header) pr_phtree; u_int pr_align; Index: kern/subr_pool.c =================================================================== RCS file: /cvs/src/sys/kern/subr_pool.c,v retrieving revision 1.194 diff -u -p -r1.194 subr_pool.c --- kern/subr_pool.c 15 Jan 2016 11:21:58 -0000 1.194 +++ kern/subr_pool.c 2 Sep 2016 13:36:23 -0000 @@ -79,7 +79,7 @@ struct pool_item_header { TAILQ_ENTRY(pool_item_header) ph_pagelist; /* pool page list */ XSIMPLEQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */ - RB_ENTRY(pool_item_header) + RBT_ENTRY(pool_item_header) ph_node; /* Off-page page headers */ int ph_nmissing; /* # of chunks in use */ caddr_t ph_page; 
/* this page's address */ @@ -165,8 +165,11 @@ struct task pool_gc_task = TASK_INITIALI int pool_wait_free = 1; int pool_wait_gc = 8; +RBT_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare); + static inline int -phtree_compare(struct pool_item_header *a, struct pool_item_header *b) +phtree_compare(const struct pool_item_header *a, + const struct pool_item_header *b) { vaddr_t va = (vaddr_t)a->ph_page; vaddr_t vb = (vaddr_t)b->ph_page; @@ -180,8 +183,7 @@ phtree_compare(struct pool_item_header * return (0); } -RB_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare); -RB_GENERATE(phtree, pool_item_header, ph_node, phtree_compare); +RBT_GENERATE(phtree, pool_item_header, ph_node, phtree_compare); /* * Return the pool page header based on page address. @@ -200,7 +202,7 @@ pr_find_pagehead(struct pool *pp, void * } key.ph_page = v; - ph = RB_NFIND(phtree, &pp->pr_phtree, &key); + ph = RBT_NFIND(phtree, &pp->pr_phtree, &key); if (ph == NULL) panic("%s: %s: page header missing", __func__, pp->pr_wchan); @@ -292,7 +294,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_hardlimit_ratecap.tv_usec = 0; pp->pr_hardlimit_warning_last.tv_sec = 0; pp->pr_hardlimit_warning_last.tv_usec = 0; - RB_INIT(&pp->pr_phtree); + RBT_INIT(phtree, &pp->pr_phtree); /* * Use the space between the chunks and the page header @@ -847,7 +849,7 @@ pool_p_insert(struct pool *pp, struct po TAILQ_INSERT_TAIL(&pp->pr_emptypages, ph, ph_pagelist); if (!POOL_INPGHDR(pp)) - RB_INSERT(phtree, &pp->pr_phtree, ph); + RBT_INSERT(phtree, &pp->pr_phtree, ph); pp->pr_nitems += pp->pr_itemsperpage; pp->pr_nidle++; @@ -868,7 +870,7 @@ pool_p_remove(struct pool *pp, struct po pp->pr_nitems -= pp->pr_itemsperpage; if (!POOL_INPGHDR(pp)) - RB_REMOVE(phtree, &pp->pr_phtree, ph); + RBT_REMOVE(phtree, &pp->pr_phtree, ph); TAILQ_REMOVE(&pp->pr_emptypages, ph, ph_pagelist); pool_update_curpage(pp);