With the anchor node at the top of the rbtree, there is always a valid node for rb_next() to return, so cached_node is only ever NULL during the window before the first allocation. Initialising it to point at the anchor node gets rid of that window and makes the NULL checking entirely redundant.
Signed-off-by: Robin Murphy <robin.mur...@arm.com>
---
Oops, spotted this one slightly too late. This could be squashed into
patch #5 (which I'll do myself if there's any cause to resend the whole
series again).

Robin.

 drivers/iommu/iova.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index a7af8273fa98..ec443c0a8319 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -51,7 +51,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,

 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
-	iovad->cached_node = NULL;
+	iovad->cached_node = &iovad->anchor.node;
 	iovad->cached32_node = NULL;
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
@@ -120,10 +120,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
 	if (limit_pfn <= iovad->dma_32bit_pfn && iovad->cached32_node)
 		return iovad->cached32_node;

-	if (iovad->cached_node)
-		return iovad->cached_node;
-
-	return &iovad->anchor.node;
+	return iovad->cached_node;
 }

 static void
@@ -141,14 +138,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	struct iova *cached_iova;
 	struct rb_node **curr;

-	if (free->pfn_hi < iovad->dma_32bit_pfn)
+	if (free->pfn_hi < iovad->dma_32bit_pfn && iovad->cached32_node)
 		curr = &iovad->cached32_node;
 	else
 		curr = &iovad->cached_node;

-	if (!*curr)
-		return;
-
 	cached_iova = rb_entry(*curr, struct iova, node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
 		*curr = rb_next(&free->node);
--
2.13.4.dirty