On Thu, 24 Sep 2020 07:16:41 -0400 Miaohe Lin <[email protected]> wrote:

> Add an else to split the mutually exclusive cases and avoid an unnecessary
> check. It doesn't seem to change code generation (the compiler is smart),
> but I think it helps readability.
> 
> ...
>
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -58,11 +58,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
>  static void check_element(mempool_t *pool, void *element)
>  {
>       /* Mempools backed by slab allocator */
> -     if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
> +     if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
>               __check_element(pool, element, ksize(element));
> -
>       /* Mempools backed by page allocator */
> -     if (pool->free == mempool_free_pages) {
> +     } else if (pool->free == mempool_free_pages) {
>               int order = (int)(long)pool->pool_data;
>               void *addr = kmap_atomic((struct page *)element);
>  
> @@ -82,11 +81,10 @@ static void __poison_element(void *element, size_t size)
>  static void poison_element(mempool_t *pool, void *element)
>  {
>       /* Mempools backed by slab allocator */
> -     if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> +     if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
>               __poison_element(element, ksize(element));
> -
>       /* Mempools backed by page allocator */
> -     if (pool->alloc == mempool_alloc_pages) {
> +     } else if (pool->alloc == mempool_alloc_pages) {
>               int order = (int)(long)pool->pool_data;
>               void *addr = kmap_atomic((struct page *)element);
>  

OK, I guess.  But the comments are now in the wrong place.

--- a/mm/mempool.c~mm-mempool-add-else-to-split-mutually-exclusive-case-fix
+++ a/mm/mempool.c
@@ -60,8 +60,8 @@ static void check_element(mempool_t *poo
        /* Mempools backed by slab allocator */
        if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
                __check_element(pool, element, ksize(element));
-       /* Mempools backed by page allocator */
        } else if (pool->free == mempool_free_pages) {
+               /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);
 
@@ -83,8 +83,8 @@ static void poison_element(mempool_t *po
        /* Mempools backed by slab allocator */
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
                __poison_element(element, ksize(element));
-       /* Mempools backed by page allocator */
        } else if (pool->alloc == mempool_alloc_pages) {
+               /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);
 
_
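
For clarity, a sketch of how check_element() should read once the above fix
is folded in; the tail of the page-allocator branch is elided here, as it is
in the hunks above:

	static void check_element(mempool_t *pool, void *element)
	{
		/* Mempools backed by slab allocator */
		if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
			__check_element(pool, element, ksize(element));
		} else if (pool->free == mempool_free_pages) {
			/* Mempools backed by page allocator */
			int order = (int)(long)pool->pool_data;
			void *addr = kmap_atomic((struct page *)element);

			/* ... remainder of the branch unchanged ... */
		}
	}

poison_element() takes the same shape, with the pool->alloc callbacks in
place of pool->free.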
