Suppose:
CONFIG_SPARSEMEM is enabled.
CONFIG_DMA_API_DEBUG or CONFIG_CMA is enabled.

Then virt_to_page() or phys_to_page() will be called on the NULL cpu_addr.
In __pfn_to_page(), __sec = __pfn_to_section(__pfn) is NULL, so the access
to section->section_mem_map in __section_mem_map_addr() triggers an
exception.

---------

#define __pfn_to_page(pfn)                              \
({      unsigned long __pfn = (pfn);                    \
        struct mem_section *__sec = __pfn_to_section(__pfn);    \
        __section_mem_map_addr(__sec) + __pfn;          \
})

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}
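
(Both snippets above are the CONFIG_SPARSEMEM definitions, from
include/asm-generic/memory_model.h and include/linux/mmzone.h
respectively.)

For reference, here is how __pfn_to_section() can end up returning NULL:
with CONFIG_SPARSEMEM_EXTREME the section roots are pointers, and a pfn
that no mem_section covers resolves to a NULL root (quoted from
include/linux/mmzone.h of roughly the same kernel version):

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}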


On 2016/3/7 17:21, Zhen Lei wrote:
> Do this to be consistent with kfree(), which tolerates a NULL ptr.
> 
> Signed-off-by: Zhen Lei <thunder.leiz...@huawei.com>
> ---
>  include/linux/dma-mapping.h | 5 ++++-
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
> index 75857cd..fdd4294 100644
> --- a/include/linux/dma-mapping.h
> +++ b/include/linux/dma-mapping.h
> @@ -402,7 +402,10 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
>  static inline void dma_free_coherent(struct device *dev, size_t size,
>               void *cpu_addr, dma_addr_t dma_handle)
>  {
> -     return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
> +     if (unlikely(!cpu_addr))
> +             return;
> +
> +     dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
>  }
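
This matches the common driver cleanup pattern of freeing buffers
unconditionally. A minimal sketch of a caller that benefits (foo_dev,
foo->buf and the other names here are hypothetical, not from the patch):

static void foo_teardown(struct foo_dev *foo)
{
        /*
         * foo->buf may still be NULL if probe failed before the
         * allocation; with this change the unconditional free is
         * safe, matching the kfree(NULL) idiom.
         */
        dma_free_coherent(foo->dev, FOO_BUF_SIZE, foo->buf, foo->dma_handle);
        kfree(foo->scratch);    /* kfree() already tolerates NULL */
}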
> 
>  static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
> --
> 2.5.0
> 
