This patch makes memmap_valid_within() return bool, since the function only ever returns one or zero.
It also refactors memmap_valid_within() for simplicity. No functional change.

Signed-off-by: Yaowei Bai <baiyao...@cmss.chinamobile.com>
---
 include/linux/mmzone.h |  6 +++---
 mm/mmzone.c            | 10 ++--------
 2 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9963846..b9b59bb8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1202,13 +1202,13 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  * the zone and PFN linkages are still valid. This is expensive, but walkers
  * of the full memmap are extremely rare.
  */
-int memmap_valid_within(unsigned long pfn,
+bool memmap_valid_within(unsigned long pfn,
 					struct page *page, struct zone *zone);
 #else
-static inline int memmap_valid_within(unsigned long pfn,
+static inline bool memmap_valid_within(unsigned long pfn,
 					struct page *page, struct zone *zone)
 {
-	return 1;
+	return true;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 7d87ebb..de0824e 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -72,16 +72,10 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
 }
 
 #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
-int memmap_valid_within(unsigned long pfn,
+bool memmap_valid_within(unsigned long pfn,
 					struct page *page, struct zone *zone)
 {
-	if (page_to_pfn(page) != pfn)
-		return 0;
-
-	if (page_zone(page) != zone)
-		return 0;
-
-	return 1;
+	return page_to_pfn(page) == pfn && page_zone(page) == zone;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-- 
1.9.1
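
For reviewers, a minimal sketch of how a full-memmap walker typically consumes this predicate, loosely modeled on the pageblock walkers in mm/vmstat.c. This is illustrative only and not part of the patch; count_valid_pageblocks() is a made-up name:

/*
 * Hypothetical example (not part of this patch): count the pageblocks
 * in a zone whose memmap entries are still valid. Walks the zone one
 * pageblock at a time and skips PFNs without a memmap entry, as well
 * as holes punched into the memmap by arch code.
 */
static unsigned long count_valid_pageblocks(struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	for (; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap. */
		if (!memmap_valid_within(pfn, page, zone))
			continue;
		count++;
	}
	return count;
}

With the bool return type, call sites like this read naturally as a predicate check rather than an integer comparison.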