This fixes PAGE_ALIGN for greater than 32-bit addresses.
Also fix up some whitespace in lib/arm/asm/page.h

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/arm/asm/page.h | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/lib/arm/asm/page.h b/lib/arm/asm/page.h
index 606d76f5775cf..4602d735b7886 100644
--- a/lib/arm/asm/page.h
+++ b/lib/arm/asm/page.h
@@ -6,16 +6,16 @@
  * This work is licensed under the terms of the GNU LGPL, version 2.
  */
 
+#include "const.h"
+
 #define PAGE_SHIFT             12
-#ifndef __ASSEMBLY__
-#define PAGE_SIZE              (1UL << PAGE_SHIFT)
-#else
-#define PAGE_SIZE              (1 << PAGE_SHIFT)
-#endif
+#define PAGE_SIZE              (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK              (~(PAGE_SIZE-1))
-#define PAGE_ALIGN(addr)       (((addr) + (PAGE_SIZE-1)) & PAGE_MASK)
 
 #ifndef __ASSEMBLY__
+
+#define PAGE_ALIGN(addr)       ALIGN(addr, PAGE_SIZE)
+
 #include <asm/setup.h>
 
 #ifndef __virt_to_phys
@@ -26,8 +26,9 @@
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define __pa(x)                        __virt_to_phys((unsigned long)(x))
 
-#define virt_to_pfn(kaddr)      (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
-#endif
+#define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
 
-#endif
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASMARM_PAGE_H_ */
-- 
1.9.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to