Instead of open coding the bit accesses, use standard-style *PageDeferred*
macros.

Signed-off-by: Andi Kleen <[EMAIL PROTECTED]>

---
 arch/x86/mm/pageattr_32.c |   14 ++++++++++----
 arch/x86/mm/pageattr_64.c |   11 +++++++++--
 2 files changed, 19 insertions(+), 6 deletions(-)
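
For reference, a minimal userspace sketch of the wrapper pattern this patch
introduces.  The macro names mirror the hunks below; the mock struct page,
the bit number, and the plain (non-atomic) bit helpers are stand-ins for the
kernel's test_bit()/set_bit()/clear_bit()/test_and_set_bit() on page->flags,
so the example compiles outside the tree:

	#include <stdio.h>

	struct page { unsigned long flags; };   /* mock of struct page */

	#define PG_arch_1   30                  /* arbitrary bit for the sketch */
	#define PG_deferred PG_arch_1

	/* Non-atomic stand-ins; the in-kernel bitops are atomic. */
	#define PageDeferred(p)        (((p)->flags >> PG_deferred) & 1UL)
	#define SetPageDeferred(p)     ((p)->flags |= 1UL << PG_deferred)
	#define ClearPageDeferred(p)   ((p)->flags &= ~(1UL << PG_deferred))
	#define TestSetPageDeferred(p) \
		({ int __old = PageDeferred(p); SetPageDeferred(p); __old; })

	int main(void)
	{
		struct page pg = { .flags = 0 };

		/* First call claims the page, as save_page() does below. */
		if (!TestSetPageDeferred(&pg))
			printf("page queued for deferred flush\n");

		/* Second call sees the bit already set and does not requeue. */
		if (!TestSetPageDeferred(&pg))
			printf("not reached\n");

		/* global_flush_tlb() clears the bit once the page is handled. */
		ClearPageDeferred(&pg);
		printf("deferred bit now %lu\n", PageDeferred(&pg));
		return 0;
	}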

Index: linux/arch/x86/mm/pageattr_64.c
===================================================================
--- linux.orig/arch/x86/mm/pageattr_64.c
+++ linux/arch/x86/mm/pageattr_64.c
@@ -18,6 +18,13 @@ struct flush {
        unsigned long addr;
 };
 
+#define PG_deferred PG_arch_1
+
+#define PageDeferred(p) test_bit(PG_deferred, &(p)->flags)
+#define SetPageDeferred(p) set_bit(PG_deferred, &(p)->flags)
+#define ClearPageDeferred(p) clear_bit(PG_deferred, &(p)->flags)
+#define TestSetPageDeferred(p) test_and_set_bit(PG_deferred, &(p)->flags)
+
 pte_t *lookup_address(unsigned long address, int *level)
 { 
        pgd_t *pgd = pgd_offset_k(address);
@@ -106,7 +113,7 @@ static LIST_HEAD(flush_pages);
 
 static inline void save_page(struct page *fpage)
 {
-       if (!test_and_set_bit(PG_arch_1, &fpage->flags))
+       if (!TestSetPageDeferred(fpage))
                list_add(&fpage->lru, &deferred_pages);
 }
 
@@ -287,7 +294,7 @@ void global_flush_tlb(void)
 
        list_for_each_entry_safe(pg, next, &free_pages, lru) {
                list_del(&pg->lru);
-               clear_bit(PG_arch_1, &pg->flags);
+               ClearPageDeferred(pg);
                if (page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
Index: linux/arch/x86/mm/pageattr_32.c
===================================================================
--- linux.orig/arch/x86/mm/pageattr_32.c
+++ linux/arch/x86/mm/pageattr_32.c
@@ -14,8 +14,14 @@
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 
-/* Protected by init_mm.mmap_sem */
-/* Variables protected by cpa_lock */
+#define PG_deferred PG_arch_1
+
+#define PageDeferred(p) test_bit(PG_deferred, &(p)->flags)
+#define SetPageDeferred(p) set_bit(PG_deferred, &(p)->flags)
+#define ClearPageDeferred(p) clear_bit(PG_deferred, &(p)->flags)
+#define TestSetPageDeferred(p) test_and_set_bit(PG_deferred, &(p)->flags)
+
+/* Variables protected by init_mm.mmap_sem */
 static int full_flush;
 static struct list_head df_list = LIST_HEAD_INIT(df_list);
 static LIST_HEAD(flush_pages);
@@ -150,7 +156,7 @@ static inline void revert_page(struct pa
 
 static inline void save_page(struct page *kpte_page)
 {
-       if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
+       if (!TestSetPageDeferred(kpte_page))
                list_add(&kpte_page->lru, &df_list);
 }
 
@@ -285,7 +291,7 @@ void global_flush_tlb(void)
 
        list_for_each_entry_safe(pg, next, &free_pages, lru) {
                list_del(&pg->lru);
-               clear_bit(PG_arch_1, &pg->flags);
+               ClearPageDeferred(pg);
                if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
--