There is (was) yet another function that loads a new page table base into the cr3 register: load_cr3(). It is nothing more than write_cr3() with an implicit __pa() on its pgd argument, so we don't need it. Remove it and use write_cr3(__pa(pgd)) directly at the call sites.
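For reference, the equivalence the conversion relies on is only the __pa() translation. A minimal sketch of the two helpers follows; the write_cr3() body is assumed from asm-x86_64/system.h and is not part of this patch:

/*
 * Sketch only, not introduced by this patch: write_cr3() as assumed
 * to exist in asm-x86_64/system.h, taking a physical address.
 */
static inline void write_cr3(unsigned long val)
{
	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}

/*
 * The helper being removed was just a wrapper that did the
 * pointer-to-physical conversion before writing cr3, so callers
 * can open-code the __pa() themselves.
 */
static inline void load_cr3(pgd_t *pgd)
{
	write_cr3(__pa(pgd));
}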
Signed-off-by: Glauber de Oliveira Costa <[EMAIL PROTECTED]>
--
Glauber de Oliveira Costa
Red Hat Inc.
"Free as in Freedom"
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index af1ec4d..f338fc5 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -76,7 +76,7 @@ static inline void leave_mm(int cpu)
 	if (read_pda(mmu_state) == TLBSTATE_OK)
 		BUG();
 	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
-	load_cr3(swapper_pg_dir);
+	write_cr3(__pa(swapper_pg_dir));
 }
 
 /*
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index 09add28..7781e99 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -22,11 +22,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #endif
 }
 
-static inline void load_cr3(pgd_t *pgd)
-{
-	asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
-}
-
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
@@ -39,7 +34,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		write_pda(active_mm, next);
 #endif
 		cpu_set(cpu, next->cpu_vm_mask);
-		load_cr3(next->pgd);
+		write_cr3(__pa(next->pgd));
 
 		if (unlikely(next->context.ldt != prev->context.ldt))
 			load_LDT_nolock(&next->context);
@@ -54,7 +49,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
 			 */
-			load_cr3(next->pgd);
+			write_cr3(__pa(next->pgd));
 			load_LDT_nolock(&next->context);
 		}
 	}