Make bt_ioremap() usable before paging_init() by providing an early
implementation of set_fixmap() that works before the kernel page
tables have been set up. With this in place, boot_ioremap() can be
replaced by bt_ioremap().
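
As a rough illustration (not part of this patch; parse_fw_table() is a
made-up caller), code running before paging_init() could then do:

	static void __init parse_fw_table(unsigned long phys, unsigned long len)
	{
		void *virt;

		/* bt_ioremap() now works both before and after paging_init() */
		virt = bt_ioremap(phys, len);
		if (!virt)
			return;
		/* ... parse the table through 'virt' ... */
		bt_iounmap(virt, len);
	}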

Signed-off-by: Huang Ying <[EMAIL PROTECTED]>

---
 arch/x86/kernel/setup_32.c |    1 
 arch/x86/mm/init_32.c      |    2 +
 arch/x86/mm/ioremap_32.c   |   87 +++++++++++++++++++++++++++++++++++++++++++--
 include/asm-x86/io_32.h    |    3 +
 4 files changed, 91 insertions(+), 2 deletions(-)

--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap_32.c
@@ -208,6 +208,89 @@ void iounmap(volatile void __iomem *addr
 }
 EXPORT_SYMBOL(iounmap);
 
+static __initdata int after_paging_init;
+static __initdata unsigned long bm_pte[1024]
+                               __attribute__((aligned(PAGE_SIZE)));
+
+static inline unsigned long * __init bt_ioremap_pgd(unsigned long addr)
+{
+       return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
+}
+
+static inline unsigned long * __init bt_ioremap_pte(unsigned long addr)
+{
+       return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
+}
+
+void __init bt_ioremap_init(void)
+{
+       unsigned long *pgd;
+
+       pgd = bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+       memset(bm_pte, 0, sizeof(bm_pte));
+       *pgd = __pa(bm_pte) | _PAGE_TABLE;
+       BUG_ON(pgd != bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
+}
+
+void __init bt_ioremap_clear(void)
+{
+       unsigned long *pgd;
+
+       pgd = bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+       *pgd = 0;
+       __flush_tlb_all();
+}
+
+void __init bt_ioremap_reset(void)
+{
+       enum fixed_addresses idx;
+       unsigned long *pte, phys, addr;
+
+       after_paging_init = 1;
+       for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
+               addr = fix_to_virt(idx);
+               pte = bt_ioremap_pte(addr);
+               if (*pte & _PAGE_PRESENT) {
+                       phys = *pte & PAGE_MASK;
+                       set_fixmap(idx, phys);
+               }
+       }
+}
+
+static void __init __bt_set_fixmap(enum fixed_addresses idx,
+                                  unsigned long phys, pgprot_t flags)
+{
+       unsigned long *pte, addr = __fix_to_virt(idx);
+
+       if (idx >= __end_of_fixed_addresses) {
+               BUG();
+               return;
+       }
+       pte = bt_ioremap_pte(addr);
+       if (pgprot_val(flags))
+               *pte = (phys & PAGE_MASK) | pgprot_val(flags);
+       else
+               *pte = 0;
+       __flush_tlb_one(addr);
+}
+
+static inline void __init bt_set_fixmap(enum fixed_addresses idx,
+                                       unsigned long phys)
+{
+       if (after_paging_init)
+               set_fixmap(idx, phys);
+       else
+               __bt_set_fixmap(idx, phys, PAGE_KERNEL);
+}
+
+static inline void __init bt_clear_fixmap(enum fixed_addresses idx)
+{
+       if (after_paging_init)
+               clear_fixmap(idx);
+       else
+               __bt_set_fixmap(idx, 0, __pgprot(0));
+}
+
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 {
        unsigned long offset, last_addr;
@@ -244,7 +327,7 @@ void __init *bt_ioremap(unsigned long ph
         */
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
-               set_fixmap(idx, phys_addr);
+               bt_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
@@ -267,7 +350,7 @@ void __init bt_iounmap(void *addr, unsig
 
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
-               clear_fixmap(idx);
+               bt_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -423,9 +423,11 @@ static void __init pagetable_init (void)
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
+       bt_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
+       bt_ioremap_reset();
 
        permanent_kmaps_init(pgd_base);
 
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -130,6 +130,9 @@ extern void iounmap(volatile void __iome
  * mappings, before the real ioremap() is functional.
  * A boot-time mapping is currently limited to at most 16 pages.
  */
+extern void bt_ioremap_init(void);
+extern void bt_ioremap_clear(void);
+extern void bt_ioremap_reset(void);
 extern void *bt_ioremap(unsigned long offset, unsigned long size);
 extern void bt_iounmap(void *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -624,6 +624,7 @@ void __init setup_arch(char **cmdline_p)
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
        pre_setup_arch_hook();
        early_cpu_init();
+       bt_ioremap_init();
 
        efi_check_bios_type();
 #ifdef CONFIG_EFI
