To allow a gradual conversion of x86 over to struct boot_module, start by replacing all references to struct mod with accesses through the early_mod element of struct boot_module. This serves two purposes: first, it enables the incremental transition from struct mod fields to struct boot_module fields; second, it allows function definitions to be converted from taking struct mod parameters to accepting struct boot_module ones as the fields they access are transitioned.
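
For reference, the rough shape of the structures involved is sketched below; only the members this patch touches are shown, and the exact types and array bound are assumptions rather than the real definitions:

  /* Sketch only: abridged, assumed shape of the boot info structures. */
  struct boot_module {
      /* Underlying multiboot module entry (mod_start/mod_end/reserved). */
      module_t *early_mod;
      /* bzimage decompression headroom for the dom0 kernel image. */
      unsigned long headroom;
  };

  struct boot_info {
      const char *boot_loader_name;
      unsigned int nr_mods;
      /* nr_mods loader-provided entries, plus one slot used for Xen itself. */
      struct boot_module mods[/* bound elided */];
  };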
Signed-off-by: Daniel P. Smith <dpsm...@apertussolutions.com>
---
 xen/arch/x86/setup.c | 61 ++++++++++++++++++++++++--------------------
 1 file changed, 34 insertions(+), 27 deletions(-)

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index fd6cc7fac907..82a4375683d2 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1360,15 +1360,15 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
         set_kexec_crash_area_size((u64)nr_pages << PAGE_SHIFT);
     kexec_reserve_area();
 
-    initial_images = mod;
+    initial_images = boot_info->mods[0].early_mod;
 
     for ( i = 0; !efi_enabled(EFI_LOADER) && i < boot_info->nr_mods; i++ )
     {
-        if ( mod[i].mod_start & (PAGE_SIZE - 1) )
+        if ( boot_info->mods[i].early_mod->mod_start & (PAGE_SIZE - 1) )
             panic("Bootloader didn't honor module alignment request\n");
 
-        mod[i].mod_end -= mod[i].mod_start;
-        mod[i].mod_start >>= PAGE_SHIFT;
-        mod[i].reserved = 0;
+        boot_info->mods[i].early_mod->mod_end -= boot_info->mods[i].early_mod->mod_start;
+        boot_info->mods[i].early_mod->mod_start >>= PAGE_SHIFT;
+        boot_info->mods[i].early_mod->reserved = 0;
     }
     /*
@@ -1379,6 +1379,7 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
 
     if ( xen_phys_start )
     {
+        int idx = boot_info->nr_mods;
         relocated = true;
 
         /*
@@ -1386,8 +1387,8 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
          * respective reserve_e820_ram() invocation below. No need to
         * query efi_boot_mem_unused() here, though.
          */
-        mod[boot_info->nr_mods].mod_start = virt_to_mfn(_stext);
-        mod[boot_info->nr_mods].mod_end = __2M_rwdata_end - _stext;
+        boot_info->mods[idx].early_mod->mod_start = virt_to_mfn(_stext);
+        boot_info->mods[idx].early_mod->mod_end = __2M_rwdata_end - _stext;
     }
 
     boot_info->mods[0].headroom = bzimage_headroom(
@@ -1480,9 +1481,9 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
             struct boot_module *bm = &boot_info->mods[j];
             unsigned long size;
 
-            size = PAGE_ALIGN(bm->headroom + mod[j].mod_end);
+            size = PAGE_ALIGN(bm->headroom + bm->early_mod->mod_end);
 
-            if ( mod[j].reserved )
+            if ( boot_info->mods[j].early_mod->reserved )
                 continue;
 
             /* Don't overlap with other modules (or Xen itself). */
@@ -1494,14 +1495,14 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
 
             if ( s < end &&
                  (bm->headroom ||
-                  ((end - size) >> PAGE_SHIFT) > mod[j].mod_start) )
+                  ((end - size) >> PAGE_SHIFT) > bm->early_mod->mod_start) )
             {
                 move_memory(end - size + bm->headroom,
-                            (uint64_t)mod[j].mod_start << PAGE_SHIFT,
-                            mod[j].mod_end);
-                mod[j].mod_start = (end - size) >> PAGE_SHIFT;
-                mod[j].mod_end += bm->headroom;
-                mod[j].reserved = 1;
+                            (uint64_t)bm->early_mod->mod_start << PAGE_SHIFT,
+                            bm->early_mod->mod_end);
+                bm->early_mod->mod_start = (end - size) >> PAGE_SHIFT;
+                bm->early_mod->mod_end += bm->headroom;
+                bm->early_mod->reserved = 1;
             }
         }
 
@@ -1527,13 +1528,15 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
 #endif
     }
 
-    if ( boot_info->mods[0].headroom && !mod->reserved )
+    if ( boot_info->mods[0].headroom && !boot_info->mods[0].early_mod->reserved )
         panic("Not enough memory to relocate the dom0 kernel image\n");
     for ( i = 0; i < boot_info->nr_mods; ++i )
     {
-        uint64_t s = (uint64_t)mod[i].mod_start << PAGE_SHIFT;
+        uint64_t s = (uint64_t)boot_info->mods[i].early_mod->mod_start
+                     << PAGE_SHIFT;
 
-        reserve_e820_ram(&boot_e820, s, s + PAGE_ALIGN(mod[i].mod_end));
+        reserve_e820_ram(&boot_e820, s,
+                         s + PAGE_ALIGN(boot_info->mods[i].early_mod->mod_end));
     }
 
     if ( !xen_phys_start )
@@ -1611,8 +1614,9 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
             map_e = boot_e820.map[j].addr + boot_e820.map[j].size;
         for ( j = 0; j < boot_info->nr_mods; ++j )
         {
-            uint64_t end = pfn_to_paddr(mod[j].mod_start) +
-                           mod[j].mod_end;
+            uint64_t end = pfn_to_paddr(
+                boot_info->mods[j].early_mod->mod_start) +
+                           boot_info->mods[j].early_mod->mod_end;
 
             if ( map_e < end )
                 map_e = end;
@@ -1686,11 +1690,13 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
 
     for ( i = 0; i < boot_info->nr_mods; ++i )
     {
-        set_pdx_range(mod[i].mod_start,
-                      mod[i].mod_start + PFN_UP(mod[i].mod_end));
-        map_pages_to_xen((unsigned long)mfn_to_virt(mod[i].mod_start),
-                         _mfn(mod[i].mod_start),
-                         PFN_UP(mod[i].mod_end), PAGE_HYPERVISOR);
+        set_pdx_range(boot_info->mods[i].early_mod->mod_start,
+                      boot_info->mods[i].early_mod->mod_start +
+                      PFN_UP(boot_info->mods[i].early_mod->mod_end));
+        map_pages_to_xen(
+            (unsigned long)mfn_to_virt(boot_info->mods[i].early_mod->mod_start),
+            _mfn(boot_info->mods[i].early_mod->mod_start),
+            PFN_UP(boot_info->mods[i].early_mod->mod_end), PAGE_HYPERVISOR);
     }
 
 #ifdef CONFIG_KEXEC
@@ -2081,8 +2087,9 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
      * We're going to setup domain0 using the module(s) that we stashed safely
      * above our heap. The second module, if present, is an initrd ramdisk.
      */
-    dom0 = create_dom0(mod, boot_info->mods[0].headroom,
-                       initrdidx < boot_info->nr_mods ? mod + initrdidx : NULL,
+    dom0 = create_dom0(boot_info->mods[0].early_mod, boot_info->mods[0].headroom,
+                       initrdidx < boot_info->nr_mods ?
+                       boot_info->mods[initrdidx].early_mod : NULL,
                        kextra, boot_info->boot_loader_name);
     if ( !dom0 )
         panic("Could not set up DOM0 guest OS\n");
-- 
2.30.2