From: Borislav Petkov <b...@suse.de>

Use a single version for both bitnesses by adding a helper which does
the actual container finding and parsing and which can be used on any
CPU - BSP or AP. This streamlines the paths further.

Signed-off-by: Borislav Petkov <b...@suse.de>
---
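
Note: the unified load_ucode_amd_ap() below dispatches on bitness
because, on 32-bit, APs run this path before paging is enabled, so
kernel globals must be accessed through their physical addresses. A
minimal sketch of that pattern - fixup_ptr() is an illustrative name
only, not part of this patch:

	/* Return a pointer usable in the current paging state. */
	static void *fixup_ptr(void *ptr)
	{
		/* 32-bit early loading runs before paging is on. */
		if (IS_ENABLED(CONFIG_X86_32))
			return (void *)__pa_nodebug(ptr);

		return ptr;
	}
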
 arch/x86/kernel/cpu/microcode/amd.c | 84 ++++++++++++++-----------------------
 1 file changed, 31 insertions(+), 53 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 782e01311e4e..f1a61f181c9a 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -271,7 +271,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
        struct ucode_cpu_info *uci;
        struct cpio_data cp;
@@ -291,95 +291,74 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
        if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
                cp = find_microcode_in_initrd(path, use_pa);
 
-       if (!(cp.data && cp.size))
-               return;
-
        /* Needed in load_microcode_amd() */
        uci->cpu_sig.sig = cpuid_1_eax;
 
-       apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
+       *ret = cp;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
- * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
- * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
-       struct microcode_amd *mc;
-       struct cpio_data cp;
-
-       mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-       if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-               __apply_microcode_amd(mc);
-               return;
-       }
+       struct cpio_data cp = { };
 
-       if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
-               cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
+       __load_ucode_amd(cpuid_1_eax, &cp);
 
        if (!(cp.data && cp.size))
                return;
 
-       /*
-        * This would set amd_ucode_patch above so that the following APs can
-        * use it directly instead of going down this path again.
-        */
        apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
 }
-#else
+
 void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
        struct equiv_cpu_entry *eq;
        struct microcode_amd *mc;
+       struct cont_desc *desc;
        u16 eq_id;
 
+       if (IS_ENABLED(CONFIG_X86_32)) {
+               mc   = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+               desc = (struct cont_desc *)__pa_nodebug(&cont);
+       } else {
+               mc   = (struct microcode_amd *)amd_ucode_patch;
+               desc = &cont;
+       }
+
        /* First AP hasn't cached it yet, go through the blob. */
-       if (!cont.data) {
-               struct cpio_data cp;
+       if (!desc->data) {
+               struct cpio_data cp = { };
 
-               if (cont.size == -1)
+               if (desc->size == -1)
                        return;
 
 reget:
-               if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
-                       cp = find_microcode_in_initrd(ucode_path, false);
-
-                       if (!(cp.data && cp.size)) {
-                               /*
-                                * Mark it so that other APs do not scan again
-                                * for no real reason and slow down boot
-                                * needlessly.
-                                */
-                               cont.size = -1;
-                               return;
-                       }
+               __load_ucode_amd(cpuid_1_eax, &cp);
+               if (!(cp.data && cp.size)) {
+                       /*
+                        * Mark it so that other APs do not scan again for no
+                        * real reason and slow down boot needlessly.
+                        */
+                       desc->size = -1;
+                       return;
                }
 
-               if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
-                       cont.data = NULL;
-                       cont.size = -1;
+               if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, desc)) {
+                       desc->data = NULL;
+                       desc->size = -1;
                        return;
                }
        }
 
-       eq  = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
+       eq  = (struct equiv_cpu_entry *)(desc->data + CONTAINER_HDR_SZ);
 
        eq_id = find_equiv_id(eq, cpuid_1_eax);
        if (!eq_id)
                return;
 
-       if (eq_id == cont.eq_id) {
+       if (eq_id == desc->eq_id) {
                u32 rev, dummy;
 
                microcode_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
-               mc = (struct microcode_amd *)amd_ucode_patch;
-
                if (mc && rev < mc->hdr.patch_id) {
                        if (!__apply_microcode_amd(mc))
                                ucode_new_rev = mc->hdr.patch_id;
@@ -394,7 +373,6 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
                goto reget;
        }
 }
-#endif /* CONFIG_X86_32 */
 
 static enum ucode_state
 load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
-- 
2.11.0
