From: Yazen Ghannam <yazen.ghan...@amd.com>

Carve out the SMCA code in get_block_address() into a separate helper
function.

No functional change.

Signed-off-by: Yazen Ghannam <yazen.ghan...@amd.com>
---
v1->v2:
* New in this series.

 arch/x86/kernel/cpu/mcheck/mce_amd.c | 59 ++++++++++++++++++++----------------
 1 file changed, 33 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 12bc2863a4d6..7c451850d8aa 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -431,6 +431,37 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
        wrmsr(MSR_CU_DEF_ERR, low, high);
 }
 
+static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
+                                 unsigned int block)
+{
+       u32 addr = 0;
+
+       if (smca_get_bank_type(bank) == SMCA_RESERVED)
+               return addr;
+
+       if (!block) {
+               addr = MSR_AMD64_SMCA_MCx_MISC(bank);
+       } else {
+               /*
+                * For SMCA enabled processors, BLKPTR field of the
+                * first MISC register (MCx_MISC0) indicates presence of
+                * additional MISC register set (MISC1-4).
+                */
+               u32 low, high;
+
+       if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
+                       return addr;
+
+               if (!(low & MCI_CONFIG_MCAX))
+                       return addr;
+
+       if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
+                   (low & MASK_BLKPTR_LO))
+                       addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+       }
+       return addr;
+}
+
 static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
                             unsigned int bank, unsigned int block)
 {
@@ -451,32 +482,8 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi
                }
        }
 
-       if (mce_flags.smca) {
-               if (smca_get_bank_type(bank) == SMCA_RESERVED)
-                       return addr;
-
-               if (!block) {
-                       addr = MSR_AMD64_SMCA_MCx_MISC(bank);
-               } else {
-                       /*
-                        * For SMCA enabled processors, BLKPTR field of the
-                        * first MISC register (MCx_MISC0) indicates presence of
-                        * additional MISC register set (MISC1-4).
-                        */
-                       u32 low, high;
-
-                       if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
-                               return addr;
-
-                       if (!(low & MCI_CONFIG_MCAX))
-                               return addr;
-
-                       if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
-                           (low & MASK_BLKPTR_LO))
-                           addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
-               }
-               return addr;
-       }
+       if (mce_flags.smca)
+               return smca_get_block_address(cpu, bank, block);
 
        /* Fall back to method we used for older processors: */
        switch (block) {
-- 
2.14.1

Reply via email to