On 9/25/25 12:30, YiFei Zhu wrote:
A malicious kernel may control the instruction pointer in SMM in a
multi-processor VM by sending a sequence of IPIs via APIC:

CPU0                    CPU1
IPI(CPU1, MODE_INIT)
                        x86_cpu_exec_reset()
                        apic_init_reset()
                        s->wait_for_sipi = true
IPI(CPU1, MODE_SMI)
                        do_smm_enter()
                        env->hflags |= HF_SMM_MASK;
IPI(CPU1, MODE_STARTUP, vector)
                        do_cpu_sipi()
                        apic_sipi()
                        /* s->wait_for_sipi check passes */
                        cpu_x86_load_seg_cache_sipi(vector)

A different sequence, SMI INIT SIPI, is also buggy in TCG because
INIT is not blocked or latched during SMM. However, it is not
vulnerable to an instruction pointer control in the same way because
x86_cpu_exec_reset clears env->hflags, exiting SMM.

Thanks for the reports! For this bug, I prefer to have the CPU eat the SIPI instead of latching it:

diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 6d7859640c2..c7680338563 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -646,8 +646,6 @@ void apic_sipi(DeviceState *dev)
 {
     APICCommonState *s = APIC(dev);

-    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
-
     if (!s->wait_for_sipi)
         return;
     cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
diff --git a/target/i386/helper.c b/target/i386/helper.c
index 651041ccfa6..a96834c4457 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -621,6 +621,9 @@ void do_cpu_init(X86CPU *cpu)

 void do_cpu_sipi(X86CPU *cpu)
 {
+    if (cpu->env.hflags & HF_SMM_MASK) {
+        return;
+    }
     apic_sipi(cpu->apic_state);
 }

diff --git a/target/i386/tcg/system/seg_helper.c b/target/i386/tcg/system/seg_helper.c
index 38072e51d72..8c7856be81e 100644
--- a/target/i386/tcg/system/seg_helper.c
+++ b/target/i386/tcg/system/seg_helper.c
@@ -182,6 +182,7 @@ bool x86_cpu_exec_interrupt(
         apic_poll_irq(cpu->apic_state);
         break;
     case CPU_INTERRUPT_SIPI:
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_SIPI);
         do_cpu_sipi(cpu);
         break;
     case CPU_INTERRUPT_SMI:


Fixing INIT is harder, because it requires splitting CPU_INTERRUPT_INIT and CPU_INTERRUPT_RESET, but I'll take a look.

Paolo

Fixes: a9bad65d2c1f ("target-i386: wake up processors that receive an SMI")
Signed-off-by: YiFei Zhu <[email protected]>
---
  target/i386/cpu.c | 3 ++-
  1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 6d85149e6e..697cc4e63b 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -9762,7 +9762,8 @@ int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
      if (interrupt_request & CPU_INTERRUPT_POLL) {
          return CPU_INTERRUPT_POLL;
      }
-    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+    if ((interrupt_request & CPU_INTERRUPT_SIPI) &&
+        !(env->hflags & HF_SMM_MASK)) {
          return CPU_INTERRUPT_SIPI;
      }



Reply via email to