Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package xen for openSUSE:Factory checked in 
at 2025-09-11 14:37:24
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
 and      /work/SRC/openSUSE:Factory/.xen.new.1977 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "xen"

Thu Sep 11 14:37:24 2025 rev:366 rq:1303382 version:4.20.1_04

Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes  2025-07-11 21:29:30.303546621 
+0200
+++ /work/SRC/openSUSE:Factory/.xen.new.1977/xen.changes        2025-09-11 
14:37:34.027299026 +0200
@@ -1,0 +2,22 @@
+Wed Sep  3 13:50:00 CEST 2025 - jbeul...@suse.com
+
+- Upstream bug fixes (bsc#1027519)
+  687a40ac-x86-C6-eoi_errata-include-NEHALEM_EX.patch
+  68931694-x86-HPET-defer-LAPIC-EOI.patch
+  689b0c0c-EFI-cond-FreePages.patch
+  68a2e770-x86-mkelf32-pad-segment-to-2Mb.patch
+  68a2e7c8-x86-HVM-ioreq-inverted-condition.patch
+  68a6ed85-x86-setup-MMCFG-ahead-of-IOMMU.patch
+  68ac5f69-x86-adjustments-to-intel_init_ppin.patch
+
+-------------------------------------------------------------------
+Fri Aug 29 13:29:53 MDT 2025 - carn...@suse.com
+
+- bsc#1248807 - VUL-0: CVE-2025-27466, CVE-2025-58142,
  CVE-2025-58143: xen: Multiple vulnerabilities in the Viridian
+  interface (XSA-472)
+  xsa472-1.patch
+  xsa472-2.patch
+  xsa472-3.patch
+
+-------------------------------------------------------------------
@@ -6,2 +28,2 @@
-- bsc#1246112 - VUL-0: xen: More AMD transient execution attacks
-  (XSA-471)
+- bsc#1246112, bsc#1238896 - VUL-0: xen: More AMD transient
+  execution attack (CVE-2024-36350, CVE-2024-36357, XSA-471)

New:
----
  687a40ac-x86-C6-eoi_errata-include-NEHALEM_EX.patch
  68931694-x86-HPET-defer-LAPIC-EOI.patch
  689b0c0c-EFI-cond-FreePages.patch
  68a2e770-x86-mkelf32-pad-segment-to-2Mb.patch
  68a2e7c8-x86-HVM-ioreq-inverted-condition.patch
  68a6ed85-x86-setup-MMCFG-ahead-of-IOMMU.patch
  68ac5f69-x86-adjustments-to-intel_init_ppin.patch
  xsa472-1.patch
  xsa472-2.patch
  xsa472-3.patch

----------(New B)----------
  New:- Upstream bug fixes (bsc#1027519)
  687a40ac-x86-C6-eoi_errata-include-NEHALEM_EX.patch
  68931694-x86-HPET-defer-LAPIC-EOI.patch
  New:  687a40ac-x86-C6-eoi_errata-include-NEHALEM_EX.patch
  68931694-x86-HPET-defer-LAPIC-EOI.patch
  689b0c0c-EFI-cond-FreePages.patch
  New:  68931694-x86-HPET-defer-LAPIC-EOI.patch
  689b0c0c-EFI-cond-FreePages.patch
  68a2e770-x86-mkelf32-pad-segment-to-2Mb.patch
  New:  689b0c0c-EFI-cond-FreePages.patch
  68a2e770-x86-mkelf32-pad-segment-to-2Mb.patch
  68a2e7c8-x86-HVM-ioreq-inverted-condition.patch
  New:  68a2e770-x86-mkelf32-pad-segment-to-2Mb.patch
  68a2e7c8-x86-HVM-ioreq-inverted-condition.patch
  68a6ed85-x86-setup-MMCFG-ahead-of-IOMMU.patch
  New:  68a2e7c8-x86-HVM-ioreq-inverted-condition.patch
  68a6ed85-x86-setup-MMCFG-ahead-of-IOMMU.patch
  68ac5f69-x86-adjustments-to-intel_init_ppin.patch
  New:  68a6ed85-x86-setup-MMCFG-ahead-of-IOMMU.patch
  68ac5f69-x86-adjustments-to-intel_init_ppin.patch
  New:  interface (XSA-472)
  xsa472-1.patch
  xsa472-2.patch
  New:  xsa472-1.patch
  xsa472-2.patch
  xsa472-3.patch
  New:  xsa472-2.patch
  xsa472-3.patch
----------(New E)----------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.ZYiuxP/_old  2025-09-11 14:37:35.535362742 +0200
+++ /var/tmp/diff_new_pack.ZYiuxP/_new  2025-09-11 14:37:35.539362912 +0200
@@ -125,7 +125,7 @@
 BuildRequires:  python-rpm-macros
 Provides:       installhint(reboot-needed)
 
-Version:        4.20.1_02
+Version:        4.20.1_04
 Release:        0
 Summary:        Xen Virtualization: Hypervisor (aka VMM aka Microkernel)
 License:        GPL-2.0-only
@@ -160,7 +160,17 @@
 # For xen-libs
 Source99:       baselibs.conf
 # Upstream patches
+Patch1:         687a40ac-x86-C6-eoi_errata-include-NEHALEM_EX.patch
+Patch2:         68931694-x86-HPET-defer-LAPIC-EOI.patch
+Patch3:         689b0c0c-EFI-cond-FreePages.patch
+Patch4:         68a2e770-x86-mkelf32-pad-segment-to-2Mb.patch
+Patch5:         68a2e7c8-x86-HVM-ioreq-inverted-condition.patch
+Patch6:         68a6ed85-x86-setup-MMCFG-ahead-of-IOMMU.patch
+Patch7:         68ac5f69-x86-adjustments-to-intel_init_ppin.patch
 # EMBARGOED security fixes
+Patch101:       xsa472-1.patch
+Patch102:       xsa472-2.patch
+Patch103:       xsa472-3.patch
 # Our platform specific patches
 Patch400:       xen-destdir.patch
 Patch401:       vif-bridge-no-iptables.patch

++++++ 687a40ac-x86-C6-eoi_errata-include-NEHALEM_EX.patch ++++++
# Commit d225205a0b85b748151ebd880ee29dad55cb0a15
# Date 2025-07-18 13:40:12 +0100
# Author Andrew Cooper <andrew.coop...@citrix.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
x86/idle: Fix the C6 eoi_errata[] list to include NEHALEM_EX

NEHALEM_EX is affected by the erratum too.

Change the comment to be the full erratum text, not an interpretation of it.

Fixes: 95807bcae47e ("C6 state with EOI issue fix for some Intel processors")
Reported-by: Jan Beulich <jbeul...@suse.com>
Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>

--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -584,18 +584,24 @@ bool errata_c6_workaround(void)
     {
 #define INTEL_FAM6_MODEL(m) { X86_VENDOR_INTEL, 6, m, X86_FEATURE_ALWAYS }
         /*
-         * Errata AAJ72: EOI Transaction May Not be Sent if Software Enters
-         * Core C6 During an Interrupt Service Routine"
+         * Errata AAJ72, etc: EOI Transaction May Not be Sent if Software
+         * Enters Core C6 During an Interrupt Service Routine
          *
-         * There was an errata with some Core i7 processors that an EOI
-         * transaction may not be sent if software enters core C6 during an
-         * interrupt service routine. So we don't enter deep Cx state if
-         * there is an EOI pending.
+         * If core C6 is entered after the start of an interrupt service
+         * routine but before a write to the APIC EOI (End of Interrupt)
+         * register, and the core is woken up by an event other than a fixed
+         * interrupt source the core may drop the EOI transaction the next
+         * time APIC EOI register is written and further interrupts from the
+         * same or lower priority level will be blocked.
+         *
+         * Software should check the ISR register and if any interrupts are in
+         * service only enter C1.
          */
         static const struct x86_cpu_id eoi_errata[] = {
-            INTEL_FAM6_MODEL(0x1a),
+            INTEL_FAM6_MODEL(0x1a), /* AAJ72 */
             INTEL_FAM6_MODEL(0x1e),
             INTEL_FAM6_MODEL(0x1f),
+            INTEL_FAM6_MODEL(0x2e), /* BA106 */
             INTEL_FAM6_MODEL(0x25),
             INTEL_FAM6_MODEL(0x2c),
             INTEL_FAM6_MODEL(0x2f),

++++++ 68931694-x86-HPET-defer-LAPIC-EOI.patch ++++++
# Commit 1db7829e56578970c1037c4dd1c27f939be4c054
# Date 2025-08-06 10:47:16 +0200
# Author Roger Pau Monne <roger....@citrix.com>
# Committer Roger Pau Monne <roger....@citrix.com>
x86/hpet: do local APIC EOI after interrupt processing

The current logic in the HPET interrupt ->ack() hook will perform a local
APIC EOI ahead of enabling interrupts, possibly leading to recursion in the
interrupt handler.

Fix this by doing the local APIC EOI strictly after the window with
interrupt enabled, as that prevents the recursion, and would only allow for
interrupts with higher priority to be serviced.

Use the generic ack_nonmaskable_msi_irq() and end_nonmaskable_irq()
functions, removing the need for hpet_msi_ack().

Reported-by: Andrew Cooper <andrew.coop...@citrix.com>
Fixes: 3ba523ff957c ('CPUIDLE: enable MSI capable HPET for timer broadcast')
Signed-off-by: Roger Pau Monné <roger....@citrix.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -299,13 +299,6 @@ static unsigned int cf_check hpet_msi_st
 
 #define hpet_msi_shutdown hpet_msi_mask
 
-static void cf_check hpet_msi_ack(struct irq_desc *desc)
-{
-    irq_complete_move(desc);
-    move_native_irq(desc);
-    ack_APIC_irq();
-}
-
 static void cf_check hpet_msi_set_affinity(
     struct irq_desc *desc, const cpumask_t *mask)
 {
@@ -333,7 +326,8 @@ static hw_irq_controller hpet_msi_type =
     .shutdown   = hpet_msi_shutdown,
     .enable        = hpet_msi_unmask,
     .disable    = hpet_msi_mask,
-    .ack        = hpet_msi_ack,
+    .ack        = ack_nonmaskable_msi_irq,
+    .end        = end_nonmaskable_irq,
     .set_affinity   = hpet_msi_set_affinity,
 };
 

++++++ 689b0c0c-EFI-cond-FreePages.patch ++++++
# Commit 7fddedd530561797d8ce5fba78e83cc9cc6b58dd
# Date 2025-08-12 11:40:28 +0200
# Author Ross Lagerwall <ross.lagerw...@citrix.com>
# Committer Jan Beulich <jbeul...@suse.com>
efi: Call FreePages() only if needed

If the config file is builtin, cfg.addr will be zero but Xen
unconditionally calls FreePages() on the address.

Xen may also call FreePages() with a zero address if blexit() is called
after this point since cfg.need_to_free is not set to false.

The UEFI specification does not say whether calling FreePages() with a
zero address is allowed so let's be cautious and use cfg.need_to_free
properly.

Fixes: 8a71d50ed40b ("efi: Enable booting unified hypervisor/kernel/initrd 
images")
Fixes: 04be2c3a0678 ("efi/boot.c: add file.need_to_free")
Signed-off-by: Ross Lagerwall <ross.lagerw...@citrix.com>
Acked-by: Andrew Cooper <andrew.coop...@citrix.com>
Acked-by: Daniel P. Smith <dpsm...@apertussolutions.com>

--- a/xen/common/efi/boot.c
+++ b/xen/common/efi/boot.c
@@ -1484,8 +1484,11 @@ void EFIAPI __init noreturn efi_start(EF
 
         efi_arch_cfg_file_late(loaded_image, dir_handle, section.s);
 
-        efi_bs->FreePages(cfg.addr, PFN_UP(cfg.size));
-        cfg.addr = 0;
+        if ( cfg.need_to_free )
+        {
+            efi_bs->FreePages(cfg.addr, PFN_UP(cfg.size));
+            cfg.need_to_free = false;
+        }
 
         dir_handle->Close(dir_handle);
 

++++++ 68a2e770-x86-mkelf32-pad-segment-to-2Mb.patch ++++++
# Commit 4fb075201f54b16c0800af0107162461a93065fb
# Date 2025-08-18 10:42:24 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/mkelf32: pad load segment to 2Mb boundary

In order to legitimately set up initial mappings past _end[], we need
to make sure that the entire mapped range is inside a RAM region.
Therefore we need to inform the bootloader (or alike) that our allocated
size is larger than just the next SECTION_ALIGN-ed boundary past _end[].

This allows dropping a command line option from the tool, which was
introduced to work around a supposed linker bug, when the problem was
really Xen's.

While adjusting adjacent code, correct the argc check to also cover the
case correctly when --notes was passed.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Acked-by: Roger Pau Monné <roger....@citrix.com>

--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -126,8 +126,7 @@ orphan-handling-$(call ld-option,--orpha
 
 $(TARGET): TMP = $(dot-target).elf32
 $(TARGET): $(TARGET)-syms $(efi-y) $(obj)/boot/mkelf32
-       $(obj)/boot/mkelf32 $(notes_phdrs) $(TARGET)-syms $(TMP) 
$(XEN_IMG_OFFSET) \
-                      `$(NM) $(TARGET)-syms | sed -ne 's/^\([^ ]*\) . 
__2M_rwdata_end$$/0x\1/p'`
+       $(obj)/boot/mkelf32 $(notes_phdrs) $(TARGET)-syms $(TMP) 
$(XEN_IMG_OFFSET)
        od -t x4 -N 8192 $(TMP)  | grep 1badb002 > /dev/null || \
                { echo "No Multiboot1 header found" >&2; false; }
        od -t x4 -N 32768 $(TMP) | grep e85250d6 > /dev/null || \
--- a/xen/arch/x86/boot/mkelf32.c
+++ b/xen/arch/x86/boot/mkelf32.c
@@ -248,7 +248,6 @@ static void do_read(int fd, void *data,
 
 int main(int argc, char **argv)
 {
-    uint64_t   final_exec_addr;
     uint32_t   loadbase, dat_siz, mem_siz, note_base, note_sz, offset;
     char      *inimage, *outimage;
     int        infd, outfd;
@@ -261,22 +260,24 @@ int main(int argc, char **argv)
     Elf64_Ehdr in64_ehdr;
     Elf64_Phdr in64_phdr;
 
-    if ( argc < 5 )
+    if ( argc < 4 )
     {
+    help:
         fprintf(stderr, "Usage: mkelf32 [--notes] <in-image> <out-image> "
-                "<load-base> <final-exec-addr>\n");
+                "<load-base>\n");
         return 1;
     }
 
     if ( !strcmp(argv[1], "--notes") )
     {
+        if ( argc < 5 )
+            goto help;
         i = 2;
         num_phdrs = 2;
     }
     inimage  = argv[i++];
     outimage = argv[i++];
     loadbase = strtoul(argv[i++], NULL, 16);
-    final_exec_addr = strtoull(argv[i++], NULL, 16);
 
     infd = open(inimage, O_RDONLY);
     if ( infd == -1 )
@@ -339,9 +340,12 @@ int main(int argc, char **argv)
     (void)lseek(infd, in64_phdr.p_offset, SEEK_SET);
     dat_siz = (uint32_t)in64_phdr.p_filesz;
 
-    /* Do not use p_memsz: it does not include BSS alignment padding. */
-    /*mem_siz = (uint32_t)in64_phdr.p_memsz;*/
-    mem_siz = (uint32_t)(final_exec_addr - in64_phdr.p_vaddr);
+    /*
+     * We don't pad .bss in the linker script, but during early boot we map
+     * the Xen image using 2M pages.  To avoid running into adjacent non-RAM
+     * regions, pad the segment to the next 2M boundary.
+     */
+    mem_siz = ((uint32_t)in64_phdr.p_memsz + (1U << 20) - 1) & (-1U << 20);
 
     note_sz = note_base = offset = 0;
     if ( num_phdrs > 1 )

++++++ 68a2e7c8-x86-HVM-ioreq-inverted-condition.patch ++++++
# Commit 282ed258a59195698a81ab4408a17336eb6ea7ed
# Date 2025-08-18 10:43:52 +0200
# Author Oleksandr Tyshchenko <oleksandr_tyshche...@epam.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/hvm/ioreq: Fix condition in hvm_alloc_legacy_ioreq_gfn()

Fix the incorrect condition that causes hvm_alloc_legacy_ioreq_gfn()
to return INVALID_GFN even if the HVM param was installed properly by
the toolstack.

Fixes: 3486f398a3dd (' x86/hvm/ioreq: allow ioreq servers to use 
HVM_PARAM_[BUF]IOREQ_PFN')
Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshche...@epam.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>

--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -63,7 +63,7 @@ static gfn_t hvm_alloc_legacy_ioreq_gfn(
 
     for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
     {
-        if ( !test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask) )
+        if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask) )
             return _gfn(d->arch.hvm.params[i]);
     }
 

++++++ 68a6ed85-x86-setup-MMCFG-ahead-of-IOMMU.patch ++++++
# Commit c292772b4945d3a264a61c3c1920f1aebd17998b
# Date 2025-08-21 11:57:25 +0200
# Author Roger Pau Monne <roger....@citrix.com>
# Committer Roger Pau Monne <roger....@citrix.com>
x86/iommu: setup MMCFG ahead of IOMMU

Otherwise the PCI accesses to segments different than the first one done by
the IOMMU initialization code would silently fail by returning all ones.

Introduce a new helper, called pci_setup(), and move both the creation of
PCI segment 0 internal data structures, plus the parsing of ACPI MMCFG
table to it.  This moves acpi_mmcfg_init() slightly earlier from
acpi_boot_init() into pci_setup().

Note that further work will be needed to support systems where access
methods to segments different than 0 is not discoverable by Xen.

Fixes: 3950f2485bbc ('x86/x2APIC: defer probe until after IOMMU ACPI table 
parsing')
Signed-off-by: Roger Pau Monné <roger....@citrix.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/acpi/boot.c
+++ b/xen/arch/x86/acpi/boot.c
@@ -748,8 +748,6 @@ int __init acpi_boot_init(void)
 
        acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
 
-       acpi_mmcfg_init();
-
        erst_init();
 
        acpi_hest_init();
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1898,6 +1898,12 @@ void asmlinkage __init noreturn __start_
     setup_system_domains();
 
     /*
+     * Initialize PCI (create segment 0, setup MMCFG access) ahead of IOMMU
+     * setup, as devices in segment > 0 must also be discoverable.
+     */
+    acpi_mmcfg_init();
+
+    /*
      * IOMMU-related ACPI table parsing has to happen before APIC probing, for
      * check_x2apic_preenabled() to be able to observe respective findings, in
      * particular iommu_intremap having got turned off.
--- a/xen/arch/x86/x86_64/mmconfig-shared.c
+++ b/xen/arch/x86/x86_64/mmconfig-shared.c
@@ -404,6 +404,9 @@ void __init acpi_mmcfg_init(void)
 
     pci_segments_init();
 
+    if ( acpi_disabled )
+        return;
+
     /* MMCONFIG disabled */
     if ((pci_probe & PCI_PROBE_MMCONF) == 0)
         return;

++++++ 68ac5f69-x86-adjustments-to-intel_init_ppin.patch ++++++
# Commit 49e6eb744eba01f5d4b4cfce49154c0802d4f5c6
# Date 2025-08-25 14:04:41 +0100
# Author Andrew Cooper <andrew.coop...@citrix.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
x86/mce: Adjustments to intel_init_ppin()

There's no family check gating intel_init_ppin(), making it incorrect to
use on non Fam6 CPUs.

Emerald Rapids is the final CPU to have PPIN but lack the architectural
enumeration, so include it too.

Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>

--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -10,6 +10,7 @@
 #include <xen/cpu.h>
 #include <asm/processor.h>
 #include <public/sysctl.h>
+#include <asm/intel-family.h>
 #include <asm/system.h>
 #include <asm/msr.h>
 #include <asm/p2m.h>
@@ -859,7 +860,7 @@ static void intel_init_ppin(const struct
      * other purposes.  Despite the late addition of a CPUID bit (rendering
      * the MSR architectural), keep using the same detection logic there.
      */
-    switch ( c->x86_model )
+    switch ( c->x86 == 6 ? c->x86_model : 0 )
     {
         uint64_t val;
 
@@ -870,14 +871,15 @@ static void intel_init_ppin(const struct
             return;
         }
         fallthrough;
-    case 0x3e: /* IvyBridge X */
-    case 0x3f: /* Haswell X */
-    case 0x4f: /* Broadwell X */
-    case 0x55: /* Skylake X */
-    case 0x56: /* Broadwell Xeon D */
-    case 0x6a: /* Icelake X */
-    case 0x6c: /* Icelake D */
-    case 0x8f: /* Sapphire Rapids X */
+    case INTEL_FAM6_IVYBRIDGE_X:
+    case INTEL_FAM6_HASWELL_X:
+    case INTEL_FAM6_BROADWELL_X:
+    case INTEL_FAM6_BROADWELL_D:
+    case INTEL_FAM6_SKYLAKE_X:
+    case INTEL_FAM6_ICELAKE_X:
+    case INTEL_FAM6_ICELAKE_D:
+    case INTEL_FAM6_SAPPHIRERAPIDS_X:
+    case INTEL_FAM6_EMERALDRAPIDS_X:
 
         if ( (c != &boot_cpu_data && !ppin_msr) ||
              rdmsr_safe(MSR_PPIN_CTL, val) )

++++++ xsa472-1.patch ++++++
>From 262114a440bf7c32fd6d215e243b3eaebdd6d7cd Mon Sep 17 00:00:00 2001
From: Roger Pau Monne <roger....@citrix.com>
Date: Thu, 10 Jul 2025 15:51:40 +0200
Subject: [PATCH 1/3] x86/viridian: avoid NULL pointer dereference in
 update_reference_tsc()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The function is only called when the MSR has the enabled bit set, but even
then the page might not be mapped because the guest provided gfn is not
suitable.

Prevent a NULL pointer dereference in update_reference_tsc() by checking
whether the page is mapped.

This is CVE-2025-27466 / part of XSA-472.

Fixes: 386b3365221d ('viridian: use viridian_map/unmap_guest_page() for 
reference tsc page')
Signed-off-by: Roger Pau Monné <roger....@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>
---
 xen/arch/x86/hvm/viridian/time.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c
index 137577384f1e..ca6d526f46b7 100644
--- a/xen/arch/x86/hvm/viridian/time.c
+++ b/xen/arch/x86/hvm/viridian/time.c
@@ -26,6 +26,10 @@ static void update_reference_tsc(const struct domain *d, 
bool initialize)
     HV_REFERENCE_TSC_PAGE *p = rt->ptr;
     uint32_t seq;
 
+    /* Reference TSC page might not be mapped even if the MSR is enabled. */
+    if ( !p )
+        return;
+
     if ( initialize )
         clear_page(p);
 
-- 
2.49.0


++++++ xsa472-2.patch ++++++
>From 71c9568e290b51dfd7ab091ac98b272fd0aa0b90 Mon Sep 17 00:00:00 2001
From: Roger Pau Monne <roger....@citrix.com>
Date: Thu, 10 Jul 2025 15:58:51 +0200
Subject: [PATCH 2/3] x86/viridian: avoid NULL pointer dereference in
 viridian_synic_deliver_timer_msg()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The function is called unconditionally, regardless of whether the SIM page
is mapped.  Avoid a NULL pointer dereference in
viridian_synic_deliver_timer_msg() by checking whether the SIM page is
mapped.

This is CVE-2025-58142 / part of XSA-472.

Fixes: 26fba3c85571 ('viridian: add implementation of synthetic timers')
Signed-off-by: Roger Pau Monné <roger....@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>
---
 xen/arch/x86/hvm/viridian/synic.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/xen/arch/x86/hvm/viridian/synic.c 
b/xen/arch/x86/hvm/viridian/synic.c
index c3dc573b003d..e6cba7548f1b 100644
--- a/xen/arch/x86/hvm/viridian/synic.c
+++ b/xen/arch/x86/hvm/viridian/synic.c
@@ -338,6 +338,10 @@ bool viridian_synic_deliver_timer_msg(struct vcpu *v, 
unsigned int sintx,
         .DeliveryTime = delivery,
     };
 
+    /* Don't assume SIM page to be mapped. */
+    if ( !msg )
+        return false;
+
     /*
      * To avoid using an atomic test-and-set, and barrier before calling
      * vlapic_set_irq(), this function must be called in context of the
-- 
2.49.0


++++++ xsa472-3.patch ++++++
>From aed4cfd64d178aee677a8790440addda03678cd6 Mon Sep 17 00:00:00 2001
From: Roger Pau Monne <roger....@citrix.com>
Date: Thu, 3 Jul 2025 13:09:03 +0200
Subject: [PATCH 3/3] x86/viridian: protect concurrent modification of the
 reference TSC page
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The reference TSC page is shared between all vCPUs, and the data stored in
the domain struct.  However the handlers to set and clear it are not safe
against concurrent accesses.  It's possible for two (or more) vCPUs to call
HV_X64_MSR_REFERENCE_TSC at the same time and cause the in-use reference
TSC page to be freed, while still being on the p2m.  This creates an
information leak, where the page can end up mapped in another domain while
still being part of the original domain p2m.

It's also possible to underflow the reference counter, as multiple
concurrent writes to HV_X64_MSR_REFERENCE_TSC can create an imbalance on
the number of put_page_and_type() calls.

Introduce a lock to protect the reference TSC domain field, thus
serializing concurrent vCPU accesses.

This is CVE-2025-58143 / part of XSA-472.

Fixes: 386b3365221d ('viridian: use viridian_map/unmap_guest_page() for 
reference tsc page')
Signed-off-by: Roger Pau Monné <roger....@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>
---
 xen/arch/x86/hvm/viridian/time.c        | 4 ++++
 xen/arch/x86/hvm/viridian/viridian.c    | 2 ++
 xen/arch/x86/include/asm/hvm/viridian.h | 1 +
 3 files changed, 7 insertions(+)

diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c
index ca6d526f46b7..9311858d63c0 100644
--- a/xen/arch/x86/hvm/viridian/time.c
+++ b/xen/arch/x86/hvm/viridian/time.c
@@ -108,8 +108,10 @@ static void time_ref_count_thaw(const struct domain *d)
 
     trc->off = (int64_t)trc->val - trc_val(d, 0);
 
+    spin_lock(&vd->lock);
     if ( vd->reference_tsc.msr.enabled )
         update_reference_tsc(d, false);
+    spin_unlock(&vd->lock);
 }
 
 static uint64_t time_ref_count(const struct domain *d)
@@ -331,6 +333,7 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, 
uint64_t val)
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
             return X86EMUL_EXCEPTION;
 
+        spin_lock(&vd->lock);
         viridian_unmap_guest_page(&vd->reference_tsc);
         vd->reference_tsc.msr.raw = val;
         viridian_dump_guest_page(v, "REFERENCE_TSC", &vd->reference_tsc);
@@ -339,6 +342,7 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, 
uint64_t val)
             viridian_map_guest_page(d, &vd->reference_tsc);
             update_reference_tsc(d, true);
         }
+        spin_unlock(&vd->lock);
         break;
 
     case HV_X64_MSR_TIME_REF_COUNT:
diff --git a/xen/arch/x86/hvm/viridian/viridian.c 
b/xen/arch/x86/hvm/viridian/viridian.c
index 7ea6c9016894..c0be24bd2210 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -494,6 +494,8 @@ int viridian_domain_init(struct domain *d)
     if ( !d->arch.hvm.viridian )
         return -ENOMEM;
 
+    spin_lock_init(&d->arch.hvm.viridian->lock);
+
     rc = viridian_synic_domain_init(d);
     if ( rc )
         goto fail;
diff --git a/xen/arch/x86/include/asm/hvm/viridian.h 
b/xen/arch/x86/include/asm/hvm/viridian.h
index 4c8ff6e80b6f..47c9d13841ac 100644
--- a/xen/arch/x86/include/asm/hvm/viridian.h
+++ b/xen/arch/x86/include/asm/hvm/viridian.h
@@ -71,6 +71,7 @@ struct viridian_domain
     DECLARE_BITMAP(hypercall_flags, _HCALL_nr);
     struct viridian_time_ref_count time_ref_count;
     struct viridian_page reference_tsc;
+    spinlock_t lock;
 };
 
 void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
-- 
2.49.0

Reply via email to