Hello community,

here is the log from the commit of package xen for openSUSE:Factory checked in 
at 2015-10-14 16:43:18
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
 and      /work/SRC/openSUSE:Factory/.xen.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "xen"

Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes  2015-09-24 07:16:59.000000000 
+0200
+++ /work/SRC/openSUSE:Factory/.xen.new/xen.changes     2015-10-14 
16:43:21.000000000 +0200
@@ -1,0 +2,45 @@
+Tue Oct  6 14:52:30 MDT 2015 - jfeh...@suse.com
+
+- bsc#949138 - Setting vcpu affinity under Xen causes libvirtd
+  abort 
+  54f4985f-libxl-fix-libvirtd-double-free.patch
+
+-------------------------------------------------------------------
+Tue Oct  6 09:04:17 MDT 2015 - carn...@suse.com
+
+- bsc#949046 - Increase %suse_version in SP1 to 1316
+  xen.spec
+- Update README.SUSE detailing dom0 ballooning recommendations
+
+-------------------------------------------------------------------
+Mon Oct  5 09:12:45 MDT 2015 - carn...@suse.com
+
+- bsc#945167 - Running command 'xl pci-assignable-add 03:10.1'
+  secondly show errors
+  55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
+- Upstream patches from Jan
+  55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
+  55f9345b-x86-MSI-fail-if-no-hardware-support.patch
+  5604f239-x86-PV-properly-populate-descriptor-tables.patch
+  5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
+  560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
+  560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
+  560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
+  560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
+  560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch
+
+-------------------------------------------------------------------
+Fri Oct  2 11:31:34 MDT 2015 - mlati...@suse.com
+
+- bsc#941074 - VmError: Device 51728 (vbd) could not be connected.
+  Hotplug scripts not working.
+  hotplug-Linux-block-performance-fix.patch
+
+-------------------------------------------------------------------
+Wed Sep 23 14:56:47 MDT 2015 - carn...@suse.com
+
+- bsc#947165 - VUL-0: CVE-2015-7311: xen: libxl fails to honour
+  readonly flag on disks with qemu-xen (xsa-142)
+  CVE-2015-7311-xsa142.patch
+
+-------------------------------------------------------------------

New:
----
  54f4985f-libxl-fix-libvirtd-double-free.patch
  55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
  55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
  55f9345b-x86-MSI-fail-if-no-hardware-support.patch
  5604f239-x86-PV-properly-populate-descriptor-tables.patch
  5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
  560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
  560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
  560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
  560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
  560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch
  CVE-2015-7311-xsa142.patch
  hotplug-Linux-block-performance-fix.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:25.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:25.000000000 +0200
@@ -1,7 +1,7 @@
 #
 # spec file for package xen
 #
-# Copyright (c) 2015 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2015 SUSE LINUX Products GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -15,7 +15,6 @@
 # Please submit bugfixes or comments via http://bugs.opensuse.org/
 #
 
-
 # needssslcertforbuild
 
 Name:           xen
@@ -46,7 +45,7 @@
 #
 %define max_cpus 4
 %ifarch x86_64
-%if %suse_version == 1315
+%if %suse_version >= 1315
 %define max_cpus 1024
 %else
 %define max_cpus 512
@@ -159,7 +158,7 @@
 %endif
 %endif
 
-Version:        4.5.1_08
+Version:        4.5.1_10
 Release:        0
 Summary:        Xen Virtualization: Hypervisor (aka VMM aka Microkernel)
 License:        GPL-2.0
@@ -202,34 +201,45 @@
 # http://xenbits.xensource.com/ext/xenalyze
 Source20000:    xenalyze.hg.tar.bz2
 # Upstream patches
-Patch1:         
55103616-vm-assist-prepare-for-discontiguous-used-bit-numbers.patch
-Patch2:         551ac326-xentop-add-support-for-qdisk.patch
-Patch3:         
552d0f49-x86-traps-identify-the-vcpu-in-context-when-dumping-regs.patch
-Patch4:         
5548e903-domctl-don-t-truncate-XEN_DOMCTL_max_mem-requests.patch
-Patch5:         5548e95d-x86-allow-to-suppress-M2P-user-mode-exposure.patch
-Patch6:         554cc211-libxl-add-qxl.patch
-Patch7:         
556d973f-unmodified-drivers-tolerate-IRQF_DISABLED-being-undefined.patch
-Patch8:         5576f178-kexec-add-more-pages-to-v1-environment.patch
-Patch9:         
55780be1-x86-EFI-adjust-EFI_MEMORY_WP-handling-for-spec-version-2.5.patch
-Patch10:        558bfaa0-x86-traps-avoid-using-current-too-early.patch
-Patch11:        5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
-Patch12:        559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
-Patch13:        
559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch
-Patch14:        559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch
-Patch15:        
559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch
-Patch16:        559bdde5-pull-in-latest-linux-earlycpio.patch
-Patch17:        
55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch
-Patch18:        55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch
-Patch19:        55a77e4f-dmar-device-scope-mem-leak-fix.patch
-Patch20:        55c1d83d-x86-gdt-Drop-write-only-xalloc-d-array.patch
-Patch21:        55c3232b-x86-mm-Make-hap-shadow-teardown-preemptible.patch
-Patch22:        55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
-Patch23:        
55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch
-Patch24:        
55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
-Patch25:        55e43fd8-x86-NUMA-fix-setup_node.patch
-Patch26:        55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
-Patch27:        
55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
-Patch28:        5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch
+Patch1:         54f4985f-libxl-fix-libvirtd-double-free.patch
+Patch2:         
55103616-vm-assist-prepare-for-discontiguous-used-bit-numbers.patch
+Patch3:         551ac326-xentop-add-support-for-qdisk.patch
+Patch4:         
552d0f49-x86-traps-identify-the-vcpu-in-context-when-dumping-regs.patch
+Patch5:         5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch
+Patch6:         
5548e903-domctl-don-t-truncate-XEN_DOMCTL_max_mem-requests.patch
+Patch7:         5548e95d-x86-allow-to-suppress-M2P-user-mode-exposure.patch
+Patch8:         554cc211-libxl-add-qxl.patch
+Patch9:         
556d973f-unmodified-drivers-tolerate-IRQF_DISABLED-being-undefined.patch
+Patch10:        5576f178-kexec-add-more-pages-to-v1-environment.patch
+Patch11:        
55780be1-x86-EFI-adjust-EFI_MEMORY_WP-handling-for-spec-version-2.5.patch
+Patch12:        558bfaa0-x86-traps-avoid-using-current-too-early.patch
+Patch13:        5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
+Patch14:        559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
+Patch15:        
559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch
+Patch16:        559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch
+Patch17:        
559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch
+Patch18:        559bdde5-pull-in-latest-linux-earlycpio.patch
+Patch19:        
55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch
+Patch20:        55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch
+Patch21:        55a77e4f-dmar-device-scope-mem-leak-fix.patch
+Patch22:        55c1d83d-x86-gdt-Drop-write-only-xalloc-d-array.patch
+Patch23:        55c3232b-x86-mm-Make-hap-shadow-teardown-preemptible.patch
+Patch24:        55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
+Patch25:        
55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch
+Patch26:        
55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
+Patch27:        55e43fd8-x86-NUMA-fix-setup_node.patch
+Patch28:        55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
+Patch29:        
55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
+Patch30:        55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
+Patch31:        
55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
+Patch32:        55f9345b-x86-MSI-fail-if-no-hardware-support.patch
+Patch33:        5604f239-x86-PV-properly-populate-descriptor-tables.patch
+Patch34:        5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
+Patch35:        
560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
+Patch36:        
560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
+Patch37:        
560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
+Patch38:        
560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
+Patch39:        
560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch
 Patch131:       CVE-2015-4106-xsa131-9.patch
 Patch137:       CVE-2015-3259-xsa137.patch
 Patch139:       xsa139-qemuu.patch
@@ -247,6 +257,7 @@
 Patch14015:     xsa140-qemut-5.patch
 Patch14016:     xsa140-qemut-6.patch
 Patch14017:     xsa140-qemut-7.patch
+Patch142:       CVE-2015-7311-xsa142.patch
 # Upstream qemu
 Patch250:       VNC-Support-for-ExtendedKeyEvent-client-message.patch
 Patch251:       0001-net-move-the-tap-buffer-into-TAPState.patch
@@ -279,6 +290,7 @@
 Patch331:       xenpaging.doc.patch
 Patch332:       local_attach_support_for_phy.patch
 Patch333:       xen-c99-fix.patch
+Patch334:       hotplug-Linux-block-performance-fix.patch
 # Qemu traditional
 Patch350:       blktap.patch
 Patch351:       cdrom-removable.patch
@@ -625,6 +637,17 @@
 %patch26 -p1
 %patch27 -p1
 %patch28 -p1
+%patch29 -p1
+%patch30 -p1
+%patch31 -p1
+%patch32 -p1
+%patch33 -p1
+%patch34 -p1
+%patch35 -p1
+%patch36 -p1
+%patch37 -p1
+%patch38 -p1
+%patch39 -p1
 %patch131 -p1
 %patch137 -p1
 %patch139 -p1
@@ -642,6 +665,7 @@
 %patch14015 -p1
 %patch14016 -p1
 %patch14017 -p1
+%patch142 -p1
 # Upstream qemu patches
 %patch250 -p1
 %patch251 -p1
@@ -673,6 +697,7 @@
 %patch331 -p1
 %patch332 -p1
 %patch333 -p1
+%patch334 -p1
 # Qemu traditional
 %patch350 -p1
 %patch351 -p1

++++++ 54f4985f-libxl-fix-libvirtd-double-free.patch ++++++
References: bsc#949138

Subject: libxl: make some _dispose functions idempotent and tolerate NULL
From: Wei Liu wei.l...@citrix.com Wed Feb 25 14:56:02 2015 +0000
Date: Mon Mar 2 17:05:35 2015 +0000:
Git: 1ea68f1a82ef94b3cc644fa70307c5151f356baf

These functions are not generated, so we need to do it by hand.

Functions list:
 libxl_bitmap_dispose
 libxl_string_list_dispose
 libxl_key_value_list_dispose
 libxl_cpuid_dispose

Signed-off-by: Wei Liu <wei.l...@citrix.com>
Cc: Ian Campbell <ian.campb...@citrix.com>
Cc: Ian Jackson <ian.jack...@eu.citrix.com>
Acked-by: Ian Campbell <ian.campb...@citrix.com>

Index: xen-4.5.1-testing/tools/libxl/libxl.c
===================================================================
--- xen-4.5.1-testing.orig/tools/libxl/libxl.c
+++ xen-4.5.1-testing/tools/libxl/libxl.c
@@ -211,9 +211,12 @@ void libxl_string_list_dispose(libxl_str
     if (!sl)
         return;
 
-    for (i = 0; sl[i] != NULL; i++)
+    for (i = 0; sl[i] != NULL; i++) {
         free(sl[i]);
+        sl[i] = NULL;
+    }
     free(sl);
+    *psl = NULL;
 }
 
 void libxl_string_list_copy(libxl_ctx *ctx,
@@ -273,10 +276,14 @@ void libxl_key_value_list_dispose(libxl_
 
     for (i = 0; kvl[i] != NULL; i += 2) {
         free(kvl[i]);
-        if (kvl[i + 1])
+        kvl[i] = NULL;
+        if (kvl[i + 1]) {
             free(kvl[i + 1]);
+            kvl[i+1] = NULL;
+        }
     }
     free(kvl);
+    *pkvl = NULL;
 }
 
 void libxl_key_value_list_copy(libxl_ctx *ctx,
Index: xen-4.5.1-testing/tools/libxl/libxl_cpuid.c
===================================================================
--- xen-4.5.1-testing.orig/tools/libxl/libxl_cpuid.c
+++ xen-4.5.1-testing/tools/libxl/libxl_cpuid.c
@@ -28,10 +28,13 @@ void libxl_cpuid_dispose(libxl_cpuid_pol
         return;
     for (i = 0; cpuid_list[i].input[0] != XEN_CPUID_INPUT_UNUSED; i++) {
         for (j = 0; j < 4; j++)
-            if (cpuid_list[i].policy[j] != NULL)
+            if (cpuid_list[i].policy[j] != NULL) {
                 free(cpuid_list[i].policy[j]);
+                cpuid_list[i].policy[j] = NULL;
+            }
     }
     free(cpuid_list);
+    *p_cpuid_list = NULL;
     return;
 }
 
Index: xen-4.5.1-testing/tools/libxl/libxl_utils.c
===================================================================
--- xen-4.5.1-testing.orig/tools/libxl/libxl_utils.c
+++ xen-4.5.1-testing/tools/libxl/libxl_utils.c
@@ -604,7 +604,12 @@ void libxl_bitmap_init(libxl_bitmap *map
 
 void libxl_bitmap_dispose(libxl_bitmap *map)
 {
+    if (!map)
+        return;
+
     free(map->map);
+    map->map = NULL;
+    map->size = 0;
 }
 
 void libxl_bitmap_copy(libxl_ctx *ctx, libxl_bitmap *dptr,
++++++ 5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:25.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:25.000000000 +0200
@@ -16,7 +16,7 @@
 ===================================================================
 --- xen-4.5.1-testing.orig/tools/libxl/libxl.c
 +++ xen-4.5.1-testing/tools/libxl/libxl.c
-@@ -1688,7 +1688,7 @@ static void devices_destroy_cb(libxl__eg
+@@ -1695,7 +1695,7 @@ static void devices_destroy_cb(libxl__eg
              _exit(-1);
          }
      }

++++++ 557eb620-gnttab-make-the-grant-table-lock-a-read-write-lock.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:25.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:25.000000000 +0200
@@ -113,7 +113,7 @@
          if ( idx != 0 )
 --- a/xen/arch/x86/mm.c
 +++ b/xen/arch/x86/mm.c
-@@ -4587,7 +4587,7 @@ int xenmem_add_to_physmap_one(
+@@ -4592,7 +4592,7 @@ int xenmem_add_to_physmap_one(
                  mfn = virt_to_mfn(d->shared_info);
              break;
          case XENMAPSPACE_grant_table:
@@ -122,7 +122,7 @@
  
              if ( d->grant_table->gt_version == 0 )
                  d->grant_table->gt_version = 1;
-@@ -4609,7 +4609,7 @@ int xenmem_add_to_physmap_one(
+@@ -4614,7 +4614,7 @@ int xenmem_add_to_physmap_one(
                      mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
              }
  

++++++ 5583d9c5-x86-MSI-X-cleanup.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -104,7 +104,7 @@
              u32 mask_bits;
              u16 seg = entry->dev->seg;
              u8 bus = entry->dev->bus;
-@@ -701,13 +705,14 @@ static u64 read_pci_mem_bar(u16 seg, u8 
+@@ -703,13 +707,14 @@ static u64 read_pci_mem_bar(u16 seg, u8 
   * requested MSI-X entries with allocated irqs or non-zero for otherwise.
   **/
  static int msix_capability_init(struct pci_dev *dev,
@@ -120,7 +120,7 @@
      u16 control;
      u64 table_paddr;
      u32 table_offset;
-@@ -719,7 +724,6 @@ static int msix_capability_init(struct p
+@@ -721,7 +726,6 @@ static int msix_capability_init(struct p
  
      ASSERT(spin_is_locked(&pcidevs_lock));
  
@@ -128,7 +128,7 @@
      control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
      msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
  
-@@ -884,10 +888,9 @@ static int __pci_enable_msi(struct msi_i
+@@ -886,10 +890,9 @@ static int __pci_enable_msi(struct msi_i
      old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSI);
      if ( old_desc )
      {
@@ -142,7 +142,7 @@
          *desc = old_desc;
          return 0;
      }
-@@ -895,10 +898,10 @@ static int __pci_enable_msi(struct msi_i
+@@ -897,10 +900,10 @@ static int __pci_enable_msi(struct msi_i
      old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSIX);
      if ( old_desc )
      {
@@ -157,7 +157,7 @@
      }
  
      return msi_capability_init(pdev, msi->irq, desc, msi->entry_nr);
-@@ -912,7 +915,6 @@ static void __pci_disable_msi(struct msi
+@@ -914,7 +917,6 @@ static void __pci_disable_msi(struct msi
      msi_set_enable(dev, 0);
  
      BUG_ON(list_empty(&dev->msi_list));
@@ -165,7 +165,7 @@
  }
  
  /**
-@@ -932,7 +934,7 @@ static void __pci_disable_msi(struct msi
+@@ -934,7 +936,7 @@ static void __pci_disable_msi(struct msi
   **/
  static int __pci_enable_msix(struct msi_info *msi, struct msi_desc **desc)
  {
@@ -174,7 +174,7 @@
      struct pci_dev *pdev;
      u16 control;
      u8 slot = PCI_SLOT(msi->devfn);
-@@ -941,23 +943,22 @@ static int __pci_enable_msix(struct msi_
+@@ -943,23 +945,22 @@ static int __pci_enable_msix(struct msi_
  
      ASSERT(spin_is_locked(&pcidevs_lock));
      pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
@@ -204,7 +204,7 @@
          *desc = old_desc;
          return 0;
      }
-@@ -965,15 +966,13 @@ static int __pci_enable_msix(struct msi_
+@@ -967,15 +968,13 @@ static int __pci_enable_msix(struct msi_
      old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSI);
      if ( old_desc )
      {
@@ -225,7 +225,7 @@
  }
  
  static void _pci_cleanup_msix(struct arch_msix *msix)
-@@ -991,19 +990,16 @@ static void _pci_cleanup_msix(struct arc
+@@ -993,19 +992,16 @@ static void _pci_cleanup_msix(struct arc
  
  static void __pci_disable_msix(struct msi_desc *entry)
  {
@@ -254,7 +254,7 @@
      msix_set_enable(dev, 0);
  
      BUG_ON(list_empty(&dev->msi_list));
-@@ -1045,7 +1041,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8
+@@ -1047,7 +1043,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8
          u16 control = pci_conf_read16(seg, bus, slot, func,
                                        msix_control_reg(pos));
  
@@ -263,7 +263,7 @@
                                    multi_msix_capable(control));
      }
      spin_unlock(&pcidevs_lock);
-@@ -1064,8 +1060,8 @@ int pci_enable_msi(struct msi_info *msi,
+@@ -1066,8 +1062,8 @@ int pci_enable_msi(struct msi_info *msi,
      if ( !use_msi )
          return -EPERM;
  
@@ -274,7 +274,7 @@
  }
  
  /*
-@@ -1115,7 +1111,9 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1117,7 +1113,9 @@ int pci_restore_msi_state(struct pci_dev
      if ( !pdev )
          return -EINVAL;
  

++++++ 5583da09-x86-MSI-track-host-and-guest-masking-separately.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -230,7 +230,7 @@
      .enable       = unmask_msi_irq,
      .disable      = mask_msi_irq,
      .ack          = ack_maskable_msi_irq,
-@@ -591,7 +603,8 @@ static int msi_capability_init(struct pc
+@@ -593,7 +605,8 @@ static int msi_capability_init(struct pc
          entry[i].msi_attrib.is_64 = is_64bit_address(control);
          entry[i].msi_attrib.entry_nr = i;
          entry[i].msi_attrib.maskbit = is_mask_bit_support(control);
@@ -240,7 +240,7 @@
          entry[i].msi_attrib.pos = pos;
          if ( entry[i].msi_attrib.maskbit )
              entry[i].msi.mpos = mpos;
-@@ -817,7 +830,8 @@ static int msix_capability_init(struct p
+@@ -819,7 +832,8 @@ static int msix_capability_init(struct p
          entry->msi_attrib.is_64 = 1;
          entry->msi_attrib.entry_nr = msi->entry_nr;
          entry->msi_attrib.maskbit = 1;
@@ -250,7 +250,7 @@
          entry->msi_attrib.pos = pos;
          entry->irq = msi->irq;
          entry->dev = dev;
-@@ -1152,7 +1166,8 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1154,7 +1168,8 @@ int pci_restore_msi_state(struct pci_dev
  
          for ( i = 0; ; )
          {
@@ -260,7 +260,7 @@
  
              if ( !--nr )
                  break;
-@@ -1304,7 +1319,7 @@ static void dump_msi(unsigned char key)
+@@ -1306,7 +1321,7 @@ static void dump_msi(unsigned char key)
          else
              mask = '?';
          printk(" %-6s%4u vec=%02x%7s%6s%3sassert%5s%7s"
@@ -269,7 +269,7 @@
                 type, irq,
                 (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT,
                 data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed",
-@@ -1312,7 +1327,10 @@ static void dump_msi(unsigned char key)
+@@ -1314,7 +1329,10 @@ static void dump_msi(unsigned char key)
                 data & MSI_DATA_LEVEL_ASSERT ? "" : "de",
                 addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys",
                 addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu",
@@ -317,18 +317,18 @@
  static unsigned int iommu_msi_startup(struct irq_desc *desc)
 --- a/xen/drivers/passthrough/vtd/iommu.c
 +++ b/xen/drivers/passthrough/vtd/iommu.c
-@@ -996,7 +996,7 @@ static void dma_msi_unmask(struct irq_de
-     spin_lock_irqsave(&iommu->register_lock, flags);
-     dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
+@@ -999,7 +999,7 @@ static void dma_msi_unmask(struct irq_de
+     sts &= ~DMA_FECTL_IM;
+     dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
      spin_unlock_irqrestore(&iommu->register_lock, flags);
 -    iommu->msi.msi_attrib.masked = 0;
 +    iommu->msi.msi_attrib.host_masked = 0;
  }
  
  static void dma_msi_mask(struct irq_desc *desc)
-@@ -1008,7 +1008,7 @@ static void dma_msi_mask(struct irq_desc
-     spin_lock_irqsave(&iommu->register_lock, flags);
-     dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
+@@ -1014,7 +1014,7 @@ static void dma_msi_mask(struct irq_desc
+     sts |= DMA_FECTL_IM;
+     dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
      spin_unlock_irqrestore(&iommu->register_lock, flags);
 -    iommu->msi.msi_attrib.masked = 1;
 +    iommu->msi.msi_attrib.host_masked = 1;

++++++ 55b0a218-x86-PCI-CFG-write-intercept.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -14,7 +14,7 @@
 
 --- a/xen/arch/x86/msi.c
 +++ b/xen/arch/x86/msi.c
-@@ -1108,6 +1108,12 @@ void pci_cleanup_msi(struct pci_dev *pde
+@@ -1110,6 +1110,12 @@ void pci_cleanup_msi(struct pci_dev *pde
      msi_free_irqs(pdev);
  }
  

++++++ 55b0a255-x86-MSI-X-maskall.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -15,7 +15,7 @@
 
 --- a/xen/arch/x86/msi.c
 +++ b/xen/arch/x86/msi.c
-@@ -843,6 +843,12 @@ static int msix_capability_init(struct p
+@@ -845,6 +845,12 @@ static int msix_capability_init(struct p
  
      if ( !msix->used_entries )
      {
@@ -28,7 +28,7 @@
          if ( rangeset_add_range(mmio_ro_ranges, msix->table.first,
                                  msix->table.last) )
              WARN();
-@@ -1111,6 +1117,34 @@ void pci_cleanup_msi(struct pci_dev *pde
+@@ -1113,6 +1119,34 @@ void pci_cleanup_msi(struct pci_dev *pde
  int pci_msi_conf_write_intercept(struct pci_dev *pdev, unsigned int reg,
                                   unsigned int size, uint32_t *data)
  {

++++++ 55b0a283-x86-MSI-X-teardown.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -283,7 +283,7 @@
  }
  
  void ack_nonmaskable_msi_irq(struct irq_desc *desc)
-@@ -740,6 +809,9 @@ static int msix_capability_init(struct p
+@@ -742,6 +811,9 @@ static int msix_capability_init(struct p
      control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
      msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
  
@@ -293,7 +293,7 @@
      if ( desc )
      {
          entry = alloc_msi_entry(1);
-@@ -879,7 +951,8 @@ static int msix_capability_init(struct p
+@@ -881,7 +953,8 @@ static int msix_capability_init(struct p
      ++msix->used_entries;
  
      /* Restore MSI-X enabled bits */
@@ -303,7 +303,7 @@
  
      return 0;
  }
-@@ -1024,8 +1097,16 @@ static void __pci_disable_msix(struct ms
+@@ -1026,8 +1099,16 @@ static void __pci_disable_msix(struct ms
  
      BUG_ON(list_empty(&dev->msi_list));
  
@@ -322,7 +322,7 @@
      pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
  
      _pci_cleanup_msix(dev->msix);
-@@ -1199,15 +1280,24 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1201,15 +1282,24 @@ int pci_restore_msi_state(struct pci_dev
              nr = entry->msi.nvec;
          }
          else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )

++++++ 55b0a2ab-x86-MSI-X-enable.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -171,7 +171,7 @@
  }
  
  int __setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc,
-@@ -803,20 +848,38 @@ static int msix_capability_init(struct p
+@@ -805,20 +850,38 @@ static int msix_capability_init(struct p
      u8 bus = dev->bus;
      u8 slot = PCI_SLOT(dev->devfn);
      u8 func = PCI_FUNC(dev->devfn);
@@ -211,7 +211,7 @@
          ASSERT(msi);
      }
  
-@@ -847,6 +910,8 @@ static int msix_capability_init(struct p
+@@ -849,6 +912,8 @@ static int msix_capability_init(struct p
      {
          if ( !msi || !msi->table_base )
          {
@@ -220,7 +220,7 @@
              xfree(entry);
              return -ENXIO;
          }
-@@ -889,6 +954,8 @@ static int msix_capability_init(struct p
+@@ -891,6 +956,8 @@ static int msix_capability_init(struct p
  
          if ( idx < 0 )
          {
@@ -229,7 +229,7 @@
              xfree(entry);
              return idx;
          }
-@@ -915,7 +982,7 @@ static int msix_capability_init(struct p
+@@ -917,7 +984,7 @@ static int msix_capability_init(struct p
  
      if ( !msix->used_entries )
      {
@@ -238,7 +238,7 @@
          if ( !msix->guest_maskall )
              control &= ~PCI_MSIX_FLAGS_MASKALL;
          else
-@@ -951,8 +1018,8 @@ static int msix_capability_init(struct p
+@@ -953,8 +1020,8 @@ static int msix_capability_init(struct p
      ++msix->used_entries;
  
      /* Restore MSI-X enabled bits */
@@ -249,7 +249,7 @@
  
      return 0;
  }
-@@ -1092,8 +1159,15 @@ static void __pci_disable_msix(struct ms
+@@ -1094,8 +1161,15 @@ static void __pci_disable_msix(struct ms
                                             PCI_CAP_ID_MSIX);
      u16 control = pci_conf_read16(seg, bus, slot, func,
                                    msix_control_reg(entry->msi_attrib.pos));
@@ -266,7 +266,7 @@
  
      BUG_ON(list_empty(&dev->msi_list));
  
-@@ -1105,8 +1179,11 @@ static void __pci_disable_msix(struct ms
+@@ -1107,8 +1181,11 @@ static void __pci_disable_msix(struct ms
                 "cannot disable IRQ %d: masking MSI-X on %04x:%02x:%02x.%u\n",
                 entry->irq, dev->seg, dev->bus,
                 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
@@ -279,7 +279,7 @@
      pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
  
      _pci_cleanup_msix(dev->msix);
-@@ -1255,6 +1332,8 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1257,6 +1334,8 @@ int pci_restore_msi_state(struct pci_dev
      list_for_each_entry_safe( entry, tmp, &pdev->msi_list, list )
      {
          unsigned int i = 0, nr = 1;
@@ -288,7 +288,7 @@
  
          irq = entry->irq;
          desc = &irq_desc[irq];
-@@ -1281,10 +1360,18 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1283,10 +1362,18 @@ int pci_restore_msi_state(struct pci_dev
          }
          else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )
          {
@@ -308,7 +308,7 @@
                  return -ENXIO;
              }
          }
-@@ -1314,11 +1401,9 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1316,11 +1403,9 @@ int pci_restore_msi_state(struct pci_dev
          if ( entry->msi_attrib.type == PCI_CAP_ID_MSI )
          {
              unsigned int cpos = msi_control_reg(entry->msi_attrib.pos);
@@ -322,7 +322,7 @@
              multi_msi_enable(control, entry->msi.nvec);
              pci_conf_write16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
                               PCI_FUNC(pdev->devfn), cpos, control);
-@@ -1326,7 +1411,9 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1328,7 +1413,9 @@ int pci_restore_msi_state(struct pci_dev
              msi_set_enable(pdev, 1);
          }
          else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )

++++++ 55b0a2db-x86-MSI-track-guest-masking.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -15,7 +15,7 @@
 
 --- a/xen/arch/x86/msi.c
 +++ b/xen/arch/x86/msi.c
-@@ -1303,6 +1303,37 @@ int pci_msi_conf_write_intercept(struct 
+@@ -1305,6 +1305,37 @@ int pci_msi_conf_write_intercept(struct 
          return 1;
      }
  

++++++ 55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -23,7 +23,6 @@
 Signed-off-by: Jan Beulich <jbeul...@suse.com>
 Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
 Acked-by: Ian Campbell <ian.campb...@citrix.com>
-Release-acked-by: Wei Liu <wei.l...@citrix.com>
 
 # Commit 0a7167d9b20cdc48e6ea320fbbb920b3267c9757
 # Date 2015-09-04 14:58:07 +0100

++++++ 55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch ++++++
# Commit 244582a01dcb49fa30083725964a066937cc94f2
# Date 2015-09-11 16:24:56 +0200
# Author Kouya Shimura <ko...@jp.fujitsu.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/hvm: fix saved pmtimer and hpet values

The ACPI PM timer is sometimes broken on live migration.
Since vcpu->arch.hvm_vcpu.guest_time is always zero in other than
"delay for missed ticks mode". Even in "delay for missed ticks mode",
vcpu's guest_time field is not valid (i.e. zero) when
the state of vcpu is "blocked". (see pt_save_timer function)

The original author (Tim Deegan) of pmtimer_save() must have intended
that it saves the last scheduled time of the vcpu. Unfortunately it was
already implied this bug. FYI, there is no other timer mode than
"delay for missed ticks mode" then.

For consistency with HPET, pmtimer_save() should refer hvm_get_guest_time()
to update the counter as well as hpet_save() does.

Without this patch, the clock of windows server 2012R2 without HPET
might leap forward several minutes on live migration.

Signed-off-by: Kouya Shimura <ko...@jp.fujitsu.com>

Retain use of ->arch.hvm_vcpu.guest_time when non-zero. Do the inverse
adjustment for vHPET.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Tim Deegan <t...@xen.org>
Reviewed-by: Kouya Shimura <ko...@jp.fujitsu.com>

--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -506,11 +506,13 @@ const struct hvm_mmio_handler hpet_mmio_
 static int hpet_save(struct domain *d, hvm_domain_context_t *h)
 {
     HPETState *hp = domain_vhpet(d);
+    struct vcpu *v = pt_global_vcpu_target(d);
     int rc;
     uint64_t guest_time;
 
     write_lock(&hp->lock);
-    guest_time = guest_time_hpet(hp);
+    guest_time = (v->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(v)) /
+                 STIME_PER_HPET_TICK;
 
     /* Write the proper value into the main counter */
     if ( hpet_enabled(hp) )
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -250,10 +250,12 @@ static int pmtimer_save(struct domain *d
 
     spin_lock(&s->lock);
 
-    /* Update the counter to the guest's current time.  We always save
-     * with the domain paused, so the saved time should be after the
-     * last_gtime, but just in case, make sure we only go forwards */
-    x = ((s->vcpu->arch.hvm_vcpu.guest_time - s->last_gtime) * s->scale) >> 32;
+    /*
+     * Update the counter to the guest's current time.  Make sure it only
+     * goes forwards.
+     */
+    x = (((s->vcpu->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(s->vcpu)) -
+          s->last_gtime) * s->scale) >> 32;
     if ( x < 1UL<<31 )
         s->pm.tmr_val += x;
     if ( (s->pm.tmr_val & TMR_VAL_MSB) != msb )
++++++ 55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch 
++++++
References: bsc#945167

# Commit 6e1e3480c3878bac5d244925974a6852c47c809b
# Date 2015-09-15 11:58:26 +0100
# Author Jan Beulich <jbeul...@suse.com>
# Committer Ian Campbell <ian.campb...@citrix.com>
libxl: slightly refine pci-assignable-{add, remove} handling

While it appears to be intentional for "xl pci-assignable-remove" to
not re-bind the original driver by default (requires the -r option),
permanently losing the information which driver was originally used
seems bad. Make "add; remove; add; remove -r" re-bind the original
driver by allowing "remove" to delete the information only upon
successful re-bind.

In the course of this I also noticed that binding information is lost
when upon first "add" pciback isn't loaded yet, due to its presence not
being checked for early enough. Adjust pciback_dev_is_assigned()
accordingly, and properly distinguish "yes" and "error" returns in the
"add" case (removing a redundant error message from the "remove" path
for consistency).

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: George Dunlap <george.dun...@citrix.com>
Acked-by: Ian Campbell <ian.campb...@citrix.com>

--- a/tools/libxl/libxl_pci.c
+++ b/tools/libxl/libxl_pci.c
@@ -543,6 +543,17 @@ static int pciback_dev_is_assigned(libxl
     int rc;
     struct stat st;
 
+    if ( access(SYSFS_PCIBACK_DRIVER, F_OK) < 0 ) {
+        if ( errno == ENOENT ) {
+            LIBXL__LOG(ctx, LIBXL__LOG_ERROR,
+                       "Looks like pciback driver is not loaded");
+        } else {
+            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR,
+                             "Can't access "SYSFS_PCIBACK_DRIVER);
+        }
+        return -1;
+    }
+
     spath = libxl__sprintf(gc, SYSFS_PCIBACK_DRIVER"/"PCI_BDF,
                            pcidev->domain, pcidev->bus,
                            pcidev->dev, pcidev->func);
@@ -658,6 +669,7 @@ static int libxl__device_pci_assignable_
     libxl_ctx *ctx = libxl__gc_owner(gc);
     unsigned dom, bus, dev, func;
     char *spath, *driver_path = NULL;
+    int rc;
     struct stat st;
 
     /* Local copy for convenience */
@@ -674,7 +686,11 @@ static int libxl__device_pci_assignable_
     }
 
     /* Check to see if it's already assigned to pciback */
-    if ( pciback_dev_is_assigned(gc, pcidev) ) {
+    rc = pciback_dev_is_assigned(gc, pcidev);
+    if ( rc < 0 ) {
+        return ERROR_FAIL;
+    }
+    if ( rc ) {
         LIBXL__LOG(ctx, LIBXL__LOG_WARNING, PCI_BDF" already assigned to 
pciback",
                    dom, bus, dev, func);
         return 0;
@@ -692,11 +708,18 @@ static int libxl__device_pci_assignable_
     if ( rebind ) {
         if ( driver_path ) {
             pci_assignable_driver_path_write(gc, pcidev, driver_path);
+        } else if ( (driver_path =
+                     pci_assignable_driver_path_read(gc, pcidev)) != NULL ) {
+            LIBXL__LOG(ctx, LIBXL__LOG_INFO,
+                       PCI_BDF" not bound to a driver, will be rebound to %s",
+                       dom, bus, dev, func, driver_path);
         } else {
             LIBXL__LOG(ctx, LIBXL__LOG_WARNING,
                        PCI_BDF" not bound to a driver, will not be rebound.",
                        dom, bus, dev, func);
         }
+    } else {
+        pci_assignable_driver_path_remove(gc, pcidev);
     }
 
     if ( pciback_dev_assign(gc, pcidev) ) {
@@ -717,7 +740,6 @@ static int libxl__device_pci_assignable_
 
     /* Unbind from pciback */
     if ( (rc=pciback_dev_is_assigned(gc, pcidev)) < 0 ) {
-        LIBXL__LOG(ctx, LIBXL__LOG_ERROR, "Checking if pciback was assigned");
         return ERROR_FAIL;
     } else if ( rc ) {
         pciback_dev_unassign(gc, pcidev);
@@ -741,9 +763,9 @@ static int libxl__device_pci_assignable_
                                  "Couldn't bind device to %s", driver_path);
                 return -1;
             }
-        }
 
-        pci_assignable_driver_path_remove(gc, pcidev);
+            pci_assignable_driver_path_remove(gc, pcidev);
+        }
     } else {
         if ( rebind ) {
             LIBXL__LOG(ctx, LIBXL__LOG_WARNING,
++++++ 55f9345b-x86-MSI-fail-if-no-hardware-support.patch ++++++
# Commit c7d5d5d8ea1ecbd6ef8b47dace4dec825f0f6e48
# Date 2015-09-16 11:20:27 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/MSI: fail if no hardware support

This is to guard against buggy callers (luckily Dom0 only) invoking
the respective hypercall for a device not being MSI-capable.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -566,6 +566,8 @@ static int msi_capability_init(struct pc
 
     ASSERT(spin_is_locked(&pcidevs_lock));
     pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
+    if ( !pos )
+        return -ENODEV;
     control = pci_conf_read16(seg, bus, slot, func, msi_control_reg(pos));
     maxvec = multi_msi_capable(control);
     if ( nvec > maxvec )
++++++ 5604f239-x86-PV-properly-populate-descriptor-tables.patch ++++++
# Commit cf6d39f81992c29a637c603dbabf1e21a0ea563f
# Date 2015-09-25 09:05:29 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/PV: properly populate descriptor tables

Us extending the GDT limit past the Xen descriptors so far meant that
guests (including user mode programs) accessing any descriptor table
slot above the original OS'es limit but below the first Xen descriptor
caused a #PF, converted to a #GP in our #PF handler, which is quite
different from the native behavior, where some such accesses (LAR
and LSL) don't fault. Mimic that behavior by mapping a blank page into
unused slots.

While not strictly required, treat the LDT the same for consistency.

Reported-by: Andrew Cooper <andrew.coop...@citrix.com>
Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -505,12 +505,13 @@ void update_cr3(struct vcpu *v)
     make_cr3(v, cr3_mfn);
 }
 
+static const char __section(".bss.page_aligned") zero_page[PAGE_SIZE];
 
 static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
     l1_pgentry_t *pl1e;
-    int i;
-    unsigned long pfn;
+    unsigned int i;
+    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));
     struct page_info *page;
 
     BUG_ON(unlikely(in_irq()));
@@ -526,8 +527,10 @@ static void invalidate_shadow_ldt(struct
     for ( i = 16; i < 32; i++ )
     {
         pfn = l1e_get_pfn(pl1e[i]);
-        if ( pfn == 0 ) continue;
-        l1e_write(&pl1e[i], l1e_empty());
+        if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) || pfn == zero_pfn )
+            continue;
+        l1e_write(&pl1e[i],
+                  l1e_from_pfn(zero_pfn, __PAGE_HYPERVISOR & ~_PAGE_RW));
         page = mfn_to_page(pfn);
         ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
         ASSERT_PAGE_IS_DOMAIN(page, v->domain);
@@ -4360,16 +4363,18 @@ long do_update_va_mapping_otherdomain(un
 void destroy_gdt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e;
-    int i;
-    unsigned long pfn;
+    unsigned int i;
+    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));
 
     v->arch.pv_vcpu.gdt_ents = 0;
     pl1e = gdt_ldt_ptes(v->domain, v);
     for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
     {
-        if ( (pfn = l1e_get_pfn(pl1e[i])) != 0 )
+        pfn = l1e_get_pfn(pl1e[i]);
+        if ( (l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) && pfn != zero_pfn )
             put_page_and_type(mfn_to_page(pfn));
-        l1e_write(&pl1e[i], l1e_empty());
+        l1e_write(&pl1e[i],
+                  l1e_from_pfn(zero_pfn, __PAGE_HYPERVISOR & ~_PAGE_RW));
         v->arch.pv_vcpu.gdt_frames[i] = 0;
     }
 }
@@ -4382,7 +4387,7 @@ long set_gdt(struct vcpu *v, 
     struct domain *d = v->domain;
     l1_pgentry_t *pl1e;
     /* NB. There are 512 8-byte entries per GDT page. */
-    int i, nr_pages = (entries + 511) / 512;
+    unsigned int i, nr_pages = (entries + 511) / 512;
 
     if ( entries > FIRST_RESERVED_GDT_ENTRY )
         return -EINVAL;
++++++ 5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch ++++++
# Commit 86f3ff9fc4cc3cb69b96c1de74bcc51f738fe2b9
# Date 2015-09-25 09:08:22 +0200
# Author Quan Xu <quan...@intel.com>
# Committer Jan Beulich <jbeul...@suse.com>
vt-d: fix IM bit mask and unmask of Fault Event Control Register

Bit 0:29 in Fault Event Control Register are 'Reserved and Preserved',
software cannot write 0 to it unconditionally. Software must preserve
the value read for writes.

Signed-off-by: Quan Xu <quan...@intel.com>
Acked-by: Yang Zhang <yang.z.zh...@intel.com>

# Commit 26b300bd727ef00a8f60329212a83c3b027a48f7
# Date 2015-09-25 18:03:04 +0200
# Author Quan Xu <quan...@intel.com>
# Committer Jan Beulich <jbeul...@suse.com>
vt-d: fix IM bit unmask of Fault Event Control Register in init_vtd_hw()

Bit 0:29 in Fault Event Control Register are 'Reserved and Preserved',
software cannot write 0 to it unconditionally. Software must preserve
the value read for writes.

Suggested-by: Jan Beulich <jbeul...@suse.com>
Signed-off-by: Quan Xu <quan...@intel.com>

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -991,10 +991,13 @@ static void dma_msi_unmask(struct irq_de
 {
     struct iommu *iommu = desc->action->dev_id;
     unsigned long flags;
+    u32 sts;
 
     /* unmask it */
     spin_lock_irqsave(&iommu->register_lock, flags);
-    dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
+    sts = dmar_readl(iommu->reg, DMAR_FECTL_REG);
+    sts &= ~DMA_FECTL_IM;
+    dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
     iommu->msi.msi_attrib.masked = 0;
 }
@@ -1003,10 +1006,13 @@ static void dma_msi_mask(struct irq_desc
 {
     unsigned long flags;
     struct iommu *iommu = desc->action->dev_id;
+    u32 sts;
 
     /* mask it */
     spin_lock_irqsave(&iommu->register_lock, flags);
-    dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
+    sts = dmar_readl(iommu->reg, DMAR_FECTL_REG);
+    sts |= DMA_FECTL_IM;
+    dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
     iommu->msi.msi_attrib.masked = 1;
 }
@@ -2002,6 +2008,7 @@ static int init_vtd_hw(void)
     struct iommu_flush *flush = NULL;
     int ret;
     unsigned long flags;
+    u32 sts;
 
     /*
      * Basic VT-d HW init: set VT-d interrupt, clear VT-d faults.  
@@ -2015,7 +2022,9 @@ static int init_vtd_hw(void)
         clear_fault_bits(iommu);
 
         spin_lock_irqsave(&iommu->register_lock, flags);
-        dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
+        sts = dmar_readl(iommu->reg, DMAR_FECTL_REG);
+        sts &= ~DMA_FECTL_IM;
+        dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
         spin_unlock_irqrestore(&iommu->register_lock, flags);
     }
 
++++++ 560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch ++++++
# Commit 6c0e4ad60850032c9bbd5d18b8446421c97e08e4
# Date 2015-09-29 10:25:29 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/EPT: tighten conditions of IOMMU mapping updates

Permission changes should also result in updates or TLB flushes.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Acked-by: Kevin Tian <kevin.t...@intel.com>
Reviewed-by: George Dunlap <george.dun...@citrix.com>

--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -619,6 +619,7 @@ ept_set_entry(struct p2m_domain *p2m, un
     uint8_t ipat = 0;
     int need_modify_vtd_table = 1;
     int vtd_pte_present = 0;
+    unsigned int iommu_flags = p2m_get_iommu_flags(p2mt);
     enum { sync_off, sync_on, sync_check } needs_sync = sync_check;
     ept_entry_t old_entry = { .epte = 0 };
     ept_entry_t new_entry = { .epte = 0 };
@@ -749,8 +750,9 @@ ept_set_entry(struct p2m_domain *p2m, un
         new_entry.mfn = mfn_x(mfn);
 
         /* Safe to read-then-write because we hold the p2m lock */
-        if ( ept_entry->mfn == new_entry.mfn )
-             need_modify_vtd_table = 0;
+        if ( ept_entry->mfn == new_entry.mfn &&
+             p2m_get_iommu_flags(ept_entry->sa_p2mt) == iommu_flags )
+            need_modify_vtd_table = 0;
 
         ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
     }
@@ -775,11 +777,9 @@ out:
             iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
         else
         {
-            unsigned int flags = p2m_get_iommu_flags(p2mt);
-
-            if ( flags != 0 )
+            if ( iommu_flags )
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_map_page(d, gfn + i, mfn_x(mfn) + i, flags);
+                    iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
             else
                 for ( i = 0; i < (1 << order); i++ )
                     iommu_unmap_page(d, gfn + i);
++++++ 560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch 
++++++
# Commit 960265fbd878cdc9841473b755e4ccc9eb1942d2
# Date 2015-09-29 13:55:34 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/p2m-pt: delay freeing of intermediate page tables

Old intermediate page tables must be freed only after IOMMU side
updates/flushes have got carried out.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: George Dunlap <george.dun...@citrix.com>

--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -486,8 +486,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     /* XXX -- this might be able to be faster iff current->domain == d */
     void *table;
     unsigned long i, gfn_remainder = gfn;
-    l1_pgentry_t *p2m_entry;
-    l1_pgentry_t entry_content;
+    l1_pgentry_t *p2m_entry, entry_content;
+    /* Intermediate table to free if we're replacing it with a superpage. */
+    l1_pgentry_t intermediate_entry = l1e_empty();
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
     int rc;
@@ -535,7 +536,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
      */
     if ( page_order == PAGE_ORDER_1G )
     {
-        l1_pgentry_t old_entry = l1e_empty();
         p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
                                    L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                                    L3_PAGETABLE_ENTRIES);
@@ -545,7 +545,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         {
             /* We're replacing a non-SP page with a superpage.  Make sure to
              * handle freeing the table properly. */
-            old_entry = *p2m_entry;
+            intermediate_entry = *p2m_entry;
         }
 
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
@@ -563,10 +563,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
 
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
-
-        /* Free old intermediate tables if necessary */
-        if ( l1e_get_flags(old_entry) & _PAGE_PRESENT )
-            p2m_free_entry(p2m, &old_entry, page_order);
     }
     else 
     {
@@ -607,7 +603,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     }
     else if ( page_order == PAGE_ORDER_2M )
     {
-        l1_pgentry_t old_entry = l1e_empty();
         p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
                                    L2_PAGETABLE_SHIFT - PAGE_SHIFT,
                                    L2_PAGETABLE_ENTRIES);
@@ -619,7 +614,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         {
             /* We're replacing a non-SP page with a superpage.  Make sure to
              * handle freeing the table properly. */
-            old_entry = *p2m_entry;
+            intermediate_entry = *p2m_entry;
         }
         
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
@@ -640,10 +635,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
 
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
-
-        /* Free old intermediate tables if necessary */
-        if ( l1e_get_flags(old_entry) & _PAGE_PRESENT )
-            p2m_free_entry(p2m, &old_entry, page_order);
     }
 
     /* Track the highest gfn for which we have ever had a valid mapping */
@@ -671,6 +662,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         }
     }
 
+    /*
+     * Free old intermediate tables if necessary.  This has to be the
+     * last thing we do, after removal from the IOMMU tables, so as to
+     * avoid a potential use-after-free.
+     */
+    if ( l1e_get_flags(intermediate_entry) & _PAGE_PRESENT )
+        p2m_free_entry(p2m, &intermediate_entry, page_order);
+
  out:
     unmap_domain_page(table);
     return rc;
++++++ 560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch 
++++++
# Commit c0a85795d864dd64c116af661bf676d66ddfd5fc
# Date 2015-09-29 13:56:03 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/p2m-pt: ignore pt-share flag for shadow mode guests

There is no page table sharing in shadow mode.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: George Dunlap <george.dun...@citrix.com>

--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -644,7 +644,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
 
     if ( iommu_enabled && need_iommu(p2m->domain) )
     {
-        if ( iommu_hap_pt_share )
+        if ( iommu_use_hap_pt(p2m->domain) )
         {
             if ( old_mfn && (old_mfn != mfn_x(mfn)) )
                 amd_iommu_flush_pages(p2m->domain, gfn, page_order);
++++++ 560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch 
++++++
# Commit ea5637968a09a81a64fa5fd73ce49b4ea9789e12
# Date 2015-09-30 14:44:22 +0200
# Author Dario Faggioli <dario.faggi...@citrix.com>
# Committer Jan Beulich <jbeul...@suse.com>
credit1: fix tickling when it happens from a remote pCPU

especially if that is also from a different cpupool than the
processor of the vCPU that triggered the tickling.

In fact, it is possible that we get as far as calling vcpu_unblock()-->
vcpu_wake()-->csched_vcpu_wake()-->__runq_tickle() for the vCPU 'vc',
but all while running on a pCPU that is different from 'vc->processor'.

For instance, this can happen when an HVM domain runs in a cpupool,
with a different scheduler than the default one, and issues IOREQs
to Dom0, running in Pool-0 with the default scheduler.
In fact, right in this case, the following crash can be observed:

(XEN) ----[ Xen-4.7-unstable  x86_64  debug=y  Tainted:    C ]----
(XEN) CPU:    7
(XEN) RIP:    e008:[<ffff82d0801230de>] __runq_tickle+0x18f/0x430
(XEN) RFLAGS: 0000000000010086   CONTEXT: hypervisor (d1v0)
(XEN) rax: 0000000000000001   rbx: ffff8303184fee00   rcx: 0000000000000000
(XEN) ... ... ...
(XEN) Xen stack trace from rsp=ffff83031fa57a08:
(XEN)    ffff82d0801fe664 ffff82d08033c820 0000000100000002 0000000a00000001
(XEN)    0000000000006831 0000000000000000 0000000000000000 0000000000000000
(XEN) ... ... ...
(XEN) Xen call trace:
(XEN)    [<ffff82d0801230de>] __runq_tickle+0x18f/0x430
(XEN)    [<ffff82d08012348a>] csched_vcpu_wake+0x10b/0x110
(XEN)    [<ffff82d08012b421>] vcpu_wake+0x20a/0x3ce
(XEN)    [<ffff82d08012b91c>] vcpu_unblock+0x4b/0x4e
(XEN)    [<ffff82d080167bd0>] vcpu_kick+0x17/0x61
(XEN)    [<ffff82d080167c46>] vcpu_mark_events_pending+0x2c/0x2f
(XEN)    [<ffff82d08010ac35>] evtchn_fifo_set_pending+0x381/0x3f6
(XEN)    [<ffff82d08010a0f6>] notify_via_xen_event_channel+0xc9/0xd6
(XEN)    [<ffff82d0801c29ed>] hvm_send_ioreq+0x3e9/0x441
(XEN)    [<ffff82d0801bba7d>] hvmemul_do_io+0x23f/0x2d2
(XEN)    [<ffff82d0801bbb43>] hvmemul_do_io_buffer+0x33/0x64
(XEN)    [<ffff82d0801bc92b>] hvmemul_do_pio_buffer+0x35/0x37
(XEN)    [<ffff82d0801cc49f>] handle_pio+0x58/0x14c
(XEN)    [<ffff82d0801eabcb>] vmx_vmexit_handler+0x16b3/0x1bea
(XEN)    [<ffff82d0801efd21>] vmx_asm_vmexit_handler+0x41/0xc0

In this case, pCPU 7 is not in Pool-0, while the (Dom0's) vCPU being
woken is. pCPU's 7 pool has a different scheduler than credit, but it
is, however, right from pCPU 7 that we are waking the Dom0's vCPUs.
Therefore, the current code tries to access csched_balance_mask for
pCPU 7, but that is not defined, and hence the Oops.

(Note that, in case the two pools run the same scheduler we see no
Oops, but things are still conceptually wrong.)

Cure things by making the csched_balance_mask macro accept a
parameter for fetching a specific pCPU's mask (instead of always
using smp_processor_id()).

Signed-off-by: Dario Faggioli <dario.faggi...@citrix.com>
Reviewed-by: Juergen Gross <jgr...@suse.com>
Reviewed-by: George Dunlap <george.dun...@citrix.com>

--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -154,10 +154,10 @@ struct csched_pcpu {
  * Convenience macro for accessing the per-PCPU cpumask we need for
  * implementing the two steps (soft and hard affinity) balancing logic.
  * It is stored in csched_pcpu so that serialization is not an issue,
- * as there is a csched_pcpu for each PCPU and we always hold the
- * runqueue spin-lock when using this.
+ * as there is a csched_pcpu for each PCPU, and we always hold the
+ * runqueue lock for the proper PCPU when using this.
  */
-#define csched_balance_mask (CSCHED_PCPU(smp_processor_id())->balance_mask)
+#define csched_balance_mask(c) (CSCHED_PCPU(c)->balance_mask)
 
 /*
  * Virtual CPU
@@ -396,9 +396,10 @@ __runq_tickle(unsigned int cpu, struct c
 
             /* Are there idlers suitable for new (for this balance step)? */
             csched_balance_cpumask(new->vcpu, balance_step,
-                                   csched_balance_mask);
-            cpumask_and(csched_balance_mask, csched_balance_mask, &idle_mask);
-            new_idlers_empty = cpumask_empty(csched_balance_mask);
+                                   csched_balance_mask(cpu));
+            cpumask_and(csched_balance_mask(cpu),
+                        csched_balance_mask(cpu), &idle_mask);
+            new_idlers_empty = cpumask_empty(csched_balance_mask(cpu));
 
             /*
              * Let's not be too harsh! If there aren't idlers suitable
@@ -1475,8 +1476,9 @@ csched_runq_steal(int peer_cpu, int cpu,
                  && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
                 continue;
 
-            csched_balance_cpumask(vc, balance_step, csched_balance_mask);
-            if ( __csched_vcpu_is_migrateable(vc, cpu, csched_balance_mask) )
+            csched_balance_cpumask(vc, balance_step, csched_balance_mask(cpu));
+            if ( __csched_vcpu_is_migrateable(vc, cpu,
+                                              csched_balance_mask(cpu)) )
             {
                 /* We got a candidate. Grab it! */
                 TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
++++++ 560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch 
++++++
# Commit 660fd65d5578a95ec5eac522128bba23325179eb
# Date 2015-10-02 13:40:36 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/p2m-pt: tighten conditions of IOMMU mapping updates

Whether the MFN changes does not depend on the new entry being valid
(but solely on the old one), and the need to update or TLB-flush also
depends on permission changes.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
Reviewed-by: George Dunlap <george.dun...@citrix.com>

--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -493,7 +493,18 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     l3_pgentry_t l3e_content;
     int rc;
     unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt);
-    unsigned long old_mfn = 0;
+    /*
+     * old_mfn and iommu_old_flags control possible flush/update needs on the
+     * IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
+     * iommu_old_flags being initialized to zero covers the case of the entry
+     * getting replaced being a non-present (leaf or intermediate) one. For
+     * present leaf entries the real value will get calculated below, while
+     * for present intermediate entries ~0 (guaranteed != iommu_pte_flags)
+     * will be used (to cover all cases of what the leaf entries underneath
+     * the intermediate one might be).
+     */
+    unsigned int flags, iommu_old_flags = 0;
+    unsigned long old_mfn = INVALID_MFN;
 
     if ( tb_init_done )
     {
@@ -540,12 +551,20 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
                                    L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                                    L3_PAGETABLE_ENTRIES);
         ASSERT(p2m_entry);
-        if ( (l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) &&
-             !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+        flags = l1e_get_flags(*p2m_entry);
+        if ( flags & _PAGE_PRESENT )
         {
-            /* We're replacing a non-SP page with a superpage.  Make sure to
-             * handle freeing the table properly. */
-            intermediate_entry = *p2m_entry;
+            if ( flags & _PAGE_PSE )
+            {
+                iommu_old_flags =
+                    p2m_get_iommu_flags(p2m_flags_to_type(flags));
+                old_mfn = l1e_get_pfn(*p2m_entry);
+            }
+            else
+            {
+                iommu_old_flags = ~0;
+                intermediate_entry = *p2m_entry;
+            }
         }
 
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
@@ -556,10 +575,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         entry_content.l1 = l3e_content.l3;
 
         if ( entry_content.l1 != 0 )
-        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-            old_mfn = l1e_get_pfn(*p2m_entry);
-        }
 
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -584,7 +600,10 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
                                    0, L1_PAGETABLE_ENTRIES);
         ASSERT(p2m_entry);
-        
+        iommu_old_flags =
+            p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)));
+        old_mfn = l1e_get_pfn(*p2m_entry);
+
         if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct)
                             || p2m_is_paging(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
@@ -593,10 +612,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
             entry_content = l1e_empty();
 
         if ( entry_content.l1 != 0 )
-        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-            old_mfn = l1e_get_pfn(*p2m_entry);
-        }
+
         /* level 1 entry */
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -607,14 +624,20 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
                                    L2_PAGETABLE_SHIFT - PAGE_SHIFT,
                                    L2_PAGETABLE_ENTRIES);
         ASSERT(p2m_entry);
-        
-        /* FIXME: Deal with 4k replaced by 2meg pages */
-        if ( (l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) &&
-             !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
-        {
-            /* We're replacing a non-SP page with a superpage.  Make sure to
-             * handle freeing the table properly. */
-            intermediate_entry = *p2m_entry;
+        flags = l1e_get_flags(*p2m_entry);
+        if ( flags & _PAGE_PRESENT )
+        {
+            if ( flags & _PAGE_PSE )
+            {
+                iommu_old_flags =
+                    p2m_get_iommu_flags(p2m_flags_to_type(flags));
+                old_mfn = l1e_get_pfn(*p2m_entry);
+            }
+            else
+            {
+                iommu_old_flags = ~0;
+                intermediate_entry = *p2m_entry;
+            }
         }
         
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
@@ -628,10 +651,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         entry_content.l1 = l2e_content.l2;
 
         if ( entry_content.l1 != 0 )
-        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-            old_mfn = l1e_get_pfn(*p2m_entry);
-        }
 
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -642,17 +662,17 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
          && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
         p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
 
-    if ( iommu_enabled && need_iommu(p2m->domain) )
+    if ( iommu_enabled && need_iommu(p2m->domain) &&
+         (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
     {
         if ( iommu_use_hap_pt(p2m->domain) )
         {
-            if ( old_mfn && (old_mfn != mfn_x(mfn)) )
+            if ( iommu_old_flags )
                 amd_iommu_flush_pages(p2m->domain, gfn, page_order);
         }
         else
         {
-            unsigned int flags = p2m_get_iommu_flags(p2mt);
-
+            flags = p2m_get_iommu_flags(p2mt);
             if ( flags != 0 )
                 for ( i = 0; i < (1UL << page_order); i++ )
                     iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i, flags);
++++++ CVE-2015-7311-xsa142.patch ++++++
>From 07ca00703f76ad392eda5ee52cce1197cf49c30a Mon Sep 17 00:00:00 2001
From: Stefano Stabellini <stefano.stabell...@eu.citrix.com>
Subject: [PATCH v2.1 for-4.5] libxl: handle read-only drives with qemu-xen

The current libxl code doesn't deal with read-only drives at all.

Upstream QEMU and qemu-xen only support read-only cdrom drives: make
sure to specify "readonly=on" for cdrom drives and return error in case
the user requested a non-cdrom read-only drive.

This is XSA-142, discovered by Lin Liu
(https://bugzilla.redhat.com/show_bug.cgi?id=1257893).

Signed-off-by: Stefano Stabellini <stefano.stabell...@eu.citrix.com>

Backport to Xen 4.5 and earlier, apropos of report and review from
Michael Young.

Signed-off-by: Ian Jackson <ian.jack...@eu.citrix.com>
---
 tools/libxl/libxl_dm.c |   13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

Index: xen-4.5.1-testing/tools/libxl/libxl_dm.c
===================================================================
--- xen-4.5.1-testing.orig/tools/libxl/libxl_dm.c
+++ xen-4.5.1-testing/tools/libxl/libxl_dm.c
@@ -812,13 +812,18 @@ static char ** libxl__build_device_model
             if (disks[i].is_cdrom) {
                 if (disks[i].format == LIBXL_DISK_FORMAT_EMPTY)
                     drive = libxl__sprintf
-                        (gc, 
"if=ide,index=%d,media=cdrom,cache=writeback,id=ide-%i",
-                         disk, dev_number);
+                        (gc, 
"if=ide,index=%d,readonly=%s,media=cdrom,cache=writeback,id=ide-%i",
+                         disk, disks[i].readwrite ? "off" : "on", dev_number);
                 else
                     drive = libxl__sprintf
-                        (gc, 
"file=%s,if=ide,index=%d,media=cdrom,format=%s,cache=writeback,id=ide-%i",
-                         disks[i].pdev_path, disk, format, dev_number);
+                        (gc, 
"file=%s,if=ide,index=%d,readonly=%s,media=cdrom,format=%s,cache=writeback,id=ide-%i",
+                         disks[i].pdev_path, disk, disks[i].readwrite ? "off" 
: "on", format, dev_number);
             } else {
+                if (!disks[i].readwrite) {
+                    LIBXL__LOG(ctx, LIBXL__LOG_ERROR, "qemu-xen doesn't 
support read-only disk drivers");
+                    return NULL;
+                }
+
                 if (disks[i].format == LIBXL_DISK_FORMAT_EMPTY) {
                     LIBXL__LOG(ctx, LIBXL__LOG_WARNING, "cannot support"
                                " empty disk format for %s", disks[i].vdev);
++++++ README.SUSE ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -500,13 +500,16 @@
 
 Dom0 Memory Ballooning
 ----------------------
-For some server deployments it may be best to dedicate a fixed amount of RAM
-rather than relying on dom0 ballooning. The amount of RAM dedicated to dom0
-should never be less than the recommended minimum amount for running your SUSE
-distribution in native mode. The actual amount of RAM needed for dom0 will
-depend on how much physical RAM your host contains and the number of VMs you
-plan on running simultaneously. The following example shows the syntax for
-doing this.  This would be added to your grub1 or grub2 configuration;
+It is strongly recommended that you dedicate a fixed amount of RAM to dom0
+rather than relying on dom0 auto ballooning. Doing so will ensure your dom0
+has enough resources to operate well and will improve startup times for your
+VMs. The amount of RAM dedicated to dom0 should never be less than the
+recommended minimum amount for running your SUSE distribution in native mode.
+The actual amount of RAM needed for dom0 depends on several factors including
+how much physical RAM is on the host, the number of physical CPUs, and the
+number of VMs running simultaneously where each VM has a specific requirement
+for RAM. The following example shows the syntax for doing this. This would be
+added to your grub1 or grub2 configuration;
 
 Grub2 Example:
   Edit /etc/default/grub and add,

++++++ hotplug-Linux-block-performance-fix.patch ++++++
Reference: bsc#941074

During the attachment of a loopback mounted image file, the mode of all
current instances of this device already attached to other domains must be
checked. This requires finding all loopback devices pointing to the inode
of the shared image file, and then comparing the major and minor number of
these devices to the major and minor number of every vbd device found in the
xenstore database.

Prior to this patch, the entire xenstore database is walked for every instance
of every loopback device pointing to the same shared image file. This process
causes the block attachment process to become exponentially slower with every
additional attachment of a shared image.

Rather than scanning all of xenstore for every instance of a shared loopback
device, this patch creates a list of the major and minor numbers from all
matching loopback devices. After generating this list, Xenstore is walked
once, and major and minor numbers from every vbd are checked against the list.
If a match is found, the mode of that vbd is checked for compatibility with
the mode of the device being attached.

Signed-off-by: Mike Latimer <mlatimer@xxxxxxxx>
---
 tools/hotplug/Linux/block | 89 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 57 insertions(+), 32 deletions(-)

Index: xen-4.4.3-testing/tools/hotplug/Linux/block
===================================================================
--- xen-4.4.3-testing.orig/tools/hotplug/Linux/block
+++ xen-4.4.3-testing/tools/hotplug/Linux/block
@@ -38,7 +38,7 @@ find_free_loopback_dev() {
 }
 
 ##
-# check_sharing device mode
+# check_sharing devtype device mode [inode]
 #
 # Check whether the device requested is already in use.  To use the device in
 # read-only mode, it may be in use in read-only mode, but may not be in use in
@@ -47,19 +47,44 @@ find_free_loopback_dev() {
 #
 # Prints one of
 #
-#    'local': the device may not be used because it is mounted in the current
-#             (i.e. the privileged domain) in a way incompatible with the
-#             requested mode;
-#    'guest': the device may not be used because it already mounted by a guest
-#             in a way incompatible with the requested mode; or
-#    'ok':    the device may be used.
+#    'local $d': the device ($d) may not be used because it is mounted in the
+#                current (i.e. the privileged domain) in a way incompatible
+#                with the requested mode;
+#    'guest $d': the device may not be used because it is already mounted
+#                through device $d by a guest in a way incompatible with the
+#                requested mode; or
+#    'ok':       the device may be used.
 #
 check_sharing()
 {
-  local dev="$1"
-  local mode="$2"
+  local devtype=$1
+  local dev="$2"
+  local mode="$3"
+  local devmm=","
+
+  if [ "$devtype" = "file" ];
+  then
+    local inode="$4"
+
+    shared_list=$(losetup -a |
+          sed -n -e 
"s@^\([^:]\+\)\(:[[:blank:]]\[0*${dev}\]:${inode}[[:blank:]](.*)\)@\1@p" )
+    for dev in $shared_list
+    do
+      if [ -n "$dev" ]
+      then
+        devmm="${devmm}$(device_major_minor $dev),"
+      fi
+    done
+    # if $devmm is unchanged, file being checked is not a shared loopback 
device
+    if [ "$devmm" = "," ];
+    then
+      echo 'ok'
+      return
+    fi
+  else
+    devmm=${devmm}$(device_major_minor "$dev")","
+  fi
 
-  local devmm=$(device_major_minor "$dev")
   local file
 
   if [ "$mode" = 'w' ]
@@ -75,9 +100,10 @@ check_sharing()
     then
       local d=$(device_major_minor "$file")
 
-      if [ "$d" = "$devmm" ]
+      # checking for $d in $devmm is best through the [[...]] bashism
+      if [[ "$devmm" == *",$d,"* ]]
       then
-        echo 'local'
+        echo "local $d"
         return
       fi
     fi
@@ -90,13 +116,14 @@ check_sharing()
     do
       d=$(xenstore_read_default "$base_path/$dom/$dev/physical-device" "")
 
-      if [ "$d" = "$devmm" ]
+      # checking for $d in $devmm is best through the [[...]] bashism
+      if [ -n "$d" ] && [[ "$devmm" == *",$d,"* ]]
       then
         if [ "$mode" = 'w' ]
         then
           if ! same_vm $dom
           then
-            echo 'guest'
+            echo "guest $d"
             return
           fi
         else
@@ -107,7 +134,7 @@ check_sharing()
           then
             if ! same_vm $dom
             then
-              echo 'guest'
+              echo "guest $d"
               return
             fi
           fi
@@ -129,6 +156,7 @@ check_device_sharing()
 {
   local dev="$1"
   local mode=$(canonicalise_mode "$2")
+  local type="device"
   local result
 
   if [ "x$mode" = 'x!' ]
@@ -136,33 +164,38 @@ check_device_sharing()
     return 0
   fi
 
-  result=$(check_sharing "$dev" "$mode")
+  result=$(check_sharing "$type" "$dev" "$mode")
 
   if [ "$result" != 'ok' ]
   then
-    do_ebusy "Device $dev is mounted " "$mode" "$result"
+    do_ebusy "Device $dev is mounted " "$mode" "${result%% *}"
   fi
 }
 
 
 ##
-# check_device_sharing file dev mode
+# check_device_sharing file dev mode inode
 #
-# Perform the sharing check for the given file mounted through the given
-# loopback interface, in the given mode.
+# Perform the sharing check for the given file, with its corresponding
+# device, inode and mode. As the file can be mounted multiple times,
+# the inode is passed through to check_sharing for all instances to be
+# checked.
 #
 check_file_sharing()
 {
   local file="$1"
   local dev="$2"
   local mode="$3"
+  local inode="$4"
+  local type="file"
+  local result
 
-  result=$(check_sharing "$dev" "$mode")
+  result=$(check_sharing "$type" "$dev" "$mode" "$inode")
 
   if [ "$result" != 'ok' ]
   then
-    do_ebusy "File $file is loopback-mounted through $dev,
-which is mounted " "$mode" "$result"
+    do_ebusy "File $file is loopback-mounted through ${result#* },
+which is mounted " "$mode" "${result%% *}"
   fi
 }
 
@@ -279,15 +312,7 @@ mount it read-write in a guest domain."
             fatal "Unable to lookup $file: dev: $dev inode: $inode"
           fi
 
-          shared_list=$(losetup -a |
-                sed -n -e 
"s@^\([^:]\+\)\(:[[:blank:]]\[0*${dev}\]:${inode}[[:blank:]](.*)\)@\1@p" )
-          for dev in $shared_list
-          do
-            if [ -n "$dev" ]
-            then
-              check_file_sharing "$file" "$dev" "$mode"
-            fi
-          done
+          check_file_sharing "$file" "$dev" "$mode" "$inode"
         fi
 
         loopdev=$(losetup -f 2>/dev/null || find_free_loopback_dev)
++++++ libxl.add-option-to-disable-disk-cache-flushes-in-qdisk.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -11,7 +11,7 @@
 ===================================================================
 --- xen-4.5.1-testing.orig/tools/libxl/libxl.c
 +++ xen-4.5.1-testing/tools/libxl/libxl.c
-@@ -2818,6 +2818,8 @@ static void device_disk_add(libxl__egc *
+@@ -2825,6 +2825,8 @@ static void device_disk_add(libxl__egc *
          flexarray_append_pair(back, "discard-enable",
                                libxl_defbool_val(disk->discard_enable) ?
                                "1" : "0");

++++++ libxl.pvscsi.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -31,8 +31,10 @@
 7de6f49 support character devices too
 c84381b allow /dev/sda as scsi devspec
 f11e3a2 pvscsi
---- a/docs/man/xl.cfg.pod.5
-+++ b/docs/man/xl.cfg.pod.5
+Index: xen-4.5.1-testing/docs/man/xl.cfg.pod.5
+===================================================================
+--- xen-4.5.1-testing.orig/docs/man/xl.cfg.pod.5
++++ xen-4.5.1-testing/docs/man/xl.cfg.pod.5
 @@ -448,6 +448,36 @@ value is optional if this is a guest dom
  
  =back
@@ -70,8 +72,10 @@
  =item B<vfb=[ "VFB_SPEC_STRING", "VFB_SPEC_STRING", ...]>
  
  Specifies the paravirtual framebuffer devices which should be supplied
---- a/docs/man/xl.pod.1
-+++ b/docs/man/xl.pod.1
+Index: xen-4.5.1-testing/docs/man/xl.pod.1
+===================================================================
+--- xen-4.5.1-testing.orig/docs/man/xl.pod.1
++++ xen-4.5.1-testing/docs/man/xl.pod.1
 @@ -1323,6 +1323,26 @@ List virtual trusted platform modules fo
  
  =back
@@ -99,9 +103,11 @@
  =head1 PCI PASS-THROUGH
  
  =over 4
---- a/tools/libxl/libxl.c
-+++ b/tools/libxl/libxl.c
-@@ -2310,6 +2310,273 @@ int libxl_devid_to_device_vtpm(libxl_ctx
+Index: xen-4.5.1-testing/tools/libxl/libxl.c
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/libxl.c
++++ xen-4.5.1-testing/tools/libxl/libxl.c
+@@ -2317,6 +2317,273 @@ int libxl_devid_to_device_vtpm(libxl_ctx
      return rc;
  }
  
@@ -375,7 +381,7 @@
  
  
/******************************************************************************/
  
-@@ -4185,6 +4452,8 @@ out:
+@@ -4192,6 +4459,8 @@ out:
   * libxl_device_vkb_destroy
   * libxl_device_vfb_remove
   * libxl_device_vfb_destroy
@@ -384,7 +390,7 @@
   */
  #define DEFINE_DEVICE_REMOVE(type, removedestroy, f)                    \
      int libxl_device_##type##_##removedestroy(libxl_ctx *ctx,           \
-@@ -4240,6 +4509,10 @@ DEFINE_DEVICE_REMOVE(vtpm, destroy, 1)
+@@ -4247,6 +4516,10 @@ DEFINE_DEVICE_REMOVE(vtpm, destroy, 1)
   * 1. add support for secondary consoles to xenconsoled
   * 2. dynamically add/remove qemu chardevs via qmp messages. */
  
@@ -395,7 +401,7 @@
  #undef DEFINE_DEVICE_REMOVE
  
  
/******************************************************************************/
-@@ -4249,6 +4522,7 @@ DEFINE_DEVICE_REMOVE(vtpm, destroy, 1)
+@@ -4256,6 +4529,7 @@ DEFINE_DEVICE_REMOVE(vtpm, destroy, 1)
   * libxl_device_disk_add
   * libxl_device_nic_add
   * libxl_device_vtpm_add
@@ -403,7 +409,7 @@
   */
  
  #define DEFINE_DEVICE_ADD(type)                                         \
-@@ -4280,6 +4554,9 @@ DEFINE_DEVICE_ADD(nic)
+@@ -4287,6 +4561,9 @@ DEFINE_DEVICE_ADD(nic)
  /* vtpm */
  DEFINE_DEVICE_ADD(vtpm)
  
@@ -413,7 +419,7 @@
  #undef DEFINE_DEVICE_ADD
  
  
/******************************************************************************/
-@@ -6822,6 +7099,20 @@ out:
+@@ -6829,6 +7106,20 @@ out:
      return rc;
  }
  
@@ -434,8 +440,10 @@
  /*
   * Local variables:
   * mode: C
---- a/tools/libxl/libxl.h
-+++ b/tools/libxl/libxl.h
+Index: xen-4.5.1-testing/tools/libxl/libxl.h
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/libxl.h
++++ xen-4.5.1-testing/tools/libxl/libxl.h
 @@ -1238,6 +1238,26 @@ libxl_device_vtpm *libxl_device_vtpm_lis
  int libxl_device_vtpm_getinfo(libxl_ctx *ctx, uint32_t domid,
                                 libxl_device_vtpm *vtpm, libxl_vtpminfo 
*vtpminfo);
@@ -491,8 +499,10 @@
  #endif /* LIBXL_H */
  
  /*
---- a/tools/libxl/libxl_create.c
-+++ b/tools/libxl/libxl_create.c
+Index: xen-4.5.1-testing/tools/libxl/libxl_create.c
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/libxl_create.c
++++ xen-4.5.1-testing/tools/libxl/libxl_create.c
 @@ -1141,6 +1141,7 @@ static void domcreate_rebuild_done(libxl
      libxl__multidev_begin(ao, &dcs->multidev);
      dcs->multidev.callback = domcreate_launch_dm;
@@ -501,8 +511,10 @@
      libxl__multidev_prepared(egc, &dcs->multidev, 0);
  
      return;
---- a/tools/libxl/libxl_device.c
-+++ b/tools/libxl/libxl_device.c
+Index: xen-4.5.1-testing/tools/libxl/libxl_device.c
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/libxl_device.c
++++ xen-4.5.1-testing/tools/libxl/libxl_device.c
 @@ -541,6 +541,7 @@ void libxl__multidev_prepared(libxl__egc
   * The following functions are defined:
   * libxl__add_disks
@@ -544,8 +556,10 @@
  
/******************************************************************************/
  
  int libxl__device_destroy(libxl__gc *gc, libxl__device *dev)
---- a/tools/libxl/libxl_internal.h
-+++ b/tools/libxl/libxl_internal.h
+Index: xen-4.5.1-testing/tools/libxl/libxl_internal.h
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/libxl_internal.h
++++ xen-4.5.1-testing/tools/libxl/libxl_internal.h
 @@ -1079,6 +1079,7 @@ _hidden int libxl__device_disk_setdefaul
  _hidden int libxl__device_nic_setdefault(libxl__gc *gc, libxl_device_nic *nic,
                                           uint32_t domid);
@@ -576,8 +590,10 @@
  /*----- device model creation -----*/
  
  /* First layer; wraps libxl__spawn_spawn. */
---- a/tools/libxl/libxl_types.idl
-+++ b/tools/libxl/libxl_types.idl
+Index: xen-4.5.1-testing/tools/libxl/libxl_types.idl
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/libxl_types.idl
++++ xen-4.5.1-testing/tools/libxl/libxl_types.idl
 @@ -540,6 +540,26 @@ libxl_device_channel = Struct("device_ch
             ])),
  ])
@@ -643,8 +659,10 @@
  libxl_vcpuinfo = Struct("vcpuinfo", [
      ("vcpuid", uint32),
      ("cpu", uint32),
---- a/tools/libxl/libxl_types_internal.idl
-+++ b/tools/libxl/libxl_types_internal.idl
+Index: xen-4.5.1-testing/tools/libxl/libxl_types_internal.idl
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/libxl_types_internal.idl
++++ xen-4.5.1-testing/tools/libxl/libxl_types_internal.idl
 @@ -22,6 +22,7 @@ libxl__device_kind = Enumeration("device
      (6, "VKBD"),
      (7, "CONSOLE"),
@@ -653,8 +671,10 @@
      ])
  
  libxl__console_backend = Enumeration("console_backend", [
---- a/tools/libxl/xl.h
-+++ b/tools/libxl/xl.h
+Index: xen-4.5.1-testing/tools/libxl/xl.h
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/xl.h
++++ xen-4.5.1-testing/tools/libxl/xl.h
 @@ -83,6 +83,9 @@ int main_channellist(int argc, char **ar
  int main_blockattach(int argc, char **argv);
  int main_blocklist(int argc, char **argv);
@@ -665,8 +685,10 @@
  int main_vtpmattach(int argc, char **argv);
  int main_vtpmlist(int argc, char **argv);
  int main_vtpmdetach(int argc, char **argv);
---- a/tools/libxl/xl_cmdimpl.c
-+++ b/tools/libxl/xl_cmdimpl.c
+Index: xen-4.5.1-testing/tools/libxl/xl_cmdimpl.c
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/xl_cmdimpl.c
++++ xen-4.5.1-testing/tools/libxl/xl_cmdimpl.c
 @@ -17,6 +17,7 @@
  #include "libxl_osdeps.h"
  
@@ -1139,8 +1161,10 @@
  int main_vtpmattach(int argc, char **argv)
  {
      int opt;
---- a/tools/libxl/xl_cmdtable.c
-+++ b/tools/libxl/xl_cmdtable.c
+Index: xen-4.5.1-testing/tools/libxl/xl_cmdtable.c
+===================================================================
+--- xen-4.5.1-testing.orig/tools/libxl/xl_cmdtable.c
++++ xen-4.5.1-testing/tools/libxl/xl_cmdtable.c
 @@ -372,6 +372,21 @@ struct cmd_spec cmd_table[] = {
        "Destroy a domain's virtual block device",
        "<Domain> <DevId>",

++++++ libxl.set-migration-constraints-from-cmdline.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -243,7 +243,7 @@
 ===================================================================
 --- xen-4.5.1-testing.orig/tools/libxl/libxl.c
 +++ xen-4.5.1-testing/tools/libxl/libxl.c
-@@ -944,7 +944,8 @@ static void domain_suspend_cb(libxl__egc
+@@ -951,7 +951,8 @@ static void domain_suspend_cb(libxl__egc
  
  }
  
@@ -253,7 +253,7 @@
                           const libxl_asyncop_how *ao_how)
  {
      AO_CREATE(ctx, domid, ao_how);
-@@ -965,8 +966,14 @@ int libxl_domain_suspend(libxl_ctx *ctx,
+@@ -972,8 +973,14 @@ int libxl_domain_suspend(libxl_ctx *ctx,
      dss->domid = domid;
      dss->fd = fd;
      dss->type = type;
@@ -270,7 +270,7 @@
  
      libxl__domain_suspend(egc, dss);
      return AO_INPROGRESS;
-@@ -975,6 +982,20 @@ int libxl_domain_suspend(libxl_ctx *ctx,
+@@ -982,6 +989,20 @@ int libxl_domain_suspend(libxl_ctx *ctx,
      return AO_ABORT(rc);
  }
  

++++++ local_attach_support_for_phy.patch ++++++
--- /var/tmp/diff_new_pack.6TTrYH/_old  2015-10-14 16:43:26.000000000 +0200
+++ /var/tmp/diff_new_pack.6TTrYH/_new  2015-10-14 16:43:26.000000000 +0200
@@ -14,7 +14,7 @@
 ===================================================================
 --- xen-4.5.1-testing.orig/tools/libxl/libxl.c
 +++ xen-4.5.1-testing/tools/libxl/libxl.c
-@@ -3053,6 +3053,16 @@ void libxl__device_disk_local_initiate_a
+@@ -3060,6 +3060,16 @@ void libxl__device_disk_local_initiate_a
  
      switch (disk->backend) {
          case LIBXL_DISK_BACKEND_PHY:
@@ -31,7 +31,7 @@
              LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, "locally attaching PHY disk %s",
                         disk->pdev_path);
              dev = disk->pdev_path;
-@@ -3132,7 +3142,7 @@ static void local_device_attach_cb(libxl
+@@ -3139,7 +3149,7 @@ static void local_device_attach_cb(libxl
      }
  
      dev = GCSPRINTF("/dev/%s", disk->vdev);
@@ -40,7 +40,7 @@
  
      rc = libxl__device_from_disk(gc, LIBXL_TOOLSTACK_DOMID, disk, &device);
      if (rc < 0)
-@@ -3172,6 +3182,7 @@ void libxl__device_disk_local_initiate_d
+@@ -3179,6 +3189,7 @@ void libxl__device_disk_local_initiate_d
      if (!dls->diskpath) goto out;
  
      switch (disk->backend) {
@@ -48,7 +48,7 @@
          case LIBXL_DISK_BACKEND_QDISK:
              if (disk->vdev != NULL) {
                  GCNEW(device);
-@@ -3189,7 +3200,6 @@ void libxl__device_disk_local_initiate_d
+@@ -3196,7 +3207,6 @@ void libxl__device_disk_local_initiate_d
              /* disk->vdev == NULL; fall through */
          default:
              /*



Reply via email to