Hello community,

here is the log from the commit of package xen for openSUSE:Factory checked in at 2017-03-09 01:35:29
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
 and      /work/SRC/openSUSE:Factory/.xen.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "xen"

Thu Mar  9 01:35:29 2017 rev:226 rq:461996 version:4.8.0_04

Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes  2017-02-03 20:05:39.761672482 +0100
+++ /work/SRC/openSUSE:Factory/.xen.new/xen.changes     2017-03-09 01:35:31.444041826 +0100
@@ -1,0 +2,48 @@
+Thu Mar  2 15:21:25 MST 2017 - carn...@suse.com
+
+- bsc#1027654 - XEN fails to build against glibc 2.25
+  glibc-2.25-compatibility-fix.patch
+  libxl.pvscsi.patch
+
+-------------------------------------------------------------------
+Thu Feb 16 11:42:23 UTC 2017 - oher...@suse.de
+
+- fate#316613: Refresh and enable libxl.pvscsi.patch
+
+-------------------------------------------------------------------
+Fri Feb 10 11:22:01 MST 2017 - carn...@suse.com
+
+- bsc#1024834 - VUL-0: CVE-2017-2620: xen: cirrus_bitblt_cputovideo
+  does not check if memory region is safe (XSA-209)
+  CVE-2017-2620-xsa209-qemut-cirrus_bitblt_cputovideo-does-not-check-if-memory-region-safe.patch
+
+-------------------------------------------------------------------
+Wed Feb  8 10:19:24 MST 2017 - carn...@suse.com
+
+- bsc#1023948 - [pvusb][sles12sp3][openqa] Segmentation fault
+  happened when adding usbctrl devices via xl
+  589b3272-libxl-dont-segfault-when-creating-domain-with-invalid-pvusb-device.patch
+
+-------------------------------------------------------------------
+Thu Feb  2 09:57:01 MST 2017 - carn...@suse.com
+
+- Upstream patches from Jan (bsc#1027519)
+  587d04d6-x86-xstate-fix-array-overrun-with-LWP.patch
+  587de4a9-x86emul-VEX-B-ignored-in-compat-mode.patch
+  5882129d-x86emul-LOCK-check-adjustments.patch
+  58821300-x86-segment-attribute-handling.patch
+  58873c1f-x86emul-correct-FPU-stub-asm-constraints.patch
+  58873c80-x86-hvm-do-not-set-msr_tsc_adjust-on-.patch
+  5887888f-credit2-use-the-correct-scratch-cpumask.patch
+  5887888f-credit2-never-consider-CPUs-outside-of-pool.patch
+  5887888f-credit2-fix-shutdown-suspend-with-cpupools.patch
+  5888b1b3-x86-emulate-dont-assume-addr_size-32-implies-protmode.patch
+
+-------------------------------------------------------------------
+Wed Feb  1 09:36:25 MST 2017 - carn...@suse.com
+
+- bsc#1023004 - VUL-0: CVE-2017-2615: qemu: display: cirrus: oob
+  access while doing bitblt copy backward mode
+  CVE-2017-2615-qemut-display-cirrus-oob-access-while-doing-bitblt-copy-backward-mode.patch
+
+-------------------------------------------------------------------
@@ -4,4 +52,3 @@
-- Xen 4.8 requires the acpica package (iasl) to build.
-  fate#322313 and fate#322150 require the acpica package to be
-  ported to aarch64 which is now down. Enable aarch64 in the spec
-  for building Xen on aarch64.
+- fate#322313 and fate#322150 require the acpica package ported to
+  aarch64 which Xen 4.8 needs to build. Temporarily disable aarch64
+  until these fates are complete.
@@ -45,0 +93,7 @@
+Wed Jan  4 14:59:04 MST 2017 - carn...@suse.com
+
+- bsc#1015169 - VUL-0: CVE-2016-9921, CVE-2016-9922: xen: qemu:
+  display: cirrus_vga: a divide by zero in cirrus_do_copy
+  CVE-2016-9921-qemut-display-cirrus_vga-divide-by-zero-in-cirrus_do_copy.patch
+
+-------------------------------------------------------------------
@@ -62,2 +116,2 @@
-- bsc#1014298 - VUL-0: xen: x86 PV guests may be able to mask
-  interrupts (XSA-202)
+- bsc#1014298 - VUL-0: CVE-2016-10024: xen: x86 PV guests may be
+  able to mask interrupts (XSA-202)
@@ -65,2 +119,2 @@
-- bsc#1014300 - VUL-0: xen: x86: missing NULL pointer check in
-  VMFUNC emulation (XSA-203)
+- bsc#1014300 - VUL-0: CVE-2016-10025: xen: x86: missing NULL
+  pointer check in VMFUNC emulation (XSA-203)

New:
----
  587d04d6-x86-xstate-fix-array-overrun-with-LWP.patch
  587de4a9-x86emul-VEX-B-ignored-in-compat-mode.patch
  5882129d-x86emul-LOCK-check-adjustments.patch
  58821300-x86-segment-attribute-handling.patch
  58873c1f-x86emul-correct-FPU-stub-asm-constraints.patch
  58873c80-x86-hvm-do-not-set-msr_tsc_adjust-on-.patch
  5887888f-credit2-fix-shutdown-suspend-with-cpupools.patch
  5887888f-credit2-never-consider-CPUs-outside-of-pool.patch
  5887888f-credit2-use-the-correct-scratch-cpumask.patch
  5888b1b3-x86-emulate-dont-assume-addr_size-32-implies-protmode.patch
  589b3272-libxl-dont-segfault-when-creating-domain-with-invalid-pvusb-device.patch
  CVE-2016-9921-qemut-display-cirrus_vga-divide-by-zero-in-cirrus_do_copy.patch
  CVE-2017-2615-qemut-display-cirrus-oob-access-while-doing-bitblt-copy-backward-mode.patch
  CVE-2017-2620-xsa209-qemut-cirrus_bitblt_cputovideo-does-not-check-if-memory-region-safe.patch
  glibc-2.25-compatibility-fix.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.qSTB51/_old  2017-03-09 01:35:38.990973044 +0100
+++ /var/tmp/diff_new_pack.qSTB51/_new  2017-03-09 01:35:38.994972478 +0100
@@ -152,7 +152,7 @@
 %endif
 %endif
 
-Version:        4.8.0_03
+Version:        4.8.0_04
 Release:        0
 Summary:        Xen Virtualization: Hypervisor (aka VMM aka Microkernel)
 License:        GPL-2.0
@@ -202,6 +202,17 @@
 Patch10:        585aa407-x86-HVM-NULL-check-before-using-VMFUNC-hook.patch
 Patch11:        585bd5fe-x86-emul-correct-VMFUNC-return-value-handling.patch
 Patch12:        586ba81c-x86-cpu-dont-update-this_cpu-for-guest-get_cpu_vendor.patch
+Patch13:        587d04d6-x86-xstate-fix-array-overrun-with-LWP.patch
+Patch14:        587de4a9-x86emul-VEX-B-ignored-in-compat-mode.patch
+Patch15:        5882129d-x86emul-LOCK-check-adjustments.patch
+Patch16:        58821300-x86-segment-attribute-handling.patch
+Patch17:        58873c1f-x86emul-correct-FPU-stub-asm-constraints.patch
+Patch18:        58873c80-x86-hvm-do-not-set-msr_tsc_adjust-on-.patch
+Patch19:        5887888f-credit2-use-the-correct-scratch-cpumask.patch
+Patch20:        5887888f-credit2-never-consider-CPUs-outside-of-pool.patch
+Patch21:        5887888f-credit2-fix-shutdown-suspend-with-cpupools.patch
+Patch22:        5888b1b3-x86-emulate-dont-assume-addr_size-32-implies-protmode.patch
+Patch23:        589b3272-libxl-dont-segfault-when-creating-domain-with-invalid-pvusb-device.patch
 # Upstream qemu-traditional patches
 Patch250:       VNC-Support-for-ExtendedKeyEvent-client-message.patch
 Patch251:       0001-net-move-the-tap-buffer-into-TAPState.patch
@@ -240,6 +251,9 @@
 Patch284:       CVE-2016-8667-qemut-dma-rc4030-divide-by-zero-error-in-set_next_tick.patch
 Patch285:       CVE-2016-8669-qemut-char-divide-by-zero-error-in-serial_update_parameters.patch
 Patch286:       CVE-2016-8910-qemut-net-rtl8139-infinite-loop-while-transmit-in-Cplus-mode.patch
+Patch287:       CVE-2016-9921-qemut-display-cirrus_vga-divide-by-zero-in-cirrus_do_copy.patch
+Patch288:       CVE-2017-2615-qemut-display-cirrus-oob-access-while-doing-bitblt-copy-backward-mode.patch
+Patch289:       CVE-2017-2620-xsa209-qemut-cirrus_bitblt_cputovideo-does-not-check-if-memory-region-safe.patch
 # qemu-traditional patches that are not upstream
 Patch350:       blktap.patch
 Patch351:       cdrom-removable.patch
@@ -277,6 +291,7 @@
 Patch421:       xenpaging.doc.patch
 Patch422:       stubdom-have-iovec.patch
 Patch423:       vif-route.patch
+Patch424:       glibc-2.25-compatibility-fix.patch
 # Other bug fixes or features
 Patch451:       xenconsole-no-multiple-connections.patch
 Patch452:       hibernate.patch
@@ -526,6 +541,17 @@
 %patch10 -p1
 %patch11 -p1
 %patch12 -p1
+%patch13 -p1
+%patch14 -p1
+%patch15 -p1
+%patch16 -p1
+%patch17 -p1
+%patch18 -p1
+%patch19 -p1
+%patch20 -p1
+%patch21 -p1
+%patch22 -p1
+%patch23 -p1
 # Upstream qemu patches
 %patch250 -p1
 %patch251 -p1
@@ -564,6 +590,9 @@
 %patch284 -p1
 %patch285 -p1
 %patch286 -p1
+%patch287 -p1
+%patch288 -p1
+%patch289 -p1
 # Qemu traditional
 %patch350 -p1
 %patch351 -p1
@@ -601,6 +630,7 @@
 %patch421 -p1
 %patch422 -p1
 %patch423 -p1
+%patch424 -p1
 # Other bug fixes or features
 %patch451 -p1
 %patch452 -p1
@@ -612,7 +642,7 @@
 %patch458 -p1
 %patch459 -p1
 # libxl.pvscsi.patch
-#%patch460 -p1
+%patch460 -p1
 %patch461 -p1
 %patch462 -p1
 %patch463 -p1

++++++ 587d04d6-x86-xstate-fix-array-overrun-with-LWP.patch ++++++
# Commit fe0d67576e335c02becf1cea8e67005509fa90b6
# Date 2017-01-16 17:37:26 +0000
# Author Andrew Cooper <andrew.coop...@citrix.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
x86/xstate: Fix array overrun on hardware with LWP

c/s da62246e4c "x86/xsaves: enable xsaves/xrstors/xsavec in xen" introduced
setup_xstate_features() to allocate and fill xstate_offsets[] and
xstate_sizes[].

However, fls() casts xfeature_mask to 32bits which truncates LWP out of the
calculation.  As a result, the arrays are allocated too short, and the cpuid
infrastructure reads off the end of them when calculating xstate_size for the
guest.

On one test system, this results in 0x3fec83c0 being returned as the maximum
size of an xsave area, which surprisingly appears not to bother Windows or
Linux too much.  I suspect they both use current size based on xcr0, which Xen
forwards from real hardware.

Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>

--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -92,7 +92,7 @@ static int setup_xstate_features(bool_t
 
     if ( bsp )
     {
-        xstate_features = fls(xfeature_mask);
+        xstate_features = flsl(xfeature_mask);
         xstate_offsets = xzalloc_array(unsigned int, xstate_features);
         if ( !xstate_offsets )
             return -ENOMEM;
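
A small user-space sketch (not Xen code; fls()/flsl() below are stand-ins
modelled on Xen's bit-scan helpers, and an LP64 build is assumed) of the
truncation described above, with LWP being xstate bit 62:

  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical stand-ins: position of the highest set bit, 1-based. */
  static int fls(unsigned int x)   { return x ? 32 - __builtin_clz(x)  : 0; }
  static int flsl(unsigned long x) { return x ? 64 - __builtin_clzl(x) : 0; }

  int main(void)
  {
      /* x87, SSE and AVX plus LWP (bit 62), as on an AMD CPU with LWP. */
      uint64_t xfeature_mask = (1ULL << 0) | (1ULL << 1) | (1ULL << 2) | (1ULL << 62);

      /* fls() only sees the low 32 bits, so LWP is lost and the arrays
       * end up sized for 3 features instead of 63. */
      printf("fls:  %d\n", fls(xfeature_mask));   /* prints 3  */
      printf("flsl: %d\n", flsl(xfeature_mask));  /* prints 63 */
      return 0;
  }
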
++++++ 587de4a9-x86emul-VEX-B-ignored-in-compat-mode.patch ++++++
# Commit 89c76ee7f60777b81c8fd0475a6af7c84e72a791
# Date 2017-01-17 10:32:25 +0100
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86emul: VEX.B is ignored in compatibility mode

While VEX.R and VEX.X are guaranteed to be 1 in compatibility mode
(and hence a respective mode_64bit() check can be dropped), VEX.B can
be encoded as zero, but would be ignored by the processor. Since we
emulate instructions in 64-bit mode (except possibly in the test
harness), we need to force the bit to 1 in order to not act on the
wrong {X,Y,Z}MM register (which has no bad effect on 32-bit test
harness builds, as there the bit would again be ignored by the
hardware, and would by default be expected to be 1 anyway).

We must not, however, fiddle with the high bit of VEX.VVVV in the
decode phase, as that would undermine the checking of instructions
requiring the field to be all ones independent of mode. This is
being enforced in copy_REX_VEX() instead.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

# Commit beb82042447c5d6e7073d816d6afc25c5a423cde
# Date 2017-01-25 15:08:59 +0100
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86emul: correct VEX/XOP/EVEX operand size handling for 16-bit code

Operand size defaults to 32 bits in that case, but would not have been
set that way in the absence of an operand size override.

Reported-by: Wei Liu <wei.l...@citrix.com> (by AFL fuzzing)
Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -331,7 +331,11 @@ union vex {
 
 #define copy_REX_VEX(ptr, rex, vex) do { \
     if ( (vex).opcx != vex_none ) \
+    { \
+        if ( !mode_64bit() ) \
+            vex.reg |= 8; \
         ptr[0] = 0xc4, ptr[1] = (vex).raw[0], ptr[2] = (vex).raw[1]; \
+    } \
     else if ( mode_64bit() ) \
         ptr[1] = rex | REX_PREFIX; \
 } while (0)
@@ -2027,6 +2031,11 @@ x86_decode(
             case 8:
                 /* VEX / XOP / EVEX */
                 generate_exception_if(rex_prefix || vex.pfx, EXC_UD, -1);
+                /*
+                 * With operand size override disallowed (see above), op_bytes
+                 * should not have changed from its default.
+                 */
+                ASSERT(op_bytes == def_op_bytes);
 
                 vex.raw[0] = modrm;
                 if ( b == 0xc5 )
@@ -2053,6 +2062,12 @@ x86_decode(
                             op_bytes = 8;
                         }
                     }
+                    else
+                    {
+                        /* Operand size fixed at 4 (no override via W bit). */
+                        op_bytes = 4;
+                        vex.b = 1;
+                    }
                     switch ( b )
                     {
                     case 0x62:
@@ -2071,7 +2086,7 @@ x86_decode(
                         break;
                     }
                 }
-                if ( mode_64bit() && !vex.r )
+                if ( !vex.r )
                     rex_prefix |= REX_R;
 
                 ext = vex.opcx;
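
As an illustration only (a toy decode, not the emulator's actual code path):
the register-extension bit carried in a VEX prefix is stored inverted, so a
raw VEX.B of 0 means "+8" when decoding under 64-bit rules, which is why the
patch forces vex.b to 1 outside 64-bit mode:

  #include <stdio.h>

  int main(void)
  {
      /* Second byte of a 3-byte VEX prefix as a 32-bit guest might encode
       * it: bit 7 = ~R, bit 6 = ~X, bit 5 = ~B, bits 4..0 = mmmmm.
       * Here ~B happens to be 0, which hardware ignores outside 64-bit mode. */
      unsigned char vex1 = 0x01;
      unsigned int modrm_rm = 3;          /* low 3 bits of the register number */
      unsigned int b = (vex1 >> 5) & 1;

      /* Decoding with 64-bit rules picks register 8 + 3 = 11 -- wrong for a
       * 32-bit guest, so the emulator acted on the wrong {X,Y,Z}MM register. */
      printf("without fixup: xmm%u\n", ((!b) << 3) | modrm_rm);

      b = 1;                              /* what the patch forces */
      printf("with fixup:    xmm%u\n", ((!b) << 3) | modrm_rm);
      return 0;
  }
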
++++++ 5882129d-x86emul-LOCK-check-adjustments.patch ++++++
# Commit f2d4f4ba80de8a03a1b0f300d271715a88a8433d
# Date 2017-01-20 14:37:33 +0100
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86emul: LOCK check adjustments

BT, being encoded as DstBitBase just like BT{C,R,S}, nevertheless does
not write its (register or memory) operand and hence also doesn't allow
a LOCK prefix to be used.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -5065,6 +5065,7 @@ x86_emulate(
     }
 
     case X86EMUL_OPC(0x0f, 0xa3): bt: /* bt */
+        generate_exception_if(lock_prefix, EXC_UD, 0);
         emulate_2op_SrcV_nobyte("bt", src, dst, _regs.eflags);
         dst.type = OP_NONE;
         break;
++++++ 58821300-x86-segment-attribute-handling.patch ++++++
# Commit 366ff5f1b3252f9069d5aedb2ffc2567bb0a37c9
# Date 2017-01-20 14:39:12 +0100
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86: segment attribute handling adjustments

Null selector loads into SS (possible in 64-bit mode only, and only in
rings other than ring 3) must not alter SS.DPL. (This was found to be
an issue on KVM, and fixed in Linux commit 33ab91103b.)

Further arch_set_info_hvm_guest() didn't make sure that the ASSERT()s
in hvm_set_segment_register() wouldn't trigger: Add further checks, but
tolerate (adjust) clear accessed (CS, SS, DS, ES) and busy (TR) bits.

Finally the setting of the accessed bits for user segments was lost by
commit dd5c85e312 ("x86/hvm: Reposition the modification of raw segment
data from the VMCB/VMCS"), yet VMX requires them to be set for usable
segments. Add respective ASSERT()s (the only path not properly setting
them was arch_set_info_hvm_guest()).

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1315,16 +1315,24 @@ static inline int check_segment(struct s
         return 0;
     }
 
-    if ( seg != x86_seg_tr && !reg->attr.fields.s )
+    if ( seg == x86_seg_tr )
     {
-        gprintk(XENLOG_ERR,
-                "System segment provided for a code or data segment\n");
-        return -EINVAL;
-    }
+        if ( reg->attr.fields.s )
+        {
+            gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
+            return -EINVAL;
+        }
 
-    if ( seg == x86_seg_tr && reg->attr.fields.s )
+        if ( reg->attr.fields.type != SYS_DESC_tss_busy )
+        {
+            gprintk(XENLOG_ERR, "Non-32-bit-TSS segment provided for TR\n");
+            return -EINVAL;
+        }
+    }
+    else if ( !reg->attr.fields.s )
     {
-        gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
+        gprintk(XENLOG_ERR,
+                "System segment provided for a code or data segment\n");
         return -EINVAL;
     }
 
@@ -1387,7 +1395,8 @@ int arch_set_info_hvm_guest(struct vcpu
 #define SEG(s, r) ({                                                        \
     s = (struct segment_register){ .base = (r)->s ## _base,                 \
                                    .limit = (r)->s ## _limit,               \
-                                   .attr.bytes = (r)->s ## _ar };           \
+                                   .attr.bytes = (r)->s ## _ar |            \
+                                       (x86_seg_##s != x86_seg_tr ? 1 : 2) }; \
     check_segment(&s, x86_seg_ ## s); })
 
         rc = SEG(cs, regs);
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1360,6 +1360,11 @@ protmode_load_seg(
         }
         memset(sreg, 0, sizeof(*sreg));
         sreg->sel = sel;
+
+        /* Since CPL == SS.DPL, we need to put back DPL. */
+        if ( seg == x86_seg_ss )
+            sreg->attr.fields.dpl = sel;
+
         return X86EMUL_OKAY;
     }
 
++++++ 58873c1f-x86emul-correct-FPU-stub-asm-constraints.patch ++++++
# Commit 3dfbb8df335f12297cfc7db9d3df2b74c474921b
# Date 2017-01-24 12:35:59 +0100
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86emul: correct FPU stub asm() constraints

Properly inform the compiler about fic's role as both an input (its
insn_bytes field) and output (its exn_raised field).

Take the opportunity and bring emulate_fpu_insn_stub() more in line
with emulate_fpu_insn_stub_eflags().

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -874,15 +874,15 @@ do{ struct fpu_insn_ctxt fic;
     put_fpu(&fic);                                      \
 } while (0)
 
-#define emulate_fpu_insn_stub(_bytes...)                                \
+#define emulate_fpu_insn_stub(bytes...)                                 \
 do {                                                                    \
-    uint8_t *buf = get_stub(stub);                                      \
-    unsigned int _nr = sizeof((uint8_t[]){ _bytes });                   \
-    struct fpu_insn_ctxt fic = { .insn_bytes = _nr };                   \
-    memcpy(buf, ((uint8_t[]){ _bytes, 0xc3 }), _nr + 1);                \
-    get_fpu(X86EMUL_FPU_fpu, &fic);                                     \
-    stub.func();                                                        \
-    put_fpu(&fic);                                                      \
+    unsigned int nr_ = sizeof((uint8_t[]){ bytes });                    \
+    struct fpu_insn_ctxt fic_ = { .insn_bytes = nr_ };                  \
+    memcpy(get_stub(stub), ((uint8_t[]){ bytes, 0xc3 }), nr_ + 1);      \
+    get_fpu(X86EMUL_FPU_fpu, &fic_);                                    \
+    asm volatile ( "call *%[stub]" : "+m" (fic_) :                      \
+                   [stub] "rm" (stub.func) );                           \
+    put_fpu(&fic_);                                                     \
     put_stub(stub);                                                     \
 } while (0)
 
@@ -897,7 +897,7 @@ do {
                    "call *%[func];"                                     \
                    _POST_EFLAGS("[eflags]", "[mask]", "[tmp]")          \
                    : [eflags] "+g" (_regs.eflags),                      \
-                     [tmp] "=&r" (tmp_)                                 \
+                     [tmp] "=&r" (tmp_), "+m" (fic_)                    \
                    : [func] "rm" (stub.func),                           \
                      [mask] "i" (EFLG_ZF|EFLG_PF|EFLG_CF) );            \
     put_fpu(&fic_);                                                     \
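
For readers less familiar with extended asm constraints, a stand-alone sketch
(not the emulator macro itself; plain GCC extended asm on x86-64 is assumed)
of what listing the context structure as "+m" buys here:

  #include <stdio.h>

  struct fpu_ctxt {
      unsigned int insn_bytes;   /* read by the stub    */
      unsigned int exn_raised;   /* written by the stub */
  };

  int main(void)
  {
      struct fpu_ctxt fic = { .insn_bytes = 2 };
      unsigned int tmp;

      /* "+m" on the whole struct tells the compiler fic's in-memory contents
       * must be up to date before the asm runs (insn_bytes is an input) and
       * may have changed afterwards (exn_raised is an output) -- the
       * information the patch adds for the real stub call. */
      asm ( "movl %[bytes], %[tmp]\n\t"
            "movl %[tmp], %[exn]"
            : [exn] "=m" (fic.exn_raised), [tmp] "=&r" (tmp), "+m" (fic)
            : [bytes] "m" (fic.insn_bytes) );

      printf("exn_raised = %u\n", fic.exn_raised);   /* prints 2 */
      return 0;
  }
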
++++++ 58873c80-x86-hvm-do-not-set-msr_tsc_adjust-on-.patch ++++++
# Commit 98297f09bd07bb63407909aae1d309d8adeb572e
# Date 2017-01-24 12:37:36 +0100
# Author Joao Martins <joao.m.mart...@oracle.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/hvm: do not set msr_tsc_adjust on hvm_set_guest_tsc_fixed

Commit 6e03363 ("x86: Implement TSC adjust feature for HVM guest")
implemented the TSC_ADJUST MSR for HVM guests. However, while booting
an HVM guest the boot CPU would have a value set to delta_tsc -
guest tsc while secondary CPUs would have 0. For example one can
observe:
 $ xen-hvmctx 17 | grep tsc_adjust
 TSC_ADJUST: tsc_adjust ff9377dfef47fe66
 TSC_ADJUST: tsc_adjust 0
 TSC_ADJUST: tsc_adjust 0
 TSC_ADJUST: tsc_adjust 0

Upcoming Linux 4.10 now validates whether this MSR is correct and
adjusts it accordingly under the following conditions: values < 0
(our case for CPU 0), values != 0, or values > 7FFFFFFF. Under these
conditions it force-sets the MSR to 0, and does the same for CPUs
whose value doesn't match the others. If this MSR is not correct we
would see messages such as:

[Firmware Bug]: TSC ADJUST: CPU0: -30517044286984129 force to 0

And HVM guests supporting TSC_ADJUST (requiring at least an Intel
Haswell) won't boot.

Our current vCPU 0 value is incorrect, and the Intel SDM section
"Time-Stamp Counter Adjustment" states that "On RESET, the value
of the IA32_TSC_ADJUST MSR is 0." Hence we should set it to 0 and be
consistent across multiple vCPUs. Perhaps this MSR should only be
changed by the guest, which already happens through the
hvm_set_guest_tsc_adjust(..) routines (see below). After this patch
guests running Linux 4.10 will see a valid IA32_TSC_ADJUST MSR of value
0 for all CPUs and are able to boot.

On the same section of the spec ("Time-Stamp Counter Adjustment") it is
also stated:
"If an execution of WRMSR to the IA32_TIME_STAMP_COUNTER MSR
 adds (or subtracts) value X from the TSC, the logical processor also
 adds (or subtracts) value X from the IA32_TSC_ADJUST MSR.

 Unlike the TSC, the value of the IA32_TSC_ADJUST MSR changes only in
 response to WRMSR (either to the MSR itself, or to the
 IA32_TIME_STAMP_COUNTER MSR). Its value does not otherwise change as
 time elapses. Software seeking to adjust the TSC can do so by using
 WRMSR to write the same value to the IA32_TSC_ADJUST MSR on each logical
 processor."

This suggests these MSR values should only be changed by the guest, i.e.
through write-intercepted MSRs. We keep the IA32_TSC MSR logic such that
writes accommodate adjustments to TSC_ADJUST, hence no functional change
in msr_tsc_adjust for the IA32_TSC MSR. However, we do that in a separate
routine, namely hvm_set_guest_tsc_msr, instead of through
hvm_set_guest_tsc(...).

Signed-off-by: Joao Martins <joao.m.mart...@oracle.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -387,13 +387,20 @@ void hvm_set_guest_tsc_fixed(struct vcpu
     }
 
     delta_tsc = guest_tsc - tsc;
-    v->arch.hvm_vcpu.msr_tsc_adjust += delta_tsc
-                          - v->arch.hvm_vcpu.cache_tsc_offset;
     v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
 }
 
+static void hvm_set_guest_tsc_msr(struct vcpu *v, u64 guest_tsc)
+{
+    uint64_t tsc_offset = v->arch.hvm_vcpu.cache_tsc_offset;
+
+    hvm_set_guest_tsc(v, guest_tsc);
+    v->arch.hvm_vcpu.msr_tsc_adjust += v->arch.hvm_vcpu.cache_tsc_offset
+                          - tsc_offset;
+}
+
 void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
 {
     v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
@@ -3940,7 +3947,7 @@ int hvm_msr_write_intercept(unsigned int
         break;
 
     case MSR_IA32_TSC:
-        hvm_set_guest_tsc(v, msr_content);
+        hvm_set_guest_tsc_msr(v, msr_content);
         break;
 
     case MSR_IA32_TSC_ADJUST:
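
A toy model (plain C, not Xen code; names are simplified) of the rule quoted
from the SDM: a guest WRMSR to IA32_TSC that moves the TSC by X must move
IA32_TSC_ADJUST by the same X, while Xen's own TSC setup must leave
IA32_TSC_ADJUST alone, which is the split the patch introduces:

  #include <stdint.h>
  #include <stdio.h>

  struct vtsc {
      int64_t cache_tsc_offset;   /* guest_tsc - host_tsc                */
      int64_t msr_tsc_adjust;     /* virtual IA32_TSC_ADJUST, 0 on reset */
  };

  /* vCPU setup/restore path: no IA32_TSC_ADJUST side effect. */
  static void set_guest_tsc(struct vtsc *v, uint64_t host_tsc, uint64_t guest_tsc)
  {
      v->cache_tsc_offset = (int64_t)(guest_tsc - host_tsc);
  }

  /* Guest WRMSR path: the adjust MSR follows the delta X. */
  static void guest_wrmsr_tsc(struct vtsc *v, uint64_t host_tsc, uint64_t guest_tsc)
  {
      int64_t old = v->cache_tsc_offset;

      set_guest_tsc(v, host_tsc, guest_tsc);
      v->msr_tsc_adjust += v->cache_tsc_offset - old;
  }

  int main(void)
  {
      struct vtsc v = { 0, 0 };

      set_guest_tsc(&v, 1000, 0);       /* boot-time setup: adjust stays 0 */
      printf("after setup: adjust=%lld\n", (long long)v.msr_tsc_adjust);

      guest_wrmsr_tsc(&v, 2000, 5000);  /* guest WRMSR: adjust moves by X  */
      printf("after wrmsr: adjust=%lld\n", (long long)v.msr_tsc_adjust);
      return 0;
  }
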
++++++ 5887888f-credit2-fix-shutdown-suspend-with-cpupools.patch ++++++
# Commit 7478ebe1602e6bb8242a18840b15757a1d5ad18a
# Date 2017-01-24 17:02:07 +0000
# Author Dario Faggioli <dario.faggi...@citrix.com>
# Committer George Dunlap <george.dun...@citrix.com>
xen: credit2: fix shutdown/suspend when playing with cpupools.

In fact, during shutdown/suspend, we temporarily move all
the vCPUs to the BSP (i.e., pCPU 0, as of now). For Credit2
domains, we call csched2_vcpu_migrate(), which expects to find
the target pCPU in the domain's pool.

Therefore, if Credit2 is the default scheduler and we have
removed pCPU 0 from cpupool0, shutdown/suspend fails like
this:

 RIP:    e008:[<ffff82d08012906d>] sched_credit2.c#migrate+0x274/0x2d1
 Xen call trace:
    [<ffff82d08012906d>] sched_credit2.c#migrate+0x274/0x2d1
    [<ffff82d080129138>] sched_credit2.c#csched2_vcpu_migrate+0x6e/0x86
    [<ffff82d08012c468>] schedule.c#vcpu_move_locked+0x69/0x6f
    [<ffff82d08012ec14>] cpu_disable_scheduler+0x3d7/0x430
    [<ffff82d08019669b>] __cpu_disable+0x299/0x2b0
    [<ffff82d0801012f8>] cpu.c#take_cpu_down+0x2f/0x38
    [<ffff82d0801312d8>] stop_machine.c#stopmachine_action+0x7f/0x8d
    [<ffff82d0801330b8>] tasklet.c#do_tasklet_work+0x74/0xab
    [<ffff82d0801333ed>] do_tasklet+0x66/0x8b
    [<ffff82d080166a73>] domain.c#idle_loop+0x3b/0x5e

 ****************************************
 Panic on CPU 8:
 Assertion 'svc->vcpu->processor < nr_cpu_ids' failed at sched_credit2.c:1729
 ****************************************

On the other hand, if Credit2 is the scheduler of another
pool, when trying (still during shutdown/suspend) to move
the vCPUs of the Credit2 domains to pCPU 0, it figures
out that pCPU 0 is not a Credit2 pCPU, and fails like this:

 RIP:    e008:[<ffff82d08012916b>] sched_credit2.c#csched2_vcpu_migrate+0xa1/0x107
 Xen call trace:
    [<ffff82d08012916b>] sched_credit2.c#csched2_vcpu_migrate+0xa1/0x107
    [<ffff82d08012c4e9>] schedule.c#vcpu_move_locked+0x69/0x6f
    [<ffff82d08012edfc>] cpu_disable_scheduler+0x3d7/0x430
    [<ffff82d08019687b>] __cpu_disable+0x299/0x2b0
    [<ffff82d0801012f8>] cpu.c#take_cpu_down+0x2f/0x38
    [<ffff82d0801314c0>] stop_machine.c#stopmachine_action+0x7f/0x8d
    [<ffff82d0801332a0>] tasklet.c#do_tasklet_work+0x74/0xab
    [<ffff82d0801335d5>] do_tasklet+0x66/0x8b
    [<ffff82d080166c53>] domain.c#idle_loop+0x3b/0x5e

The solution is to recognise this specific situation inside
csched2_vcpu_migrate() and, considering it is something temporary
that only happens during shutdown/suspend, deal with it quickly.

Then, in the resume path, in restore_vcpu_affinity(), things
are set back to normal, and a new v->processor is chosen, for
each vCPU, from the proper set of pCPUs (i.e., the ones of
the proper cpupool).

Signed-off-by: Dario Faggioli <dario.faggi...@citrix.com>
Acked-by: George Dunlap <george.dun...@citrix.com>

# Commit ad5808d9057248e7879cf375662f0a449fff7005
# Date 2017-02-01 14:44:51 +0000
# Author Dario Faggioli <dario.faggi...@citrix.com>
# Committer George Dunlap <george.dun...@citrix.com>
xen: credit2: non Credit2 pCPUs are ok during shutdown/suspend.

Commit 7478ebe1602e6 ("xen: credit2: fix shutdown/suspend
when playing with cpupools"), while doing the right thing
for actual code, forgot to update the ASSERT()s accordingly,
in csched2_vcpu_migrate().

In fact, as stated there already, during shutdown/suspend,
we must allow a Credit2 vCPU to temporarily migrate to a
non Credit2 BSP, without any ASSERT() triggering.

Move them down, after the check for whether or not we are
shutting down, to where the assumption that the pCPU must be
a valid Credit2 one actually holds.

Signed-off-by: Dario Faggioli <dario.faggi...@citrix.com>

--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1946,10 +1946,40 @@ static void
 csched2_vcpu_migrate(
     const struct scheduler *ops, struct vcpu *vc, unsigned int new_cpu)
 {
+    struct domain *d = vc->domain;
     struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
     struct csched2_runqueue_data *trqd;
+    s_time_t now = NOW();
 
-    /* Check if new_cpu is valid */
+    /*
+     * Being passed a target pCPU which is outside of our cpupool is only
+     * valid if we are shutting down (or doing ACPI suspend), and we are
+     * moving everyone to BSP, no matter whether or not BSP is inside our
+     * cpupool.
+     *
+     * And since there indeed is the chance that it is not part of it, all
+     * we must do is remove _and_ unassign the vCPU from any runqueue, as
+     * well as updating v->processor with the target, so that the suspend
+     * process can continue.
+     *
+     * It will then be during resume that a new, meaningful, value for
+     * v->processor will be chosen, and during actual domain unpause that
+     * the vCPU will be assigned to and added to the proper runqueue.
+     */
+    if ( unlikely(!cpumask_test_cpu(new_cpu, cpupool_domain_cpumask(d))) )
+    {
+        ASSERT(system_state == SYS_STATE_suspend);
+        if ( __vcpu_on_runq(svc) )
+        {
+            __runq_remove(svc);
+            update_load(ops, svc->rqd, NULL, -1, now);
+        }
+        __runq_deassign(svc);
+        vc->processor = new_cpu;
+        return;
+    }
+
+    /* If here, new_cpu must be a valid Credit2 pCPU, and in our affinity. */
     ASSERT(cpumask_test_cpu(new_cpu, &CSCHED2_PRIV(ops)->initialized));
     ASSERT(cpumask_test_cpu(new_cpu, vc->cpu_hard_affinity));
 
@@ -1964,7 +1994,7 @@ csched2_vcpu_migrate(
      * pointing to a pcpu where we can't run any longer.
      */
     if ( trqd != svc->rqd )
-        migrate(ops, svc, trqd, NOW());
+        migrate(ops, svc, trqd, now);
     else
         vc->processor = new_cpu;
 }
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -633,8 +633,11 @@ void vcpu_force_reschedule(struct vcpu *
 
 void restore_vcpu_affinity(struct domain *d)
 {
+    unsigned int cpu = smp_processor_id();
     struct vcpu *v;
 
+    ASSERT(system_state == SYS_STATE_resume);
+
     for_each_vcpu ( d, v )
     {
         spinlock_t *lock = vcpu_schedule_lock_irq(v);
@@ -643,18 +646,34 @@ void restore_vcpu_affinity(struct domain
         {
             cpumask_copy(v->cpu_hard_affinity, v->cpu_hard_affinity_saved);
             v->affinity_broken = 0;
+
         }
 
-        if ( v->processor == smp_processor_id() )
+        /*
+         * During suspend (in cpu_disable_scheduler()), we moved every vCPU
+         * to BSP (which, as of now, is pCPU 0), as a temporary measure to
+         * allow the nonboot processors to have their data structure freed
+         * and go to sleep. But nothing guardantees that the BSP is a valid
+         * pCPU for a particular domain.
+         *
+         * Therefore, here, before actually unpausing the domains, we should
+         * set v->processor of each of their vCPUs to something that will
+         * make sense for the scheduler of the cpupool in which they are in.
+         */
+        cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+                    cpupool_domain_cpumask(v->domain));
+        v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
+
+        if ( v->processor == cpu )
         {
             set_bit(_VPF_migrating, &v->pause_flags);
-            vcpu_schedule_unlock_irq(lock, v);
+            spin_unlock_irq(lock);;
             vcpu_sleep_nosync(v);
             vcpu_migrate(v);
         }
         else
         {
-            vcpu_schedule_unlock_irq(lock, v);
+            spin_unlock_irq(lock);
         }
     }
 
++++++ 5887888f-credit2-never-consider-CPUs-outside-of-pool.patch ++++++
# Commit e7191920261d20e52ca4c06a03589a1155981b04
# Date 2017-01-24 17:02:07 +0000
# Author Dario Faggioli <dario.faggi...@citrix.com>
# Committer George Dunlap <george.dun...@citrix.com>
xen: credit2: never consider CPUs outside of our cpupool.

In fact, relying on the mask of what pCPUs belong to
which Credit2 runqueue is not enough. If we only do that,
when Credit2 is the boot scheduler, we may ASSERT() or
panic when moving a pCPU from Pool-0 to another cpupool.

This is because pCPUs outside of any pool are considered
part of cpupool0. This puts us at risk of crash when those
same pCPUs are added to another pool and something
different than the idle domain is found to be running
on them.

Note that, even if we prevent the above from happening (which
is the purpose of this patch), things are still pretty bad
when we remove a pCPU from Pool-0:
- in Credit1, we do *not* update prv->ncpus and
  prv->credit, which means we're considering the wrong
  total credits when doing accounting;
- in Credit2, the pCPU remains part of one runqueue,
  and is hence at least considered during load balancing,
  even if no vCPU should really run there.

In Credit1, this "only" causes skewed accounting and
no crashes because there is a lot of `cpumask_and`ing
going on with the cpumask of the domains' cpupool
(which, BTW, comes at a price).

A quick and not too involved (and easily backportable)
solution for Credit2 is to do exactly the same.

Signed-off-by: Dario Faggioli <dario.faggi...@citrix.com>
Acked-by: George Dunlap <george.dun...@citrix.com>

--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -510,19 +510,22 @@ void smt_idle_mask_clear(unsigned int cp
  */
 static int get_fallback_cpu(struct csched2_vcpu *svc)
 {
-    int fallback_cpu, cpu = svc->vcpu->processor;
+    struct vcpu *v = svc->vcpu;
+    int cpu = v->processor;
 
-    if ( likely(cpumask_test_cpu(cpu, svc->vcpu->cpu_hard_affinity)) )
-        return cpu;
+    cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+                cpupool_domain_cpumask(v->domain));
 
-    cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
-                &svc->rqd->active);
-    fallback_cpu = cpumask_first(cpumask_scratch_cpu(cpu));
-    if ( likely(fallback_cpu < nr_cpu_ids) )
-        return fallback_cpu;
+    if ( likely(cpumask_test_cpu(cpu, cpumask_scratch_cpu(cpu))) )
+        return cpu;
 
-    cpumask_and(cpumask_scratch, svc->vcpu->cpu_hard_affinity,
-                cpupool_domain_cpumask(svc->vcpu->domain));
+    if ( likely(cpumask_intersects(cpumask_scratch_cpu(cpu),
+                                   &svc->rqd->active)) )
+    {
+        cpumask_and(cpumask_scratch_cpu(cpu), &svc->rqd->active,
+                    cpumask_scratch_cpu(cpu));
+        return cpumask_first(cpumask_scratch_cpu(cpu));
+    }
 
     ASSERT(!cpumask_empty(cpumask_scratch_cpu(cpu)));
 
@@ -940,6 +943,9 @@ runq_tickle(const struct scheduler *ops,
                     (unsigned char *)&d);
     }
 
+    cpumask_and(cpumask_scratch_cpu(cpu), new->vcpu->cpu_hard_affinity,
+                cpupool_domain_cpumask(new->vcpu->domain));
+
     /*
      * First of all, consider idle cpus, checking if we can just
      * re-use the pcpu where we were running before.
@@ -952,7 +958,7 @@ runq_tickle(const struct scheduler *ops,
         cpumask_andnot(&mask, &rqd->idle, &rqd->smt_idle);
     else
         cpumask_copy(&mask, &rqd->smt_idle);
-    cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
+    cpumask_and(&mask, &mask, cpumask_scratch_cpu(cpu));
     i = cpumask_test_or_cycle(cpu, &mask);
     if ( i < nr_cpu_ids )
     {
@@ -967,7 +973,7 @@ runq_tickle(const struct scheduler *ops,
      * gone through the scheduler yet.
      */
     cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
-    cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
+    cpumask_and(&mask, &mask, cpumask_scratch_cpu(cpu));
     i = cpumask_test_or_cycle(cpu, &mask);
     if ( i < nr_cpu_ids )
     {
@@ -983,7 +989,7 @@ runq_tickle(const struct scheduler *ops,
      */
     cpumask_andnot(&mask, &rqd->active, &rqd->idle);
     cpumask_andnot(&mask, &mask, &rqd->tickled);
-    cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
+    cpumask_and(&mask, &mask, cpumask_scratch_cpu(cpu));
     if ( cpumask_test_cpu(cpu, &mask) )
     {
         cur = CSCHED2_VCPU(curr_on_cpu(cpu));
@@ -1525,6 +1531,9 @@ csched2_cpu_pick(const struct scheduler
         goto out;
     }
 
+    cpumask_and(cpumask_scratch_cpu(cpu), vc->cpu_hard_affinity,
+                cpupool_domain_cpumask(vc->domain));
+
     /*
      * First check to see if we're here because someone else suggested a place
      * for us to move.
@@ -1536,13 +1545,13 @@ csched2_cpu_pick(const struct scheduler
             printk(XENLOG_WARNING "%s: target runqueue disappeared!\n",
                    __func__);
         }
-        else
+        else if ( cpumask_intersects(cpumask_scratch_cpu(cpu),
+                                     &svc->migrate_rqd->active) )
         {
-            cpumask_and(cpumask_scratch_cpu(cpu), vc->cpu_hard_affinity,
+            cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                         &svc->migrate_rqd->active);
             new_cpu = cpumask_any(cpumask_scratch_cpu(cpu));
-            if ( new_cpu < nr_cpu_ids )
-                goto out_up;
+            goto out_up;
         }
         /* Fall-through to normal cpu pick */
     }
@@ -1570,12 +1579,12 @@ csched2_cpu_pick(const struct scheduler
          */
         if ( rqd == svc->rqd )
         {
-            if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
+            if ( cpumask_intersects(cpumask_scratch_cpu(cpu), &rqd->active) )
                 rqd_avgload = max_t(s_time_t, rqd->b_avgload - svc->avgload, 0);
         }
         else if ( spin_trylock(&rqd->lock) )
         {
-            if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
+            if ( cpumask_intersects(cpumask_scratch_cpu(cpu), &rqd->active) )
                 rqd_avgload = rqd->b_avgload;
 
             spin_unlock(&rqd->lock);
@@ -1597,7 +1606,7 @@ csched2_cpu_pick(const struct scheduler
         goto out_up;
     }
 
-    cpumask_and(cpumask_scratch_cpu(cpu), vc->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                 &prv->rqd[min_rqi].active);
     new_cpu = cpumask_any(cpumask_scratch_cpu(cpu));
     BUG_ON(new_cpu >= nr_cpu_ids);
@@ -1713,6 +1722,8 @@ static void migrate(const struct schedul
         __runq_deassign(svc);
 
         cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
+                    cpupool_domain_cpumask(svc->vcpu->domain));
+        cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                     &trqd->active);
         svc->vcpu->processor = cpumask_any(cpumask_scratch_cpu(cpu));
         ASSERT(svc->vcpu->processor < nr_cpu_ids);
@@ -1738,8 +1749,14 @@ static void migrate(const struct schedul
 static bool_t vcpu_is_migrateable(struct csched2_vcpu *svc,
                                   struct csched2_runqueue_data *rqd)
 {
+    struct vcpu *v = svc->vcpu;
+    int cpu = svc->vcpu->processor;
+
+    cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+                cpupool_domain_cpumask(v->domain));
+
     return !(svc->flags & CSFLAG_runq_migrate_request) &&
-           cpumask_intersects(svc->vcpu->cpu_hard_affinity, &rqd->active);
+           cpumask_intersects(cpumask_scratch_cpu(cpu), &rqd->active);
 }
 
 static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
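
A worked bitmask example (toy 8-pCPU masks, not real cpumask_t code) of why
the extra cpupool_domain_cpumask() intersection added above matters:

  #include <stdio.h>

  int main(void)
  {
      unsigned int hard_affinity = 0xF0;  /* vCPU may run on pCPUs 4-7  */
      unsigned int cpupool       = 0x3C;  /* its cpupool owns pCPUs 2-5 */
      unsigned int runq_active   = 0xFC;  /* runqueue spans pCPUs 2-7   */

      /* Without the cpupool mask, pCPUs 6-7 (outside the pool) look usable: */
      printf("affinity & runq           = 0x%02x\n", hard_affinity & runq_active);

      /* With it, only pCPUs 4-5 remain, which is what the patch enforces: */
      printf("affinity & cpupool & runq = 0x%02x\n",
             hard_affinity & cpupool & runq_active);
      return 0;
  }
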
++++++ 5887888f-credit2-use-the-correct-scratch-cpumask.patch ++++++
# Commit 548db8742872399936a2090cbcdfd5e1b34fcbcc
# Date 2017-01-24 17:02:07 +0000
# Author Dario Faggioli <dario.faggi...@citrix.com>
# Committer George Dunlap <george.dun...@citrix.com>
xen: credit2: use the correct scratch cpumask.

In fact, there is one scratch mask per CPU. When
you use a given CPU's mask, it must be true that:
 - the CPU belongs to your cpupool and scheduler,
 - you own the runqueue lock (the one you take via
   {v,p}cpu_schedule_lock()) for that CPU.

This was not the case within the following functions:

get_fallback_cpu(), csched2_cpu_pick(): as we can't be
sure we either are on, or hold the lock for, the CPU
that is in the vCPU's 'v->processor'.

migrate(): it's ok, when called from balance_load(),
because that comes from csched2_schedule(), which takes
the runqueue lock of the CPU where it executes. But it is
not ok when we come from csched2_vcpu_migrate(), which
can be called from other places.

The fix is to explicitly use the scratch space of the
CPUs for which we know we hold the runqueue lock.

Signed-off-by: Dario Faggioli <dario.faggi...@citrix.com>
Reported-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: George Dunlap <george.dun...@citrix.com>

--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -510,24 +510,23 @@ void smt_idle_mask_clear(unsigned int cp
  */
 static int get_fallback_cpu(struct csched2_vcpu *svc)
 {
-    int cpu;
+    int fallback_cpu, cpu = svc->vcpu->processor;
 
-    if ( likely(cpumask_test_cpu(svc->vcpu->processor,
-                                 svc->vcpu->cpu_hard_affinity)) )
-        return svc->vcpu->processor;
+    if ( likely(cpumask_test_cpu(cpu, svc->vcpu->cpu_hard_affinity)) )
+        return cpu;
 
-    cpumask_and(cpumask_scratch, svc->vcpu->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
                 &svc->rqd->active);
-    cpu = cpumask_first(cpumask_scratch);
-    if ( likely(cpu < nr_cpu_ids) )
-        return cpu;
+    fallback_cpu = cpumask_first(cpumask_scratch_cpu(cpu));
+    if ( likely(fallback_cpu < nr_cpu_ids) )
+        return fallback_cpu;
 
     cpumask_and(cpumask_scratch, svc->vcpu->cpu_hard_affinity,
                 cpupool_domain_cpumask(svc->vcpu->domain));
 
-    ASSERT(!cpumask_empty(cpumask_scratch));
+    ASSERT(!cpumask_empty(cpumask_scratch_cpu(cpu)));
 
-    return cpumask_first(cpumask_scratch);
+    return cpumask_first(cpumask_scratch_cpu(cpu));
 }
 
 /*
@@ -1492,7 +1491,7 @@ static int
 csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched2_private *prv = CSCHED2_PRIV(ops);
-    int i, min_rqi = -1, new_cpu;
+    int i, min_rqi = -1, new_cpu, cpu = vc->processor;
     struct csched2_vcpu *svc = CSCHED2_VCPU(vc);
     s_time_t min_avgload = MAX_LOAD;
 
@@ -1512,7 +1511,7 @@ csched2_cpu_pick(const struct scheduler
      * just grab the prv lock.  Instead, we'll have to trylock, and
      * do something else reasonable if we fail.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, vc->processor).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
 
     if ( !read_trylock(&prv->lock) )
     {
@@ -1539,9 +1538,9 @@ csched2_cpu_pick(const struct scheduler
         }
         else
         {
-            cpumask_and(cpumask_scratch, vc->cpu_hard_affinity,
+            cpumask_and(cpumask_scratch_cpu(cpu), vc->cpu_hard_affinity,
                         &svc->migrate_rqd->active);
-            new_cpu = cpumask_any(cpumask_scratch);
+            new_cpu = cpumask_any(cpumask_scratch_cpu(cpu));
             if ( new_cpu < nr_cpu_ids )
                 goto out_up;
         }
@@ -1598,9 +1597,9 @@ csched2_cpu_pick(const struct scheduler
         goto out_up;
     }
 
-    cpumask_and(cpumask_scratch, vc->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), vc->cpu_hard_affinity,
                 &prv->rqd[min_rqi].active);
-    new_cpu = cpumask_any(cpumask_scratch);
+    new_cpu = cpumask_any(cpumask_scratch_cpu(cpu));
     BUG_ON(new_cpu >= nr_cpu_ids);
 
  out_up:
@@ -1675,6 +1674,8 @@ static void migrate(const struct schedul
                     struct csched2_runqueue_data *trqd, 
                     s_time_t now)
 {
+    int cpu = svc->vcpu->processor;
+
     if ( unlikely(tb_init_done) )
     {
         struct {
@@ -1696,7 +1697,7 @@ static void migrate(const struct schedul
         svc->migrate_rqd = trqd;
         __set_bit(_VPF_migrating, &svc->vcpu->pause_flags);
         __set_bit(__CSFLAG_runq_migrate_request, &svc->flags);
-        cpu_raise_softirq(svc->vcpu->processor, SCHEDULE_SOFTIRQ);
+        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
         SCHED_STAT_CRANK(migrate_requested);
     }
     else
@@ -1711,9 +1712,9 @@ static void migrate(const struct schedul
         }
         __runq_deassign(svc);
 
-        cpumask_and(cpumask_scratch, svc->vcpu->cpu_hard_affinity,
+        cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
                     &trqd->active);
-        svc->vcpu->processor = cpumask_any(cpumask_scratch);
+        svc->vcpu->processor = cpumask_any(cpumask_scratch_cpu(cpu));
         ASSERT(svc->vcpu->processor < nr_cpu_ids);
 
         __runq_assign(svc, trqd);
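
A minimal sketch of the locking rule stated above, using hypothetical per-CPU
scratch buffers and pthread mutexes in place of Xen's cpumask_scratch_cpu()
and runqueue locks (build with -pthread):

  #include <pthread.h>
  #include <stdio.h>

  #define NR_CPUS 4

  static unsigned long scratch[NR_CPUS];       /* one scratch mask per CPU  */
  static pthread_mutex_t runq_lock[NR_CPUS];   /* one runqueue lock per CPU */

  static unsigned long pick(unsigned int cpu, unsigned long affinity,
                            unsigned long active)
  {
      unsigned long result;

      /* Only touch scratch[cpu] while holding that same CPU's lock. */
      pthread_mutex_lock(&runq_lock[cpu]);
      scratch[cpu] = affinity & active;
      result = scratch[cpu];
      pthread_mutex_unlock(&runq_lock[cpu]);
      return result;
  }

  int main(void)
  {
      for (int i = 0; i < NR_CPUS; i++)
          pthread_mutex_init(&runq_lock[i], NULL);

      printf("candidates: 0x%lx\n", pick(1, 0x0F, 0x3C));   /* prints 0xc */
      return 0;
  }
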
++++++ 5888b1b3-x86-emulate-dont-assume-addr_size-32-implies-protmode.patch ++++++
# Commit 05118b1596ffe4559549edbb28bd0124a7316123
# Date 2017-01-25 15:09:55 +0100
# Author George Dunlap <george.dun...@citrix.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/emulate: don't assume that addr_size == 32 implies protected mode

Callers of x86_emulate() generally define addr_size based on the code
segment.  In vm86 mode, the code segment is set by the hardware to be
16-bits; but it is entirely possible to enable protected mode, set the
CS to 32-bits, and then disable protected mode.  (This is commonly
called "unreal mode".)

But the instruction decoder only checks for protected mode when
addr_size == 16.  So in unreal mode, hardware will throw a #UD for VEX
prefixes, but our instruction decoder will decode them, triggering an
ASSERT() further on in _get_fpu().  (With debug=n the emulator will
incorrectly emulate the instruction rather than throwing a #UD, but
this is only a bug, not a crash, so it's not a security issue.)

Teach the instruction decoder to check that we're in protected mode,
even if addr_size is 32.

Signed-off-by: George Dunlap <george.dun...@citrix.com>

Split real mode and VM86 mode handling, as VM86 mode is strictly 16-bit
at all times. Re-base.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -2026,11 +2026,11 @@ x86_decode(
             default:
                 BUG(); /* Shouldn't be possible. */
             case 2:
-                if ( in_realmode(ctxt, ops) || (state->regs->eflags & EFLG_VM) )
+                if ( state->regs->eflags & EFLG_VM )
                     break;
                 /* fall through */
             case 4:
-                if ( modrm_mod != 3 )
+                if ( modrm_mod != 3 || in_realmode(ctxt, ops) )
                     break;
                 /* fall through */
             case 8:
++++++ 589b3272-libxl-dont-segfault-when-creating-domain-with-invalid-pvusb-device.patch ++++++
Subject: libxl: don't segfault when creating domain with invalid pvusb device
From: Juergen Gross jgr...@suse.com Wed Feb 8 14:34:08 2017 +0100
Date: Wed Feb 8 15:00:02 2017 +0000:
Git: 58b4db0832de1ee355a342feea7e1545219bef12

Creating a domain with an invalid controller specification for a pvusb
device will currently segfault.

Avoid this by bailing out early in case of a mandatory xenstore path
not existing.

Signed-off-by: Juergen Gross <jgr...@suse.com>
Acked-by: Ian Jackson <ian.jack...@eu.citrix.com>
Acked-by: Wei Liu <wei.l...@citrix.com>

Index: xen-4.8.0-testing/tools/libxl/libxl_internal.h
===================================================================
--- xen-4.8.0-testing.orig/tools/libxl/libxl_internal.h
+++ xen-4.8.0-testing/tools/libxl/libxl_internal.h
@@ -727,6 +727,13 @@ int libxl__xs_mknod(libxl__gc *gc, xs_tr
 
 /* On success, *result_out came from the gc.
  * On error, *result_out is undefined.
+ * ENOENT is regarded as error.
+ */
+int libxl__xs_read_mandatory(libxl__gc *gc, xs_transaction_t t,
+                             const char *path, const char **result_out);
+
+/* On success, *result_out came from the gc.
+ * On error, *result_out is undefined.
  * ENOENT counts as success but sets *result_out=0
  */
 int libxl__xs_read_checked(libxl__gc *gc, xs_transaction_t t,
Index: xen-4.8.0-testing/tools/libxl/libxl_usb.c
===================================================================
--- xen-4.8.0-testing.orig/tools/libxl/libxl_usb.c
+++ xen-4.8.0-testing/tools/libxl/libxl_usb.c
@@ -652,9 +652,9 @@ int libxl_device_usbctrl_getinfo(libxl_c
     usbctrlinfo->devid = usbctrl->devid;
 
 #define READ_SUBPATH(path, subpath) ({                                  \
-        rc = libxl__xs_read_checked(gc, XBT_NULL,                       \
-                                    GCSPRINTF("%s/" subpath, path),     \
-                                    &tmp);                              \
+        rc = libxl__xs_read_mandatory(gc, XBT_NULL,                     \
+                                      GCSPRINTF("%s/" subpath, path),   \
+                                      &tmp);                            \
         if (rc) goto out;                                               \
         (char *)tmp;                                                    \
     })
Index: xen-4.8.0-testing/tools/libxl/libxl_xshelp.c
===================================================================
--- xen-4.8.0-testing.orig/tools/libxl/libxl_xshelp.c
+++ xen-4.8.0-testing/tools/libxl/libxl_xshelp.c
@@ -193,6 +193,18 @@ char *libxl__xs_libxl_path(libxl__gc *gc
     return s;
 }
 
+int libxl__xs_read_mandatory(libxl__gc *gc, xs_transaction_t t,
+                             const char *path, const char **result_out)
+{
+    char *result = libxl__xs_read(gc, t, path);
+    if (!result) {
+        LOGE(ERROR, "xenstore read failed: `%s'", path);
+        return ERROR_FAIL;
+    }
+    *result_out = result;
+    return 0;
+}
+
 int libxl__xs_read_checked(libxl__gc *gc, xs_transaction_t t,
                            const char *path, const char **result_out)
 {
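
A stand-alone illustration (hypothetical helper names, not libxl code) of the
difference between the two helpers: the _checked variant treats a missing key
as success with a NULL result, while the new _mandatory variant turns it into
a hard error, so callers such as READ_SUBPATH() can no longer end up using a
NULL pointer:

  #include <stdio.h>
  #include <string.h>

  /* Pretend only this one xenstore key exists. */
  static const char *fake_xs_read(const char *path)
  {
      return strcmp(path, "/ctrl/type") == 0 ? "qusb" : NULL;
  }

  static int read_checked(const char *path, const char **out)
  {
      *out = fake_xs_read(path);      /* NULL is a legitimate outcome */
      return 0;
  }

  static int read_mandatory(const char *path, const char **out)
  {
      *out = fake_xs_read(path);
      if (!*out) {
          fprintf(stderr, "xenstore read failed: `%s'\n", path);
          return -1;                  /* ERROR_FAIL in libxl terms */
      }
      return 0;
  }

  int main(void)
  {
      const char *val;

      if (read_checked("/ctrl/backend-id", &val) == 0)
          printf("checked:   %s\n", val ? val : "(missing, NULL result)");

      if (read_mandatory("/ctrl/type", &val) == 0)
          printf("mandatory: %s\n", val);

      /* The invalid-config case: the path is missing, so we bail out early
       * instead of later dereferencing NULL (the original segfault). */
      if (read_mandatory("/ctrl/backend-id", &val) != 0)
          printf("mandatory: bailing out, no segfault\n");
      return 0;
  }
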
++++++ CVE-2016-9921-qemut-display-cirrus_vga-divide-by-zero-in-cirrus_do_copy.patch ++++++
References: bsc#1015169 CVE-2016-9921 CVE-2016-9922

Subject: display: cirrus: check vga bits per pixel(bpp) value
From: Prasad J Pandit p...@fedoraproject.org Tue Oct 18 13:15:17 2016 +0530
Date: Mon Dec 5 11:01:55 2016 +0100:
Git: 4299b90e9ba9ce5ca9024572804ba751aa1a7e70

In the Cirrus CLGD 54xx VGA emulator, if the cirrus graphics mode is VGA,
'cirrus_get_bpp' returns zero (0), which could lead to a divide-by-zero
error while copying pixel data. The same could occur
via blit pitch values. Add checks to avoid it.

Reported-by: Huawei PSIRT <ps...@huawei.com>
Signed-off-by: Prasad J Pandit <p...@fedoraproject.org>
Message-id: 1476776717-24807-1-git-send-email-ppan...@redhat.com
Signed-off-by: Gerd Hoffmann <kra...@redhat.com>

Index: xen-4.8.0-testing/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
===================================================================
--- xen-4.8.0-testing.orig/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
+++ xen-4.8.0-testing/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
@@ -306,6 +306,9 @@ static void cirrus_vga_mem_writew(void *
 static bool blit_region_is_unsafe(struct CirrusVGAState *s,
                                   int32_t pitch, int32_t addr)
 {
+    if (!pitch) {
+        return true;
+    }
     if (pitch < 0) {
         int64_t min = addr
             + ((int64_t)s->cirrus_blt_height-1) * pitch;
@@ -752,7 +755,7 @@ static int cirrus_bitblt_videotovideo_pa
                                             s->cirrus_addr_mask));
 }
 
-static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
+static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
 {
     int sx = 0, sy = 0;
     int dx = 0, dy = 0;
@@ -765,6 +768,9 @@ static void cirrus_do_copy(CirrusVGAStat
         int width, height;
 
         depth = s->get_bpp((VGAState *)s) / 8;
+        if (!depth) {
+            return 0;
+        }
         s->get_resolution((VGAState *)s, &width, &height);
 
         /* extra x, y */
@@ -818,6 +824,8 @@ static void cirrus_do_copy(CirrusVGAStat
     cirrus_invalidate_region(s, s->cirrus_blt_dstaddr,
                                s->cirrus_blt_dstpitch, s->cirrus_blt_width,
                                s->cirrus_blt_height);
+
+    return 1;
 }
 
 static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s)
@@ -825,11 +833,9 @@ static int cirrus_bitblt_videotovideo_co
     if (blit_is_unsafe(s))
         return 0;
 
-    cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->start_addr,
+    return cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->start_addr,
             s->cirrus_blt_srcaddr - s->start_addr,
             s->cirrus_blt_width, s->cirrus_blt_height);
-
-    return 1;
 }
 
 /***************************************
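
A trivial illustration (toy values, not QEMU code) of the failure mode the
added check guards against:

  #include <stdio.h>

  int main(void)
  {
      int bpp = 0;               /* what cirrus_get_bpp() returns in VGA mode */
      int depth = bpp / 8;       /* 0 -- dividing by this would fault         */
      int line_bytes = 640;

      if (!depth) {              /* the early return the patch adds */
          printf("VGA mode (bpp=0): skip the copy instead of dividing by zero\n");
          return 0;
      }
      printf("pixels per line: %d\n", line_bytes / depth);
      return 0;
  }
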
++++++ CVE-2017-2615-qemut-display-cirrus-oob-access-while-doing-bitblt-copy-backward-mode.patch ++++++
References: bsc#1023004 CVE-2017-2615

When doing a bitblt copy in backward mode, we should subtract the
blit width first, just like the addition in forward mode. This
avoids an out-of-bounds access before the start of the VGA's vram.

Signed-off-by: Li Qiang <address@hidden>
Message-id: address@hidden

[ kraxel: with backward blits (negative pitch) addr is the topmost
          address, so check it as-is against vram size ]

Cc: address@hidden
Cc: P J P <address@hidden>
Cc: Laszlo Ersek <address@hidden>
Cc: Paolo Bonzini <address@hidden>
Cc: Wolfgang Bumiller <address@hidden>
Fixes: d3532a0db02296e687711b8cdc7791924efccea0 (CVE-2014-8106)
Signed-off-by: Gerd Hoffmann <address@hidden>
---
 hw/display/cirrus_vga.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

Index: xen-4.8.0-testing/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
===================================================================
--- xen-4.8.0-testing.orig/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
+++ xen-4.8.0-testing/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
@@ -311,10 +311,9 @@ static bool blit_region_is_unsafe(struct
     }
     if (pitch < 0) {
         int64_t min = addr
-            + ((int64_t)s->cirrus_blt_height-1) * pitch;
-        int32_t max = addr
-            + s->cirrus_blt_width;
-        if (min < 0 || max >= s->vram_size) {
+            + ((int64_t)s->cirrus_blt_height - 1) * pitch
+            - s->cirrus_blt_width;
+        if (min < -1 || addr >= s->vram_size) {
             return true;
         }
     } else {
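
With toy numbers, the bound computed above for a backward (negative pitch)
blit, for illustration only:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      int64_t addr = 1024, pitch = -512, height = 3, width = 256;
      int64_t vram_size = 4 * 1024 * 1024;

      /* Lowest byte the blit can touch; the patch rejects the blit when this
       * is < -1, and compares addr itself (the topmost address) to vram_size. */
      int64_t min = addr + (height - 1) * pitch - width;

      printf("lowest byte touched: %lld\n", (long long)min);   /* -256: unsafe */
      printf("topmost address ok:  %d\n", addr < vram_size);   /* 1            */
      return 0;
  }
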
++++++ CVE-2017-2620-xsa209-qemut-cirrus_bitblt_cputovideo-does-not-check-if-memory-region-safe.patch ++++++
References: bsc#1024834 CVE-2017-2620 XSA-209

From: Gerd Hoffmann <kra...@redhat.com>
Subject: [PATCH 3/3] cirrus: add blit_is_unsafe call to cirrus_bitblt_cputovideo

CIRRUS_BLTMODE_MEMSYSSRC blits do NOT check blit destination
and blit width, at all.  Oops.  Fix it.

Security impact: high.

The missing blit destination check allows writing to host memory.
Basically the same as CVE-2014-8106 for the other blit variants.

The missing blit width check allows overflowing cirrus_bltbuf,
with the attractive target cirrus_srcptr (the current cirrus_bltbuf write
position) being located right after cirrus_bltbuf in CirrusVGAState.

Because the cirrus emulation writes cirrus_bltbuf bytewise, the attacker
doesn't have full control over cirrus_srcptr; only one byte can be
changed.  Once the first byte has been modified, further writes land
elsewhere.

[ This is CVE-2017-2620 / XSA-209  - Ian Jackson ]

Signed-off-by: Gerd Hoffmann <kra...@redhat.com>
---
Index: xen-4.8.0-testing/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
===================================================================
--- xen-4.8.0-testing.orig/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
+++ xen-4.8.0-testing/tools/qemu-xen-traditional-dir-remote/hw/cirrus_vga.c
@@ -905,6 +905,10 @@ static int cirrus_bitblt_cputovideo(Cirr
 {
     int w;
 
+    if (blit_is_unsafe(s)) {
+        return 0;
+    }
+
     s->cirrus_blt_mode &= ~CIRRUS_BLTMODE_MEMSYSSRC;
     s->cirrus_srcptr = &s->cirrus_bltbuf[0];
     s->cirrus_srcptr_end = &s->cirrus_bltbuf[0];
@@ -930,6 +934,10 @@ static int cirrus_bitblt_cputovideo(Cirr
        }
         s->cirrus_srccounter = s->cirrus_blt_srcpitch * s->cirrus_blt_height;
     }
+
+    /* the blit_is_unsafe call above should catch this */
+    assert(s->cirrus_blt_srcpitch <= CIRRUS_BLTBUFSIZE);
+
     s->cirrus_srcptr = s->cirrus_bltbuf;
     s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch;
     cirrus_update_memory_access(s);
++++++ glibc-2.25-compatibility-fix.patch ++++++
References: bsc#1027654

Index: xen-4.8.0-testing/tools/blktap2/control/tap-ctl-allocate.c
===================================================================
--- xen-4.8.0-testing.orig/tools/blktap2/control/tap-ctl-allocate.c
+++ xen-4.8.0-testing/tools/blktap2/control/tap-ctl-allocate.c
@@ -34,7 +34,7 @@
 #include <getopt.h>
 #include <libgen.h>
 #include <sys/stat.h>
-#include <sys/types.h>
+#include <sys/sysmacros.h>
 #include <sys/ioctl.h>
 #include <linux/major.h>
 
Index: xen-4.8.0-testing/tools/libxl/libxl_device.c
===================================================================
--- xen-4.8.0-testing.orig/tools/libxl/libxl_device.c
+++ xen-4.8.0-testing/tools/libxl/libxl_device.c
@@ -18,6 +18,8 @@
 
 #include "libxl_internal.h"
 
+#include <sys/sysmacros.h>
+
 char *libxl__device_frontend_path(libxl__gc *gc, libxl__device *device)
 {
     char *dom_path = libxl__xs_get_dompath(gc, device->domid);
++++++ libxl.pvscsi.patch ++++++
--- /var/tmp/diff_new_pack.qSTB51/_old  2017-03-09 01:35:39.454907342 +0100
+++ /var/tmp/diff_new_pack.qSTB51/_new  2017-03-09 01:35:39.454907342 +0100
@@ -34,10 +34,10 @@
  tools/libxl/xl_cmdtable.c            |   15 
  16 files changed, 2326 insertions(+), 4 deletions(-)
 
-Index: xen-4.7.1-testing/docs/man/xl.cfg.pod.5.in
+Index: xen-4.8.0-testing/docs/man/xl.cfg.pod.5.in
 ===================================================================
---- xen-4.7.1-testing.orig/docs/man/xl.cfg.pod.5.in
-+++ xen-4.7.1-testing/docs/man/xl.cfg.pod.5.in
+--- xen-4.8.0-testing.orig/docs/man/xl.cfg.pod.5.in
++++ xen-4.8.0-testing/docs/man/xl.cfg.pod.5.in
 @@ -517,6 +517,62 @@ value is optional if this is a guest dom
  
  =back
@@ -101,11 +101,11 @@
  =item B<vfb=[ "VFB_SPEC_STRING", "VFB_SPEC_STRING", ...]>
  
  Specifies the paravirtual framebuffer devices which should be supplied
-Index: xen-4.7.1-testing/docs/man/xl.pod.1.in
+Index: xen-4.8.0-testing/docs/man/xl.pod.1.in
 ===================================================================
---- xen-4.7.1-testing.orig/docs/man/xl.pod.1.in
-+++ xen-4.7.1-testing/docs/man/xl.pod.1.in
-@@ -1423,6 +1423,24 @@ List virtual trusted platform modules fo
+--- xen-4.8.0-testing.orig/docs/man/xl.pod.1.in
++++ xen-4.8.0-testing/docs/man/xl.pod.1.in
+@@ -1436,6 +1436,24 @@ List virtual trusted platform modules fo
  
  =back
  
@@ -130,11 +130,11 @@
  =head1 PCI PASS-THROUGH
  
  =over 4
-Index: xen-4.7.1-testing/tools/libxl/Makefile
+Index: xen-4.8.0-testing/tools/libxl/Makefile
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/Makefile
-+++ xen-4.7.1-testing/tools/libxl/Makefile
-@@ -108,6 +108,7 @@ endif
+--- xen-4.8.0-testing.orig/tools/libxl/Makefile
++++ xen-4.8.0-testing/tools/libxl/Makefile
+@@ -129,6 +129,7 @@ endif
  LIBXL_LIBS += -lyajl
  
  LIBXL_OBJS = flexarray.o libxl.o libxl_create.o libxl_dm.o libxl_pci.o \
@@ -142,7 +142,7 @@
                        libxl_dom.o libxl_exec.o libxl_xshelp.o libxl_device.o \
                        libxl_internal.o libxl_utils.o libxl_uuid.o \
                       libxl_json.o libxl_aoutils.o libxl_numa.o libxl_vnuma.o \
-@@ -151,6 +152,7 @@ AUTOINCS= libxlu_cfg_y.h libxlu_cfg_l.h
+@@ -173,6 +174,7 @@ AUTOINCS= libxlu_cfg_y.h libxlu_cfg_l.h
  AUTOSRCS= libxlu_cfg_y.c libxlu_cfg_l.c
  AUTOSRCS += _libxl_save_msgs_callout.c _libxl_save_msgs_helper.c
  LIBXLU_OBJS = libxlu_cfg_y.o libxlu_cfg_l.o libxlu_cfg.o \
@@ -150,45 +150,11 @@
        libxlu_disk_l.o libxlu_disk.o libxlu_vif.o libxlu_pci.o
  $(LIBXLU_OBJS): CFLAGS += $(CFLAGS_libxenctrl) # For xentoollog.h
  
-Index: xen-4.7.1-testing/tools/libxl/libxl.c
+Index: xen-4.8.0-testing/tools/libxl/libxl.h
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/libxl.c
-+++ xen-4.7.1-testing/tools/libxl/libxl.c
-@@ -4399,6 +4399,7 @@ DEFINE_DEVICE_REMOVE_CUSTOM(usbctrl, des
- /* The following functions are defined:
-  * libxl_device_disk_add
-  * libxl_device_nic_add
-+ * libxl_device_vscsictrl_add
-  * libxl_device_vtpm_add
-  * libxl_device_usbctrl_add
-  * libxl_device_usbdev_add
-@@ -4430,6 +4431,9 @@ DEFINE_DEVICE_ADD(disk)
- /* nic */
- DEFINE_DEVICE_ADD(nic)
- 
-+/* vscsi */
-+DEFINE_DEVICE_ADD(vscsictrl)
-+
- /* vtpm */
- DEFINE_DEVICE_ADD(vtpm)
- 
-@@ -7382,6 +7386,11 @@ int libxl_retrieve_domain_configuration(
- 
-     MERGE(nic, nics, COMPARE_DEVID, {});
- 
-+    MERGE(vscsictrl, vscsictrls, COMPARE_DEVID, {
-+            libxl_device_vscsictrl_dispose(dst);
-+            libxl_device_vscsictrl_copy(CTX, dst, src);
-+          });
-+
-     MERGE(vtpm, vtpms, COMPARE_DEVID, {});
- 
-     MERGE(pci, pcidevs, COMPARE_PCI, {});
-Index: xen-4.7.1-testing/tools/libxl/libxl.h
-===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/libxl.h
-+++ xen-4.7.1-testing/tools/libxl/libxl.h
-@@ -880,6 +880,13 @@ void libxl_mac_copy(libxl_ctx *ctx, libx
+--- xen-4.8.0-testing.orig/tools/libxl/libxl.h
++++ xen-4.8.0-testing/tools/libxl/libxl.h
+@@ -915,6 +915,13 @@ void libxl_mac_copy(libxl_ctx *ctx, libx
  #define LIBXL_HAVE_PCITOPOLOGY 1
  
  /*
@@ -202,7 +168,7 @@
   * LIBXL_HAVE_SOCKET_BITMAP
   *
   * If this is defined, then libxl_socket_bitmap_alloc and
-@@ -1710,6 +1717,41 @@ int libxl_device_channel_getinfo(libxl_c
+@@ -1809,6 +1816,41 @@ int libxl_device_channel_getinfo(libxl_c
                                   libxl_device_channel *channel,
                                   libxl_channelinfo *channelinfo);
  
@@ -244,135 +210,35 @@
  /* Virtual TPMs */
  int libxl_device_vtpm_add(libxl_ctx *ctx, uint32_t domid, libxl_device_vtpm *vtpm,
                            const libxl_asyncop_how *ao_how)
-Index: xen-4.7.1-testing/tools/libxl/libxl_create.c
-===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/libxl_create.c
-+++ xen-4.7.1-testing/tools/libxl/libxl_create.c
-@@ -742,6 +742,8 @@ static void domcreate_bootloader_done(li
- static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *aodevs,
-                                 int ret);
- 
-+static void domcreate_attach_vscsictrls(libxl__egc *egc, libxl__multidev *multidev,
-+                                        int ret);
- static void domcreate_attach_vtpms(libxl__egc *egc, libxl__multidev *multidev,
-                                    int ret);
- static void domcreate_attach_usbctrls(libxl__egc *egc,
-@@ -1434,13 +1436,13 @@ static void domcreate_devmodel_started(l
-     if (d_config->num_nics > 0) {
-         /* Attach nics */
-         libxl__multidev_begin(ao, &dcs->multidev);
--        dcs->multidev.callback = domcreate_attach_vtpms;
-+        dcs->multidev.callback = domcreate_attach_vscsictrls;
-         libxl__add_nics(egc, ao, domid, d_config, &dcs->multidev);
-         libxl__multidev_prepared(egc, &dcs->multidev, 0);
-         return;
-     }
- 
--    domcreate_attach_vtpms(egc, &dcs->multidev, 0);
-+    domcreate_attach_vscsictrls(egc, &dcs->multidev, 0);
-     return;
- 
- error_out:
-@@ -1448,7 +1450,7 @@ error_out:
-     domcreate_complete(egc, dcs, ret);
- }
- 
--static void domcreate_attach_vtpms(libxl__egc *egc,
-+static void domcreate_attach_vscsictrls(libxl__egc *egc,
-                                    libxl__multidev *multidev,
-                                    int ret)
- {
-@@ -1463,6 +1465,39 @@ static void domcreate_attach_vtpms(libxl
-        goto error_out;
-    }
- 
-+    /* Plug vscsi devices */
-+   if (d_config->num_vscsictrls > 0) {
-+       /* Attach vscsictrls */
-+       libxl__multidev_begin(ao, &dcs->multidev);
-+       dcs->multidev.callback = domcreate_attach_vtpms;
-+       libxl__add_vscsictrls(egc, ao, domid, d_config, &dcs->multidev);
-+       libxl__multidev_prepared(egc, &dcs->multidev, 0);
-+       return;
-+   }
-+
-+   domcreate_attach_vtpms(egc, multidev, 0);
-+   return;
-+
-+error_out:
-+   assert(ret);
-+   domcreate_complete(egc, dcs, ret);
-+}
-+
-+static void domcreate_attach_vtpms(libxl__egc *egc,
-+                                   libxl__multidev *multidev,
-+                                   int ret)
-+{
-+   libxl__domain_create_state *dcs = CONTAINER_OF(multidev, *dcs, multidev);
-+   STATE_AO_GC(dcs->ao);
-+   int domid = dcs->guest_domid;
-+
-+   libxl_domain_config* const d_config = dcs->guest_config;
-+
-+   if(ret) {
-+       LOG(ERROR, "unable to add vscsi devices");
-+       goto error_out;
-+   }
-+
-     /* Plug vtpm devices */
-    if (d_config->num_vtpms > 0) {
-        /* Attach vtpms */
-Index: xen-4.7.1-testing/tools/libxl/libxl_device.c
+Index: xen-4.8.0-testing/tools/libxl/libxl_create.c
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/libxl_device.c
-+++ xen-4.7.1-testing/tools/libxl/libxl_device.c
-@@ -684,6 +684,7 @@ void libxl__multidev_prepared(libxl__egc
-  * The following functions are defined:
-  * libxl__add_disks
-  * libxl__add_nics
-+ * libxl__add_vscsictrls
-  * libxl__add_vtpms
-  * libxl__add_usbctrls
-  * libxl__add_usbs
-@@ -705,6 +706,7 @@ void libxl__multidev_prepared(libxl__egc
- 
- DEFINE_DEVICES_ADD(disk)
- DEFINE_DEVICES_ADD(nic)
-+DEFINE_DEVICES_ADD(vscsictrl)
- DEFINE_DEVICES_ADD(vtpm)
- DEFINE_DEVICES_ADD(usbctrl)
- DEFINE_DEVICES_ADD(usbdev)
-Index: xen-4.7.1-testing/tools/libxl/libxl_internal.h
+--- xen-4.8.0-testing.orig/tools/libxl/libxl_create.c
++++ xen-4.8.0-testing/tools/libxl/libxl_create.c
+@@ -1439,6 +1439,7 @@ const struct libxl_device_type *device_t
+     &libxl__disk_devtype,
+     &libxl__nic_devtype,
+     &libxl__vtpm_devtype,
++    &libxl__vscsictrl_devtype,
+     &libxl__usbctrl_devtype,
+     &libxl__usbdev_devtype,
+     &libxl__pcidev_devtype,
+Index: xen-4.8.0-testing/tools/libxl/libxl_internal.h
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/libxl_internal.h
-+++ xen-4.7.1-testing/tools/libxl/libxl_internal.h
-@@ -2630,6 +2630,10 @@ _hidden void libxl__device_nic_add(libxl
-                                    libxl_device_nic *nic,
-                                    libxl__ao_device *aodev);
- 
-+_hidden void libxl__device_vscsictrl_add(libxl__egc *egc, uint32_t domid,
-+                                         libxl_device_vscsictrl *vscsictrl,
-+                                         libxl__ao_device *aodev);
-+
- _hidden void libxl__device_vtpm_add(libxl__egc *egc, uint32_t domid,
-                                    libxl_device_vtpm *vtpm,
-                                    libxl__ao_device *aodev);
-@@ -3488,6 +3492,10 @@ _hidden void libxl__add_nics(libxl__egc
-                              libxl_domain_config *d_config,
-                              libxl__multidev *multidev);
- 
-+_hidden void libxl__add_vscsictrls(libxl__egc *egc, libxl__ao *ao, uint32_t domid,
-+                                   libxl_domain_config *d_config,
-+                                   libxl__multidev *multidev);
-+
- _hidden void libxl__add_vtpms(libxl__egc *egc, libxl__ao *ao, uint32_t domid,
-                              libxl_domain_config *d_config,
-                              libxl__multidev *multidev);
-Index: xen-4.7.1-testing/tools/libxl/libxl_types.idl
+--- xen-4.8.0-testing.orig/tools/libxl/libxl_internal.h
++++ xen-4.8.0-testing/tools/libxl/libxl_internal.h
+@@ -3511,6 +3511,7 @@ static inline int *libxl__device_type_ge
+ extern const struct libxl_device_type libxl__disk_devtype;
+ extern const struct libxl_device_type libxl__nic_devtype;
+ extern const struct libxl_device_type libxl__vtpm_devtype;
++extern const struct libxl_device_type libxl__vscsictrl_devtype;
+ extern const struct libxl_device_type libxl__usbctrl_devtype;
+ extern const struct libxl_device_type libxl__usbdev_devtype;
+ extern const struct libxl_device_type libxl__pcidev_devtype;
+Index: xen-4.8.0-testing/tools/libxl/libxl_types.idl
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/libxl_types.idl
-+++ xen-4.7.1-testing/tools/libxl/libxl_types.idl
-@@ -698,6 +698,43 @@ libxl_device_channel = Struct("device_ch
+--- xen-4.8.0-testing.orig/tools/libxl/libxl_types.idl
++++ xen-4.8.0-testing/tools/libxl/libxl_types.idl
+@@ -703,6 +703,43 @@ libxl_device_channel = Struct("device_ch
             ])),
  ])
  
@@ -416,7 +282,7 @@
  libxl_domain_config = Struct("domain_config", [
      ("c_info", libxl_domain_create_info),
      ("b_info", libxl_domain_build_info),
-@@ -709,6 +746,7 @@ libxl_domain_config = Struct("domain_con
+@@ -714,6 +751,7 @@ libxl_domain_config = Struct("domain_con
      ("dtdevs", Array(libxl_device_dtdev, "num_dtdevs")),
      ("vfbs", Array(libxl_device_vfb, "num_vfbs")),
      ("vkbs", Array(libxl_device_vkb, "num_vkbs")),
@@ -424,7 +290,7 @@
      ("vtpms", Array(libxl_device_vtpm, "num_vtpms")),
      # a channel manifests as a console with a name,
      # see docs/misc/channels.txt
-@@ -746,6 +784,21 @@ libxl_nicinfo = Struct("nicinfo", [
+@@ -751,6 +789,21 @@ libxl_nicinfo = Struct("nicinfo", [
      ("rref_rx", integer),
      ], dir=DIR_OUT)
  
@@ -446,11 +312,11 @@
  libxl_vtpminfo = Struct("vtpminfo", [
      ("backend", string),
      ("backend_id", uint32),
-Index: xen-4.7.1-testing/tools/libxl/libxl_types_internal.idl
+Index: xen-4.8.0-testing/tools/libxl/libxl_types_internal.idl
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/libxl_types_internal.idl
-+++ xen-4.7.1-testing/tools/libxl/libxl_types_internal.idl
-@@ -24,6 +24,7 @@ libxl__device_kind = Enumeration("device
+--- xen-4.8.0-testing.orig/tools/libxl/libxl_types_internal.idl
++++ xen-4.8.0-testing/tools/libxl/libxl_types_internal.idl
+@@ -25,6 +25,7 @@ libxl__device_kind = Enumeration("device
      (8, "VTPM"),
      (9, "VUSB"),
      (10, "QUSB"),
@@ -458,11 +324,11 @@
      ])
  
  libxl__console_backend = Enumeration("console_backend", [
-Index: xen-4.7.1-testing/tools/libxl/libxl_vscsi.c
+Index: xen-4.8.0-testing/tools/libxl/libxl_vscsi.c
 ===================================================================
 --- /dev/null
-+++ xen-4.7.1-testing/tools/libxl/libxl_vscsi.c
-@@ -0,0 +1,1169 @@
++++ xen-4.8.0-testing/tools/libxl/libxl_vscsi.c
+@@ -0,0 +1,1176 @@
 +/*
 + * Copyright (C) 2016      SUSE Linux GmbH
 + * Author Olaf Hering <o...@aepfle.de>
@@ -841,8 +707,7 @@
 +            if (rc) goto out;
 +        }
 +
-+        libxl__xs_writev(gc, t, be_path,
-+                         libxl__xs_kvs_of_flexarray(gc, back, back->count));
++        libxl__xs_writev(gc, t, be_path, libxl__xs_kvs_of_flexarray(gc, back));
 +
 +        rc = libxl__xs_transaction_commit(gc, &t);
 +        if (!rc) break;
@@ -1083,10 +948,8 @@
 +        }
 +
 +        libxl__device_generic_add(gc, t, aodev->dev,
-+                                  libxl__xs_kvs_of_flexarray(gc, back,
-+                                                             back->count),
-+                                  libxl__xs_kvs_of_flexarray(gc, front,
-+                                                             front->count),
++                                  libxl__xs_kvs_of_flexarray(gc, back),
++                                  libxl__xs_kvs_of_flexarray(gc, front),
 +                                  NULL);
 +
 +        rc = libxl__xs_transaction_commit(gc, &t);
@@ -1193,8 +1056,7 @@
 +            if (rc) goto out;
 +        }
 +
-+        libxl__xs_writev(gc, t, be_path,
-+                         libxl__xs_kvs_of_flexarray(gc, back, back->count));
++        libxl__xs_writev(gc, t, be_path, libxl__xs_kvs_of_flexarray(gc, back));
 +
 +        rc = libxl__xs_transaction_commit(gc, &t);
 +        if (!rc) break;
@@ -1369,7 +1231,7 @@
 +    return AO_INPROGRESS;
 +}
 +
-+void libxl__device_vscsictrl_add(libxl__egc *egc, uint32_t domid,
++static void libxl__device_vscsictrl_add(libxl__egc *egc, uint32_t domid,
 +                                 libxl_device_vscsictrl *vscsictrl,
 +                                 libxl__ao_device *aodev)
 +{
@@ -1625,6 +1487,17 @@
 +    GC_FREE;
 +}
 +
++static int libxl_device_vscsictrl_compare(libxl_device_vscsictrl *d1,
++                                          libxl_device_vscsictrl *d2)
++{
++    return COMPARE_DEVID(d1, d2);
++}
++
++LIBXL_DEFINE_DEVICE_ADD(vscsictrl)
++static LIBXL_DEFINE_DEVICES_ADD(vscsictrl)
++//LIBXL_DEFINE_DEVICE_REMOVE(vscsictrl)
++DEFINE_DEVICE_TYPE_STRUCT(vscsictrl);
++
 +/*
 + * Local variables:
 + * mode: C
@@ -1632,11 +1505,11 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-Index: xen-4.7.1-testing/tools/libxl/libxlu_vscsi.c
+Index: xen-4.8.0-testing/tools/libxl/libxlu_vscsi.c
 ===================================================================
 --- /dev/null
-+++ xen-4.7.1-testing/tools/libxl/libxlu_vscsi.c
-@@ -0,0 +1,667 @@
++++ xen-4.8.0-testing/tools/libxl/libxlu_vscsi.c
+@@ -0,0 +1,668 @@
 +/*
 + * libxlu_vscsi.c - xl configuration file parsing: setup and helper functions
 + *
@@ -1659,6 +1532,7 @@
 +#include <ctype.h>
 +#include <dirent.h>
 +#include <sys/stat.h>
++#include <sys/sysmacros.h>
 +#include <fcntl.h>
 +#include "libxlu_internal.h"
 +
@@ -2304,10 +2178,10 @@
 +    return ERROR_INVAL;
 +}
 +#endif
-Index: xen-4.7.1-testing/tools/libxl/libxlutil.h
+Index: xen-4.8.0-testing/tools/libxl/libxlutil.h
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/libxlutil.h
-+++ xen-4.7.1-testing/tools/libxl/libxlutil.h
+--- xen-4.8.0-testing.orig/tools/libxl/libxlutil.h
++++ xen-4.8.0-testing/tools/libxl/libxlutil.h
 @@ -118,6 +118,25 @@ int xlu_rdm_parse(XLU_Config *cfg, libxl
  int xlu_vif_parse_rate(XLU_Config *cfg, const char *rate,
                         libxl_device_nic *nic);
@@ -2334,10 +2208,10 @@
  #endif /* LIBXLUTIL_H */
  
  /*
-Index: xen-4.7.1-testing/tools/libxl/xl.h
+Index: xen-4.8.0-testing/tools/libxl/xl.h
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/xl.h
-+++ xen-4.7.1-testing/tools/libxl/xl.h
+--- xen-4.8.0-testing.orig/tools/libxl/xl.h
++++ xen-4.8.0-testing/tools/libxl/xl.h
 @@ -89,6 +89,9 @@ int main_channellist(int argc, char **ar
  int main_blockattach(int argc, char **argv);
  int main_blocklist(int argc, char **argv);
@@ -2348,11 +2222,11 @@
  int main_vtpmattach(int argc, char **argv);
  int main_vtpmlist(int argc, char **argv);
  int main_vtpmdetach(int argc, char **argv);
-Index: xen-4.7.1-testing/tools/libxl/xl_cmdimpl.c
+Index: xen-4.8.0-testing/tools/libxl/xl_cmdimpl.c
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/xl_cmdimpl.c
-+++ xen-4.7.1-testing/tools/libxl/xl_cmdimpl.c
-@@ -1325,7 +1325,7 @@ static void parse_config_data(const char
+--- xen-4.8.0-testing.orig/tools/libxl/xl_cmdimpl.c
++++ xen-4.8.0-testing/tools/libxl/xl_cmdimpl.c
+@@ -1328,7 +1328,7 @@ static void parse_config_data(const char
      long l, vcpus = 0;
      XLU_Config *config;
      XLU_ConfigList *cpus, *vbds, *nics, *pcis, *cvfbs, *cpuids, *vtpms,
@@ -2361,7 +2235,7 @@
      XLU_ConfigList *channels, *ioports, *irqs, *iomem, *viridian, *dtdevs;
      int num_ioports, num_irqs, num_iomem, num_cpus, num_viridian;
      int pci_power_mgmt = 0;
-@@ -1855,6 +1855,17 @@ static void parse_config_data(const char
+@@ -1863,6 +1863,17 @@ static void parse_config_data(const char
          }
      }
  
@@ -2379,7 +2253,7 @@
      if (!xlu_cfg_get_list(config, "vtpm", &vtpms, 0, 0)) {
          d_config->num_vtpms = 0;
          d_config->vtpms = NULL;
-@@ -7416,6 +7427,218 @@ int main_blockdetach(int argc, char **ar
+@@ -7601,6 +7612,218 @@ int main_blockdetach(int argc, char **ar
      return rc;
  }
  
@@ -2598,11 +2472,11 @@
  int main_vtpmattach(int argc, char **argv)
  {
      int opt;
-Index: xen-4.7.1-testing/tools/libxl/xl_cmdtable.c
+Index: xen-4.8.0-testing/tools/libxl/xl_cmdtable.c
 ===================================================================
---- xen-4.7.1-testing.orig/tools/libxl/xl_cmdtable.c
-+++ xen-4.7.1-testing/tools/libxl/xl_cmdtable.c
-@@ -354,6 +354,21 @@ struct cmd_spec cmd_table[] = {
+--- xen-4.8.0-testing.orig/tools/libxl/xl_cmdtable.c
++++ xen-4.8.0-testing/tools/libxl/xl_cmdtable.c
+@@ -357,6 +357,21 @@ struct cmd_spec cmd_table[] = {
        "Destroy a domain's virtual block device",
        "<Domain> <DevId>",
      },

++++++ libxl.set-migration-constraints-from-cmdline.patch ++++++
--- /var/tmp/diff_new_pack.qSTB51/_old  2017-03-09 01:35:39.466905643 +0100
+++ /var/tmp/diff_new_pack.qSTB51/_new  2017-03-09 01:35:39.466905643 +0100
@@ -291,7 +291,7 @@
 ===================================================================
 --- xen-4.8.0-testing.orig/tools/libxl/libxl.h
 +++ xen-4.8.0-testing/tools/libxl/libxl.h
-@@ -1371,8 +1371,23 @@ int libxl_domain_suspend(libxl_ctx *ctx,
+@@ -1378,8 +1378,23 @@ int libxl_domain_suspend(libxl_ctx *ctx,
                           int flags, /* LIBXL_SUSPEND_* */
                           const libxl_asyncop_how *ao_how)
                           LIBXL_EXTERNAL_CALLERS_ONLY;
@@ -331,7 +331,7 @@
 ===================================================================
 --- xen-4.8.0-testing.orig/tools/libxl/libxl_internal.h
 +++ xen-4.8.0-testing/tools/libxl/libxl_internal.h
-@@ -3279,6 +3279,10 @@ struct libxl__domain_save_state {
+@@ -3286,6 +3286,10 @@ struct libxl__domain_save_state {
      /* private */
      int rc;
      int hvm;
@@ -387,7 +387,7 @@
 ===================================================================
 --- xen-4.8.0-testing.orig/tools/libxl/xl_cmdimpl.c
 +++ xen-4.8.0-testing/tools/libxl/xl_cmdimpl.c
-@@ -4682,6 +4682,8 @@ static void migrate_do_preamble(int send
+@@ -4693,6 +4693,8 @@ static void migrate_do_preamble(int send
  }
  
  static void migrate_domain(uint32_t domid, const char *rune, int debug,
@@ -396,7 +396,7 @@
                             const char *override_config_file)
  {
      pid_t child = -1;
-@@ -4690,7 +4692,13 @@ static void migrate_domain(uint32_t domi
+@@ -4701,7 +4703,13 @@ static void migrate_domain(uint32_t domi
      char *away_domname;
      char rc_buf;
      uint8_t *config_data;
@@ -411,7 +411,7 @@
  
      save_domain_core_begin(domid, override_config_file,
                             &config_data, &config_len);
-@@ -4709,10 +4717,12 @@ static void migrate_domain(uint32_t domi
+@@ -4720,10 +4728,12 @@ static void migrate_domain(uint32_t domi
      xtl_stdiostream_adjust_flags(logger, XTL_STDIOSTREAM_HIDE_PROGRESS, 0);
  
      if (debug)
@@ -427,7 +427,7 @@
                  " (rc=%d)\n", rc);
          if (rc == ERROR_GUEST_TIMEDOUT)
              goto failed_suspend;
-@@ -5135,13 +5145,18 @@ int main_migrate(int argc, char **argv)
+@@ -5146,13 +5156,18 @@ int main_migrate(int argc, char **argv)
      char *rune = NULL;
      char *host;
      int opt, daemonize = 1, monitor = 1, debug = 0, pause_after_migration = 0;
@@ -447,7 +447,7 @@
      case 'C':
          config_filename = optarg;
          break;
-@@ -5161,6 +5176,18 @@ int main_migrate(int argc, char **argv)
+@@ -5172,6 +5187,18 @@ int main_migrate(int argc, char **argv)
      case 0x100: /* --debug */
          debug = 1;
          break;
@@ -466,7 +466,7 @@
      case 0x200: /* --live */
          /* ignored for compatibility with xm */
          break;
-@@ -5194,7 +5221,8 @@ int main_migrate(int argc, char **argv)
+@@ -5205,7 +5232,8 @@ int main_migrate(int argc, char **argv)
                    pause_after_migration ? " -p" : "");
      }
  

++++++ tmp_build.patch ++++++
--- /var/tmp/diff_new_pack.qSTB51/_old  2017-03-09 01:35:39.546894316 +0100
+++ /var/tmp/diff_new_pack.qSTB51/_new  2017-03-09 01:35:39.546894316 +0100
@@ -1,11 +1,14 @@
-Notes: During the make process we can't have both
-xenstore and domu-xenstore linking the sub command
-files from /usr/bin. For example,
+Note: During the make process we can't have both xenstore and
+domu-xenstore linking the sub command files from /usr/bin.
+
+For example,
 xen-tools: /usr/bin/xenstore-ls -> xenstore
 xen-tools-domU: /usr/bin/xenstore-ls -> domu-xenstore
-The last thing to do this link wins so messes up
-what should be in the packaging. For this reason
-we put domu-xenstore and links in /bin
+
+The last thing to create this link overwrites the previous link
+and breaks the packaging. For this reason this patch puts domu-xenstore
+with its links in /bin so as to not interfere with the regular xenstore
+links.
 
 ---
  tools/xenstore/Makefile |    6 ++++--

