This patch removes the redundant per-domain save functions and renames
the save_one* handlers to save. It then changes the save handlers'
domain parameter to a vcpu.

Signed-off-by: Alexandru Isaila <aisa...@bitdefender.com>

---
Changes since V11:
        - Remove the enum return type from the save functions.
---
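For clarity, the shape of the interface change is sketched below. This is a
stand-alone model for illustration only, not Xen code: the struct definitions
are simplified stand-ins and demo_save_all() is a hypothetical name. In the
patch itself the single per-vcpu loop lives in hvm_save() (using
for_each_vcpu) and the handler typedef in xen/include/asm-x86/hvm/save.h.

#include <stddef.h>

/* Toy stand-ins for the Xen types -- illustration only. */
struct hvm_domain_context;                     /* opaque save buffer */
struct domain;
struct vcpu {
    unsigned int vcpu_id;
    struct domain *domain;                     /* handlers derive d from v */
    struct vcpu *next;
};
struct domain {
    struct vcpu *vcpu_list;
};

/*
 * After this patch every save handler, per-vcpu or per-domain, has the
 * same shape: it takes a vcpu and appends one record to the context.
 */
typedef int (*hvm_save_handler)(struct vcpu *v, struct hvm_domain_context *h);

/*
 * The generic code owns the only per-vcpu loop, instead of each handler
 * open-coding for_each_vcpu() around a *_one() helper.
 */
static int demo_save_all(struct domain *d, hvm_save_handler handler,
                         struct hvm_domain_context *h)
{
    struct vcpu *v;
    int rc = 0;

    for ( v = d->vcpu_list; v != NULL && rc == 0; v = v->next )
        rc = handler(v, h);                    /* stop at the first error */

    return rc;
}

Per-domain handlers (hpet_save(), pit_save(), rtc_save(), ...) keep working
under the unified signature by fetching the domain via v->domain, as the
hunks below show.
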
 xen/arch/x86/cpu/mcheck/vmce.c | 19 ++---------
 xen/arch/x86/hvm/hpet.c        |  3 +-
 xen/arch/x86/hvm/hvm.c         | 75 +++++-------------------------------------
 xen/arch/x86/hvm/i8254.c       |  3 +-
 xen/arch/x86/hvm/irq.c         |  9 +++--
 xen/arch/x86/hvm/mtrr.c        | 19 ++---------
 xen/arch/x86/hvm/pmtimer.c     |  3 +-
 xen/arch/x86/hvm/rtc.c         |  3 +-
 xen/arch/x86/hvm/save.c        | 26 +++------------
 xen/arch/x86/hvm/vioapic.c     |  3 +-
 xen/arch/x86/hvm/viridian.c    | 22 +++----------
 xen/arch/x86/hvm/vlapic.c      | 34 ++++++-------------
 xen/arch/x86/hvm/vpic.c        |  3 +-
 xen/include/asm-x86/hvm/save.h |  2 +-
 14 files changed, 50 insertions(+), 174 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 35044d7..b53ad7c 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,7 +349,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
     return ret;
 }
 
-static int vmce_save_vcpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
+static int vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_vmce_vcpu ctxt = {
         .caps = v->arch.vmce.mcg_cap,
@@ -361,21 +361,6 @@ static int vmce_save_vcpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
     return hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
 }
 
-static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    int err = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        err = vmce_save_vcpu_ctxt_one(v, h);
-        if ( err )
-            break;
-    }
-
-    return err;
-}
-
 static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     unsigned int vcpuid = hvm_load_instance(h);
@@ -396,7 +381,7 @@ static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 }
 
 HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt,
-                          vmce_save_vcpu_ctxt_one,
+                          NULL,
                           vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
 
 /*
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index aff8613..6e7f744 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -516,8 +516,9 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
 };
 
 
-static int hpet_save(struct domain *d, hvm_domain_context_t *h)
+static int hpet_save(struct vcpu *vcpu, hvm_domain_context_t *h)
 {
+    struct domain *d = vcpu->domain;
     HPETState *hp = domain_vhpet(d);
     struct vcpu *v = pt_global_vcpu_target(d);
     int rc;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f140305..122552e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -740,7 +740,7 @@ void hvm_domain_destroy(struct domain *d)
     destroy_vpci_mmcfg(d);
 }
 
-static int hvm_save_tsc_adjust_one(struct vcpu *v, hvm_domain_context_t *h)
+static int hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_tsc_adjust ctxt = {
         .tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust,
@@ -749,21 +749,6 @@ static int hvm_save_tsc_adjust_one(struct vcpu *v, hvm_domain_context_t *h)
     return hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
 }
 
-static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    int err = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        err = hvm_save_tsc_adjust_one(v, h);
-        if ( err )
-            break;
-    }
-
-    return err;
-}
-
 static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 {
     unsigned int vcpuid = hvm_load_instance(h);
@@ -785,10 +770,10 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 }
 
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
-                          hvm_save_tsc_adjust_one,
+                          NULL,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
-static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
+static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct segment_register seg;
     struct hvm_hw_cpu ctxt = {};
@@ -898,20 +883,6 @@ static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
     return hvm_save_entry(CPU, v->vcpu_id, h, &ctxt);
 }
 
-static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    int err = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        err = hvm_save_cpu_ctxt_one(v, h);
-        if ( err )
-            break;
-    }
-    return err;
-}
-
 /* Return a string indicating the error, or NULL for valid. */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                            signed int cr0_pg)
@@ -1183,7 +1154,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_save_cpu_ctxt_one,
+HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, NULL,
                           hvm_load_cpu_ctxt,
                           1, HVMSR_PER_VCPU);
 
@@ -1191,7 +1162,7 @@ HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_save_cpu_ctxt_one,
                                            save_area) + \
                                   xstate_ctxt_size(xcr0))
 
-static int hvm_save_cpu_xsave_states_one(struct vcpu *v, hvm_domain_context_t *h)
+static int hvm_save_cpu_xsave_states(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_hw_cpu_xsave *ctxt;
     unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
@@ -1215,21 +1186,6 @@ static int hvm_save_cpu_xsave_states_one(struct vcpu *v, hvm_domain_context_t *h
     return 0;
 }
 
-static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    int err = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        err = hvm_save_cpu_xsave_states_one(v, h);
-        if ( err )
-            break;
-    }
-
-    return err;
-}
-
 /*
  * Structure layout conformity checks, documenting correctness of the cast in
  * the invocation of validate_xstate() below.
@@ -1367,7 +1323,7 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
-static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
+static int hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
     struct hvm_msr *ctxt;
@@ -1428,21 +1384,6 @@ static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
     return 0;
 }
 
-static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    int err = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        err = hvm_save_cpu_msrs_one(v, h);
-        if ( err )
-            break;
-    }
-
-    return err;
-}
-
 static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
 {
     unsigned int i, vcpuid = hvm_load_instance(h);
@@ -1535,7 +1476,7 @@ static int __init hvm_register_CPU_save_and_restore(void)
     hvm_register_savevm(CPU_XSAVE_CODE,
                         "CPU_XSAVE",
                         hvm_save_cpu_xsave_states,
-                        hvm_save_cpu_xsave_states_one,
+                        NULL,
                         hvm_load_cpu_xsave_states,
                         HVM_CPU_XSAVE_SIZE(xfeature_mask) +
                             sizeof(struct hvm_save_descriptor),
@@ -1548,7 +1489,7 @@ static int __init hvm_register_CPU_save_and_restore(void)
         hvm_register_savevm(CPU_MSR_CODE,
                             "CPU_MSR",
                             hvm_save_cpu_msrs,
-                            hvm_save_cpu_msrs_one,
+                            NULL,
                             hvm_load_cpu_msrs,
                             HVM_CPU_MSR_SIZE(msr_count_max) +
                                 sizeof(struct hvm_save_descriptor),
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index ec77b23..d51463d 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -390,8 +390,9 @@ void pit_stop_channel0_irq(PITState *pit)
     spin_unlock(&pit->lock);
 }
 
-static int pit_save(struct domain *d, hvm_domain_context_t *h)
+static int pit_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     PITState *pit = domain_vpit(d);
     int rc;
 
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 770eab7..a405e7f 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -630,8 +630,9 @@ static int __init dump_irq_info_key_init(void)
 }
 __initcall(dump_irq_info_key_init);
 
-static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     unsigned int asserted, pdev, pintx;
     int rc;
@@ -662,16 +663,18 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
     return rc;
 }
 
-static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_isa(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
     /* Save ISA IRQ lines */
     return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
 }
 
-static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_link(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
     /* Save PCI-ISA link state */
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 1cb2a2e..b7c9bd6 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -718,7 +718,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     return 0;
 }
 
-static int hvm_save_mtrr_msr_one(struct vcpu *v, hvm_domain_context_t *h)
+static int hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
 {
     const struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
     struct hvm_hw_mtrr hw_mtrr = {
@@ -758,21 +758,6 @@ static int hvm_save_mtrr_msr_one(struct vcpu *v, hvm_domain_context_t *h)
     return hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr);
 }
 
-static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    int err = 0;
-
-    /* save mtrr&pat */
-    for_each_vcpu(d, v)
-    {
-       err = hvm_save_mtrr_msr_one(v, h);
-       if ( err )
-           break;
-    }
-    return err;
-}
-
 static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
 {
     int vcpuid, i;
@@ -823,7 +808,7 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_save_mtrr_msr_one,
+HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, NULL,
                           hvm_load_mtrr_msr, 1, HVMSR_PER_VCPU);
 
 void memory_type_changed(struct domain *d)
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 0a5e8ce..461d2df 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -249,8 +249,9 @@ static int handle_pmt_io(
     return X86EMUL_OKAY;
 }
 
-static int acpi_save(struct domain *d, hvm_domain_context_t *h)
+static int acpi_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
     PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
     uint32_t x, msb = acpi->tmr_val & TMR_VAL_MSB;
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index ce7e71b..ea2fbd3 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -737,8 +737,9 @@ void rtc_migrate_timers(struct vcpu *v)
 }
 
 /* Save RTC hardware state */
-static int rtc_save(struct domain *d, hvm_domain_context_t *h)
+static int rtc_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     RTCState *s = domain_vrtc(d);
     int rc;
 
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index d57648d..c579e49 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -174,7 +174,7 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
             rv = hvm_sr_handlers[typecode].save_one(d->vcpu[instance],
                                                     &ctxt);
         else
-            rv = hvm_sr_handlers[typecode].save(d, &ctxt);
+            rv = hvm_sr_handlers[typecode].save(d->vcpu[instance], &ctxt);
 
         if ( rv != 0 )
         {
@@ -207,7 +207,7 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
     {
         for_each_vcpu ( d, v )
         {
-            if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
+            if ( (rv = hvm_sr_handlers[typecode].save(v, &ctxt)) != 0 )
             {
                printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
                        d->domain_id, typecode, rv);
@@ -250,7 +251,6 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     struct hvm_save_header hdr;
     struct hvm_save_end end;
     hvm_save_handler handler;
-    hvm_save_one_handler save_one_handler;
     unsigned int i, rc;
     struct vcpu *v = NULL;
 
@@ -280,14 +280,13 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
     {
         handler = hvm_sr_handlers[i].save;
-        save_one_handler = hvm_sr_handlers[i].save_one;
-        if ( save_one_handler != NULL )
+        if ( handler != NULL )
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
             for_each_vcpu ( d, v )
             {
-                rc = save_one_handler(v, h);
+                rc = handler(v, h);
 
                 if( rc != 0 )
                 {
@@ -298,21 +297,6 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
                 }
             }
         }
-        else if ( handler != NULL )
-        {
-            printk(XENLOG_G_INFO "HVM%d save: %s\n",
-                   d->domain_id, hvm_sr_handlers[i].name);
-
-            rc = handler(d, h);
-
-            if( rc != 0 )
-            {
-                printk(XENLOG_G_ERR
-                       "HVM%d save: failed to save type %"PRIu16"\n",
-                       d->domain_id, i);
-                return -EFAULT;
-            }
-        }
     }
 
     /* Save an end-of-file marker */
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 66f54e4..cec4b1b 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -569,8 +569,9 @@ int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
     return vioapic->redirtbl[pin].fields.trig_mode;
 }
 
-static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
+static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_vioapic *s;
 
     if ( !has_vioapic(d) )
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index f72c8a9..1991829 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -990,8 +990,9 @@ out:
     return HVM_HCALL_completed;
 }
 
-static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_domain_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_viridian_domain_context ctxt = {
         .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
         .hypercall_gpa  = d->arch.hvm_domain.viridian.hypercall_gpa.raw,
@@ -1026,7 +1027,7 @@ static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt, NULL,
                           viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
 
-static int viridian_save_vcpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
+static int viridian_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_viridian_vcpu_context ctxt = {};
 
@@ -1039,21 +1040,6 @@ static int viridian_save_vcpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
     return hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt);
 }
 
-static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    int err = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        err = viridian_save_vcpu_ctxt_one(v, h);
-        if ( err )
-            break;
-    }
-
-    return err;
-}
-
 static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     int vcpuid;
@@ -1085,7 +1071,7 @@ static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 }
 
 HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt,
-                          viridian_save_vcpu_ctxt_one,
+                          NULL,
                           viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
 
 static int __init parse_viridian_version(const char *arg)
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index eff6070..e51dff7 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1435,45 +1435,31 @@ static void lapic_rearm(struct vlapic *s)
     s->timer_last_update = s->pt.last_plt_gtime;
 }
 
-static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_hidden(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    struct domain *d = v->domain;
     struct vlapic *s;
-    int rc = 0;
 
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
-    {
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
-            break;
-    }
-
-    return rc;
+    s = vcpu_vlapic(v);
+    return hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw);
 }
 
-static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_regs(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    struct domain *d = v->domain;
     struct vlapic *s;
-    int rc = 0;
 
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
-    {
-        if ( hvm_funcs.sync_pir_to_irr )
-            hvm_funcs.sync_pir_to_irr(v);
-
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
-            break;
-    }
+    if ( hvm_funcs.sync_pir_to_irr )
+        hvm_funcs.sync_pir_to_irr(v);
 
-    return rc;
+    s = vcpu_vlapic(v);
+    return hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs);
 }
 
 /*
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index ca9b4cb..e03d8cf 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -371,8 +371,9 @@ static int vpic_intercept_elcr_io(
     return X86EMUL_OKAY;
 }
 
-static int vpic_save(struct domain *d, hvm_domain_context_t *h)
+static int vpic_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_hw_vpic *s;
     int i;
 
diff --git a/xen/include/asm-x86/hvm/save.h b/xen/include/asm-x86/hvm/save.h
index 2538628..e64b4df 100644
--- a/xen/include/asm-x86/hvm/save.h
+++ b/xen/include/asm-x86/hvm/save.h
@@ -95,7 +95,7 @@ static inline uint16_t hvm_load_instance(struct hvm_domain_context *h)
  * The save handler may save multiple instances of a type into the buffer;
  * the load handler will be called once for each instance found when
  * restoring.  Both return non-zero on error. */
-typedef int (*hvm_save_handler) (struct domain *d, 
+typedef int (*hvm_save_handler) (struct  vcpu *v,
                                  hvm_domain_context_t *h);
 typedef int (*hvm_save_one_handler)(struct  vcpu *v,
                                     hvm_domain_context_t *h);
-- 
2.7.4

