> -----Original Message-----
> From: Jan Beulich [mailto:jbeul...@suse.com]
> Sent: 13 December 2016 11:28
> To: xen-devel <xen-de...@lists.xenproject.org>
> Cc: Andrew Cooper <andrew.coop...@citrix.com>; Paul Durrant <paul.durr...@citrix.com>
> Subject: [PATCH v4 2/5] x86emul: generalize exception handling for rep_* hooks
>
> If any of those hooks returns X86EMUL_EXCEPTION, some register state
> should still get updated if some iterations have been performed (but
> the rIP update will get suppressed if not all of them did get handled).
> This updating is done by register_address_increment() and
> __put_rep_prefix() (which hence must no longer be bypassed). As a
> result put_rep_prefix() can then skip most of the writeback, but needs
> to ensure proper completion of the executed instruction.
>
> While on the HVM side the VA -> LA -> PA translation process ensures
> that an exception would be raised on the first iteration only, doing so
> would unduly complicate the PV side code about to be added.
>
> Signed-off-by: Jan Beulich <jbeul...@suse.com>
> Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
Reviewed-by: Paul Durrant <paul.durr...@citrix.com>

> ---
> v4: Move changes to xen/arch/x86/hvm/emulate.c here (from a later
>     patch). Re-base.
> v3: Broken out from a later patch.
>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -459,6 +459,7 @@ static int hvmemul_linear_to_phys(
>      {
>          if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
>              return X86EMUL_RETRY;
> +        *reps = 0;
>          x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
>          return X86EMUL_EXCEPTION;
>      }
> @@ -478,6 +479,7 @@ static int hvmemul_linear_to_phys(
>          if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
>              return X86EMUL_RETRY;
>          done /= bytes_per_rep;
> +        *reps = done;
>          if ( done == 0 )
>          {
>              ASSERT(!reverse);
> @@ -486,7 +488,6 @@ static int hvmemul_linear_to_phys(
>              x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
>              return X86EMUL_EXCEPTION;
>          }
> -        *reps = done;
>          break;
>      }
>
> @@ -572,6 +573,7 @@ static int hvmemul_virtual_to_linear(
>       * neither know the exact error code to be used, nor can we easily
>       * determine the kind of exception (#GP or #TS) in that case.
>       */
> +    *reps = 0;
>      if ( is_x86_user_segment(seg) )
>          x86_emul_hw_exception((seg == x86_seg_ss)
>                                ? TRAP_stack_error
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -971,7 +971,11 @@ static void __put_rep_prefix(
>
>  #define put_rep_prefix(reps_completed) ({                                \
>      if ( rep_prefix() )                                                  \
> +    {                                                                    \
>          __put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed);  \
> +        if ( unlikely(rc == X86EMUL_EXCEPTION) )                         \
> +            goto no_writeback;                                           \
> +    }                                                                    \
>  })
>
>  /* Clip maximum repetitions so that the index register at most just wraps. */
> @@ -2919,14 +2923,9 @@ x86_emulate(
>          dst.mem.off = truncate_ea_and_reps(_regs.edi, nr_reps, dst.bytes);
>          if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
>              goto done;
> -        if ( (nr_reps > 1) && (ops->rep_ins != NULL) &&
> +        if ( (nr_reps == 1) || !ops->rep_ins ||
>              ((rc = ops->rep_ins(port, dst.mem.seg, dst.mem.off, dst.bytes,
> -                                &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
> -        {
> -            if ( rc != 0 )
> -                goto done;
> -        }
> -        else
> +                                &nr_reps, ctxt)) == X86EMUL_UNHANDLEABLE) )
>          {
>              fail_if(ops->read_io == NULL);
>              if ( (rc = ops->read_io(port, dst.bytes, &dst.val, ctxt)) != 0 )
> @@ -2936,6 +2935,8 @@ x86_emulate(
>          }
>          register_address_adjust(_regs.edi, nr_reps * dst.bytes);
>          put_rep_prefix(nr_reps);
> +        if ( rc != X86EMUL_OKAY )
> +            goto done;
>          break;
>      }
>
> @@ -2946,14 +2947,9 @@ x86_emulate(
>          ea.mem.off = truncate_ea_and_reps(_regs.esi, nr_reps, dst.bytes);
>          if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
>              goto done;
> -        if ( (nr_reps > 1) && (ops->rep_outs != NULL) &&
> +        if ( (nr_reps == 1) || !ops->rep_outs ||
>              ((rc = ops->rep_outs(ea.mem.seg, ea.mem.off, port, dst.bytes,
> -                                 &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
> -        {
> -            if ( rc != 0 )
> -                goto done;
> -        }
> -        else
> +                                 &nr_reps, ctxt)) == X86EMUL_UNHANDLEABLE) )
>          {
>              if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
>                                    &dst.val, dst.bytes, ctxt, ops)) != 0 )
> @@ -2965,6 +2961,8 @@ x86_emulate(
>          }
>          register_address_adjust(_regs.esi, nr_reps * dst.bytes);
>          put_rep_prefix(nr_reps);
> +        if ( rc != X86EMUL_OKAY )
> +            goto done;
>          break;
>      }
>
> @@ -3187,15 +3185,10 @@ x86_emulate(
>          dst.mem.seg = x86_seg_es;
>          dst.mem.off = truncate_ea_and_reps(_regs.edi, nr_reps, dst.bytes);
>          src.mem.off = truncate_ea_and_reps(_regs.esi, nr_reps, dst.bytes);
> -        if ( (nr_reps > 1) && (ops->rep_movs != NULL) &&
> +        if ( (nr_reps == 1) || !ops->rep_movs ||
>              ((rc = ops->rep_movs(ea.mem.seg, src.mem.off,
>                                   dst.mem.seg, dst.mem.off, dst.bytes,
> -                                 &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
> -        {
> -            if ( rc != 0 )
> -                goto done;
> -        }
> -        else
> +                                 &nr_reps, ctxt)) == X86EMUL_UNHANDLEABLE) )
>          {
>              if ( (rc = read_ulong(ea.mem.seg, src.mem.off,
>                                    &dst.val, dst.bytes, ctxt, ops)) != 0 )
> @@ -3206,6 +3199,8 @@ x86_emulate(
>          register_address_adjust(_regs.esi, nr_reps * dst.bytes);
>          register_address_adjust(_regs.edi, nr_reps * dst.bytes);
>          put_rep_prefix(nr_reps);
> +        if ( rc != X86EMUL_OKAY )
> +            goto done;
>          break;
>      }
>
> @@ -3244,11 +3239,12 @@ x86_emulate(
>              dst.val = src.val;
>              dst.type = OP_MEM;
>              nr_reps = 1;
> +            rc = X86EMUL_OKAY;
>          }
> -        else if ( rc != X86EMUL_OKAY )
> -            goto done;
>          register_address_adjust(_regs.edi, nr_reps * dst.bytes);
>          put_rep_prefix(nr_reps);
> +        if ( rc != X86EMUL_OKAY )
> +            goto done;
>          break;
>      }
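
For readers following the change at a distance, the behaviour the commit
message describes can be condensed into a small stand-alone sketch. This is
not Xen code: the types, register layout and helper names below are
simplified assumptions, intended only to show how the count and index
registers still advance for the iterations a rep_* hook did complete, while
the rIP update is withheld when the hook reports an exception part-way
through.

/*
 * Minimal sketch (illustrative assumptions only, not Xen code) of partial
 * completion of a repeated string instruction: the hook faults after some
 * iterations, register state is updated for those iterations, and the
 * instruction pointer is left alone so the instruction restarts at the
 * faulting iteration once the exception has been delivered.
 */
#include <stdio.h>
#include <stdint.h>

enum rc { OKAY, EXCEPTION };

struct regs {
    uint64_t rcx;   /* remaining repetitions */
    uint64_t rsi;   /* source index */
    uint64_t rip;   /* instruction pointer */
};

/* Stand-in for a rep_* hook: pretend it faults after 3 of the requested reps. */
static enum rc rep_hook(uint64_t *reps)
{
    if ( *reps > 3 )
    {
        *reps = 3;          /* iterations actually performed */
        return EXCEPTION;   /* exception raised on a later iteration */
    }
    return OKAY;
}

static void emulate_rep(struct regs *r, unsigned int bytes_per_rep)
{
    uint64_t reps = r->rcx;
    enum rc rc = rep_hook(&reps);

    /* Register state reflects the iterations that did complete ... */
    r->rcx -= reps;
    r->rsi += reps * bytes_per_rep;

    /* ... but rIP only advances if the whole instruction completed. */
    if ( rc == OKAY && r->rcx == 0 )
        r->rip += 2; /* pretend the instruction is 2 bytes long */
}

int main(void)
{
    struct regs r = { .rcx = 8, .rsi = 0x1000, .rip = 0x4000 };

    emulate_rep(&r, 4);
    printf("rcx=%llu rsi=%#llx rip=%#llx\n",
           (unsigned long long)r.rcx,
           (unsigned long long)r.rsi,
           (unsigned long long)r.rip);
    /* Expect: rcx=5 rsi=0x100c rip=0x4000 (rIP unchanged, exception pending) */
    return 0;
}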