Paolo Bonzini <pbonz...@redhat.com> writes:

> Yes, that is correct. It's more work but also more maintainable.

I originally suggested keeping the locking choice up in the main loop
because I suspect most guests will stick to BQL IRQs until they find it
is a bottleneck.

cpu_handle_interrupt/exception have never been my favourite functions,
but perhaps there is a way to refactor and clean them up to keep this
in core code?

I do worry that hiding BQL activity in the guest code makes it harder to
reason about what locks are currently held when reading the code.
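
For illustration, here is a rough sketch of the alternative Paolo is
suggesting, i.e. each per-arch cpu_exec_interrupt hook taking and
dropping the BQL itself around its existing body. This is only a
sketch (the delivery logic is elided and the usual QEMU headers and
series context are assumed), not a tested patch:

  /*
   * Sketch only: the target hook takes/drops the BQL itself rather
   * than relying on cpu_handle_interrupt() holding it on its behalf.
   */
  static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
  {
      bool handled = false;

      qemu_mutex_lock_iothread();

      /* ... existing arm_cpu_exec_interrupt body, unchanged, setting
       *     'handled' when an interrupt is actually delivered ... */

      qemu_mutex_unlock_iothread();
      return handled;
  }

Every target would carry the same boilerplate, which is partly why I
wonder whether the lock/unlock is better kept where a reader of
cpu-exec.c can see it.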

>
> Thanks,
>
> Paolo
>
> Il ven 31 lug 2020, 22:09 Robert Foley <robert.fo...@linaro.org> ha scritto:
>
>> On Fri, 31 Jul 2020 at 14:02, Paolo Bonzini <pbonz...@redhat.com> wrote:
>> >
>> > On 31/07/20 14:51, Robert Foley wrote:
>> > > This change removes the implied BQL from the cpu_handle_interrupt
>> > > and cpu_handle_exception paths. We can now select per-arch whether
>> > > the BQL is needed by using the bql_interrupt flag.
>> > > By default, the core code holds the BQL.
>> > > One benefit of this change is that it leaves it up to the arch
>> > > to remove the BQL when it makes sense.
>> > >
>> > > Signed-off-by: Robert Foley <robert.fo...@linaro.org>
>> >
>> > No, please just modify all implementations to do lock/unlock.  It's a
>> > simpler patch than this one.
>>
>> Sure, we will update the patch based on this.
>>
>> To clarify, the suggestion here is to remove the bql_interrupt flag
>> that we added and change all the per-arch interrupt callback code to
>> do the lock/unlock of the BQL?  So for example change
>> x86_cpu_exec_interrupt, arm_cpu_exec_interrupt, etc. to lock/unlock the BQL?
>>
>> Thanks,
>> -Rob
>>
>>
>> >
>> > Paolo
>> >
>> > > ---
>> > >  accel/tcg/cpu-exec.c | 34 ++++++++++++++++++++++++++--------
>> > >  1 file changed, 26 insertions(+), 8 deletions(-)
>> > >
>> > > diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
>> > > index 80d0e649b2..cde27ee0bf 100644
>> > > --- a/accel/tcg/cpu-exec.c
>> > > +++ b/accel/tcg/cpu-exec.c
>> > > @@ -517,9 +517,13 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
>> > >  #else
>> > >          if (replay_exception()) {
>> > >              CPUClass *cc = CPU_GET_CLASS(cpu);
>> > > -            qemu_mutex_lock_iothread();
>> > > +            if (cc->bql_interrupt) {
>> > > +                qemu_mutex_lock_iothread();
>> > > +            }
>> > >              cc->do_interrupt(cpu);
>> > > -            qemu_mutex_unlock_iothread();
>> > > +            if (cc->bql_interrupt) {
>> > > +                qemu_mutex_unlock_iothread();
>> > > +            }
>> > >              cpu->exception_index = -1;
>> > >
>> > >              if (unlikely(cpu->singlestep_enabled)) {
>> > > @@ -558,7 +562,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
>> > >      if (unlikely(cpu_interrupt_request(cpu))) {
>> > >          int interrupt_request;
>> > >
>> > > -        qemu_mutex_lock_iothread();
>> > > +        cpu_mutex_lock(cpu);
>> > >          interrupt_request = cpu_interrupt_request(cpu);
>> > >          if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
>> > >              /* Mask out external interrupts for this step. */
>> > > @@ -567,7 +571,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
>> > >          if (interrupt_request & CPU_INTERRUPT_DEBUG) {
>> > >              cpu_reset_interrupt(cpu, CPU_INTERRUPT_DEBUG);
>> > >              cpu->exception_index = EXCP_DEBUG;
>> > > -            qemu_mutex_unlock_iothread();
>> > > +            cpu_mutex_unlock(cpu);
>> > >              return true;
>> > >          }
>> > >          if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
>> > > @@ -577,13 +581,15 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
>> > >              cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
>> > >              cpu_halted_set(cpu, 1);
>> > >              cpu->exception_index = EXCP_HLT;
>> > > -            qemu_mutex_unlock_iothread();
>> > > +            cpu_mutex_unlock(cpu);
>> > >              return true;
>> > >          }
>> > >  #if defined(TARGET_I386)
>> > >          else if (interrupt_request & CPU_INTERRUPT_INIT) {
>> > >              X86CPU *x86_cpu = X86_CPU(cpu);
>> > >              CPUArchState *env = &x86_cpu->env;
>> > > +            cpu_mutex_unlock(cpu);
>> > > +            qemu_mutex_lock_iothread();
>> > >              replay_interrupt();
>> > >              cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
>> > >              do_cpu_init(x86_cpu);
>> > > @@ -595,7 +601,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
>> > >          else if (interrupt_request & CPU_INTERRUPT_RESET) {
>> > >              replay_interrupt();
>> > >              cpu_reset(cpu);
>> > > -            qemu_mutex_unlock_iothread();
>> > > +            cpu_mutex_unlock(cpu);
>> > >              return true;
>> > >          }
>> > >  #endif
>> > > @@ -604,7 +610,15 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
>> > >             True when it is, and we should restart on a new TB,
>> > >             and via longjmp via cpu_loop_exit.  */
>> > >          else {
>> > > +            cpu_mutex_unlock(cpu);
>> > > +            if (cc->bql_interrupt) {
>> > > +                qemu_mutex_lock_iothread();
>> > > +            }
>> > >              if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
>> > > +                if (cc->bql_interrupt) {
>> > > +                    qemu_mutex_unlock_iothread();
>> > > +                }
>> > > +                cpu_mutex_lock(cpu);
>> > >                  replay_interrupt();
>> > >                  /*
>> > >                   * After processing the interrupt, ensure an EXCP_DEBUG is
>> > > @@ -614,6 +628,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
>> > >                  cpu->exception_index =
>> > >                      (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
>> > >                  *last_tb = NULL;
>> > > +            } else {
>> > > +                if (cc->bql_interrupt) {
>> > > +                    qemu_mutex_unlock_iothread();
>> > > +                }
>> > > +                cpu_mutex_lock(cpu);
>> > >              }
>> > >              /* The target hook may have updated the 'cpu->interrupt_request';
>> > >               * reload the 'interrupt_request' value */
>> > > @@ -627,7 +646,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
>> > >          }
>> > >
>> > >          /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
>> > > -        qemu_mutex_unlock_iothread();
>> > > +        cpu_mutex_unlock(cpu);
>> > >      }
>> > >
>> > >      /* Finally, check if we need to exit to the main loop.  */
>> > > @@ -691,7 +710,6 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
>> > >      }
>> > >  #endif
>> > >  }
>> > > -
>> > >  /* main execution loop */
>> > >
>> > >  int cpu_exec(CPUState *cpu)
>> > >
>> >
>>
>>


-- 
Alex Bennée
