On 03/06/16 23:40, Alex Bennée wrote:
> In preparation for multi-threaded TCG we remove tcg_exec_all and move
> all the CPU cycling into the main thread function. When MTTCG is enabled
> we shall use a separate thread function which only handles one vCPU.
>
> Signed-off-by: Alex Bennée <alex.ben...@linaro.org>

Reviewed-by: Sergey Fedorov <sergey.fedo...@linaro.org>
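
(Not something this patch needs to change, just to visualise the direction
the commit message describes: with the CPU cycling now inside the thread
function, a per-vCPU MTTCG thread could presumably end up looking roughly
like the sketch below. The function name and the waiting/halt handling are
my guesses, not anything this series defines.)

static void *qemu_tcg_single_cpu_thread_fn(void *arg)   /* hypothetical name */
{
    CPUState *cpu = arg;   /* this thread drives exactly one vCPU */

    while (1) {
        if (cpu_can_run(cpu)) {
            int r = tcg_cpu_exec(cpu);          /* run just this vCPU */
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        /* waiting for work / halt handling deliberately elided */
    }
    return NULL;
}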

>
> ---
> v2
>   - update timer calls to new API on rebase
> v3
>   - move tcg_cpu_exec above thread function, drop static fwd declaration
> ---
>  cpus.c | 185 +++++++++++++++++++++++++++++++----------------------------------
>  1 file changed, 89 insertions(+), 96 deletions(-)
>
> diff --git a/cpus.c b/cpus.c
> index c404dd7..4cc2ce6 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -69,7 +69,6 @@
>  
>  #endif /* CONFIG_LINUX */
>  
> -static CPUState *next_cpu;
>  int64_t max_delay;
>  int64_t max_advance;
>  
> @@ -1119,7 +1118,67 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>  #endif
>  }
>  
> -static void tcg_exec_all(void);
> +static int64_t tcg_get_icount_limit(void)
> +{
> +    int64_t deadline;
> +
> +    if (replay_mode != REPLAY_MODE_PLAY) {
> +        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
> +
> +        /* Maintain prior (possibly buggy) behaviour where if no deadline
> +         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
> +         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
> +         * nanoseconds.
> +         */
> +        if ((deadline < 0) || (deadline > INT32_MAX)) {
> +            deadline = INT32_MAX;
> +        }
> +
> +        return qemu_icount_round(deadline);
> +    } else {
> +        return replay_get_instructions();
> +    }
> +}
> +
> +static int tcg_cpu_exec(CPUState *cpu)
> +{
> +    int ret;
> +#ifdef CONFIG_PROFILER
> +    int64_t ti;
> +#endif
> +
> +#ifdef CONFIG_PROFILER
> +    ti = profile_getclock();
> +#endif
> +    if (use_icount) {
> +        int64_t count;
> +        int decr;
> +        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
> +                                    + cpu->icount_extra);
> +        cpu->icount_decr.u16.low = 0;
> +        cpu->icount_extra = 0;
> +        count = tcg_get_icount_limit();
> +        timers_state.qemu_icount += count;
> +        decr = (count > 0xffff) ? 0xffff : count;
> +        count -= decr;
> +        cpu->icount_decr.u16.low = decr;
> +        cpu->icount_extra = count;
> +    }
> +    ret = cpu_exec(cpu);
> +#ifdef CONFIG_PROFILER
> +    tcg_time += profile_getclock() - ti;
> +#endif
> +    if (use_icount) {
> +        /* Fold pending instructions back into the
> +           instruction counter, and clear the interrupt flag.  */
> +        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
> +                        + cpu->icount_extra);
> +        cpu->icount_decr.u32 = 0;
> +        cpu->icount_extra = 0;
> +        replay_account_executed_instructions();
> +    }
> +    return ret;
> +}
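
Not part of the patch, just to make the icount bookkeeping above concrete:
the 16-bit u16.low decrementer can only count down 0xffff instructions at a
time, so the budget returned by tcg_get_icount_limit() gets split between it
and icount_extra. With a made-up budget of 100000 instructions:

int64_t count = 100000;                       /* from tcg_get_icount_limit() */
int decr = (count > 0xffff) ? 0xffff : count; /* 65535 goes into u16.low */
count -= decr;                                /* 34465 left in icount_extra */
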
>  
>  static void *qemu_tcg_cpu_thread_fn(void *arg)
>  {
> @@ -1150,8 +1209,35 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>      /* process any pending work */
>      atomic_mb_set(&exit_request, 1);
>  
> +    cpu = first_cpu;
> +
>      while (1) {
> -        tcg_exec_all();
> +        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
> +        qemu_account_warp_timer();
> +
> +        if (!cpu) {
> +            cpu = first_cpu;
> +        }
> +
> +        for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
> +
> +            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
> +                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
> +
> +            if (cpu_can_run(cpu)) {
> +                int r = tcg_cpu_exec(cpu);
> +                if (r == EXCP_DEBUG) {
> +                    cpu_handle_guest_debug(cpu);
> +                    break;
> +                }
> +            } else if (cpu->stop || cpu->stopped) {
> +                break;
> +            }
> +
> +        } /* for cpu.. */
> +
> +        /* Pairs with smp_wmb in qemu_cpu_kick.  */
> +        atomic_mb_set(&exit_request, 0);
>  
>          if (use_icount) {
>              int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
> @@ -1448,99 +1534,6 @@ int vm_stop_force_state(RunState state)
>      }
>  }
>  
> -static int64_t tcg_get_icount_limit(void)
> -{
> -    int64_t deadline;
> -
> -    if (replay_mode != REPLAY_MODE_PLAY) {
> -        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
> -
> -        /* Maintain prior (possibly buggy) behaviour where if no deadline
> -         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
> -         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
> -         * nanoseconds.
> -         */
> -        if ((deadline < 0) || (deadline > INT32_MAX)) {
> -            deadline = INT32_MAX;
> -        }
> -
> -        return qemu_icount_round(deadline);
> -    } else {
> -        return replay_get_instructions();
> -    }
> -}
> -
> -static int tcg_cpu_exec(CPUState *cpu)
> -{
> -    int ret;
> -#ifdef CONFIG_PROFILER
> -    int64_t ti;
> -#endif
> -
> -#ifdef CONFIG_PROFILER
> -    ti = profile_getclock();
> -#endif
> -    if (use_icount) {
> -        int64_t count;
> -        int decr;
> -        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
> -                                    + cpu->icount_extra);
> -        cpu->icount_decr.u16.low = 0;
> -        cpu->icount_extra = 0;
> -        count = tcg_get_icount_limit();
> -        timers_state.qemu_icount += count;
> -        decr = (count > 0xffff) ? 0xffff : count;
> -        count -= decr;
> -        cpu->icount_decr.u16.low = decr;
> -        cpu->icount_extra = count;
> -    }
> -    ret = cpu_exec(cpu);
> -#ifdef CONFIG_PROFILER
> -    tcg_time += profile_getclock() - ti;
> -#endif
> -    if (use_icount) {
> -        /* Fold pending instructions back into the
> -           instruction counter, and clear the interrupt flag.  */
> -        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
> -                        + cpu->icount_extra);
> -        cpu->icount_decr.u32 = 0;
> -        cpu->icount_extra = 0;
> -        replay_account_executed_instructions();
> -    }
> -    return ret;
> -}
> -
> -static void tcg_exec_all(void)
> -{
> -    int r;
> -
> -    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
> -    qemu_account_warp_timer();
> -
> -    if (next_cpu == NULL) {
> -        next_cpu = first_cpu;
> -    }
> -    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
> -        CPUState *cpu = next_cpu;
> -
> -        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
> -                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
> -
> -        if (cpu_can_run(cpu)) {
> -            r = tcg_cpu_exec(cpu);
> -            if (r == EXCP_DEBUG) {
> -                cpu_handle_guest_debug(cpu);
> -                break;
> -            }
> -        } else if (cpu->stop || cpu->stopped) {
> -            break;
> -        }
> -    }
> -
> -    /* Pairs with smp_wmb in qemu_cpu_kick.  */
> -    atomic_mb_set(&exit_request, 0);
> -}
> -
>  void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
>  {
>      /* XXX: implement xxx_cpu_list for targets that still miss it */

