Florian Hofhammer <[email protected]> writes:

> This patch adds a plugin API function that allows diverting the program
> counter during execution. A potential use case for this functionality is
> to skip over parts of the code, e.g., by hooking into a specific
> instruction and setting the PC to the next instruction in the callback.
>
> Redirecting control flow via cpu_loop_exit() works fine in callbacks
> that are triggered during code execution, because cpu_exec_setjmp() is
> still part of the call stack at that point. In syscall callbacks,
> however, cpu_exec_setjmp() has already returned, so the longjmp()
> triggered by cpu_loop_exit() would be undefined behavior. For this
> reason, we introduce a new return constant QEMU_ESETPC and do another
> setjmp() before executing the syscall plugin callbacks and potentially
> the syscall itself.
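
If I'm following the control flow correctly, the syscall path ends up
looking roughly like this (my paraphrase of the description above, not
the patch itself; names and error handling are simplified):

    /* rough mental model of the new syscall path in do_syscall() */
    abi_long ret;
    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        /* entry callbacks may now divert the PC via cpu_loop_exit() */
        qemu_plugin_vcpu_syscall(cpu, num, a1, a2, a3, a4, a5, a6, a7, a8);
        ret = do_syscall1(cpu_env, num, a1, a2, a3, a4, a5, a6, a7, a8);
        qemu_plugin_vcpu_syscall_ret(cpu, num, ret);
    } else {
        /* a plugin redirected the PC; tell the cpu_loop to leave it be */
        ret = -QEMU_ESETPC;
    }
    return ret;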

I think we want to split this up into:

  - introduce the flags
  - re-factor linux-user to use an inline helper
    (qemu_syscall_finalize_pc(ret)? -- see the sketch below)
  - implement the API call, tweak qemu_syscall_finalize_pc()
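
For the helper, something like this is what I have in mind -- just a
sketch, the name and exact contract are up for grabs:

    /*
     * Sketch only: each cpu_loop would funnel the do_syscall() result
     * through this instead of open-coding the check. Returns true if
     * the loop should write @ret back to the guest return register as
     * usual, false if the registers must be left untouched.
     */
    static inline bool qemu_syscall_finalize_pc(abi_long ret)
    {
        return ret != -QEMU_ESIGRETURN;
    }

That way step three only has to add the -QEMU_ESETPC case here instead
of touching every cpu_loop.c again.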

>
> Link: https://lists.nongnu.org/archive/html/qemu-devel/2025-08/msg00656.html
> Signed-off-by: Florian Hofhammer <[email protected]>
> ---
>  include/qemu/qemu-plugin.h         | 15 +++++++++++++++
>  linux-user/aarch64/cpu_loop.c      |  2 +-
>  linux-user/alpha/cpu_loop.c        |  2 +-
>  linux-user/arm/cpu_loop.c          |  2 +-
>  linux-user/hexagon/cpu_loop.c      |  2 +-
>  linux-user/hppa/cpu_loop.c         |  4 ++++
>  linux-user/i386/cpu_loop.c         |  8 +++++---
>  linux-user/include/special-errno.h |  8 ++++++++
>  linux-user/loongarch64/cpu_loop.c  |  5 +++--
>  linux-user/m68k/cpu_loop.c         |  2 +-
>  linux-user/microblaze/cpu_loop.c   |  2 +-
>  linux-user/mips/cpu_loop.c         |  5 +++--
>  linux-user/openrisc/cpu_loop.c     |  2 +-
>  linux-user/ppc/cpu_loop.c          |  6 ++++--
>  linux-user/riscv/cpu_loop.c        |  2 +-
>  linux-user/s390x/cpu_loop.c        |  2 +-
>  linux-user/sh4/cpu_loop.c          |  2 +-
>  linux-user/sparc/cpu_loop.c        |  4 +++-
>  linux-user/syscall.c               |  8 ++++++++
>  linux-user/xtensa/cpu_loop.c       |  3 +++
>  plugins/api.c                      | 17 ++++++++++++++++-
>  plugins/core.c                     | 25 ++++++++++++++-----------
>  22 files changed, 96 insertions(+), 32 deletions(-)
>
> diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h
> index 60de4fdd3f..f976b62030 100644
> --- a/include/qemu/qemu-plugin.h
> +++ b/include/qemu/qemu-plugin.h
> @@ -321,11 +321,14 @@ typedef struct {
>   * @QEMU_PLUGIN_CB_NO_REGS: callback does not access the CPU's regs
>   * @QEMU_PLUGIN_CB_R_REGS: callback reads the CPU's regs
>   * @QEMU_PLUGIN_CB_RW_REGS: callback reads and writes the CPU's regs
> + * @QEMU_PLUGIN_CB_RW_REGS_PC: callback reads and writes the CPU's
> + *                             regs and updates the PC
>   */
>  enum qemu_plugin_cb_flags {
>      QEMU_PLUGIN_CB_NO_REGS,
>      QEMU_PLUGIN_CB_R_REGS,
>      QEMU_PLUGIN_CB_RW_REGS,
> +    QEMU_PLUGIN_CB_RW_REGS_PC,
>  };

Hmm, should we consider making these proper bit flags rather than an
ordered enum? or...

>  
<snip>
> diff --git a/plugins/core.c b/plugins/core.c
> index b4b783008f..9e9a066669 100644
> --- a/plugins/core.c
> +++ b/plugins/core.c
> @@ -395,15 +395,16 @@ void plugin_register_dyn_cb__udata(GArray **arr,
>                                     enum qemu_plugin_cb_flags flags,
>                                     void *udata)
>  {
> -    static TCGHelperInfo info[3] = {
> +    static TCGHelperInfo info[4] = {
>          [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
>          [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
>          [QEMU_PLUGIN_CB_RW_REGS].flags = 0,
> +        [QEMU_PLUGIN_CB_RW_REGS_PC].flags = 0,
>          /*
>           * Match qemu_plugin_vcpu_udata_cb_t:
>           *   void (*)(uint32_t, void *)
>           */
> -        [0 ... 2].typemask = (dh_typemask(void, 0) |
> +        [0 ... 3].typemask = (dh_typemask(void, 0) |
>                                dh_typemask(i32, 1) |
>                                dh_typemask(ptr, 2))

...are we effectively ordering callback types here?
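
i.e. if we went the bitmask route it could look something like this
(just thinking out loud, not a concrete proposal -- as written it would
break the info[] indexing above):

    /* hypothetical alternative, not what the patch does */
    enum qemu_plugin_cb_flags {
        QEMU_PLUGIN_CB_NO_REGS = 0,
        QEMU_PLUGIN_CB_R_REGS  = (1 << 0),
        QEMU_PLUGIN_CB_W_REGS  = (1 << 1),
        QEMU_PLUGIN_CB_W_PC    = (1 << 2),
        /* keep the existing names as combinations */
        QEMU_PLUGIN_CB_RW_REGS = QEMU_PLUGIN_CB_R_REGS | QEMU_PLUGIN_CB_W_REGS,
        QEMU_PLUGIN_CB_RW_REGS_PC = QEMU_PLUGIN_CB_RW_REGS | QEMU_PLUGIN_CB_W_PC,
    };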

>      };
> @@ -425,15 +426,16 @@ void plugin_register_dyn_cond_cb__udata(GArray **arr,
>                                          uint64_t imm,
>                                          void *udata)
>  {
> -    static TCGHelperInfo info[3] = {
> +    static TCGHelperInfo info[4] = {
>          [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
>          [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
>          [QEMU_PLUGIN_CB_RW_REGS].flags = 0,
> +        [QEMU_PLUGIN_CB_RW_REGS_PC].flags = 0,
>          /*
>           * Match qemu_plugin_vcpu_udata_cb_t:
>           *   void (*)(uint32_t, void *)
>           */
> -        [0 ... 2].typemask = (dh_typemask(void, 0) |
> +        [0 ... 3].typemask = (dh_typemask(void, 0) |
>                                dh_typemask(i32, 1) |
>                                dh_typemask(ptr, 2))
>      };
> @@ -464,15 +466,16 @@ void plugin_register_vcpu_mem_cb(GArray **arr,
>          !__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t) &&
>          !__builtin_types_compatible_p(qemu_plugin_meminfo_t, int32_t));
>  
> -    static TCGHelperInfo info[3] = {
> +    static TCGHelperInfo info[4] = {
>          [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
>          [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
>          [QEMU_PLUGIN_CB_RW_REGS].flags = 0,
> +        [QEMU_PLUGIN_CB_RW_REGS_PC].flags = 0,
>          /*
>           * Match qemu_plugin_vcpu_mem_cb_t:
>           *   void (*)(uint32_t, qemu_plugin_meminfo_t, uint64_t, void *)
>           */
> -        [0 ... 2].typemask =
> +        [0 ... 3].typemask =
>              (dh_typemask(void, 0) |
>               dh_typemask(i32, 1) |
>               (__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t)
> @@ -534,7 +537,7 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
>      QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
>          qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;
>  
> -        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
> +        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS_PC);
>          func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
>          qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
>      }
> @@ -558,7 +561,7 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
>      QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
>          qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;
>  
> -        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
> +        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS_PC);
>          func(cb->ctx->id, cpu->cpu_index, num, ret);
>          qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
>      }
> @@ -568,7 +571,7 @@ void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
>  {
>      /* idle and resume cb may be called before init, ignore in this case */
>      if (cpu->cpu_index < plugin.num_vcpus) {
> -        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
> +        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS_PC);
>          plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
>          qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
>      }
> @@ -577,7 +580,7 @@ void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
>  void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
>  {
>      if (cpu->cpu_index < plugin.num_vcpus) {
> -        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS);
> +        qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS_PC);
>          plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
>          qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS);
>      }
> @@ -848,6 +851,6 @@ enum qemu_plugin_cb_flags tcg_call_to_qemu_plugin_cb_flags(int flags)
>      } else if (flags & TCG_CALL_NO_WG) {
>          return QEMU_PLUGIN_CB_R_REGS;
>      } else {
> -        return QEMU_PLUGIN_CB_RW_REGS;
> +        return QEMU_PLUGIN_CB_RW_REGS_PC;
>      }
>  }

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
