On 1/17/21 5:48 PM, Philippe Mathieu-Daudé wrote:
> Watchpoint functions use cpu_restore_state(), which is only
> available when the TCG accelerator is built. Restrict them
> to TCG.
> 
> Signed-off-by: Philippe Mathieu-Daudé <f4...@amsat.org>

I am doing some of this in my series, and I had not noticed that
cpu_watchpoint_insert() was also TCG-only.

We should probably merge this somehow.

I thought it was used by gdbstub.c as well, passing the BP_GDB flag.

I noticed that gdbstub does something else entirely for kvm_enabled(),
i.e. it calls kvm_insert_breakpoint(). But what about the other accels?
It seems the code falls through to cpu_breakpoint_insert() and
cpu_watchpoint_insert()..?

Should cpu_breakpoint_insert() have the same fate then?

And is this really all TCG-specific?

From gdbstub.c:1020:

static int gdb_breakpoint_insert(int type, target_ulong addr, target_ulong len)
{
    CPUState *cpu;
    int err = 0;

    if (kvm_enabled()) {
        return kvm_insert_breakpoint(gdbserver_state.c_cpu, addr, len, type);
    }

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
            if (err) {
                break;
            }
        }
        return err;
#ifndef CONFIG_USER_ONLY
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_insert(cpu, addr, len,
                                        xlat_gdb_type(cpu, type), NULL);
            [...]

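To answer my own question a bit: if the generic path really is TCG-only,
one option would be to fail early in gdb_breakpoint_insert() for accels
that have neither KVM nor TCG handling. A rough sketch of what I mean
(untested, and the -ENOSYS choice is just a guess):

    if (kvm_enabled()) {
        return kvm_insert_breakpoint(gdbserver_state.c_cpu, addr, len, type);
    }
    if (!tcg_enabled()) {
        /* hypothetical: no breakpoint machinery wired up for this accel */
        return -ENOSYS;
    }
    /* ... fall through to the existing cpu_breakpoint_insert() path ... */
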
> ---
> RFC because we could keep that code by adding an empty
>     stub for cpu_restore_state(), but that would be confusing
>     since the function is named generically.
> ---
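
FWIW, I guess the empty-stub alternative would look something like this
(just a sketch, with the signature written from memory; the generic name
is indeed what makes it confusing):

#ifndef CONFIG_TCG
static inline bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc,
                                     bool will_exit)
{
    /* Without TCG there is no translation state to restore. */
    return false;
}
#endif
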
>  include/hw/core/cpu.h | 4 ++--
>  softmmu/physmem.c     | 4 ++++
>  2 files changed, 6 insertions(+), 2 deletions(-)
> 
> diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
> index 140fa32a5e3..1b4af30db04 100644
> --- a/include/hw/core/cpu.h
> +++ b/include/hw/core/cpu.h
> @@ -1033,7 +1033,7 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
>      return false;
>  }
>  
> -#ifdef CONFIG_USER_ONLY
> +#if !defined(CONFIG_TCG) || defined(CONFIG_USER_ONLY)
>  static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
>                                          int flags, CPUWatchpoint **watchpoint)
>  {
> @@ -1098,7 +1098,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
>   * If no watchpoint is registered for the range, the result is 0.
>   */
>  int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
> -#endif
> +#endif /* !CONFIG_TCG || CONFIG_USER_ONLY */
>  
>  /**
>   * cpu_get_address_space:
> diff --git a/softmmu/physmem.c b/softmmu/physmem.c
> index 65602ed548e..5135a6371b5 100644
> --- a/softmmu/physmem.c
> +++ b/softmmu/physmem.c
> @@ -765,6 +765,7 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
>      return cpu->cpu_ases[asidx].as;
>  }
>  
> +#ifdef CONFIG_TCG
>  /* Add a watchpoint.  */
>  int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
>                            int flags, CPUWatchpoint **watchpoint)
> @@ -873,6 +874,7 @@ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
>      }
>      return ret;
>  }
> +#endif /* CONFIG_TCG */
>  
>  /* Called from RCU critical section */
>  static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
> @@ -2356,6 +2358,7 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
>      return block->offset + offset;
>  }
>  
> +#ifdef CONFIG_TCG
>  /* Generate a debug exception if a watchpoint has been hit.  */
>  void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
>                            MemTxAttrs attrs, int flags, uintptr_t ra)
> @@ -2424,6 +2427,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
>          }
>      }
>  }
> +#endif /* CONFIG_TCG */
>  
>  static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
>                                   MemTxAttrs attrs, void *buf, hwaddr len);
> 

