Re: [PATCH v6 1/3] hw/intc: Move mtimer/mtimecmp to aclint

2022-07-25 Thread Atish Patra
On Sat, Jul 23, 2022 at 2:43 AM Andrew Jones  wrote:
>
> On Thu, Jul 21, 2022 at 06:00:44PM -0700, Atish Patra wrote:
> > Historically, the mtime/mtimecmp registers have been part of the CPU
> > because they are per-hart entities. However, they actually belong to
> > the ACLINT, which is an MMIO device.
> >
> > Move them to the ACLINT device. This also emulates the real hardware
> > more closely.
> >
> > Reviewed-by: Anup Patel 
> > Reviewed-by: Alistair Francis 
> > Signed-off-by: Atish Patra 
> > ---
> >  hw/intc/riscv_aclint.c | 41 --
> >  hw/timer/ibex_timer.c  | 18 ++-
> >  include/hw/intc/riscv_aclint.h |  2 ++
> >  include/hw/timer/ibex_timer.h  |  2 ++
> >  target/riscv/cpu.h |  2 --
> >  target/riscv/machine.c |  5 ++---
> >  6 files changed, 42 insertions(+), 28 deletions(-)
> >
> > diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
> > index e7942c4e5a32..47f355224612 100644
> > --- a/hw/intc/riscv_aclint.c
> > +++ b/hw/intc/riscv_aclint.c
> > @@ -32,6 +32,7 @@
> >  #include "hw/intc/riscv_aclint.h"
> >  #include "qemu/timer.h"
> >  #include "hw/irq.h"
> > +#include "migration/vmstate.h"
> >
> >  typedef struct riscv_aclint_mtimer_callback {
> >  RISCVAclintMTimerState *s;
> > @@ -65,8 +66,8 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
> >
> >  uint64_t rtc_r = cpu_riscv_read_rtc(mtimer);
> >
> > -cpu->env.timecmp = value;
> > -if (cpu->env.timecmp <= rtc_r) {
> > +mtimer->timecmp[hartid] = value;
> > +if (mtimer->timecmp[hartid] <= rtc_r) {
> >  /*
> >   * If we're setting an MTIMECMP value in the "past",
> >   * immediately raise the timer interrupt
> > @@ -77,7 +78,7 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
> >
> >  /* otherwise, set up the future timer interrupt */
> >  qemu_irq_lower(mtimer->timer_irqs[hartid - mtimer->hartid_base]);
> > -diff = cpu->env.timecmp - rtc_r;
> > +diff = mtimer->timecmp[hartid] - rtc_r;
> >  /* back to ns (note args switched in muldiv64) */
> >  uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
> >
> > @@ -102,7 +103,7 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
> >  next = MIN(next, INT64_MAX);
> >  }
> >
> > -timer_mod(cpu->env.timer, next);
> > +timer_mod(mtimer->timers[hartid], next);
> >  }
> >
> >  /*
> > @@ -133,11 +134,11 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr,
> >"aclint-mtimer: invalid hartid: %zu", hartid);
> >  } else if ((addr & 0x7) == 0) {
> >  /* timecmp_lo for RV32/RV64 or timecmp for RV64 */
> > -uint64_t timecmp = env->timecmp;
> > +uint64_t timecmp = mtimer->timecmp[hartid];
> >  return (size == 4) ? (timecmp & 0xFFFFFFFF) : timecmp;
> >  } else if ((addr & 0x7) == 4) {
> >  /* timecmp_hi */
> > -uint64_t timecmp = env->timecmp;
> > +uint64_t timecmp = mtimer->timecmp[hartid];
> >  return (timecmp >> 32) & 0xFFFFFFFF;
> >  } else {
> >  qemu_log_mask(LOG_UNIMP,
> > @@ -177,7 +178,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
> >  } else if ((addr & 0x7) == 0) {
> >  if (size == 4) {
> >  /* timecmp_lo for RV32/RV64 */
> > -uint64_t timecmp_hi = env->timecmp >> 32;
> > +uint64_t timecmp_hi = mtimer->timecmp[hartid] >> 32;
> >  riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
> > timecmp_hi << 32 | (value & 0xFFFFFFFF));
> >  } else {
> > @@ -188,7 +189,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
> >  } else if ((addr & 0x7) == 4) {
> >  if (size == 4) {
> >  /* timecmp_hi for RV32/RV64 */
> > -uint64_t timecmp_lo = env->timecmp;
> > +uint64_t timecmp_lo = mtimer->timecmp[hartid];
> >  riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
> > value << 32 | (timecmp_lo & 0xFFFFFFFF));
> >  } else {
> > @@ -234,7 +235,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
> >  }
> >  riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu),
> >mtimer->hartid_base + i,
> > -  env->timecmp);
> > +  mtimer->timecmp[i]);
> >  }
> >  return;
> >  }
> > @@ -284,6 +285,8 @@ static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp)
> >  s->timer_irqs = g_new(qemu_irq, s->num_harts);
> >  

Re: [PATCH v6 1/3] hw/intc: Move mtimer/mtimecmp to aclint

2022-07-23 Thread Andrew Jones
On Thu, Jul 21, 2022 at 06:00:44PM -0700, Atish Patra wrote:
> Historically, the mtime/mtimecmp registers have been part of the CPU
> because they are per-hart entities. However, they actually belong to
> the ACLINT, which is an MMIO device.
> 
> Move them to the ACLINT device. This also emulates the real hardware
> more closely.
> 
> Reviewed-by: Anup Patel 
> Reviewed-by: Alistair Francis 
> Signed-off-by: Atish Patra 
> ---
>  hw/intc/riscv_aclint.c | 41 --
>  hw/timer/ibex_timer.c  | 18 ++-
>  include/hw/intc/riscv_aclint.h |  2 ++
>  include/hw/timer/ibex_timer.h  |  2 ++
>  target/riscv/cpu.h |  2 --
>  target/riscv/machine.c |  5 ++---
>  6 files changed, 42 insertions(+), 28 deletions(-)
> 
> diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
> index e7942c4e5a32..47f355224612 100644
> --- a/hw/intc/riscv_aclint.c
> +++ b/hw/intc/riscv_aclint.c
> @@ -32,6 +32,7 @@
>  #include "hw/intc/riscv_aclint.h"
>  #include "qemu/timer.h"
>  #include "hw/irq.h"
> +#include "migration/vmstate.h"
>  
>  typedef struct riscv_aclint_mtimer_callback {
>  RISCVAclintMTimerState *s;
> @@ -65,8 +66,8 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
>  
>  uint64_t rtc_r = cpu_riscv_read_rtc(mtimer);
>  
> -cpu->env.timecmp = value;
> -if (cpu->env.timecmp <= rtc_r) {
> +mtimer->timecmp[hartid] = value;
> +if (mtimer->timecmp[hartid] <= rtc_r) {
>  /*
>   * If we're setting an MTIMECMP value in the "past",
>   * immediately raise the timer interrupt
> @@ -77,7 +78,7 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
>  
>  /* otherwise, set up the future timer interrupt */
>  qemu_irq_lower(mtimer->timer_irqs[hartid - mtimer->hartid_base]);
> -diff = cpu->env.timecmp - rtc_r;
> +diff = mtimer->timecmp[hartid] - rtc_r;
>  /* back to ns (note args switched in muldiv64) */
>  uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
>  
> @@ -102,7 +103,7 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
>  next = MIN(next, INT64_MAX);
>  }
>  
> -timer_mod(cpu->env.timer, next);
> +timer_mod(mtimer->timers[hartid], next);
>  }
>  
>  /*
> @@ -133,11 +134,11 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr,
>"aclint-mtimer: invalid hartid: %zu", hartid);
>  } else if ((addr & 0x7) == 0) {
>  /* timecmp_lo for RV32/RV64 or timecmp for RV64 */
> -uint64_t timecmp = env->timecmp;
> +uint64_t timecmp = mtimer->timecmp[hartid];
>  return (size == 4) ? (timecmp & 0xFFFFFFFF) : timecmp;
>  } else if ((addr & 0x7) == 4) {
>  /* timecmp_hi */
> -uint64_t timecmp = env->timecmp;
> +uint64_t timecmp = mtimer->timecmp[hartid];
>  return (timecmp >> 32) & 0xFFFFFFFF;
>  } else {
>  qemu_log_mask(LOG_UNIMP,
> @@ -177,7 +178,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
>  } else if ((addr & 0x7) == 0) {
>  if (size == 4) {
>  /* timecmp_lo for RV32/RV64 */
> -uint64_t timecmp_hi = env->timecmp >> 32;
> +uint64_t timecmp_hi = mtimer->timecmp[hartid] >> 32;
>  riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
> timecmp_hi << 32 | (value & 0xFFFFFFFF));
>  } else {
> @@ -188,7 +189,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
>  } else if ((addr & 0x7) == 4) {
>  if (size == 4) {
>  /* timecmp_hi for RV32/RV64 */
> -uint64_t timecmp_lo = env->timecmp;
> +uint64_t timecmp_lo = mtimer->timecmp[hartid];
>  riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
> value << 32 | (timecmp_lo & 0xFFFFFFFF));
>  } else {
> @@ -234,7 +235,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
>  }
>  riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu),
>mtimer->hartid_base + i,
> -  env->timecmp);
> +  mtimer->timecmp[i]);
>  }
>  return;
>  }
> @@ -284,6 +285,8 @@ static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp)
>  s->timer_irqs = g_new(qemu_irq, s->num_harts);
>  qdev_init_gpio_out(dev, s->timer_irqs, s->num_harts);
>  
> +s->timers = g_malloc0(s->num_harts * sizeof(QEMUTimer));

It looks like we're overallocating the space here, since we want an
array of QEMUTimer pointers, not QEMUTimer objects. Also, QEMU
prefers g_new to g_malloc.
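
For reference, a minimal sketch of what the corrected allocation could look
like (assuming the state struct declares QEMUTimer **timers and
uint64_t *timecmp, which is what the rest of the patch indexes into):

    /* one timer pointer and one timecmp value per hart */
    s->timers = g_new0(QEMUTimer *, s->num_harts);
    s->timecmp = g_new0(uint64_t, s->num_harts);

The individual QEMUTimer objects would still need to be created per hart
(e.g. with timer_new_ns()) before timer_mod() can be called on them.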

[PATCH v6 1/3] hw/intc: Move mtimer/mtimecmp to aclint

2022-07-21 Thread Atish Patra
Historically, the mtime/mtimecmp registers have been part of the CPU
because they are per-hart entities. However, they actually belong to
the ACLINT, which is an MMIO device.

Move them to the ACLINT device. This also emulates the real hardware
more closely.

Reviewed-by: Anup Patel 
Reviewed-by: Alistair Francis 
Signed-off-by: Atish Patra 
---
 hw/intc/riscv_aclint.c | 41 --
 hw/timer/ibex_timer.c  | 18 ++-
 include/hw/intc/riscv_aclint.h |  2 ++
 include/hw/timer/ibex_timer.h  |  2 ++
 target/riscv/cpu.h |  2 --
 target/riscv/machine.c |  5 ++---
 6 files changed, 42 insertions(+), 28 deletions(-)
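
The two-line change to include/hw/intc/riscv_aclint.h is not shown in this
excerpt; judging from how the patch indexes mtimer->timecmp[] and
mtimer->timers[], it presumably adds per-hart state along these lines
(a sketch, not the exact hunk):

    /* in struct RISCVAclintMTimerState */
    uint64_t *timecmp;    /* per-hart MTIMECMP values */
    QEMUTimer **timers;   /* per-hart QEMU timers */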

diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
index e7942c4e5a32..47f355224612 100644
--- a/hw/intc/riscv_aclint.c
+++ b/hw/intc/riscv_aclint.c
@@ -32,6 +32,7 @@
 #include "hw/intc/riscv_aclint.h"
 #include "qemu/timer.h"
 #include "hw/irq.h"
+#include "migration/vmstate.h"
 
 typedef struct riscv_aclint_mtimer_callback {
 RISCVAclintMTimerState *s;
@@ -65,8 +66,8 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
 
 uint64_t rtc_r = cpu_riscv_read_rtc(mtimer);
 
-cpu->env.timecmp = value;
-if (cpu->env.timecmp <= rtc_r) {
+mtimer->timecmp[hartid] = value;
+if (mtimer->timecmp[hartid] <= rtc_r) {
 /*
  * If we're setting an MTIMECMP value in the "past",
  * immediately raise the timer interrupt
@@ -77,7 +78,7 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
 
 /* otherwise, set up the future timer interrupt */
 qemu_irq_lower(mtimer->timer_irqs[hartid - mtimer->hartid_base]);
-diff = cpu->env.timecmp - rtc_r;
+diff = mtimer->timecmp[hartid] - rtc_r;
 /* back to ns (note args switched in muldiv64) */
 uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
 
@@ -102,7 +103,7 @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
 next = MIN(next, INT64_MAX);
 }
 
-timer_mod(cpu->env.timer, next);
+timer_mod(mtimer->timers[hartid], next);
 }
 
 /*
@@ -133,11 +134,11 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr,
   "aclint-mtimer: invalid hartid: %zu", hartid);
 } else if ((addr & 0x7) == 0) {
 /* timecmp_lo for RV32/RV64 or timecmp for RV64 */
-uint64_t timecmp = env->timecmp;
+uint64_t timecmp = mtimer->timecmp[hartid];
 return (size == 4) ? (timecmp & 0xFFFFFFFF) : timecmp;
 } else if ((addr & 0x7) == 4) {
 /* timecmp_hi */
-uint64_t timecmp = env->timecmp;
+uint64_t timecmp = mtimer->timecmp[hartid];
 return (timecmp >> 32) & 0xFFFFFFFF;
 } else {
 qemu_log_mask(LOG_UNIMP,
@@ -177,7 +178,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
 } else if ((addr & 0x7) == 0) {
 if (size == 4) {
 /* timecmp_lo for RV32/RV64 */
-uint64_t timecmp_hi = env->timecmp >> 32;
+uint64_t timecmp_hi = mtimer->timecmp[hartid] >> 32;
 riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
 timecmp_hi << 32 | (value & 0xFFFFFFFF));
 } else {
@@ -188,7 +189,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
 } else if ((addr & 0x7) == 4) {
 if (size == 4) {
 /* timecmp_hi for RV32/RV64 */
-uint64_t timecmp_lo = env->timecmp;
+uint64_t timecmp_lo = mtimer->timecmp[hartid];
 riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
 value << 32 | (timecmp_lo & 0xFFFFFFFF));
 } else {
@@ -234,7 +235,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
 }
 riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu),
   mtimer->hartid_base + i,
-  env->timecmp);
+  mtimer->timecmp[i]);
 }
 return;
 }
@@ -284,6 +285,8 @@ static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp)
 s->timer_irqs = g_new(qemu_irq, s->num_harts);
 qdev_init_gpio_out(dev, s->timer_irqs, s->num_harts);
 
+s->timers = g_malloc0(s->num_harts * sizeof(QEMUTimer));
+s->timecmp = g_new0(uint64_t, s->num_harts);
 /* Claim timer interrupt bits */
 for (i = 0; i < s->num_harts; i++) {
 RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i));
@@ -310,6 +313,18 @@ static void riscv_aclint_mtimer_reset_enter(Object *obj, ResetType type)
 riscv_aclint_mtimer_write(mtimer, mtimer->time_base, 0, 8);
 }
 
+static const VMStateDescription vmstate_riscv_mtimer = {
+.name = "riscv_mtimer",