[PATCH 1/1] timecounter: Make cyclecounter struct part of timecounter struct

2017-12-01 Thread Sagar Arun Kamble
There is no real need for the users of timecounters to define cyclecounter
and timecounter variables separately. Since timecounter will always be
based on cyclecounter, have cyclecounter struct as member of timecounter
struct.

Suggested-by: Chris Wilson 
Signed-off-by: Sagar Arun Kamble 
Cc: Chris Wilson 
Cc: Richard Cochran 
Cc: John Stultz 
Cc: Thomas Gleixner 
Cc: Stephen Boyd 
Cc: linux-ker...@vger.kernel.org
Cc: linux-arm-ker...@lists.infradead.org
Cc: net...@vger.kernel.org
Cc: intel-wired-...@lists.osuosl.org
Cc: linux-r...@vger.kernel.org
Cc: alsa-de...@alsa-project.org
Cc: kvmarm@lists.cs.columbia.edu
---
 arch/microblaze/kernel/timer.c | 20 ++--
 drivers/clocksource/arm_arch_timer.c   | 19 ++--
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c   |  3 +-
 drivers/net/ethernet/amd/xgbe/xgbe-ptp.c   |  9 +++---
 drivers/net/ethernet/amd/xgbe/xgbe.h   |  1 -
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h|  1 -
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c   | 20 ++--
 drivers/net/ethernet/freescale/fec.h   |  1 -
 drivers/net/ethernet/freescale/fec_ptp.c   | 30 +-
 drivers/net/ethernet/intel/e1000e/e1000.h  |  1 -
 drivers/net/ethernet/intel/e1000e/netdev.c | 27 
 drivers/net/ethernet/intel/e1000e/ptp.c|  2 +-
 drivers/net/ethernet/intel/igb/igb.h   |  1 -
 drivers/net/ethernet/intel/igb/igb_ptp.c   | 25 ---
 drivers/net/ethernet/intel/ixgbe/ixgbe.h   |  1 -
 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c   | 17 +-
 drivers/net/ethernet/mellanox/mlx4/en_clock.c  | 28 -
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h   |  1 -
 .../net/ethernet/mellanox/mlx5/core/lib/clock.c| 34 ++--
 drivers/net/ethernet/qlogic/qede/qede_ptp.c| 20 ++--
 drivers/net/ethernet/ti/cpts.c | 36 --
 drivers/net/ethernet/ti/cpts.h |  1 -
 include/linux/mlx5/driver.h|  1 -
 include/linux/timecounter.h|  4 +--
 include/sound/hdaudio.h|  1 -
 kernel/time/timecounter.c  | 28 -
 sound/hda/hdac_stream.c|  7 +++--
 virt/kvm/arm/arch_timer.c  |  6 ++--
 28 files changed, 163 insertions(+), 182 deletions(-)

diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 7de941c..b7f89e9 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -199,27 +199,25 @@ static u64 xilinx_read(struct clocksource *cs)
return (u64)xilinx_clock_read();
 }
 
-static struct timecounter xilinx_tc = {
-   .cc = NULL,
-};
-
 static u64 xilinx_cc_read(const struct cyclecounter *cc)
 {
return xilinx_read(NULL);
 }
 
-static struct cyclecounter xilinx_cc = {
-   .read = xilinx_cc_read,
-   .mask = CLOCKSOURCE_MASK(32),
-   .shift = 8,
+static struct timecounter xilinx_tc = {
+   .cc.read = xilinx_cc_read,
+   .cc.mask = CLOCKSOURCE_MASK(32),
+   .cc.mult = 0,
+   .cc.shift = 8,
 };
 
 static int __init init_xilinx_timecounter(void)
 {
-   xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC,
-   xilinx_cc.shift);
+   struct cyclecounter *cc = &xilinx_tc.cc;
+
+   cc->mult = div_sc(timer_clock_freq, NSEC_PER_SEC, cc->shift);
 
-   timecounter_init(&xilinx_tc, &xilinx_cc, sched_clock());
+   timecounter_init(&xilinx_tc, sched_clock());
 
return 0;
 }
diff --git a/drivers/clocksource/arm_arch_timer.c 
b/drivers/clocksource/arm_arch_timer.c
index 57cb2f0..31543e5 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -179,11 +179,6 @@ static u64 arch_counter_read_cc(const struct cyclecounter 
*cc)
.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static struct cyclecounter cyclecounter __ro_after_init = {
-   .read   = arch_counter_read_cc,
-   .mask   = CLOCKSOURCE_MASK(56),
-};
-
 struct ate_acpi_oem_info {
char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
@@ -915,7 +910,10 @@ static u64 arch_counter_get_cntvct_mem(void)
return ((u64) vct_hi << 32) | vct_lo;
 }
 
-static struct arch_timer_kvm_info arch_timer_kvm_info;
+static struct arch_timer_kvm_info arch_timer_kvm_info = {
+   .timecounter.cc.read = arch_counter_read_cc,
+   .timecounter.cc.mask = CLOCKSOURCE_MASK(56),
+};
 
 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 {
@@ -925,6 +923,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 static void __init 

Re: [PATCH 10/37] KVM: arm64: Slightly improve debug save/restore functions

2017-12-01 Thread Christoffer Dall
On Tue, Nov 07, 2017 at 03:22:29PM +0100, Andrew Jones wrote:
> On Thu, Oct 12, 2017 at 12:41:14PM +0200, Christoffer Dall wrote:
> > The debug save/restore functions can be improved by using the has_vhe()
> > static key instead of the instruction alternative.  Using the static key
> > uses the same paradigm as we're going to use elsewhere, it makes the
> > code more readable, and it generates slightly better code (no
> > stack setups and function calls unless necessary).
> > 
> > We also use a static key on the restore path, because it will be
> > marginally faster than loading a value from memory.
> > 
> > Finally, we don't have to conditionally clear the debug dirty flag if
> > it's set, we can just clear it.
> > 
> > Signed-off-by: Christoffer Dall 
> > ---
> >  arch/arm64/kvm/hyp/debug-sr.c | 22 +-
> >  1 file changed, 9 insertions(+), 13 deletions(-)
> > 
> > diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
> > index 0fc0758..a2291b6 100644
> > --- a/arch/arm64/kvm/hyp/debug-sr.c
> > +++ b/arch/arm64/kvm/hyp/debug-sr.c
> > @@ -75,11 +75,6 @@
> >  
> >  #define psb_csync()asm volatile("hint #17")
> >  
> > -static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)
> > -{
> > -   /* The vcpu can run. but it can't hide. */
> > -}
> > -
> >  static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
> >  {
> > u64 reg;
> > @@ -109,10 +104,6 @@ static void __hyp_text __debug_save_spe_nvhe(u64 
> > *pmscr_el1)
> > dsb(nsh);
> >  }
> >  
> > -static hyp_alternate_select(__debug_save_spe,
> > -   __debug_save_spe_nvhe, __debug_save_spe_vhe,
> > -   ARM64_HAS_VIRT_HOST_EXTN);
> > -
> >  static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
> >  {
> > if (!pmscr_el1)
> > @@ -174,17 +165,22 @@ void __hyp_text __debug_cond_save_host_state(struct 
> > kvm_vcpu *vcpu)
> >  {
> > __debug_save_state(vcpu, &vcpu->arch.host_debug_state.regs,
> >kern_hyp_va(vcpu->arch.host_cpu_context));
> > -   __debug_save_spe()(&vcpu->arch.host_debug_state.pmscr_el1);
> > +
> > +   /* Non-VHE: Disable and flush SPE data generation
> > +* VHE: The vcpu can run. but it can't hide. */
> 
> Not the standard comment format.
> s/./,/
> 
> I'm glad you kept the funny comment :-)
> 
> 
> > +   if (!has_vhe())
> > +   __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
> >  }
> >  
> >  void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
> >  {
> > -   __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
> > +   if (!has_vhe())
> > +   __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
> > +
> > __debug_restore_state(vcpu, &vcpu->arch.host_debug_state.regs,
> >   kern_hyp_va(vcpu->arch.host_cpu_context));
> >  
> > -   if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
> > -   vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
> > +   vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
> 
> Guess I should have read ahead before commenting on this in the last
> patch :-)
> 
> >  }
> >  
> >  u32 __hyp_text __kvm_get_mdcr_el2(void)
> > -- 
> > 2.9.0
> >
> 
> Do we still need to pass vcpu->arch.host_debug_state.pmscr_el1 as a
> parameter to __debug_save_spe_nvhe and __debug_restore_spe? Or can
> we just pass the vcpu 

We could change that, but that's unrelated to any optimization and I
believe the idea behind doing the code this way is that it can be reused
for another context later on if it should become relevant.

> and remove the "if (!pmscr_el1) return" in
> __debug_restore_spe? 

That would change the logic (hint: the debug function takes a value, not
a pointer), so I don't think so.

> Should __debug_restore_spe be renamed to have
> a _nvhe for consistency?
> 
Yes.

Thanks,
-Christoffer
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH 11/37] KVM: arm64: Improve debug register save/restore flow

2017-12-01 Thread Christoffer Dall
On Tue, Nov 07, 2017 at 03:48:57PM +0100, Andrew Jones wrote:
> On Thu, Oct 12, 2017 at 12:41:15PM +0200, Christoffer Dall wrote:
> > Instead of having multiple calls from the world switch path to the debug
> > logic, each figuring out if the dirty bit is set and if we should
> > save/restore the debug registers, let's just provide two hooks to the
> > debug save/restore functionality, one for switching to the guest
> > context, and one for switching to the host context, and we get the
> > benefit of only having to evaluate the dirty flag once on each path,
> > plus we give the compiler some more room to inline some of this
> > functionality.
> > 
> > Signed-off-by: Christoffer Dall 
> > ---
> >  arch/arm64/include/asm/kvm_hyp.h | 10 ++-
> >  arch/arm64/kvm/hyp/debug-sr.c| 56 
> > +++-
> >  arch/arm64/kvm/hyp/switch.c  |  6 ++---
> >  3 files changed, 42 insertions(+), 30 deletions(-)
> > 
> > diff --git a/arch/arm64/include/asm/kvm_hyp.h 
> > b/arch/arm64/include/asm/kvm_hyp.h
> > index 08d3bb6..a0e5a70 100644
> > --- a/arch/arm64/include/asm/kvm_hyp.h
> > +++ b/arch/arm64/include/asm/kvm_hyp.h
> > @@ -139,14 +139,8 @@ void __sysreg_restore_guest_state(struct 
> > kvm_cpu_context *ctxt);
> >  void __sysreg32_save_state(struct kvm_vcpu *vcpu);
> >  void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
> >  
> > -void __debug_save_state(struct kvm_vcpu *vcpu,
> > -   struct kvm_guest_debug_arch *dbg,
> > -   struct kvm_cpu_context *ctxt);
> > -void __debug_restore_state(struct kvm_vcpu *vcpu,
> > -  struct kvm_guest_debug_arch *dbg,
> > -  struct kvm_cpu_context *ctxt);
> > -void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
> > -void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
> > +void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
> > +void __debug_switch_to_host(struct kvm_vcpu *vcpu);
> >  
> >  void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
> >  void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
> > diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
> > index a2291b6..b4cd8e0 100644
> > --- a/arch/arm64/kvm/hyp/debug-sr.c
> > +++ b/arch/arm64/kvm/hyp/debug-sr.c
> > @@ -116,16 +116,13 @@ static void __hyp_text __debug_restore_spe(u64 
> > pmscr_el1)
> > write_sysreg_s(pmscr_el1, PMSCR_EL1);
> >  }
> >  
> > -void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
> > -  struct kvm_guest_debug_arch *dbg,
> > -  struct kvm_cpu_context *ctxt)
> > +static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
> > + struct kvm_guest_debug_arch *dbg,
> > + struct kvm_cpu_context *ctxt)
> >  {
> > u64 aa64dfr0;
> > int brps, wrps;
> >  
> > -   if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
> > -   return;
> > -
> > aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
> > brps = (aa64dfr0 >> 12) & 0xf;
> > wrps = (aa64dfr0 >> 20) & 0xf;
> > @@ -138,16 +135,13 @@ void __hyp_text __debug_save_state(struct kvm_vcpu 
> > *vcpu,
> > ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
> >  }
> >  
> > -void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
> > - struct kvm_guest_debug_arch *dbg,
> > - struct kvm_cpu_context *ctxt)
> > +static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
> > +struct kvm_guest_debug_arch *dbg,
> > +struct kvm_cpu_context *ctxt)
> >  {
> > u64 aa64dfr0;
> > int brps, wrps;
> >  
> > -   if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
> > -   return;
> > -
> > aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
> >  
> > brps = (aa64dfr0 >> 12) & 0xf;
> > @@ -161,24 +155,50 @@ void __hyp_text __debug_restore_state(struct kvm_vcpu 
> > *vcpu,
> > write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
> >  }
> >  
> > -void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
> > +void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
> >  {
> > -   __debug_save_state(vcpu, &vcpu->arch.host_debug_state.regs,
> > -  kern_hyp_va(vcpu->arch.host_cpu_context));
> > +   struct kvm_cpu_context *__host_ctxt;
> > +   struct kvm_cpu_context *__guest_ctxt;
> > +   struct kvm_guest_debug_arch *__host_dbg;
> > +   struct kvm_guest_debug_arch *__guest_dbg;
> 
> I caught in your reply to Marc that the __ prefix here is for hyp mode
> accessible code and data, but do we also need to use it for stack data?
> No big deal, but it's not very pretty.
> 

sure.

> >  
> > /* Non-VHE: Disable and flush SPE data generation
> >  * VHE: The vcpu can run. but it can't hide. */
> > if (!has_vhe())
> >  

Re: [PATCH 09/37] KVM: arm64: Move debug dirty flag calculation out of world switch

2017-12-01 Thread Christoffer Dall
On Tue, Nov 07, 2017 at 03:09:19PM +0100, Andrew Jones wrote:
> On Thu, Oct 12, 2017 at 12:41:13PM +0200, Christoffer Dall wrote:
> > There is no need to figure out inside the world-switch if we should
> > save/restore the debug registers or not, we might as well do that in
> > the higher level debug setup code, making it easier to optimize down the
> > line.
> > 
> > Signed-off-by: Christoffer Dall 
> > ---
> >  arch/arm64/kvm/debug.c| 9 +
> >  arch/arm64/kvm/hyp/debug-sr.c | 6 --
> >  2 files changed, 9 insertions(+), 6 deletions(-)
> > 
> > diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
> > index dbadfaf..62550de19 100644
> > --- a/arch/arm64/kvm/debug.c
> > +++ b/arch/arm64/kvm/debug.c
> > @@ -193,6 +193,15 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
> > if (trap_debug)
> > vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
> >  
> > +   /*
> > +* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform
> > +* a full save/restore cycle.
> 
> The commit message implies testing KVM_ARM64_DEBUG_DIRTY, but it only
> tests KDE and MDE.
> 

You meant the comment, right?

> > +*/
> > +   if ((vcpu_sys_reg(vcpu, MDSCR_EL1) & DBG_MDSCR_KDE) ||
> > +   (vcpu_sys_reg(vcpu, MDSCR_EL1) & DBG_MDSCR_MDE))
> 
> nit: could also write as
> 
>  if (vcpu_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
> 

I actually prefer it the other way, and I think the compiler will figure
out what to do in terms of efficiency.

> > +   vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
> > +
> 
> It looks like there's only one flag for debug_flags - this dirty flag,
> which I guess is also used to trigger trapping. So maybe this could be a
> second flag of a "lazy state" field, as I suggested earlier?
> 

I'm going to leave this one for now, but we can improve that later on if
we want to save a little space in the vcpu struct, or rather if we want
to rearrange things to make frequently accessed fields fit in the same
cache line.

Thanks,
-Christoffer
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH 13/37] KVM: arm64: Introduce VHE-specific kvm_vcpu_run

2017-12-01 Thread Christoffer Dall
On Tue, Nov 07, 2017 at 04:25:29PM +0100, Andrew Jones wrote:
> On Thu, Oct 12, 2017 at 12:41:17PM +0200, Christoffer Dall wrote:
> > So far this is just a copy of the legacy non-VHE switch function, where
> > we only change the existing calls to has_vhe() in both the original and
> > new functions.
> > 
> > Signed-off-by: Christoffer Dall 
> > ---
> >  arch/arm/include/asm/kvm_asm.h   |  4 +++
> >  arch/arm64/include/asm/kvm_asm.h |  2 ++
> >  arch/arm64/kvm/hyp/switch.c  | 57 
> > 
> >  virt/kvm/arm/arm.c   |  5 +++-
> >  4 files changed, 67 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
> > index 36dd296..1a7bc5f 100644
> > --- a/arch/arm/include/asm/kvm_asm.h
> > +++ b/arch/arm/include/asm/kvm_asm.h
> > @@ -70,8 +70,12 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu 
> > *vcpu);
> >  
> >  extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
> >  
> > +/* no VHE on 32-bit :( */
> > +static inline int kvm_vcpu_run(struct kvm_vcpu *vcpu) { return 0; }
> > +
> >  extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
> >  
> > +
> 
> stray new blank line
> 
> >  extern void __init_stage2_translation(void);
> >  
> >  extern u64 __vgic_v3_get_ich_vtr_el2(void);
> > diff --git a/arch/arm64/include/asm/kvm_asm.h 
> > b/arch/arm64/include/asm/kvm_asm.h
> > index 7e48a39..2eb5b23 100644
> > --- a/arch/arm64/include/asm/kvm_asm.h
> > +++ b/arch/arm64/include/asm/kvm_asm.h
> > @@ -57,6 +57,8 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu 
> > *vcpu);
> >  
> >  extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
> >  
> > +extern int kvm_vcpu_run(struct kvm_vcpu *vcpu);
> > +
> >  extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
> >  
> >  extern u64 __vgic_v3_get_ich_vtr_el2(void);
> > diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> > index ed30af5..8a0f38f 100644
> > --- a/arch/arm64/kvm/hyp/switch.c
> > +++ b/arch/arm64/kvm/hyp/switch.c
> > @@ -319,6 +319,63 @@ static bool __hyp_text fixup_guest_exit(struct 
> > kvm_vcpu *vcpu, u64 *exit_code)
> > return false;
> >  }
> >  
> > +/* Switch to the guest for VHE systems running in EL2 */
> > +int kvm_vcpu_run(struct kvm_vcpu *vcpu)
> > +{
> > +   struct kvm_cpu_context *host_ctxt;
> > +   struct kvm_cpu_context *guest_ctxt;
> > +   u64 exit_code;
> > +
> > +   vcpu = kern_hyp_va(vcpu);
> > +
> > +   host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> > +   host_ctxt->__hyp_running_vcpu = vcpu;
> > +   guest_ctxt = &vcpu->arch.ctxt;
> > +
> > +   __sysreg_save_host_state(host_ctxt);
> > +
> > +   __activate_traps(vcpu);
> > +   __activate_vm(vcpu);
> > +
> > +   __vgic_restore_state(vcpu);
> > +   __timer_enable_traps(vcpu);
> > +
> > +   /*
> > +* We must restore the 32-bit state before the sysregs, thanks
> > +* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
> > +*/
> > +   __sysreg32_restore_state(vcpu);
> > +   __sysreg_restore_guest_state(guest_ctxt);
> > +   __debug_switch_to_guest(vcpu);
> > +
> > +   /* Jump in the fire! */
> > +again:
> > +   exit_code = __guest_enter(vcpu, host_ctxt);
> > +   /* And we're baaack! */
> > +
> > +   if (fixup_guest_exit(vcpu, &exit_code))
> > +   goto again;
> > +
> > +   __sysreg_save_guest_state(guest_ctxt);
> > +   __sysreg32_save_state(vcpu);
> > +   __timer_disable_traps(vcpu);
> > +   __vgic_save_state(vcpu);
> > +
> > +   __deactivate_traps(vcpu);
> > +   __deactivate_vm(vcpu);
> > +
> > +   __sysreg_restore_host_state(host_ctxt);
> > +
> > +   /*
> > +* This must come after restoring the host sysregs, since a non-VHE
> > +* system may enable SPE here and make use of the TTBRs.
> > +*/
> > +   __debug_switch_to_host(vcpu);
> > +
> > +   return exit_code;
> > +}
> > +
> > +/* Switch to the guest for legacy non-VHE systems */
> >  int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
> >  {
> > struct kvm_cpu_context *host_ctxt;
> > diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> > index cf121b2..b11647a 100644
> > --- a/virt/kvm/arm/arm.c
> > +++ b/virt/kvm/arm/arm.c
> > @@ -706,7 +706,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, 
> > struct kvm_run *run)
> > trace_kvm_entry(*vcpu_pc(vcpu));
> > guest_enter_irqoff();
> >  
> > -   ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
> > +   if (has_vhe())
> > +   ret = kvm_vcpu_run(vcpu);
> > +   else
> > +   ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
> 
> So kvm_vcpu_run() is going to be VHE only, but the only way for people
> stepping through the code to know that is by seeing this callsite or by
> reading the comment above it. Everywhere else we make it much more clear
> with the _vhe and _nvhe suffixes. I wonder if we should keep that pattern
> by adding
> 
>  static int kvm_vcpu_run(struct kvm_vcpu *vcpu)

Re: [PATCH v5 3/8] KVM: arm/arm64: Don't cache the timer IRQ level

2017-12-01 Thread Andre Przywara
Hi,

On 20/11/17 19:16, Christoffer Dall wrote:
> The timer was modeled after a strict idea of modelling an interrupt line
> level in software, meaning that only transitions in the level needed to
> be reported to the VGIC.  This works well for the timer, because the
> arch timer code is in complete control of the device and can track the
> transitions of the line.
> 
> However, as we are about to support using the HW bit in the VGIC not
> just for the timer, but also for VFIO which cannot track transitions of
> the interrupt line, we have to decide on an interface for level
> triggered mapped interrupts to the GIC, which both the timer and VFIO
> can use.
> 
> VFIO only sees an asserting transition of the physical interrupt line,
> and tells the VGIC when that happens.  That means that part of the
> interrupt flow is offloaded to the hardware.
> 
> To use the same interface for VFIO devices and the timer, we therefore
> have to change the timer (we cannot change VFIO because it doesn't know
> the details of the device it is assigning to a VM).
> 
> Luckily, changing the timer is simple, we just need to stop 'caching'
> the line level, but instead let the VGIC know the state of the timer
> every time there is a potential change in the line level, and when the
> line level should be asserted from the timer ISR.  The VGIC can ignore
> extra notifications using its validate mechanism.

Indeed vgic_validate_injection() should take care of that change.

> Signed-off-by: Christoffer Dall 

Reviewed-by: Andre Przywara 

Cheers,
Andre.

> ---
>  virt/kvm/arm/arch_timer.c | 20 +---
>  1 file changed, 13 insertions(+), 7 deletions(-)
> 
> diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
> index 190c99ed1b73..5f8ad8e3f3ff 100644
> --- a/virt/kvm/arm/arch_timer.c
> +++ b/virt/kvm/arm/arch_timer.c
> @@ -99,11 +99,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void 
> *dev_id)
>   }
>   vtimer = vcpu_vtimer(vcpu);
>  
> - if (!vtimer->irq.level) {
> - vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
> - if (kvm_timer_irq_can_fire(vtimer))
> - kvm_timer_update_irq(vcpu, true, vtimer);
> - }
> + vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
> + if (kvm_timer_irq_can_fire(vtimer))
> + kvm_timer_update_irq(vcpu, true, vtimer);
>  
>   if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
>   kvm_vtimer_update_mask_user(vcpu);
> @@ -324,12 +322,20 @@ static void kvm_timer_update_state(struct kvm_vcpu 
> *vcpu)
>   struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
>   struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
>   struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
> + bool level;
>  
>   if (unlikely(!timer->enabled))
>   return;
>  
> - if (kvm_timer_should_fire(vtimer) != vtimer->irq.level)
> - kvm_timer_update_irq(vcpu, !vtimer->irq.level, vtimer);
> + /*
> +  * The vtimer virtual interrupt is a 'mapped' interrupt, meaning part
> +  * of its lifecycle is offloaded to the hardware, and we therefore may
> +  * not have lowered the irq.level value before having to signal a new
> +  * interrupt, but have to signal an interrupt every time the level is
> +  * asserted.
> +  */
> + level = kvm_timer_should_fire(vtimer);
> + kvm_timer_update_irq(vcpu, level, vtimer);
>  
>   if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
>   kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
> 
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v5 1/8] KVM: arm/arm64: Remove redundant preemptible checks

2017-12-01 Thread Andre Przywara
Hi,

On 20/11/17 19:16, Christoffer Dall wrote:
> The __this_cpu_read() and __this_cpu_write() functions already implement
> checks for the required preemption levels when using
> CONFIG_DEBUG_PREEMPT which gives you nice error messages and such.
> Therefore there is no need to explicitly check this using a BUG_ON() in
> the code (which we don't do for other uses of per cpu variables either).
> 
> Acked-by: Marc Zyngier 
> Signed-off-by: Christoffer Dall 

Reviewed-by: Andre Przywara 


> ---
>  virt/kvm/arm/arm.c | 2 --
>  1 file changed, 2 deletions(-)
> 
> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> index c13d74c083fe..28548aeaf164 100644
> --- a/virt/kvm/arm/arm.c
> +++ b/virt/kvm/arm/arm.c
> @@ -71,7 +71,6 @@ static DEFINE_PER_CPU(unsigned char, 
> kvm_arm_hardware_enabled);
>  
>  static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
>  {
> - BUG_ON(preemptible());
>   __this_cpu_write(kvm_arm_running_vcpu, vcpu);
>  }
>  
> @@ -81,7 +80,6 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
>   */
>  struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
>  {
> - BUG_ON(preemptible());
>   return __this_cpu_read(kvm_arm_running_vcpu);
>  }
>  
> 
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v5 2/8] KVM: arm/arm64: Factor out functionality to get vgic mmio requester_vcpu

2017-12-01 Thread Andre Przywara
Hi,

On 20/11/17 19:16, Christoffer Dall wrote:
> We are about to distinguish between userspace accesses and mmio traps
> for a number of the mmio handlers.  When the requester vcpu is NULL, it
> means we are handling a userspace access.
> 
> Factor out the functionality to get the request vcpu into its own
> function, mostly so we have a common place to document the semantics of
> the return value.
> 
> Also take the chance to move the functionality outside of holding a
> spinlock and instead explicitly disable and enable preemption.  This
> supports PREEMPT_RT kernels as well.
> 
> Acked-by: Marc Zyngier 
> Signed-off-by: Christoffer Dall 
> ---
>  virt/kvm/arm/vgic/vgic-mmio.c | 43 
> +++
>  1 file changed, 27 insertions(+), 16 deletions(-)
> 
> diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
> index deb51ee16a3d..6113cf850f47 100644
> --- a/virt/kvm/arm/vgic/vgic-mmio.c
> +++ b/virt/kvm/arm/vgic/vgic-mmio.c
> @@ -122,6 +122,26 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu 
> *vcpu,
>   return value;
>  }
>  
> +/*
> + * This function will return the VCPU that performed the MMIO access and
> > + * trapped from within the VM, and will return NULL if this is a userspace
> + * access.
> + *
> + * We can disable preemption locally around accessing the per-CPU variable
> + * because even if the current thread is migrated to another CPU, reading the
> + * per-CPU value later will give us the same value as we update the per-CPU
> + * variable in the preempt notifier handlers.

This comment left me scratching my head a bit. Maybe you could change it
to point out that ... it's safe to *enable* preemption after the call
again, because of said reasons? Because disabling preemption before
accessing a per-CPU variable is not really an issue.

Apart from that it's fine.

Reviewed-by: Andre Przywara 

Cheers,
Andre.

> + */
> +static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
> +{
> + struct kvm_vcpu *vcpu;
> +
> + preempt_disable();
> + vcpu = kvm_arm_get_running_vcpu();
> + preempt_enable();
> + return vcpu;
> +}
> +
>  void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
> gpa_t addr, unsigned int len,
> unsigned long val)
> @@ -184,24 +204,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu 
> *vcpu,
>  static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq 
> *irq,
>   bool new_active_state)
>  {
> - struct kvm_vcpu *requester_vcpu;
>   unsigned long flags;
> - spin_lock_irqsave(&irq->irq_lock, flags);
> + struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
>  
> - /*
> -  * The vcpu parameter here can mean multiple things depending on how
> -  * this function is called; when handling a trap from the kernel it
> -  * depends on the GIC version, and these functions are also called as
> -  * part of save/restore from userspace.
> -  *
> -  * Therefore, we have to figure out the requester in a reliable way.
> -  *
> -  * When accessing VGIC state from user space, the requester_vcpu is
> -  * NULL, which is fine, because we guarantee that no VCPUs are running
> -  * when accessing VGIC state from user space so irq->vcpu->cpu is
> -  * always -1.
> -  */
> - requester_vcpu = kvm_arm_get_running_vcpu();
> + spin_lock_irqsave(&irq->irq_lock, flags);
>  
>   /*
>* If this virtual IRQ was written into a list register, we
> @@ -213,6 +219,11 @@ static void vgic_mmio_change_active(struct kvm_vcpu 
> *vcpu, struct vgic_irq *irq,
>* vgic_change_active_prepare)  and still has to sync back this IRQ,
>* so we release and re-acquire the spin_lock to let the other thread
>* sync back the IRQ.
> +  *
> +  * When accessing VGIC state from user space, requester_vcpu is
> +  * NULL, which is fine, because we guarantee that no VCPUs are running
> +  * when accessing VGIC state from user space so irq->vcpu->cpu is
> +  * always -1.
>*/
>   while (irq->vcpu && /* IRQ may have state in an LR somewhere */
>  irq->vcpu != requester_vcpu && /* Current thread is not the VCPU 
> thread */
> 
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH 2/3] arm64: fpsimd: Abstract out binding of task's fpsimd context to the cpu.

2017-12-01 Thread Dave Martin
There is currently some duplicate logic to associate current's
FPSIMD context with the cpu when loading FPSIMD state into the cpu
regs.

Subsequent patches will update that logic, so in order to ensure it
only needs to be done in one place, this patch factors the relevant
code out into a new function fpsimd_bind_to_cpu().

Signed-off-by: Dave Martin 
Cc: Ard Biesheuvel 
---
 arch/arm64/kernel/fpsimd.c | 25 +++--
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 143b3e7..007140b 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -992,6 +992,18 @@ void fpsimd_signal_preserve_current_state(void)
 }
 
 /*
+ * Associate current's FPSIMD context with this cpu
+ * Preemption must be disabled when calling this function.
+ */
+static void fpsimd_bind_to_cpu(void)
+{
+   struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+   __this_cpu_write(fpsimd_last_state, st);
+   st->cpu = smp_processor_id();
+}
+
+/*
  * Load the userland FPSIMD state of 'current' from memory, but only if the
  * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
  * state of 'current'
@@ -1004,11 +1016,8 @@ void fpsimd_restore_current_state(void)
local_bh_disable();
 
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-   struct fpsimd_state *st = &current->thread.fpsimd_state;
-
task_fpsimd_load();
-   __this_cpu_write(fpsimd_last_state, st);
-   st->cpu = smp_processor_id();
+   fpsimd_bind_to_cpu();
}
 
local_bh_enable();
@@ -1032,12 +1041,8 @@ void fpsimd_update_current_state(struct fpsimd_state 
*state)
}
task_fpsimd_load();
 
-   if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-   struct fpsimd_state *st = &current->thread.fpsimd_state;
-
-   __this_cpu_write(fpsimd_last_state, st);
-   st->cpu = smp_processor_id();
-   }
+   if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
+   fpsimd_bind_to_cpu();
 
local_bh_enable();
 }
-- 
2.1.4

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH 1/3] arm64: KVM: Move CPU ID reg trap setup off the world switch path

2017-12-01 Thread Dave Martin
The HCR_EL2.TID3 flag needs to be set when trapping guest access to
the CPU ID registers is required.  However, the decision about
whether to set this bit does not need to be repeated at every
switch to the guest.

Instead, it's sufficient to make this decision once and record the
outcome.

This patch moves the decision to vcpu_reset_hcr() and records the
choice made in vcpu->arch.hcr_el2.  The world switch code can then
load this directly when switching to the guest without the need for
conditional logic on the critical path.

Signed-off-by: Dave Martin 
Suggested-by: Christoffer Dall 
Cc: Marc Zyngier 

---

Note to maintainers: this was discussed on-list [1] prior to the merge
window, but this patch implementing the agreed decision hasn't been
posted previously.

This should be considered a fix for v4.15.

[1] [PATCH v3 02/28] arm64: KVM: Hide unsupported AArch64 CPU features from 
guests
http://lists.infradead.org/pipermail/linux-arm-kernel/2017-October/537420.html
---
 arch/arm64/include/asm/kvm_emulate.h | 8 
 arch/arm64/kvm/hyp/switch.c  | 4 
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h 
b/arch/arm64/include/asm/kvm_emulate.h
index 5f28dfa..8ff5aef 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -52,6 +52,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_E2H;
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu->arch.hcr_el2 &= ~HCR_RW;
+
+   /*
+* TID3: trap feature register accesses that we virtualise.
+* For now this is conditional, since no AArch32 feature regs
+* are currently virtualised.
+*/
+   if (vcpu->arch.hcr_el2 & HCR_RW)
+   vcpu->arch.hcr_el2 |= HCR_TID3;
 }
 
 static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 525c01f..87fd590 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -86,10 +86,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu 
*vcpu)
write_sysreg(1 << 30, fpexc32_el2);
isb();
}
-
-   if (val & HCR_RW) /* for AArch64 only: */
-   val |= HCR_TID3; /* TID3: trap feature register accesses */
-
write_sysreg(val, hcr_el2);
 
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
-- 
2.1.4

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH 0/3] arm64: SVE fixes for v4.15-rc1

2017-12-01 Thread Dave Martin
This mini-series contains a few fixes for known issues in the arm64 SVE
patches that missed the merge window.

They should be considered fixes for v4.15.

Dave Martin (3):
  arm64: KVM: Move CPU ID reg trap setup off the world switch path
  arm64: fpsimd: Abstract out binding of task's fpsimd context to the
cpu.
  arm64/sve: KVM: Avoid dereference of dead task during guest entry

 arch/arm64/include/asm/kvm_emulate.h |  8 ++
 arch/arm64/kernel/fpsimd.c   | 51 +---
 arch/arm64/kvm/hyp/switch.c  |  4 ---
 3 files changed, 38 insertions(+), 25 deletions(-)

-- 
2.1.4

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v4 2/2] arm64: Add software workaround for Falkor erratum 1041

2017-12-01 Thread Will Deacon
On Mon, Nov 27, 2017 at 05:18:00PM -0600, Shanker Donthineni wrote:
> The ARM architecture defines the memory locations that are permitted
> to be accessed as the result of a speculative instruction fetch from
> an exception level for which all stages of translation are disabled.
> Specifically, the core is permitted to speculatively fetch from the
> 4KB region containing the current program counter and the next 4KB region.
> 
> When translation is changed from enabled to disabled for the running
> exception level (SCTLR_ELn[M] changed from a value of 1 to 0), the
> Falkor core may errantly speculatively access memory locations outside
> of the 4KB region permitted by the architecture. The errant memory
> access may lead to one of the following unexpected behaviors.
> 
> 1) A System Error Interrupt (SEI) being raised by the Falkor core due
>to the errant memory access attempting to access a region of memory
>that is protected by a slave-side memory protection unit.
> 2) Unpredictable device behavior due to a speculative read from device
>memory. This behavior may only occur if the instruction cache is
>disabled prior to or coincident with translation being changed from
>enabled to disabled.
> 
> The conditions leading to this erratum will not occur when either of the
> following occur:
>  1) A higher exception level disables translation of a lower exception level
>(e.g. EL2 changing SCTLR_EL1[M] from a value of 1 to 0).
>  2) An exception level disabling its stage-1 translation if its stage-2
> translation is enabled (e.g. EL1 changing SCTLR_EL1[M] from a value of 1
> to 0 when HCR_EL2[VM] has a value of 1).
> 
> To avoid the errant behavior, software must execute an ISB immediately
> prior to executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
> 
> Signed-off-by: Shanker Donthineni 
> ---
> Changes since v3:
>   Rebased to kernel v4.15-rc1.
> Changes since v2:
>   Repost the corrected patches.
> Changes since v1:
>   Apply the workaround where it's required.
> 
>  Documentation/arm64/silicon-errata.txt |  1 +
>  arch/arm64/Kconfig | 12 +++-
>  arch/arm64/include/asm/assembler.h | 19 +++
>  arch/arm64/include/asm/cpucaps.h   |  3 ++-
>  arch/arm64/kernel/cpu-reset.S  |  1 +
>  arch/arm64/kernel/cpu_errata.c | 16 
>  arch/arm64/kernel/efi-entry.S  |  2 ++
>  arch/arm64/kernel/head.S   |  1 +
>  arch/arm64/kernel/relocate_kernel.S|  1 +
>  arch/arm64/kvm/hyp-init.S  |  1 +

This is an awful lot of code just to add an ISB instruction prior to
disabling the MMU. Why do you need to go through the alternatives framework
for this? Just do it with an #ifdef; this isn't a fastpath.

Will
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH] KVM: arm/arm64: kvm_arch_destroy_vm cleanups

2017-12-01 Thread Christoffer Dall
On Fri, Dec 1, 2017 at 8:18 AM, Andrew Jones  wrote:
> On Fri, Dec 01, 2017 at 09:09:19AM +0100, Christoffer Dall wrote:
>> Hi Drew,
>>
>> On Mon, Nov 27, 2017 at 07:17:18PM +0100, Andrew Jones wrote:
>> > Recently commit b2c9a85dd75a ("KVM: arm/arm64: vgic: Move
>> > kvm_vgic_destroy call around") caught my eye. When I looked closer I
>> > saw that while it made the code saner, it wasn't changing anything.
>> > kvm_for_each_vcpu() checks for NULL kvm->vcpus[i], so there wasn't
>> > a NULL dereference being fixed, and because kvm_vgic_vcpu_destroy()
>> > was called by kvm_arch_vcpu_free() it was still getting called, just
>> > not by kvm_vgic_destroy() as intended. But now the call from
>> > kvm_arch_vcpu_free() is redundant, and while currently harmless, it
>> > should be removed in case kvm_vgic_vcpu_destroy() were ever to
>> > want to reference vgic state, as kvm_vgic_destroy() now comes before
>> > kvm_arch_vcpu_free(). Additionally the other architectures set
>> > kvm->online_vcpus to zero after freeing them. We might as well do
>> > that for ARM too.
>>
>> Could this commit message be rewritten to:
>>
>>   kvm_vgic_vcpu_destroy already gets called from kvm_vgic_destroy for
>>   each vcpu, so we don't have to call it from kvm_arch_vcpu_free.
>>
>>   Additionally the other architectures set kvm->online_vcpus to zero
>>   after freeing them. We might as well do that for ARM too.
>
> Sure, I don't mind you removing the '-v' (verbose) from it. Should I
> respin? Or is that something you don't mind doing while applying?
>
No need to respin, I already applied your patch with the adjusted
commit message.

Thanks,
-Christoffer
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH 1/1] timecounter: Make cyclecounter struct part of timecounter struct

2017-12-01 Thread Sagar Arun Kamble
There is no real need for the users of timecounters to define cyclecounter
and timecounter variables separately. Since timecounter will always be
based on cyclecounter, have cyclecounter struct as member of timecounter
struct.

Suggested-by: Chris Wilson 
Signed-off-by: Sagar Arun Kamble 
Cc: Chris Wilson 
Cc: John Stultz 
Cc: Thomas Gleixner 
Cc: Stephen Boyd 
Cc: linux-ker...@vger.kernel.org
Cc: linux-arm-ker...@lists.infradead.org
Cc: net...@vger.kernel.org
Cc: intel-wired-...@lists.osuosl.org
Cc: linux-r...@vger.kernel.org
Cc: alsa-de...@alsa-project.org
Cc: kvmarm@lists.cs.columbia.edu
---
 arch/microblaze/kernel/timer.c | 20 ++--
 drivers/clocksource/arm_arch_timer.c   | 19 ++--
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c   |  3 +-
 drivers/net/ethernet/amd/xgbe/xgbe-ptp.c   |  9 +++---
 drivers/net/ethernet/amd/xgbe/xgbe.h   |  1 -
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h|  1 -
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c   | 20 ++--
 drivers/net/ethernet/freescale/fec.h   |  1 -
 drivers/net/ethernet/freescale/fec_ptp.c   | 30 +-
 drivers/net/ethernet/intel/e1000e/e1000.h  |  1 -
 drivers/net/ethernet/intel/e1000e/netdev.c | 27 
 drivers/net/ethernet/intel/e1000e/ptp.c|  2 +-
 drivers/net/ethernet/intel/igb/igb.h   |  1 -
 drivers/net/ethernet/intel/igb/igb_ptp.c   | 25 ---
 drivers/net/ethernet/intel/ixgbe/ixgbe.h   |  1 -
 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c   | 17 +-
 drivers/net/ethernet/mellanox/mlx4/en_clock.c  | 28 -
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h   |  1 -
 .../net/ethernet/mellanox/mlx5/core/lib/clock.c| 34 ++--
 drivers/net/ethernet/qlogic/qede/qede_ptp.c| 20 ++--
 drivers/net/ethernet/ti/cpts.c | 36 --
 drivers/net/ethernet/ti/cpts.h |  1 -
 include/linux/mlx5/driver.h|  1 -
 include/linux/timecounter.h|  4 +--
 include/sound/hdaudio.h|  1 -
 kernel/time/timecounter.c  | 28 -
 sound/hda/hdac_stream.c|  7 +++--
 virt/kvm/arm/arch_timer.c  |  6 ++--
 28 files changed, 163 insertions(+), 182 deletions(-)

diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 7de941c..b7f89e9 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -199,27 +199,25 @@ static u64 xilinx_read(struct clocksource *cs)
return (u64)xilinx_clock_read();
 }
 
-static struct timecounter xilinx_tc = {
-   .cc = NULL,
-};
-
 static u64 xilinx_cc_read(const struct cyclecounter *cc)
 {
return xilinx_read(NULL);
 }
 
-static struct cyclecounter xilinx_cc = {
-   .read = xilinx_cc_read,
-   .mask = CLOCKSOURCE_MASK(32),
-   .shift = 8,
+static struct timecounter xilinx_tc = {
+   .cc.read = xilinx_cc_read,
+   .cc.mask = CLOCKSOURCE_MASK(32),
+   .cc.mult = 0,
+   .cc.shift = 8,
 };
 
 static int __init init_xilinx_timecounter(void)
 {
-   xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC,
-   xilinx_cc.shift);
+   struct cyclecounter *cc = _tc.cc;
+
+   cc->mult = div_sc(timer_clock_freq, NSEC_PER_SEC, cc->shift);
 
-   timecounter_init(_tc, _cc, sched_clock());
+   timecounter_init(_tc, sched_clock());
 
return 0;
 }
diff --git a/drivers/clocksource/arm_arch_timer.c 
b/drivers/clocksource/arm_arch_timer.c
index 57cb2f0..31543e5 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -179,11 +179,6 @@ static u64 arch_counter_read_cc(const struct cyclecounter 
*cc)
.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static struct cyclecounter cyclecounter __ro_after_init = {
-   .read   = arch_counter_read_cc,
-   .mask   = CLOCKSOURCE_MASK(56),
-};
-
 struct ate_acpi_oem_info {
char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
@@ -915,7 +910,10 @@ static u64 arch_counter_get_cntvct_mem(void)
return ((u64) vct_hi << 32) | vct_lo;
 }
 
-static struct arch_timer_kvm_info arch_timer_kvm_info;
+static struct arch_timer_kvm_info arch_timer_kvm_info = {
+   .timecounter.cc.read = arch_counter_read_cc,
+   .timecounter.cc.mask = CLOCKSOURCE_MASK(56),
+};
 
 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 {
@@ -925,6 +923,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 static void __init arch_counter_register(unsigned type)
 {
u64 start_count;
+   

Re: [PATCH] KVM: arm/arm64: kvm_arch_destroy_vm cleanups

2017-12-01 Thread Andrew Jones
On Fri, Dec 01, 2017 at 09:09:19AM +0100, Christoffer Dall wrote:
> Hi Drew,
> 
> On Mon, Nov 27, 2017 at 07:17:18PM +0100, Andrew Jones wrote:
> > Recently commit b2c9a85dd75a ("KVM: arm/arm64: vgic: Move
> > kvm_vgic_destroy call around") caught my eye. When I looked closer I
> > saw that while it made the code saner, it wasn't changing anything.
> > kvm_for_each_vcpu() checks for NULL kvm->vcpus[i], so there wasn't
> > a NULL dereference being fixed, and because kvm_vgic_vcpu_destroy()
> > was called by kvm_arch_vcpu_free() it was still getting called, just
> > not by kvm_vgic_destroy() as intended. But now the call from
> > kvm_arch_vcpu_free() is redundant, and while currently harmless, it
> > should be removed in case kvm_vgic_vcpu_destroy() were ever to
> > want to reference vgic state, as kvm_vgic_destroy() now comes before
> > kvm_arch_vcpu_free(). Additionally the other architectures set
> > kvm->online_vcpus to zero after freeing them. We might as well do
> > that for ARM too.
> 
> Could this commit message be rewritten to:
> 
>   kvm_vgic_vcpu_destroy already gets called from kvm_vgic_destroy for
>   each vcpu, so we don't have to call it from kvm_arch_vcpu_free.
> 
>   Additionally the other architectures set kvm->online_vcpus to zero
>   after freeing them. We might as well do that for ARM too.

Sure, I don't mind you removing the '-v' (verbose) from it. Should I
respin? Or is that something you don't mind doing while applying?

Thanks,
drew

> 
> ?
> 
> Thanks,
> -Christoffer
> 
> 
> 
> > 
> > Signed-off-by: Andrew Jones 
> > ---
> >  virt/kvm/arm/arm.c | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> > 
> > diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> > index a6524ff27de4..c5bc79c4ccf7 100644
> > --- a/virt/kvm/arm/arm.c
> > +++ b/virt/kvm/arm/arm.c
> > @@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
> > kvm->vcpus[i] = NULL;
> > }
> > }
> > +   atomic_set(>online_vcpus, 0);
> >  }
> >  
> >  int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
> > @@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
> >  {
> > kvm_mmu_free_memory_caches(vcpu);
> > kvm_timer_vcpu_terminate(vcpu);
> > -   kvm_vgic_vcpu_destroy(vcpu);
> > kvm_pmu_vcpu_destroy(vcpu);
> > kvm_vcpu_uninit(vcpu);
> > kmem_cache_free(kvm_vcpu_cache, vcpu);
> > -- 
> > 2.13.6
> > 
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH] KVM: arm/arm64: kvm_arch_destroy_vm cleanups

2017-12-01 Thread Christoffer Dall
Hi Drew,

On Mon, Nov 27, 2017 at 07:17:18PM +0100, Andrew Jones wrote:
> Recently commit b2c9a85dd75a ("KVM: arm/arm64: vgic: Move
> kvm_vgic_destroy call around") caught my eye. When I looked closer I
> saw that while it made the code saner, it wasn't changing anything.
> kvm_for_each_vcpu() checks for NULL kvm->vcpus[i], so there wasn't
> a NULL dereference being fixed, and because kvm_vgic_vcpu_destroy()
> was called by kvm_arch_vcpu_free() it was still getting called, just
> not by kvm_vgic_destroy() as intended. But now the call from
> kvm_arch_vcpu_free() is redundant, and while currently harmless, it
> should be removed in case kvm_vgic_vcpu_destroy() were ever to
> want to reference vgic state, as kvm_vgic_destroy() now comes before
> kvm_arch_vcpu_free(). Additionally the other architectures set
> kvm->online_vcpus to zero after freeing them. We might as well do
> that for ARM too.

Could this commit message be rewritten to:

  kvm_vgic_vcpu_destroy already gets called from kvm_vgic_destroy for
  each vcpu, so we don't have to call it from kvm_arch_vcpu_free.

  Additionally the other architectures set kvm->online_vcpus to zero
  after freeing them. We might as well do that for ARM too.

?

Thanks,
-Christoffer



> 
> Signed-off-by: Andrew Jones 
> ---
>  virt/kvm/arm/arm.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> index a6524ff27de4..c5bc79c4ccf7 100644
> --- a/virt/kvm/arm/arm.c
> +++ b/virt/kvm/arm/arm.c
> @@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
>   kvm->vcpus[i] = NULL;
>   }
>   }
> + atomic_set(>online_vcpus, 0);
>  }
>  
>  int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
> @@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
>  {
>   kvm_mmu_free_memory_caches(vcpu);
>   kvm_timer_vcpu_terminate(vcpu);
> - kvm_vgic_vcpu_destroy(vcpu);
>   kvm_pmu_vcpu_destroy(vcpu);
>   kvm_vcpu_uninit(vcpu);
>   kmem_cache_free(kvm_vcpu_cache, vcpu);
> -- 
> 2.13.6
> 
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm