Re: KVM: x86: use dynamic percpu allocations for shared msrs area

2013-02-01 Thread Andy Lutomirski
On Thu, Jan 3, 2013 at 5:41 AM, Marcelo Tosatti mtosa...@redhat.com wrote:

 Andy, Mike, can you confirm whether this fixes the percpu allocation
 failures when loading kvm.ko? TIA

 

 Use dynamic percpu allocations for the shared msrs structure,
 to avoid using the limited reserved percpu space.

 Signed-off-by: Marcelo Tosatti mtosa...@redhat.com

Sorry for the amazingly long delay.  What kernel does this apply to?

--Andy


Re: KVM: x86: use dynamic percpu allocations for shared msrs area

2013-01-08 Thread Gleb Natapov
On Thu, Jan 03, 2013 at 11:41:39AM -0200, Marcelo Tosatti wrote:
 
 Andy, Mike, can you confirm whether this fixes the percpu allocation
 failures when loading kvm.ko? TIA
 
 
 
 Use dynamic percpu allocations for the shared msrs structure, 
 to avoid using the limited reserved percpu space.
 
 Signed-off-by: Marcelo Tosatti mtosa...@redhat.com
 
Reviewed-by: Gleb Natapov g...@redhat.com

 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
 index 1c9c834..5229a67 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
 @@ -120,7 +120,7 @@ struct kvm_shared_msrs {
  };
  
  static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
 -static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
 +static struct kvm_shared_msrs __percpu *shared_msrs;
  
  struct kvm_stats_debugfs_item debugfs_entries[] = {
   { "pf_fixed", VCPU_STAT(pf_fixed) },
 @@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
  
  static void shared_msr_update(unsigned slot, u32 msr)
  {
 - struct kvm_shared_msrs *smsr;
   u64 value;
 + unsigned int cpu = smp_processor_id();
 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
  
  - smsr = &__get_cpu_var(shared_msrs);
   /* only read, and nobody should modify it at this time,
* so don't need lock */
   if (slot >= shared_msrs_global.nr) {
 @@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
  
  void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
  {
  - struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
 + unsigned int cpu = smp_processor_id();
 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
  
   if (((value ^ smsr->values[slot].curr) & mask) == 0)
   return;
 @@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
  
  static void drop_user_return_notifiers(void *ignore)
  {
 - struct kvm_shared_msrs *smsr = __get_cpu_var(shared_msrs);
 + unsigned int cpu = smp_processor_id();
 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
  
   if (smsr->registered)
   kvm_on_user_return(&smsr->urn);
 @@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
   goto out;
   }
  
 + r = -ENOMEM;
 + shared_msrs = alloc_percpu(struct kvm_shared_msrs);
 + if (!shared_msrs) {
  + printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
 + goto out;
 + }
 +
   r = kvm_mmu_module_init();
   if (r)
 - goto out;
 + goto out_free_percpu;
  
   kvm_set_mmio_spte_mask();
   kvm_init_msr_list();
 @@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
  
   return 0;
  
 +out_free_percpu:
 + free_percpu(shared_msrs);
  out:
   return r;
  }
 @@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
  #endif
   kvm_x86_ops = NULL;
   kvm_mmu_module_exit();
 + free_percpu(shared_msrs);
  }
  
  int kvm_emulate_halt(struct kvm_vcpu *vcpu)

--
Gleb.
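
[For readers less familiar with the per-CPU API, the patch quoted above follows the standard pattern for moving a per-CPU variable out of the limited reserved per-CPU area available to modules: drop DEFINE_PER_CPU(), allocate the area with alloc_percpu() at module init, reach each CPU's slot through per_cpu_ptr(), and release it with free_percpu() on exit. A minimal, self-contained sketch of that pattern (illustrative demo_* names, not the actual kvm.ko code):

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

struct demo_state {
	u64 value;
};

/* was: static DEFINE_PER_CPU(struct demo_state, demo_state); */
static struct demo_state __percpu *demo_state;

static void demo_touch_this_cpu(void)
{
	/* caller must have preemption disabled, as the KVM callers do */
	unsigned int cpu = smp_processor_id();
	struct demo_state *st = per_cpu_ptr(demo_state, cpu);

	st->value++;
}

static int __init demo_init(void)
{
	demo_state = alloc_percpu(struct demo_state);
	if (!demo_state)
		return -ENOMEM;	/* fail module load, as kvm_arch_init() now does */

	get_cpu();		/* disable preemption around the per-CPU access */
	demo_touch_this_cpu();
	put_cpu();
	return 0;
}

static void __exit demo_exit(void)
{
	free_percpu(demo_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

An alloc_percpu() failure is handled by failing module load, which is exactly what the new -ENOMEM path in kvm_arch_init() does.]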


Re: KVM: x86: use dynamic percpu allocations for shared msrs area

2013-01-03 Thread Mike Galbraith
On Thu, 2013-01-03 at 11:41 -0200, Marcelo Tosatti wrote: 
 Andy, Mike, can you confirm whether this fixes the percpu allocation
 failures when loading kvm.ko? TIA

monteverdi:~/:[1]# dmesg|grep PERCPU
[0.00] PERCPU: Embedded 27 pages/cpu @88047f80 s80704 r8192 d21696 u262144
monteverdi:~/:[0]# lsmod|grep kvm
kvm_intel 132688  0 
kvm   423692  1 kvm_intel
monteverdi:~/:[0]# uname -r
3.6.0-bisect

Yup, kvm loaded itself, no gripeage.
   
 
 
 Use dynamic percpu allocations for the shared msrs structure, 
 to avoid using the limited reserved percpu space.
 
 Signed-off-by: Marcelo Tosatti mtosa...@redhat.com
 
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
 index 1c9c834..5229a67 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
 @@ -120,7 +120,7 @@ struct kvm_shared_msrs {
  };
  
  static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
 -static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
 +static struct kvm_shared_msrs __percpu *shared_msrs;
  
  struct kvm_stats_debugfs_item debugfs_entries[] = {
   { "pf_fixed", VCPU_STAT(pf_fixed) },
 @@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
  
  static void shared_msr_update(unsigned slot, u32 msr)
  {
 - struct kvm_shared_msrs *smsr;
   u64 value;
 + unsigned int cpu = smp_processor_id();
 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
  
  - smsr = &__get_cpu_var(shared_msrs);
   /* only read, and nobody should modify it at this time,
* so don't need lock */
   if (slot >= shared_msrs_global.nr) {
 @@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
  
  void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
  {
  - struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
 + unsigned int cpu = smp_processor_id();
 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
  
   if (((value ^ smsr->values[slot].curr) & mask) == 0)
   return;
 @@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
  
  static void drop_user_return_notifiers(void *ignore)
  {
  - struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
 + unsigned int cpu = smp_processor_id();
 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
  
   if (smsr->registered)
   kvm_on_user_return(&smsr->urn);
 @@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
   goto out;
   }
  
 + r = -ENOMEM;
 + shared_msrs = alloc_percpu(struct kvm_shared_msrs);
 + if (!shared_msrs) {
  + printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
 + goto out;
 + }
 +
   r = kvm_mmu_module_init();
   if (r)
 - goto out;
 + goto out_free_percpu;
  
   kvm_set_mmio_spte_mask();
   kvm_init_msr_list();
 @@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
  
   return 0;
  
 +out_free_percpu:
 + free_percpu(shared_msrs);
  out:
   return r;
  }
 @@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
  #endif
   kvm_x86_ops = NULL;
   kvm_mmu_module_exit();
 + free_percpu(shared_msrs);
  }
  
  int kvm_emulate_halt(struct kvm_vcpu *vcpu)
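
[One detail worth spelling out about the accessor changes in the hunks above: __get_cpu_var() only works on variables declared with DEFINE_PER_CPU(), so once shared_msrs becomes a dynamically allocated __percpu pointer, each CPU's slot has to be reached through per_cpu_ptr() (or this_cpu_ptr() for the local CPU). Roughly, as a non-compilable before/after sketch assuming preemption is already disabled at the call sites, as it is on these KVM paths:

/* before the patch: static per-CPU variable */
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
/* ... */
struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

/* after the patch: dynamically allocated __percpu pointer */
static struct kvm_shared_msrs __percpu *shared_msrs;
/* ... */
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, smp_processor_id());
/* or, equivalently for the local CPU: */
struct kvm_shared_msrs *smsr = this_cpu_ptr(shared_msrs);]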

