[PATCH][retry 3] Support Pause Filter in AMD processors

2009-10-06 Thread Mark Langsdorf
From 8ec340648103510095ff339b914706c81e9d815d Mon Sep 17 00:00:00 2001
From: Mark Langsdorf <mark.langsd...@amd.com>
Date: Wed, 9 Sep 2009 22:12:51 -0500
Subject: [PATCH] Support Pause Filter in AMD processors

New AMD processors (Family 0x10 models 8+) support the Pause
Filter Feature.  This feature creates a new field in the VMCB
called Pause Filter Count.  If Pause Filter Count is greater
than 0 and intercepting PAUSEs is enabled, the processor will
increment an internal counter when a PAUSE instruction occurs
instead of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.
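
(Illustration, not part of the patch: the counting semantics described
above can be modeled in plain C.  The type and function names below are
invented for this sketch; only the Pause Filter Count field itself comes
from the VMCB.)

#include <stdint.h>

/* Conceptual model of the Pause Filter behavior described above. */
struct pause_filter_model {
	uint16_t filter_count;		/* VMCB Pause Filter Count field */
	uint16_t internal_counter;	/* hidden counter inside the core */
};

/* Returns 1 if this PAUSE triggers a #VMEXIT, 0 if it runs natively. */
static int guest_executes_pause(struct pause_filter_model *pf,
				int pause_intercept_enabled)
{
	if (!pause_intercept_enabled)
		return 0;		/* PAUSE is never intercepted */
	if (pf->filter_count == 0)
		return 1;		/* no filtering: every PAUSE exits */
	if (++pf->internal_counter < pf->filter_count)
		return 0;		/* absorbed by the filter */
	pf->internal_counter = 0;
	return 1;			/* counter reached the limit: SVM_EXIT_PAUSE */
}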

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.
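
(For reference, the PAUSE density the filter measures comes from guest
spin loops like the simplified one below.  This is an invented stand-in
for a guest kernel's spinlock slow path; on x86, Linux's cpu_relax() is
exactly this "rep; nop", i.e. the PAUSE instruction.)

#include <stdatomic.h>

/* Toy guest-side spin lock: every iteration spent waiting executes one
 * PAUSE, which is what the Pause Filter hardware counts. */
static void toy_spin_lock(atomic_flag *lock)
{
	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
		__asm__ __volatile__("rep; nop" ::: "memory");	/* PAUSE */
}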

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 3000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.
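
(A userspace sketch of that check, for illustration only: the bit
position matches the SVM_FEATURE_PAUSE_FILTER definition in the patch,
which tests bit 10 of the SVM feature leaf, CPUID Fn8000_000A EDX.
Assumes GCC/clang's <cpuid.h>.)

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx))
		return 1;	/* SVM feature leaf not available */
	printf("Pause Filter supported: %s\n",
	       (edx & (1u << 10)) ? "yes" : "no");
	return 0;
}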

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 3-5% when combined
with a scheduler algorithm that caused the VCPU to
sleep for a brief period. Further performance improvement
may be possible with a more sophisticated yield algorithm.

This patch depends on the changes to the kvm code from
KVM:VMX: Add support for Pause Loop Exiting
http://www.mail-archive.com/kvm@vger.kernel.org/msg23089.html

-Mark Langsdorf
Operating System Research Center
AMD

Signed-off-by: Mark Langsdorf <mark.langsd...@amd.com>
---
 arch/x86/include/asm/svm.h |3 ++-
 arch/x86/kvm/svm.c |   13 +
 2 files changed, 15 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u16 intercept_dr_write;
u32 intercept_exceptions;
u64 intercept;
-   u8 reserved_1[44];
+   u8 reserved_1[42];
+   u16 pause_filter_count;
u64 iopm_base_pa;
u64 msrpm_base_pa;
u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 279a2ae..a07f969 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -46,6 +46,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define NESTED_EXIT_HOST   0   /* Exit handled on host level */
 #define NESTED_EXIT_DONE   1   /* Exit caused nested vmexit  */
@@ -660,6 +661,11 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;

+   if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+   control->pause_filter_count = 3000;
+   control->intercept |= (1ULL << INTERCEPT_PAUSE);
+   }
+
enable_gif(svm);
 }
 
@@ -2261,6 +2267,12 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm)
+{
+   kvm_vcpu_on_spin(&(svm->vcpu));
+   return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
[SVM_EXIT_READ_CR3] = emulate_on_interception,
@@ -2296,6 +2308,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_CPUID]= cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
+   [SVM_EXIT_PAUSE]= pause_interception,
[SVM_EXIT_HLT]  = halt_interception,
[SVM_EXIT_INVLPG]   = invlpg_interception,
[SVM_EXIT_INVLPGA]  = invlpga_interception,
-- 
1.6.0.2




[PATCH][retry 2] Support Pause Filter in AMD processors

2009-10-02 Thread Mark Langsdorf
From 66741f741da741e58e8162ef7809dd7d6f8e01cf Mon Sep 17 00:00:00 2001
From: Mark Langsdorf <mark.langsd...@amd.com>
Date: Fri, 2 Oct 2009 10:32:33 -0500
Subject: [PATCH] Support Pause Filter in AMD processors

New AMD processors (Family 0x10 models 8+) support the Pause
Filter Feature.  This feature creates a new field in the VMCB
called Pause Filter Count.  If Pause Filter Count is greater
than 0 and intercepting PAUSEs is enabled, the processor will
increment an internal counter when a PAUSE instruction occurs
instead of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 3000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 3-5% when combined
with a scheduler algorithm that caused the VCPU to
sleep for a brief period. Further performance improvement
may be possible with a more sophisticated yield algorithm.

This patch depends on the changes to the kvm code from
KVM:VMX: Add support for Pause Loop Exiting
http://www.mail-archive.com/kvm@vger.kernel.org/msg23089.html

-Mark Langsdorf
Operating System Research Center
AMD
---
 arch/x86/include/asm/svm.h |3 ++-
 arch/x86/kvm/svm.c |   16 
 2 files changed, 18 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u16 intercept_dr_write;
u32 intercept_exceptions;
u64 intercept;
-   u8 reserved_1[44];
+   u8 reserved_1[42];
+   u16 pause_filter_count;
u64 iopm_base_pa;
u64 msrpm_base_pa;
u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9a4daca..d5d2e03 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -46,6 +46,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define NESTED_EXIT_HOST   0   /* Exit handled on host level */
 #define NESTED_EXIT_DONE   1   /* Exit caused nested vmexit  */
@@ -659,6 +660,11 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;

+   if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+   control->pause_filter_count = 3000;
+   control->intercept |= (1ULL << INTERCEPT_PAUSE);
+   }
+
enable_gif(svm);
 }
 
@@ -2270,6 +2276,15 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm)
+{
+   static int pause_count = 0;
+
+   kvm_vcpu_on_spin(&(svm->vcpu));
+printk(KERN_ERR "MJLL pause intercepted %d\n", ++pause_count);
+   return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
[SVM_EXIT_READ_CR3] = emulate_on_interception,
@@ -2305,6 +2320,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_CPUID]= cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
+   [SVM_EXIT_PAUSE]= pause_interception,
[SVM_EXIT_HLT]  = halt_interception,
[SVM_EXIT_INVLPG]   = invlpg_interception,
[SVM_EXIT_INVLPGA]  = invlpga_interception,
-- 
1.6.0.2




[PATCH][KVM][2/2] Support Pause Filter in AMD processors

2009-09-18 Thread Mark Langsdorf
This patch depends on [PATCH] Prevent immediate process rescheduling
that I just submitted.

New AMD processors (Family 0x10 models 8+) support the Pause
Filter Feature.  This feature creates a new field in the VMCB
called Pause Filter Count.  If Pause Filter Count is greater
than 0 and intercepting PAUSEs is enabled, the processor will
increment an internal counter when a PAUSE instruction occurs
instead of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 3000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 3-5% when combined
with a scheduler algorithm that guaranteed that the yielding
process would not be immediately rescheduled.  Further
performance improvement may be possible with a more
sophisticated yield algorithm.

-Mark Langsdorf
Operating System Research Center
AMD

Signed-off-by: Mark Langsdorf <mark.langsd...@amd.com>
---
 arch/x86/include/asm/svm.h |3 ++-
 arch/x86/kvm/svm.c |   13 +
 2 files changed, 15 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u16 intercept_dr_write;
u32 intercept_exceptions;
u64 intercept;
-   u8 reserved_1[44];
+   u8 reserved_1[42];
+   u16 pause_filter_count;
u64 iopm_base_pa;
u64 msrpm_base_pa;
u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a2f2d43..28c49d0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -46,6 +46,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define NESTED_EXIT_HOST   0   /* Exit handled on host level */
 #define NESTED_EXIT_DONE   1   /* Exit caused nested vmexit  */
@@ -654,6 +655,11 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;

+   if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+   control->pause_filter_count = 3000;
+   control->intercept |= (1ULL << INTERCEPT_PAUSE);
+   }
+
enable_gif(svm);
 }
 
@@ -2255,6 +2261,12 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm)
+{
+   schedule();
+   return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
[SVM_EXIT_READ_CR3] = emulate_on_interception,
@@ -2290,6 +2302,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_CPUID]= cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
+   [SVM_EXIT_PAUSE]= pause_interception,
[SVM_EXIT_HLT]  = halt_interception,
[SVM_EXIT_INVLPG]   = invlpg_interception,
[SVM_EXIT_INVLPGA]  = invlpga_interception,
-- 
1.6.0.2





[PATCH][KVM][retry 4] Add support for Pause Filtering to AMD SVM

2009-05-20 Thread Mark Langsdorf
This feature creates a new field in the VMCB called Pause
Filter Count.  If Pause Filter Count is greater than 0 and
intercepting PAUSEs is enabled, the processor will increment
an internal counter when a PAUSE instruction occurs instead
of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.  Perform
the reschedule by increasing the credited time on
the VCPU.
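
(The sched_delay_yield() body is truncated in this archive.  The sketch
below is a hypothetical reconstruction of the idea, assuming it works
like the set_task_delay() one-liner in the May 19 posting further down:
pushing the task's CFS vruntime forward makes it look as if it has
already run, so other runnable tasks are preferred until the charge is
used up.)

/* Hypothetical reconstruction, not the posted code. */
void sched_delay_yield(unsigned long ns)
{
	struct task_struct *p = current;

	p->se.vruntime += ns;	/* charge @ns of virtual runtime */
	schedule();		/* let another runnable task have the CPU */
}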

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 3000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 1%.  Further performance
improvement may be possible with a more sophisticated
yield algorithm.

-Mark Langsdorf
Operating System Research Center
AMD

Signed-off-by: Mark Langsdorf <mark.langsd...@amd.com>
---
 arch/x86/include/asm/svm.h |3 ++-
 arch/x86/kvm/svm.c |   13 +
 include/linux/sched.h  |7 +++
 kernel/sched.c |   18 ++
 4 files changed, 40 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u16 intercept_dr_write;
u32 intercept_exceptions;
u64 intercept;
-   u8 reserved_1[44];
+   u8 reserved_1[42];
+   u16 pause_filter_count;
u64 iopm_base_pa;
u64 msrpm_base_pa;
u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ef43a18..dad6c4b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
@@ -575,6 +576,11 @@ static void init_vmcb(struct vcpu_svm *svm)
 
svm->nested_vmcb = 0;
svm->vcpu.arch.hflags = HF_GIF_MASK;
+
+   if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+   control->pause_filter_count = 3000;
+   control->intercept |= (1ULL << INTERCEPT_PAUSE);
+   }
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -2087,6 +2093,12 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+   sched_delay_yield(100);
+   return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
  struct kvm_run *kvm_run) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
@@ -2123,6 +2135,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_CPUID]= cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
+   [SVM_EXIT_PAUSE]= pause_interception,
[SVM_EXIT_HLT]  = halt_interception,
[SVM_EXIT_INVLPG]   = invlpg_interception,
[SVM_EXIT_INVLPGA]  = invalid_op_interception,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c38bc..9cde585 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2283,6 +2283,9 @@ static inline unsigned int task_cpu(const struct task_struct *p)
return task_thread_info(p)->cpu;
 }
 
+extern void sched_delay_yield(unsigned long ns);
+
+
 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
 #else
@@ -2292,6 +2295,10 @@ static inline unsigned int task_cpu(const struct task_struct *p)
return 0;
 }
 
+static inline void sched_delay_yield(unsigned long ns)
+{
+}
+
 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index b902e58..3aed2f6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1947,6 +1947,24 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
return delta < (s64)sysctl_sched_migration_cost;
 }
 
+/*
+ * Interface for yielding a thread by delaying it for a known
+ * interval.  Use at your own risk and not with real-time.
+ *
+ * Like yield, except for SCHED_OTHER/BATCH, where it will
+ * give us @ns time for the 'good' cause.
+ */
+void sched_delay_yield(unsigned long ns)
+{
+   struct task_struct

[PATCH][KVM][retry 3] Add support for Pause Filtering to AMD SVM

2009-05-19 Thread Mark Langsdorf
From 67f831e825b64be5dedae9936ff8a60b884959f2 Mon Sep 17 00:00:00 2001
From: mark.langsd...@amd.com 
Date: Tue, 19 May 2009 07:46:11 -0500
Subject: [PATCH]

This feature creates a new field in the VMCB called Pause
Filter Count.  If Pause Filter Count is greater than 0 and
intercepting PAUSEs is enabled, the processor will increment
an internal counter when a PAUSE instruction occurs instead
of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.  Perform
the reschedule by increasing the credited time on
the VCPU.

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 5000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 1%.  Further performance
improvement may be possible with a more sophisticated
yield algorithm.

-Mark Langsdorf
Operating System Research Center
AMD

Signed-off-by: Mark Langsdorf <mark.langsd...@amd.com>
---
 arch/x86/include/asm/svm.h |3 ++-
 arch/x86/kvm/svm.c |   13 +
 include/linux/sched.h  |7 +++
 kernel/sched.c |5 +
 4 files changed, 27 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u16 intercept_dr_write;
u32 intercept_exceptions;
u64 intercept;
-   u8 reserved_1[44];
+   u8 reserved_1[42];
+   u16 pause_filter_count;
u64 iopm_base_pa;
u64 msrpm_base_pa;
u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ef43a18..86df191 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
@@ -575,6 +576,11 @@ static void init_vmcb(struct vcpu_svm *svm)
 
svm->nested_vmcb = 0;
svm->vcpu.arch.hflags = HF_GIF_MASK;
+
+   if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+   control->pause_filter_count = 3000;
+   control->intercept |= (1ULL << INTERCEPT_PAUSE);
+   }
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -2087,6 +2093,12 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+   set_task_delay(current, 100);
+   return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
  struct kvm_run *kvm_run) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
@@ -2123,6 +2135,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_CPUID]= cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
+   [SVM_EXIT_PAUSE]= pause_interception,
[SVM_EXIT_HLT]  = halt_interception,
[SVM_EXIT_INVLPG]   = invlpg_interception,
[SVM_EXIT_INVLPGA]  = invalid_op_interception,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c38bc..683bc65 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2283,6 +2283,9 @@ static inline unsigned int task_cpu(const struct task_struct *p)
return task_thread_info(p)->cpu;
 }
 
+extern void set_task_delay(struct task_struct *p, unsigned int delay);
+
+
 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
 #else
@@ -2292,6 +2295,10 @@ static inline unsigned int task_cpu(const struct task_struct *p)
return 0;
 }
 
+static inline void set_task_delay(struct task_struct *p, unsigned int delay)
+{
+}
+
 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index b902e58..3174620 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1947,6 +1947,11 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
return delta < (s64)sysctl_sched_migration_cost;
 }
 
+void set_task_delay(struct task_struct *p, unsigned int delay)
+{
+   p->se.vruntime += delay;
+}
+EXPORT_SYMBOL(set_task_delay);
 
 void

[PATCH][KVM][retry 2] Add support for Pause Filtering to AMD SVM

2009-05-08 Thread Mark Langsdorf
From 01813db8627e74018c8cec90df7e345839351f23 Mon Sep 17 00:00:00 2001
From: Mark Langsdorf <mark.langsd...@amd.com>
Date: Thu, 7 May 2009 09:44:10 -0500
Subject: [PATCH] Add support for Pause Filtering to AMD SVM

This feature creates a new field in the VMCB called Pause
Filter Count.  If Pause Filter Count is greater than 0 and
intercepting PAUSEs is enabled, the processor will increment
an internal counter when a PAUSE instruction occurs instead
of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 3000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 1%.  Further performance
improvement may be possible with a more sophisticated
yield algorithm.

-Mark Langsdorf
Operating System Research Center
AMD

Signed-off-by: Mark Langsdorf <mark.langsd...@amd.com>
---
 arch/x86/include/asm/svm.h |3 ++-
 arch/x86/kvm/svm.c |   17 +
 virt/kvm/kvm_main.c|2 ++
 3 files changed, 21 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u16 intercept_dr_write;
u32 intercept_exceptions;
u64 intercept;
-   u8 reserved_1[44];
+   u8 reserved_1[42];
+   u16 pause_filter_count;
u64 iopm_base_pa;
u64 msrpm_base_pa;
u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ef43a18..4279141 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
@@ -575,6 +576,12 @@ static void init_vmcb(struct vcpu_svm *svm)
 
svm->nested_vmcb = 0;
svm->vcpu.arch.hflags = HF_GIF_MASK;
+
+   if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+   control->pause_filter_count = 5000;
+   control->intercept |= (1ULL << INTERCEPT_PAUSE);
+   }
+
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -2087,6 +2094,15 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+   /* Simple yield */
+   vcpu_put(&svm->vcpu);
+   schedule();
+   vcpu_load(&svm->vcpu);
+   return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
  struct kvm_run *kvm_run) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
@@ -2123,6 +2139,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_CPUID]= cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
+   [SVM_EXIT_PAUSE]= pause_interception,
[SVM_EXIT_HLT]  = halt_interception,
[SVM_EXIT_INVLPG]   = invlpg_interception,
[SVM_EXIT_INVLPGA]  = invalid_op_interception,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2b73e19..e2b730d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -710,6 +710,7 @@ void vcpu_load(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
 }
+EXPORT_SYMBOL_GPL(vcpu_load);
 
 void vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -719,6 +720,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
preempt_enable();
mutex_unlock(&vcpu->mutex);
 }
+EXPORT_SYMBOL_GPL(vcpu_put);
 
 static void ack_flush(void *_completed)
 {
-- 
1.6.0.2




[PATCH][KVM][retry 1] Add support for Pause Filtering to AMD SVM

2009-05-07 Thread Mark Langsdorf
commit 01813db8627e74018c8cec90df7e345839351f23
Author: root <r...@xendinar01.amd.com>
Date:   Thu May 7 09:44:10 2009 -0500

New AMD processors will support the Pause Filter Feature.
This feature creates a new field in the VMCB called Pause
Filter Count.  If Pause Filter Count is greater than 0 and
intercepting PAUSEs is enabled, the processor will increment
an internal counter when a PAUSE instruction occurs instead
of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 3000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 1%.  Further performance
improvement may be possible with a more sophisticated
yield algorithm.

-Mark Langsdorf
Operating System Research Center
AMD

Signed-off-by: Mark Langsdorf <mark.langsd...@amd.com>

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u16 intercept_dr_write;
u32 intercept_exceptions;
u64 intercept;
-   u8 reserved_1[44];
+   u8 reserved_1[42];
+   u16 pause_filter_count;
u64 iopm_base_pa;
u64 msrpm_base_pa;
u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ef43a18..4279141 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
@@ -575,6 +576,12 @@ static void init_vmcb(struct vcpu_svm *svm)
 
svm->nested_vmcb = 0;
svm->vcpu.arch.hflags = HF_GIF_MASK;
+
+   if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+   control->pause_filter_count = 5000;
+   control->intercept |= (1ULL << INTERCEPT_PAUSE);
+   }
+
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -2087,6 +2094,15 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+   /* Simple yield */
+   vcpu_put(&svm->vcpu);
+   schedule();
+   vcpu_load(&svm->vcpu);
+   return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
  struct kvm_run *kvm_run) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
@@ -2123,6 +2139,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_CPUID]= cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
+   [SVM_EXIT_PAUSE]= pause_interception,
[SVM_EXIT_HLT]  = halt_interception,
[SVM_EXIT_INVLPG]   = invlpg_interception,
[SVM_EXIT_INVLPGA]  = invalid_op_interception,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2b73e19..e2b730d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -710,6 +710,7 @@ void vcpu_load(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
 }
+EXPORT_SYMBOL_GPL(vcpu_load);
 
 void vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -719,6 +720,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
preempt_enable();
mutex_unlock(&vcpu->mutex);
 }
+EXPORT_SYMBOL_GPL(vcpu_put);
 
 static void ack_flush(void *_completed)
 {
