Re: [Xen-devel] [PATCH 2/4] xen: sched: optimize exclusive pinning case (Credit1 & 2)

2017-10-04 Thread George Dunlap
On 09/15/2017 06:35 PM, Dario Faggioli wrote:
> Exclusive pinning of vCPUs is sometimes used to achieve
> the highest level of determinism, and the least possible
> overhead, for the vCPUs in question.
> 
> Although static 1:1 pinning is not recommended for general
> use cases, optimizing the tickling code (of Credit1 and
> Credit2) is easy and cheap enough, so go for it.
> 
> Signed-off-by: Dario Faggioli 

Reviewed-by: George Dunlap 



Re: [Xen-devel] [PATCH 2/4] xen: sched: optimize exclusive pinning case (Credit1 & 2)

2017-09-20 Thread Anshul Makkar



On 9/15/17 6:35 PM, Dario Faggioli wrote:
  
  static unsigned int __read_mostly opt_migrate_resist = 500;

  integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
@@ -1453,6 +1459,26 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
                     (unsigned char *)&d);
  }
  
+/*

+ * Exclusive pinning is when a vcpu has hard-affinity with only one
+ * cpu, and there is no other vcpu that has hard-affinity with that
+ * same cpu. This is infrequent, but if it happens, it is for
+ * achieving the highest possible determinism, and the least
+ * possible overhead, for the vcpus in question.
+ *
+ * Try to identify the vast majority of these situations, and deal
+ * with them quickly.

Sorry if I missed reviewing the earlier series on the same subject.
But I am not completely satisfied with the condition that pinning to a
pcpu counts as exclusive if and only if no other vcpu has hard affinity
with that pcpu.

I think we can do away with this condition and give pinning priority.
Your thoughts, please.
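
(For illustration of the distinction being discussed: the flag the
patch maintains records only "this vcpu's hard affinity has weight 1";
the stronger property described in the comment would also require that
no other vcpu's hard affinity covers that pcpu. Below is a toy sketch
of the two properties, with plain uint64_t bitmasks standing in for
Xen's cpumask_t and invented toy_* names throughout; it is not Xen
code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t toy_mask;  /* one bit per pcpu; stand-in for cpumask_t */

/* What CSFLAG_pinned / CSCHED_FLAG_VCPU_PINNED records: weight == 1. */
static bool toy_is_pinned(toy_mask hard)
{
    return __builtin_popcountll(hard) == 1;
}

/* The stronger "exclusive" property from the comment: pinned, AND no
 * other vcpu's hard affinity intersects this vcpu's single pcpu. This
 * needs a scan over all other vcpus, which is why the patch settles
 * for the cheap weight-1 test plus an idle check at tickle time. */
static bool toy_is_exclusive(toy_mask hard, const toy_mask *others, int n)
{
    if ( !toy_is_pinned(hard) )
        return false;
    for ( int i = 0; i < n; i++ )
        if ( others[i] & hard )
            return false;
    return true;
}

int main(void)
{
    toy_mask v = 1ULL << 3;                            /* pinned to pcpu 3 */
    toy_mask others[] = { (1ULL << 2) | (1ULL << 3) }; /* overlaps pcpu 3 */

    printf("pinned=%d exclusive=%d\n",
           toy_is_pinned(v), toy_is_exclusive(v, others, 1));
    return 0;
}
)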

+ */
+    if ( unlikely((new->flags & CSFLAG_pinned) &&
+                  cpumask_test_cpu(cpu, &rqd->idle) &&
+                  !cpumask_test_cpu(cpu, &rqd->tickled)) )
+    {
+        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        SCHED_STAT_CRANK(tickled_idle_cpu_excl);
+        ipid = cpu;
+        goto tickle;
+    }
+
  for_each_affinity_balance_step( bs )
  {
  /* Just skip first step, if we don't have a soft affinity */
@@ -2826,6 +2852,19 @@ csched2_dom_cntl(
  return rc;
  }
  
+static void
+csched2_aff_cntl(const struct scheduler *ops, struct vcpu *v,
+                 const cpumask_t *hard, const cpumask_t *soft)
+{
+    struct csched2_vcpu *svc = csched2_vcpu(v);
+
+    /* Are we becoming exclusively pinned? */
+    if ( cpumask_weight(hard) == 1 )
+        __set_bit(__CSFLAG_pinned, &svc->flags);
+    else
+        __clear_bit(__CSFLAG_pinned, &svc->flags);
+}
+
  static int csched2_sys_cntl(const struct scheduler *ops,
  struct xen_sysctl_scheduler_op *sc)

Looks fine.

Anshul




[Xen-devel] [PATCH 2/4] xen: sched: optimize exclusive pinning case (Credit1 & 2)

2017-09-15 Thread Dario Faggioli
Exclusive pinning of vCPUs is sometimes used to achieve
the highest level of determinism, and the least possible
overhead, for the vCPUs in question.

Although static 1:1 pinning is not recommended for general
use cases, optimizing the tickling code (of Credit1 and
Credit2) is easy and cheap enough, so go for it.

Signed-off-by: Dario Faggioli 
---
Cc: George Dunlap 
Cc: Anshul Makkar 
---
Changes from v1:
- use a flag during runtime, as suggested during review (see the toy
  sketch below the diffstat);
- make use of the affinity-change hook, introduced in the previous patch.
---
 xen/common/sched_credit.c    |   35 +++
 xen/common/sched_credit2.c   |   40 
 xen/include/xen/perfc_defn.h |    1 +
 3 files changed, 76 insertions(+)
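
For readers skimming the diffs below, the mechanism is small: the new
adjust_affinity hook keeps a per-vcpu "pinned" flag in sync with
hard-affinity changes, and the tickle paths use that flag for an O(1)
short cut. Here is a toy model of that lifecycle, with simplified,
hypothetical types (plain uint64_t bitmasks and invented toy_* names;
not Xen's actual scheduler interfaces):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t toy_mask;  /* one bit per pcpu; stand-in for cpumask_t */

struct toy_vcpu {
    toy_mask hard;   /* hard affinity */
    bool pinned;     /* mirrors CSCHED_FLAG_VCPU_PINNED / CSFLAG_pinned */
};

/* Analogue of csched_aff_cntl()/csched2_aff_cntl(): runs whenever the
 * vcpu's affinity changes, so the flag never needs computing on the
 * hot tickle path. */
static void toy_aff_cntl(struct toy_vcpu *v, toy_mask new_hard)
{
    v->hard = new_hard;
    v->pinned = (__builtin_popcountll(new_hard) == 1);
}

/* Analogue of the fast path added to the tickle functions: if the
 * waking vcpu can only run on 'cpu', and that cpu is idle and not
 * already tickled, tickle it directly and skip the whole
 * affinity-balancing scan. Returns the cpu to tickle, or -1 to fall
 * through to the full logic. */
static int toy_tickle_fast_path(const struct toy_vcpu *v, int cpu,
                                toy_mask idle, toy_mask tickled)
{
    toy_mask bit = 1ULL << cpu;

    if ( v->pinned && (idle & bit) && !(tickled & bit) )
        return cpu;
    return -1;
}

int main(void)
{
    struct toy_vcpu v = { 0, false };

    toy_aff_cntl(&v, 1ULL << 5);  /* pin to pcpu 5: flag becomes set */
    printf("fast path picks cpu %d\n",
           toy_tickle_fast_path(&v, 5, 1ULL << 5 /* idle */, 0));
    return 0;
}

In the actual hunks, Credit1 checks the flag plus the idle mask, while
Credit2 additionally checks the runqueue's tickled mask.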

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 4fdaa08..3efbfc8 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -73,6 +73,7 @@
 #define CSCHED_FLAG_VCPU_PARKED0x0  /* VCPU over capped credits */
 #define CSCHED_FLAG_VCPU_YIELD 0x1  /* VCPU yielding */
 #define CSCHED_FLAG_VCPU_MIGRATING 0x2  /* VCPU may have moved to a new pcpu */
+#define CSCHED_FLAG_VCPU_PINNED0x4  /* VCPU can run only on 1 pcpu */
 
 
 /*
@@ -362,6 +363,25 @@ static inline void __runq_tickle(struct csched_vcpu *new)
     idlers_empty = cpumask_empty(&idle_mask);
 
 /*
+ * Exclusive pinning is when a vcpu has hard-affinity with only one
+ * cpu, and there is no other vcpu that has hard-affinity with that
+ * same cpu. This is infrequent, but if it happens, it is for
+ * achieving the highest possible determinism, and the least
+ * possible overhead, for the vcpus in question.
+ *
+ * Try to identify the vast majority of these situations, and deal
+ * with them quickly.
+ */
+    if ( unlikely(test_bit(CSCHED_FLAG_VCPU_PINNED, &new->flags) &&
+                  cpumask_test_cpu(cpu, &idle_mask)) )
+    {
+        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        SCHED_STAT_CRANK(tickled_idle_cpu_excl);
+        __cpumask_set_cpu(cpu, &mask);
+        goto tickle;
+    }
+
+/*
  * If the pcpu is idle, or there are no idlers and the new
  * vcpu is a higher priority than the old vcpu, run it here.
  *
@@ -457,6 +477,7 @@ static inline void __runq_tickle(struct csched_vcpu *new)
 }
 }
 
+ tickle:
     if ( !cpumask_empty(&mask) )
 {
 if ( unlikely(tb_init_done) )
@@ -1223,6 +1244,19 @@ csched_dom_cntl(
 return rc;
 }
 
+static void
+csched_aff_cntl(const struct scheduler *ops, struct vcpu *v,
+const cpumask_t *hard, const cpumask_t *soft)
+{
+    struct csched_vcpu *svc = CSCHED_VCPU(v);
+
+    /* Are we becoming exclusively pinned? */
+    if ( cpumask_weight(hard) == 1 )
+        set_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags);
+    else
+        clear_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags);
+}
+
 static inline void
 __csched_set_tslice(struct csched_private *prv, unsigned timeslice)
 {
@@ -2270,6 +2304,7 @@ static const struct scheduler sched_credit_def = {
 .yield  = csched_vcpu_yield,
 
 .adjust = csched_dom_cntl,
+.adjust_affinity= csched_aff_cntl,
 .adjust_global  = csched_sys_cntl,
 
 .pick_cpu   = csched_cpu_pick,
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 32234ac..e1985fb 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -299,6 +299,12 @@
  */
 #define __CSFLAG_vcpu_yield 4
 #define CSFLAG_vcpu_yield (1U<<__CSFLAG_vcpu_yield)
+/*
+ * CSFLAG_pinned: this vcpu is currently 'pinned', i.e., has its hard
+ * affinity set to one and only one cpu (and, hence, can only run there).
+ */
+#define __CSFLAG_pinned 5
+#define CSFLAG_pinned (1U<<__CSFLAG_pinned)
 
 static unsigned int __read_mostly opt_migrate_resist = 500;
 integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
@@ -1453,6 +1459,26 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
                     (unsigned char *)&d);
 }
 
+/*
+ * Exclusive pinning is when a vcpu has hard-affinity with only one
+ * cpu, and there is no other vcpu that has hard-affinity with that
+ * same cpu. This is infrequent, but if it happens, it is for
+ * achieving the highest possible determinism, and the least
+ * possible overhead, for the vcpus in question.
+ *
+ * Try to identify the vast majority of these situations, and deal
+ * with them quickly.
+ */
+    if ( unlikely((new->flags & CSFLAG_pinned) &&
+                  cpumask_test_cpu(cpu, &rqd->idle) &&
+                  !cpumask_test_cpu(cpu, &rqd->tickled)) )
+    {
+        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        SCHED_STAT_CRANK(tickled_idle_cpu_excl);
+        ipid = cpu;
+        goto tickle;
+    }
+
 for_each_affinity_balance_step( bs )
 {
 /*