Re: [libvirt] [PATCH 1/3] numad: Convert node list to cpumap before setting affinity

2012-04-16 Thread Daniel Veillard
On Wed, Apr 11, 2012 at 10:40:32PM +0800, Osier Yang wrote:
 Instead of returning a CPUs list, numad returns NUMA node
 list instead, this patch is to convert the node list to
 cpumap before affinity setting. Otherwise, the domain
 processes will be pinned only to CPU[$numa_cell_num],
 which will cause significiant performance losing.

  s/losing/losses/

 Also because numad will balance the affinity dynamically,
 reflecting the cpuset from numad back doesn't make much
 sense then, and it may just could produce confusion for
 users' eyes. Thus the better way is not to reflect it back

  confusion for the users.

 to XML. And in this case, it's better to ignore the cpuset
 when parsing XML.
 
 The codes to update the cpuset is removed in this patch
 incidentally, and there will be a follow up patch to ignore
 the manually specified cpuset if placement is auto,
 and document will be updated too.
 
 ---
 The patch is tested on a NUMA box with 4 cells, 24 CPUs.
 ---
  src/qemu/qemu_process.c |   33 ++++++++++++++++++++-------------
  1 files changed, 20 insertions(+), 13 deletions(-)
 
 diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
 index 481b4f3..0bf743b 100644
 --- a/src/qemu/qemu_process.c
 +++ b/src/qemu/qemu_process.c
 @@ -1819,36 +1819,43 @@ qemuProcessInitCpuAffinity(struct qemud_driver *driver,
      }
 
      if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
 -        char *tmp_cpumask = NULL;
          char *nodeset = NULL;
 +        char *nodemask = NULL;
 
          nodeset = qemuGetNumadAdvice(vm->def);
          if (!nodeset)
              return -1;
 
 -        if (VIR_ALLOC_N(tmp_cpumask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
 +        if (VIR_ALLOC_N(nodemask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
              virReportOOMError();
              return -1;
          }
 
 -        if (virDomainCpuSetParse(nodeset, 0, tmp_cpumask,
 +        if (virDomainCpuSetParse(nodeset, 0, nodemask,
                                   VIR_DOMAIN_CPUMASK_LEN) < 0) {
 -            VIR_FREE(tmp_cpumask);
 +            VIR_FREE(nodemask);
              VIR_FREE(nodeset);
              return -1;
          }
 
 -        for (i = 0; i < maxcpu && i < VIR_DOMAIN_CPUMASK_LEN; i++) {
 -            if (tmp_cpumask[i])
 -                VIR_USE_CPU(cpumap, i);
 +        /* numad returns the NUMA node list, convert it to cpumap */
 +        int prev_total_ncpus = 0;
 +        for (i = 0; i < driver->caps->host.nnumaCell; i++) {
 +            int j;
 +            int cur_ncpus = driver->caps->host.numaCell[i]->ncpus;
 +            if (nodemask[i]) {
 +                for (j = prev_total_ncpus;
 +                     j < cur_ncpus + prev_total_ncpus &&
 +                     j < maxcpu &&
 +                     j < VIR_DOMAIN_CPUMASK_LEN;
 +                     j++) {
 +                    VIR_USE_CPU(cpumap, j);
 +                }
 +            }
 +            prev_total_ncpus += cur_ncpus;
          }
 
 -        VIR_FREE(vm->def->cpumask);
 -        vm->def->cpumask = tmp_cpumask;
 -        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
 -            VIR_WARN("Unable to save status on vm %s after state change",
 -                     vm->def->name);
 -        }
 +        VIR_FREE(nodemask);
          VIR_FREE(nodeset);
      } else {
          if (vm->def->cpumask) {

  ACK, but fix the commit message, and wait to see the feedback on 2/3
  and 3/3 as this should not be committed in isolation

Daniel

-- 
Daniel Veillard  | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
dan...@veillard.com  | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 1/3] numad: Convert node list to cpumap before setting affinity

2012-04-16 Thread Osier Yang

On 2012-04-16 15:45, Daniel Veillard wrote:

On Wed, Apr 11, 2012 at 10:40:32PM +0800, Osier Yang wrote:
[...]

   ACK, but fix the commit message, and wait to see the feedback on 2/3
   and 3/3 as this should not be committed in isolation

Daniel


Thanks, pushed the series with the nits fixed, and with the conflicts
with commit 354e6d4ed resolved.

Regards,
Osier





[libvirt] [PATCH 1/3] numad: Convert node list to cpumap before setting affinity

2012-04-11 Thread Osier Yang
Instead of returning a CPUs list, numad returns NUMA node
list instead, this patch is to convert the node list to
cpumap before affinity setting. Otherwise, the domain
processes will be pinned only to CPU[$numa_cell_num],
which will cause significiant performance losing.

Also because numad will balance the affinity dynamically,
reflecting the cpuset from numad back doesn't make much
sense then, and it may just could produce confusion for
users' eyes. Thus the better way is not to reflect it back
to XML. And in this case, it's better to ignore the cpuset
when parsing XML.

The codes to update the cpuset is removed in this patch
incidentally, and there will be a follow up patch to ignore
the manually specified cpuset if placement is auto,
and document will be updated too.

---
The patch is tested on a NUMA box with 4 cells, 24 CPUs.
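
To make the conversion concrete: assuming the 4 cells expose 6 CPUs each
(an assumption; the patch only states 24 CPUs total), the loop in the diff
below turns numad advice such as "1,3" into CPUs 6-11 and 18-23. A minimal
standalone sketch of the same walk, with plain arrays standing in for
virCapsPtr and VIR_USE_CPU (not libvirt code):

#include <stdio.h>

#define NNODES 4
#define MAXCPU 24

int main(void)
{
    int ncpus[NNODES] = { 6, 6, 6, 6 };     /* assumed CPUs per cell */
    char nodemask[NNODES] = { 0, 1, 0, 1 }; /* numad advice "1,3", parsed */
    char cpumap[MAXCPU] = { 0 };
    int prev_total_ncpus = 0;
    int i, j;

    /* Each advised NUMA node selects its whole range of CPUs */
    for (i = 0; i < NNODES; i++) {
        if (nodemask[i]) {
            for (j = prev_total_ncpus;
                 j < prev_total_ncpus + ncpus[i] && j < MAXCPU;
                 j++)
                cpumap[j] = 1;              /* stands in for VIR_USE_CPU */
        }
        prev_total_ncpus += ncpus[i];
    }

    for (i = 0; i < MAXCPU; i++)
        if (cpumap[i])
            printf("CPU %d usable\n", i);   /* prints 6-11 and 18-23 */
    return 0;
}

Without the conversion, "1,3" would be applied directly as a CPU list,
pinning the whole domain to CPUs 1 and 3 only.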
---
 src/qemu/qemu_process.c |   33 ++++++++++++++++++++-------------
 1 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 481b4f3..0bf743b 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1819,36 +1819,43 @@ qemuProcessInitCpuAffinity(struct qemud_driver *driver,
 }
 
     if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
-        char *tmp_cpumask = NULL;
         char *nodeset = NULL;
+        char *nodemask = NULL;

         nodeset = qemuGetNumadAdvice(vm->def);
         if (!nodeset)
             return -1;

-        if (VIR_ALLOC_N(tmp_cpumask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
+        if (VIR_ALLOC_N(nodemask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
             virReportOOMError();
             return -1;
         }

-        if (virDomainCpuSetParse(nodeset, 0, tmp_cpumask,
+        if (virDomainCpuSetParse(nodeset, 0, nodemask,
                                  VIR_DOMAIN_CPUMASK_LEN) < 0) {
-            VIR_FREE(tmp_cpumask);
+            VIR_FREE(nodemask);
             VIR_FREE(nodeset);
             return -1;
         }

-        for (i = 0; i < maxcpu && i < VIR_DOMAIN_CPUMASK_LEN; i++) {
-            if (tmp_cpumask[i])
-                VIR_USE_CPU(cpumap, i);
+        /* numad returns the NUMA node list, convert it to cpumap */
+        int prev_total_ncpus = 0;
+        for (i = 0; i < driver->caps->host.nnumaCell; i++) {
+            int j;
+            int cur_ncpus = driver->caps->host.numaCell[i]->ncpus;
+            if (nodemask[i]) {
+                for (j = prev_total_ncpus;
+                     j < cur_ncpus + prev_total_ncpus &&
+                     j < maxcpu &&
+                     j < VIR_DOMAIN_CPUMASK_LEN;
+                     j++) {
+                    VIR_USE_CPU(cpumap, j);
+                }
+            }
+            prev_total_ncpus += cur_ncpus;
         }

-        VIR_FREE(vm->def->cpumask);
-        vm->def->cpumask = tmp_cpumask;
-        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
-            VIR_WARN("Unable to save status on vm %s after state change",
-                     vm->def->name);
-        }
+        VIR_FREE(nodemask);
         VIR_FREE(nodeset);
     } else {
         if (vm->def->cpumask) {
-- 
1.7.1
