The mem field itself will switch from the outside view to the "inside"
view if the VM reports detailed memory usage information via the
ballooning device.

Since other processes, for example swtpm, can belong to a VM too, we
collect all PIDs in the VM's cgroup and sum up their PSS data, which
proportionally accounts for memory shared between processes, such as
shared libraries.
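
For reference, a minimal standalone sketch of that approach (same
cgroup and /proc paths as in the patch below; the helper name is made
up for illustration, error handling is simplified):

    # Sum the proportional set size (PSS) of all processes in a VM's
    # cgroup. PSS divides each shared page among the processes mapping
    # it, so the sum does not double-count shared libraries.
    sub cgroup_pss_sum {
        my ($vmid) = @_;
        my $total = 0;
        open(my $procs, '<', "/sys/fs/cgroup/qemu.slice/${vmid}.scope/cgroup.procs")
            or return undef; # cgroup gone, VM is probably stopped
        while (my $pid = <$procs>) {
            chomp $pid;
            # smaps_rollup pre-sums the per-mapping values of /proc/PID/smaps
            open(my $smaps, '<', "/proc/$pid/smaps_rollup") or next;
            while (my $line = <$smaps>) {
                if ($line =~ m/^Pss:\s+(\d+) kB$/) {
                    $total += $1 * 1024; # kernel reports kB
                    last;
                }
            }
            close($smaps);
        }
        close($procs);
        return $total;
    }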

Signed-off-by: Aaron Lauterer <a.laute...@proxmox.com>
---

Notes:
    changes since:
    v2:
    * add memhost description to $vmstatus_return_properties
    * reorder to run before the cpu collection. Otherwise it might be
      skipped on the first call, or when using `pvesh`, if the cpu
      collection triggers 'next'.
    RFC:
    * collect memory info for all processes in cgroup directly without too
      generic helper function

 src/PVE/QemuServer.pm | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/src/PVE/QemuServer.pm b/src/PVE/QemuServer.pm
index 9e2c621..630cef6 100644
--- a/src/PVE/QemuServer.pm
+++ b/src/PVE/QemuServer.pm
@@ -2426,6 +2426,12 @@ our $vmstatus_return_properties = {
         optional => 1,
         renderer => 'bytes',
     },
+    memhost => {
+        description => "Current memory usage on the host.",
+        type => 'integer',
+        optional => 1,
+        renderer => 'bytes',
+    },
     maxdisk => {
         description => "Root disk size in bytes.",
         type => 'integer',
@@ -2616,6 +2622,7 @@ sub vmstatus {
         $d->{uptime} = 0;
         $d->{cpu} = 0;
         $d->{mem} = 0;
+        $d->{memhost} = 0;
 
         $d->{netout} = 0;
         $d->{netin} = 0;
@@ -2668,6 +2675,24 @@ sub vmstatus {
             $d->{mem} = int(($pstat->{rss} / $pstat->{vsize}) * $d->{maxmem});
         }
 
+        my $fh = IO::File->new("/sys/fs/cgroup/qemu.slice/${vmid}.scope/cgroup.procs", "r");
+        if ($fh) {
+            while (my $child_pid = <$fh>) {
+                chomp($child_pid);
+                open(my $SMAPS_FH, '<', "/proc/$child_pid/smaps_rollup")
+                    or die "failed to open PSS memory-stat from process - $!\n";
+
+                while (my $line = <$SMAPS_FH>) {
+                    if ($line =~ m/^Pss:\s+([0-9]+) kB$/) {
+                        $d->{memhost} = $d->{memhost} + int($1) * 1024;
+                        last;
+                    }
+                }
+                close $SMAPS_FH;
+            }
+            close($fh);
+        }
+
         my $pressures = PVE::ProcFSTools::read_cgroup_pressure("qemu.slice/${vmid}.scope");
         $d->{pressurecpusome} = $pressures->{cpu}->{some}->{avg10} * 1;
         $d->{pressurecpufull} = $pressures->{cpu}->{full}->{avg10} * 1;
@@ -2700,7 +2725,6 @@ sub vmstatus {
         } else {
             $d->{cpu} = $old->{cpu};
         }
-
     }
 
     return $res if !$full;
-- 
2.39.5
