[pve-devel] [PATCH qemu-server] fix #2101: ipv6 ending in ':' not parsed as a string

2019-02-21 Thread David Limbeck
A space or newline after ':' is recognized as the start of a YAML mapping, so
an IPv6 address ending in ':' is not parsed as a string. The solution is to
quote the address. For consistency, all other addresses (including MAC
addresses) are quoted as well.
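
A minimal Perl sketch (not part of the patch) of the failure mode; the
address value is illustrative:

    use strict;
    use warnings;

    my $gw6 = 'fe80::';               # any address ending in ':'
    print "gateway6: $gw6\n";         # ':' right before the newline reads as a mapping indicator
    print "gateway6: '$gw6'\n";       # quoted, always parsed as a plain string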

Signed-off-by: David Limbeck 
---
 PVE/QemuServer/Cloudinit.pm | 38 +++---
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
index 5be820c..4dc4a14 100644
--- a/PVE/QemuServer/Cloudinit.pm
+++ b/PVE/QemuServer/Cloudinit.pm
@@ -174,9 +174,9 @@ sub configdrive2_network {
} else {
my ($addr, $mask) = split_ip4($net->{ip});
$content .= "iface $id inet static\n";
-   $content .= "address $addr\n";
-   $content .= "netmask $mask\n";
-   $content .= "gateway $net->{gw}\n" if $net->{gw};
+   $content .= "address '$addr'\n";
+   $content .= "netmask '$mask'\n";
+   $content .= "gateway '$net->{gw}'\n" if $net->{gw};
}
}
if ($net->{ip6}) {
@@ -185,9 +185,9 @@ sub configdrive2_network {
} else {
my ($addr, $mask) = split('/', $net->{ip6});
$content .= "iface $id inet6 static\n";
-   $content .= "address $addr\n";
-   $content .= "netmask $mask\n";
-   $content .= "gateway $net->{gw6}\n" if $net->{gw6};
+   $content .= "address '$addr'\n";
+   $content .= "netmask '$mask'\n";
+   $content .= "gateway '$net->{gw6}'\n" if $net->{gw6};
}
}
 }
@@ -270,13 +270,13 @@ sub nocloud_network_v2 {
}
if (@addresses) {
$content .= "${i}addresses:\n";
-   $content .= "${i}- $_\n" foreach @addresses;
+   $content .= "${i}- '$_'\n" foreach @addresses;
}
if (defined(my $gw = $ipconfig->{gw})) {
-   $content .= "${i}gateway4: $gw\n";
+   $content .= "${i}gateway4: '$gw'\n";
}
if (defined(my $gw = $ipconfig->{gw6})) {
-   $content .= "${i}gateway6: $gw\n";
+   $content .= "${i}gateway6: '$gw'\n";
}
 
next if $dns_done;
@@ -287,11 +287,11 @@ sub nocloud_network_v2 {
$content .= "${i}nameservers:\n";
if (defined($nameservers) && @$nameservers) {
$content .= "${i}  addresses:\n";
-   $content .= "${i}  - $_\n" foreach @$nameservers;
+   $content .= "${i}  - '$_'\n" foreach @$nameservers;
}
if (defined($searchdomains) && @$searchdomains) {
$content .= "${i}  search:\n";
-   $content .= "${i}  - $_\n" foreach @$searchdomains;
+   $content .= "${i}  - '$_'\n" foreach @$searchdomains;
}
}
 }
@@ -321,7 +321,7 @@ sub nocloud_network {
 
$content .= "${i}- type: physical\n"
  . "${i}  name: eth$id\n"
- . "${i}  mac_address: $mac\n"
+ . "${i}  mac_address: '$mac'\n"
  . "${i}  subnets:\n";
$i .= '  ';
if (defined(my $ip = $ipconfig->{ip})) {
@@ -330,10 +330,10 @@ sub nocloud_network {
} else {
my ($addr, $mask) = split_ip4($ip);
$content .= "${i}- type: static\n"
- . "${i}  address: $addr\n"
- . "${i}  netmask: $mask\n";
+ . "${i}  address: '$addr'\n"
+ . "${i}  netmask: '$mask'\n";
if (defined(my $gw = $ipconfig->{gw})) {
-   $content .= "${i}  gateway: $gw\n";
+   $content .= "${i}  gateway: '$gw'\n";
}
}
}
@@ -345,9 +345,9 @@ sub nocloud_network {
$content .= "${i}- type: dhcp6\n";
} else {
$content .= "${i}- type: static\n"
-  . "${i}  address: $ip\n";
+  . "${i}  address: '$ip'\n";
if (defined(my $gw = $ipconfig->{gw6})) {
-   $content .= "

[pve-devel] [PATCH kernel] add patch to fix ipset memory exhaustion

2019-02-20 Thread David Limbeck
Add a patch from upstream until it is fixed in the Ubuntu 4.15 kernel.

Signed-off-by: David Limbeck 
---
 ...ter-ipset-Fix-wraparound-n-hash-net-types.patch | 318 +
 1 file changed, 318 insertions(+)
 create mode 100644 patches/kernel/0010-netfilter-ipset-Fix-wraparound-n-hash-net-types.patch

diff --git a/patches/kernel/0010-netfilter-ipset-Fix-wraparound-n-hash-net-types.patch b/patches/kernel/0010-netfilter-ipset-Fix-wraparound-n-hash-net-types.patch
new file mode 100644
index 000..282e380
--- /dev/null
+++ b/patches/kernel/0010-netfilter-ipset-Fix-wraparound-n-hash-net-types.patch
@@ -0,0 +1,318 @@
+From  Mon Sep 17 00:00:00 2001
+From: Jozsef Kadlecsik 
+Date: Fri, 12 Jan 2018 11:16:50 +0100
+Subject: [PATCH] netfilter: ipset: Fix wraparound in hash:*net* types
+
+Fix wraparound bug which could lead to memory exhaustion when adding an
+x.x.x.x-255.255.255.255 range to any hash:*net* types.
+
+Fixes Netfilter's bugzilla id #1212, reported by Thomas Schwark.
+
+Fixes: 48596a8ddc46 ("netfilter: ipset: Fix adding an IPv4 range containing more than 2^31 addresses")
+Signed-off-by: Jozsef Kadlecsik 
+Signed-off-by: Pablo Neira Ayuso 
+---
+ net/netfilter/ipset/ip_set_hash_ipportnet.c  | 26 ++---
+ net/netfilter/ipset/ip_set_hash_net.c|  9 ---
+ net/netfilter/ipset/ip_set_hash_netiface.c   |  9 ---
+ net/netfilter/ipset/ip_set_hash_netnet.c | 28 +++---
+ net/netfilter/ipset/ip_set_hash_netport.c| 19 ---
+ net/netfilter/ipset/ip_set_hash_netportnet.c | 35 ++--
+ 6 files changed, 63 insertions(+), 63 deletions(-)
+
+diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
+index 0f164e986bf1..88b83d6d3084 100644
+--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
+@@ -168,7 +168,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+   struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
+   struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+   u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+-  u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
++  u32 ip2_from = 0, ip2_to = 0, ip2;
+   bool with_ports = false;
+   u8 cidr;
+   int ret;
+@@ -269,22 +269,21 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+   ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
+   }
+ 
+-  if (retried)
++  if (retried) {
+   ip = ntohl(h->next.ip);
++  p = ntohs(h->next.port);
++  ip2 = ntohl(h->next.ip2);
++  } else {
++  p = port;
++  ip2 = ip2_from;
++  }
+   for (; ip <= ip_to; ip++) {
+   e.ip = htonl(ip);
+-  p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+- : port;
+   for (; p <= port_to; p++) {
+   e.port = htons(p);
+-  ip2 = retried &&
+-ip == ntohl(h->next.ip) &&
+-p == ntohs(h->next.port)
+-  ? ntohl(h->next.ip2) : ip2_from;
+-  while (ip2 <= ip2_to) {
++  do {
+   e.ip2 = htonl(ip2);
+-  ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+-  &cidr);
++  ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
+   e.cidr = cidr - 1;
+   ret = adtfn(set, &e, &ext, &ext, flags);
+ 
+@@ -292,9 +291,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+   return ret;
+ 
+   ret = 0;
+-  ip2 = ip2_last + 1;
+-  }
++  } while (ip2++ < ip2_to);
++  ip2 = ip2_from;
+   }
++  p = port;
+   }
+   return ret;
+ }
+diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
+index 1c67a1761e45..5449e23af13a 100644
+--- a/net/netfilter/ipset/ip_set_hash_net.c
++++ b/net/netfilter/ipset/ip_set_hash_net.c
+@@ -143,7 +143,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+   ipset_adtfn adtfn = set->variant->adt[adt];
+   struct hash_net4_elem e = { .cidr = HOST_MASK };
+   struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-  u32 ip = 0, ip_to = 0, last;
++  u32 ip = 0, ip_to = 0;
+   int ret;
+ 
+   if (tb[IPSET_ATTR_LINENO])
+@@ -193,16 +193,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+   }

Re: [pve-devel] [PATCH v5 qemu-server 2/3] add QemuMigrateExternal.pm

2019-02-19 Thread David Limbeck

some comments inline.

On 1/29/19 2:20 AM, Alexandre Derumier wrote:

---
  PVE/Makefile   |   1 +
  PVE/QemuMigrateExternal.pm | 872 +
  2 files changed, 873 insertions(+)
  create mode 100644 PVE/QemuMigrateExternal.pm

diff --git a/PVE/Makefile b/PVE/Makefile
index 2c800f6..0494cfb 100644
--- a/PVE/Makefile
+++ b/PVE/Makefile
@@ -1,6 +1,7 @@
  PERLSOURCE =  \
QemuServer.pm   \
QemuMigrate.pm  \
+   QemuMigrateExternal.pm  \
QMPClient.pm\
QemuConfig.pm
  
diff --git a/PVE/QemuMigrateExternal.pm b/PVE/QemuMigrateExternal.pm

new file mode 100644
index 000..ff1f46d
--- /dev/null
+++ b/PVE/QemuMigrateExternal.pm
@@ -0,0 +1,872 @@
+package PVE::QemuMigrateExternal;
+
+use strict;
+use warnings;
+use PVE::AbstractMigrate;
+use IO::File;
+use IPC::Open2;
+use POSIX qw( WNOHANG );
+use PVE::INotify;
+use PVE::Tools;
+use PVE::Cluster;
+use PVE::Storage;
+use PVE::QemuServer;
+use Time::HiRes qw( usleep );
+use PVE::RPCEnvironment;
+use PVE::ReplicationConfig;
+use PVE::ReplicationState;
+use PVE::Replication;
+use Storable qw(dclone);
+
+use base qw(PVE::AbstractMigrate);
+
+sub fork_command_pipe {
+my ($self, $cmd) = @_;
+
+my $reader = IO::File->new();
+my $writer = IO::File->new();
+
+my $orig_pid = $$;
+
+my $cpid;
+
+eval { $cpid = open2($reader, $writer, @$cmd); };
+
+my $err = $@;
+
+# catch exec errors
+if ($orig_pid != $$) {
+   $self->log('err', "can't fork command pipe\n");
+   POSIX::_exit(1);
+   kill('KILL', $$);
+}
+
+die $err if $err;
+
+return { writer => $writer, reader => $reader, pid => $cpid };
+}
+
+sub finish_command_pipe {
+my ($self, $cmdpipe, $timeout) = @_;
+
+my $cpid = $cmdpipe->{pid};
+return if !defined($cpid);
+
+my $writer = $cmdpipe->{writer};
+my $reader = $cmdpipe->{reader};
+
+$writer->close();
+$reader->close();
+
+my $collect_child_process = sub {
+   my $res = waitpid($cpid, WNOHANG);
+   if (defined($res) && ($res == $cpid)) {
+   delete $cmdpipe->{cpid};
+   return 1;
+   } else {
+   return 0;
+   }
+ };
+
+if ($timeout) {
+   for (my $i = 0; $i < $timeout; $i++) {
+   return if &$collect_child_process();
+   sleep(1);
+   }
+}
+
+$self->log('info', "ssh tunnel still running - terminating now with 
SIGTERM\n");
+kill(15, $cpid);
+
+# wait again
+for (my $i = 0; $i < 10; $i++) {
+   return if &$collect_child_process();
+   sleep(1);
+}
+
+$self->log('info', "ssh tunnel still running - terminating now with 
SIGKILL\n");
+kill 9, $cpid;
+sleep 1;
+
+$self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
+   if !&$collect_child_process();
+}
+
+sub read_tunnel {
+my ($self, $tunnel, $timeout) = @_;
+
+$timeout = 60 if !defined($timeout);
+
+my $reader = $tunnel->{reader};
+
+my $output;
+eval {
+   PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
+};
+die "reading from tunnel failed: $@\n" if $@;
+
+chomp $output;
+
+return $output;
+}
+
+sub write_tunnel {
+my ($self, $tunnel, $timeout, $command) = @_;
+
+$timeout = 60 if !defined($timeout);
+
+my $writer = $tunnel->{writer};
+
+eval {
+   PVE::Tools::run_with_timeout($timeout, sub {
+   print $writer "$command\n";
+   $writer->flush();
+   });
+};
+die "writing to tunnel failed: $@\n" if $@;
+
+if ($tunnel->{version} && $tunnel->{version} >= 1) {
+   my $res = eval { $self->read_tunnel($tunnel, 10); };
+   die "no reply to command '$command': $@\n" if $@;
+
+   if ($res eq 'OK') {
+   return;
+   } else {
+   die "tunnel replied '$res' to command '$command'\n";
+   }
+}
+}
+
+sub fork_tunnel {
+my ($self, $tunnel_addr) = @_;
+
+my @localtunnelinfo = defined($tunnel_addr) ? ('-L' , $tunnel_addr ) : ();
+
+my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel' ];
+
+my $tunnel = $self->fork_command_pipe($cmd);
+
+eval {
+   my $helo = $self->read_tunnel($tunnel, 60);
+   die "no reply\n" if !$helo;
+   die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
+   die "got strange reply from mtunnel ('$helo')\n"
+   if $helo !~ m/^tunnel online$/;
+};
+my $err = $@;
+
+eval {
+   my $ver = $self->read_tunnel($tunnel, 10);
+   if ($ver =~ /^ver (\d+)$/) {
+   $tunnel->{version} = $1;
+   $self->log('info', "ssh tunnel $ver\n");
+   } else {
+   $err = "received invalid tunnel version string '$ver'\n" if !$err;
+   }
+};
+
+if ($err) {
+   $self->finish_command_pipe($tunnel);
+   die "can't open migration tunnel - $err";
+}
+

[pve-devel] [PATCH docs] change links from master/mimic to luminous

2019-02-13 Thread David Limbeck
Signed-off-by: David Limbeck 
---
 pveceph.adoc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pveceph.adoc b/pveceph.adoc
index c90a92e..3e35bb0 100644
--- a/pveceph.adoc
+++ b/pveceph.adoc
@@ -58,7 +58,7 @@ and VMs on the same node is possible.
 To simplify management, we provide 'pveceph' - a tool to install and
 manage {ceph} services on {pve} nodes.
 
-.Ceph consists of a couple of Daemons footnote:[Ceph intro http://docs.ceph.com/docs/master/start/intro/], for use as a RBD storage:
+.Ceph consists of a couple of Daemons footnote:[Ceph intro http://docs.ceph.com/docs/luminous/start/intro/], for use as a RBD storage:
 - Ceph Monitor (ceph-mon)
 - Ceph Manager (ceph-mgr)
 - Ceph OSD (ceph-osd; Object Storage Daemon)
@@ -470,7 +470,7 @@ Since Luminous (12.2.x) you can also have multiple active metadata servers
 running, but this is normally only useful for a high count on parallel clients,
 as else the `MDS` seldom is the bottleneck. If you want to set this up please
 refer to the ceph documentation. footnote:[Configuring multiple active MDS
-daemons http://docs.ceph.com/docs/mimic/cephfs/multimds/]
+daemons http://docs.ceph.com/docs/luminous/cephfs/multimds/]
 
 [[pveceph_fs_create]]
 Create a CephFS
@@ -502,7 +502,7 @@ This creates a CephFS named `'cephfs'' using a pool for its data named
 Check the xref:pve_ceph_pools[{pve} managed Ceph pool chapter] or visit the
 Ceph documentation for more information regarding a fitting placement group
 number (`pg_num`) for your setup footnote:[Ceph Placement Groups
-http://docs.ceph.com/docs/mimic/rados/operations/placement-groups/].
+http://docs.ceph.com/docs/luminous/rados/operations/placement-groups/].
 Additionally, the `'--add-storage'' parameter will add the CephFS to the {pve}
 storage configuration after it was created successfully.
 
-- 
2.11.0




[pve-devel] [PATCH v2 qemu-server] cloud-init: allow custom network/user data files via snippets

2019-02-07 Thread David Limbeck
Adds the 'cicustom' option to specify network and/or user data files as a
property string. Its parameters are files on a snippets storage
(e.g. local:snippets/network.yaml). If one or both are specified, they are
used instead of the respective generated configuration.
This allows the use of completely custom configurations and is also a
possible solution for bug #2068 by specifying a custom user file that
contains package_upgrade: false.

Tested with Ubuntu 18.10 and cloud-init 18.4.7
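
A hypothetical usage sketch (storage and file names are illustrative; the
files must live on a storage with the 'snippets' content type):

    qm set 9000 --cicustom "user=local:snippets/user.yaml,network=local:snippets/network.yaml"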

Signed-off-by: David Limbeck 
---
v2:
 - Added meta parameter as well
 - removed _data from parameters
 - moved the reading of the files into a separate sub
 - renamed {network,user}_data_file to {network,user}_volid
 - changed bug number from 2038 to 2068 in commit message

 PVE/API2/Qemu.pm|  1 +
 PVE/QemuServer.pm   | 31 +
 PVE/QemuServer/Cloudinit.pm | 65 +
 3 files changed, 86 insertions(+), 11 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 22f9f6a..49aaa48 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -292,6 +292,7 @@ my $diskoptions = {
 };
 
 my $cloudinitoptions = {
+cicustom => 1,
 cipassword => 1,
 citype => 1,
 ciuser => 1,
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4a903a6..1f2a46b 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -623,6 +623,31 @@ EODESCR
 },
 };
 
+my $cicustom_fmt = {
+meta => {
+   type => 'string',
+   optional => 1,
+   description => 'Specify a custom file containing all meta data passed to the VM via cloud-init. This is provider specific meaning configdrive2 and nocloud differ.',
+   format => 'pve-volume-id',
+   format_description => 'volume',
+},
+network => {
+   type => 'string',
+   optional => 1,
+   description => 'Specify a custom file containing all network data passed to the VM via cloud-init.',
+   format => 'pve-volume-id',
+   format_description => 'volume',
+},
+user => {
+   type => 'string',
+   optional => 1,
+   description => 'Specify a custom file containing all user data passed to the VM via cloud-init.',
+   format => 'pve-volume-id',
+   format_description => 'volume',
+},
+};
+PVE::JSONSchema::register_format('pve-qm-cicustom', $cicustom_fmt);
+
 my $confdesc_cloudinit = {
 citype => {
optional => 1,
@@ -640,6 +665,12 @@ my $confdesc_cloudinit = {
type => 'string',
description => 'cloud-init: Password to assign the user. Using this is generally not recommended. Use ssh keys instead. Also note that older cloud-init versions do not support hashed passwords.',
 },
+cicustom => {
+   optional => 1,
+   type => 'string',
+   description => 'cloud-init: Specify custom files to replace the automatically generated ones at start.',
+   format => 'pve-qm-cicustom',
+},
 searchdomain => {
optional => 1,
type => 'string',
diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
index 5be820c..abb2544 100644
--- a/PVE/QemuServer/Cloudinit.pm
+++ b/PVE/QemuServer/Cloudinit.pm
@@ -208,14 +208,16 @@ EOF
 sub generate_configdrive2 {
 my ($conf, $vmid, $drive, $volname, $storeid) = @_;
 
-my $user_data = cloudinit_userdata($conf, $vmid);
-my $network_data = configdrive2_network($conf);
+my ($user_data, $network_data, $meta_data) = get_custom_cloudinit_files($conf);
+$user_data = cloudinit_userdata($conf, $vmid) if !defined($user_data);
+$network_data = configdrive2_network($conf) if !defined($network_data);
 
-my $digest_data = $user_data . $network_data;
-my $uuid_str = Digest::SHA::sha1_hex($digest_data);
-
-my $meta_data = configdrive2_metadata($uuid_str);
+if (!defined($meta_data)) {
+   my $digest_data = $user_data . $network_data;
+   my $uuid_str = Digest::SHA::sha1_hex($digest_data);
 
+   $meta_data = configdrive2_metadata($uuid_str);
+}
 my $files = {
'/openstack/latest/user_data' => $user_data,
'/openstack/content/' => $network_data,
@@ -378,13 +380,16 @@ sub nocloud_metadata {
 sub generate_nocloud {
 my ($conf, $vmid, $drive, $volname, $storeid) = @_;
 
-my $user_data = cloudinit_userdata($conf, $vmid);
-my $network_data = nocloud_network($conf);
+my ($user_data, $network_data, $meta_data) = get_custom_cloudinit_files($conf);
+$user_data = cloudinit_userdata($conf, $vmid) if !defined($user_data);
+$network_data = nocloud_network($conf) if !defined($network_data);
 
-my $digest_data = $us

Re: [pve-devel] [PATCH qemu-server] cloud-init: allow custom network/user data files via snippets

2019-02-07 Thread David Limbeck

On 2/7/19 10:55 AM, Thomas Lamprecht wrote:

On 2/7/19 at 10:38 AM, David Limbeck wrote:> On 2/7/19 10:32 AM, Thomas Lamprecht wrote:

On 2/7/19 at 10:20 AM, David Limbeck wrote:> On 2/7/19 8:23 AM, Thomas Lamprecht wrote:

On 2/6/19 at 1:35 PM, David Limbeck wrote:

Adds the 'cicustom' option to specify either or both network_data and
user_data options as property strings. Their parameters are files
in a snippets storage (e.g. local:snippets/network.yaml). If one or both
are specified they are used instead of their respective generated
configuration.
This allows the use of completely custom configurations and is also a
possible solution for bug #2038 by specifying a custom user_data file
that contains package_upgrade: false.

Wrong number, it's 2068 not 2038, will fix this as well in v2.

Tested with Ubuntu 18.10

Signed-off-by: David Limbeck 
---
PVE/API2/Qemu.pm|  1 +
PVE/QemuServer.pm   | 24 
PVE/QemuServer/Cloudinit.pm | 37 +
3 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 22f9f6a..49aaa48 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -292,6 +292,7 @@ my $diskoptions = {
};
  my $cloudinitoptions = {
+cicustom => 1,
cipassword => 1,
citype => 1,
ciuser => 1,
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4a903a6..7c39b97 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -623,6 +623,24 @@ EODESCR
},
};
+my $cicustom_fmt = {
+network_data => {
+type => 'string',
+optional => 1,
+description => 'Specify a custom file containing all network data passed 
to the VM via cloud-init.',
+format => 'pve-volume-id',
+format_description => 'volume',
+},
+user_data => {
+type => 'string',
+optional => 1,
+description => 'Specify a custom file containing all user data passed to 
the VM via cloud-init.',
+format => 'pve-volume-id',
+format_description => 'volume',
+},
+};

Maybe rename it to networkdata, userdata and metadata (v2)? Or keep it 
network_data, user_data, meta_data(v2)?

or drop data postfix? we're already in the cicustom property here, so a:

cicustom: network=volid,user=volid,meta=volid

could be enough in addition to the properties description...


+PVE::JSONSchema::register_format('pve-qm-cicustom', $cicustom_fmt);
+
my $confdesc_cloudinit = {
citype => {
optional => 1,
@@ -640,6 +658,12 @@ my $confdesc_cloudinit = {
type => 'string',
description => 'cloud-init: Password to assign the user. Using this is 
generally not recommended. Use ssh keys instead. Also note that older cloud-init 
versions do not support hashed passwords.',
},
+cicustom => {
+optional => 1,
+type => 'string',
+description => 'cloud-init: Specify custom files to replace the 
automatically generated ones at start.',

"to replace and enhance" ? Could it make sense to merge both?

This would require a yaml parser and we would have to make sure indentation is 
compatible.

We already use CPAN::Meta::YAML from perl-modules in pve-common, so non-issue.
A basic syntax check may also be nice for a user, may allow fixing such things
earlier.


It's probably a lot of work for little to no benefit.

with the parser available, the work amount depends on the capability of ci
(which I do not have fully in mind, atm, sorry), i.e., do you have many
interdependent options in different subtrees (YAML/JSON objects), or is it mostly
enough to replace things at the highest level, e.g., if internally represented as
a hash just overwrite the values of everything below if the keys are the same,
because that would be easy.
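
A minimal Perl sketch of that top-level overwrite, assuming both documents
parse to hashes (the YAML strings here are illustrative):

    use strict;
    use warnings;
    use CPAN::Meta::YAML;

    my $generated_yaml = "package_upgrade: true\nusers:\n- default\n";
    my $custom_yaml    = "package_upgrade: false\n";

    my $generated = CPAN::Meta::YAML->read_string($generated_yaml)->[0];
    my $custom    = CPAN::Meta::YAML->read_string($custom_yaml)->[0];
    $generated->{$_} = $custom->{$_} for keys %$custom;   # custom wins per top-level key
    print CPAN::Meta::YAML->new($generated)->write_string();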


There's not that little nesting and at the moment overwriting files allows the
use of network config v2 for example. In addition people can set netplan
specific configs like 'on-link: true' to get routes working (don't work in
network config v1 on ubuntu) for gateways not in the subnet. Of course, with
your suggestion of adding an additional parameter to enable/disable merging it
might be possible. I will look into it.

OK, thanks for providing a bit of background! So, great if you look into it,
but with the additional parameter we can move forward with the simpler overwrite
approach for now, and add the merge stuff later, if it does seem reasonable then.
Maybe it's even good to wait a bit and do it the simple way for now, so we see if
users even want this or if it's not really useful/needed anyway.
I'll go ahead and work on a v2 then with your suggestions but without 
the merging for now.

Maybe an additional o

Re: [pve-devel] [PATCH qemu-server] cloud-init: allow custom network/user data files via snippets

2019-02-07 Thread David Limbeck

On 2/7/19 10:32 AM, Thomas Lamprecht wrote:

On 2/7/19 at 10:20 AM, David Limbeck wrote:> On 2/7/19 8:23 AM, Thomas Lamprecht wrote:

On 2/6/19 at 1:35 PM, David Limbeck wrote:

Adds the 'cicustom' option to specify either or both network_data and
user_data options as property strings. Their parameters are files
in a snippets storage (e.g. local:snippets/network.yaml). If one or both
are specified they are used instead of their respective generated
configuration.
This allows the use of completely custom configurations and is also a
possible solution for bug #2038 by specifying a custom user_data file
that contains package_upgrade: false.

Wrong number, it's 2068 not 2038, will fix this as well in v2.


Tested with Ubuntu 18.10

Signed-off-by: David Limbeck 
---
   PVE/API2/Qemu.pm|  1 +
   PVE/QemuServer.pm   | 24 
   PVE/QemuServer/Cloudinit.pm | 37 +
   3 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 22f9f6a..49aaa48 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -292,6 +292,7 @@ my $diskoptions = {
   };
 my $cloudinitoptions = {
+cicustom => 1,
   cipassword => 1,
   citype => 1,
   ciuser => 1,
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4a903a6..7c39b97 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -623,6 +623,24 @@ EODESCR
   },
   };
   +my $cicustom_fmt = {
+network_data => {
+type => 'string',
+optional => 1,
+description => 'Specify a custom file containing all network data passed 
to the VM via cloud-init.',
+format => 'pve-volume-id',
+format_description => 'volume',
+},
+user_data => {
+type => 'string',
+optional => 1,
+description => 'Specify a custom file containing all user data passed to 
the VM via cloud-init.',
+format => 'pve-volume-id',
+format_description => 'volume',
+},
+};

Maybe rename it to networkdata, userdata and metadata (v2)? Or keep it 
network_data, user_data, meta_data(v2)?

+PVE::JSONSchema::register_format('pve-qm-cicustom', $cicustom_fmt);
+
   my $confdesc_cloudinit = {
   citype => {
   optional => 1,
@@ -640,6 +658,12 @@ my $confdesc_cloudinit = {
   type => 'string',
   description => 'cloud-init: Password to assign the user. Using this is 
generally not recommended. Use ssh keys instead. Also note that older cloud-init 
versions do not support hashed passwords.',
   },
+cicustom => {
+optional => 1,
+type => 'string',
+description => 'cloud-init: Specify custom files to replace the 
automatically generated ones at start.',

"to replace and enhance" ? Could it make sense to merge both?

This would require a yaml parser and we would have to make sure indentation is 
compatible.

We already use CPAN::Meta::YAML from perl-modules in pve-common, so non-issue.
A basic syntax check may also be nice for a user, may allow fixing such things
earlier.


It's probably a lot of work for little to no benefit.

with the parser available, the work amount depends on the capability of ci
(which I do not have fully in mind, atm, sorry), i.e., do you have many
interdependent options in different subtrees (YAML/JSON objects), or is it mostly
enough to replace things at the highest level, e.g., if internally represented as
a hash just overwrite the values of everything below if the keys are the same,
because that would be easy.

There's not that little nesting and at the moment overwriting files allows the
use of network config v2 for example. In addition people can set netplan
specific configs like 'on-link: true' to get routes working (don't work in
network config v1 on ubuntu) for gateways not in the subnet. Of course, with
your suggestion of adding an additional parameter to enable/disable merging it
might be possible. I will look into it.

Maybe an additional option ('qm generate_cloud_config' or so) would make more 
sense that generates the files based on our configuration and people can then 
use it as a base or in a pre-start hook.

besides the obvious blocker of additional underscores in (sub) commands ( ;-) ),
this could be nice in general as we basically can get this for free... I'd
really use an alternative CLI syntax, though, maybe:

qm cloudinit config VMID


+format => 'pve-qm-cicustom',
+},
   searchdomain => {
   optional => 1,
   type => 'string',
diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
index 5be820c..9f36744 100644
--- a/PVE/QemuServer/Cloudinit.pm
+++ b/PVE/QemuServer/Cloudinit.pm
@@ -208,8 +208,9 @@ EOF
   sub generate_configdri

Re: [pve-devel] [PATCH qemu-server] cloud-init: allow custom network/user data files via snippets

2019-02-07 Thread David Limbeck

On 2/7/19 8:23 AM, Thomas Lamprecht wrote:

On 2/6/19 at 1:35 PM, David Limbeck wrote:

Adds the 'cicustom' option to specify either or both network_data and
user_data options as property strings. Their parameters are files
in a snippets storage (e.g. local:snippets/network.yaml). If one or both
are specified they are used instead of their respective generated
configuration.
This allows the use of completely custom configurations and is also a
possible solution for bug #2038 by specifying a custom user_data file
that contains package_upgrade: false.

Tested with Ubuntu 18.10

Signed-off-by: David Limbeck 
---
  PVE/API2/Qemu.pm|  1 +
  PVE/QemuServer.pm   | 24 
  PVE/QemuServer/Cloudinit.pm | 37 +
  3 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 22f9f6a..49aaa48 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -292,6 +292,7 @@ my $diskoptions = {
  };
  
  my $cloudinitoptions = {

+cicustom => 1,
  cipassword => 1,
  citype => 1,
  ciuser => 1,
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4a903a6..7c39b97 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -623,6 +623,24 @@ EODESCR
  },
  };
  
+my $cicustom_fmt = {

+network_data => {
+   type => 'string',
+   optional => 1,
+   description => 'Specify a custom file containing all network data 
passed to the VM via cloud-init.',
+   format => 'pve-volume-id',
+   format_description => 'volume',
+},
+user_data => {
+   type => 'string',
+   optional => 1,
+   description => 'Specify a custom file containing all user data passed 
to the VM via cloud-init.',
+   format => 'pve-volume-id',
+   format_description => 'volume',
+},
+};
Maybe rename it to networkdata, userdata and metadata (v2)? Or keep it 
network_data, user_data, meta_data(v2)?

+PVE::JSONSchema::register_format('pve-qm-cicustom', $cicustom_fmt);
+
  my $confdesc_cloudinit = {
  citype => {
optional => 1,
@@ -640,6 +658,12 @@ my $confdesc_cloudinit = {
type => 'string',
description => 'cloud-init: Password to assign the user. Using this is 
generally not recommended. Use ssh keys instead. Also note that older cloud-init 
versions do not support hashed passwords.',
  },
+cicustom => {
+   optional => 1,
+   type => 'string',
+   description => 'cloud-init: Specify custom files to replace the 
automatically generated ones at start.',

"to replace and enhance" ? Could it make sense to merge both?
This would require a yaml parser and we would have to make sure 
indentation is compatible. It's probably a lot of work for little to no 
benefit. Maybe an additional option ('qm generate_cloud_config' or so) 
would make more sense that generates the files based on our 
configuration and people can then use it as a base or in a pre-start hook.



+   format => 'pve-qm-cicustom',
+},
  searchdomain => {
optional => 1,
type => 'string',
diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
index 5be820c..9f36744 100644
--- a/PVE/QemuServer/Cloudinit.pm
+++ b/PVE/QemuServer/Cloudinit.pm
@@ -208,8 +208,9 @@ EOF
  sub generate_configdrive2 {
  my ($conf, $vmid, $drive, $volname, $storeid) = @_;
  
-my $user_data = cloudinit_userdata($conf, $vmid);

-my $network_data = configdrive2_network($conf);
+my ($user_data, $network_data) = get_custom_cloudinit_files($conf);
+$user_data = cloudinit_userdata($conf, $vmid) if !defined($user_data);
+$network_data = configdrive2_network($conf) if !defined($network_data);
  
  my $digest_data = $user_data . $network_data;

  my $uuid_str = Digest::SHA::sha1_hex($digest_data);
@@ -378,8 +379,9 @@ sub nocloud_metadata {
  sub generate_nocloud {
  my ($conf, $vmid, $drive, $volname, $storeid) = @_;
  
-my $user_data = cloudinit_userdata($conf, $vmid);

-my $network_data = nocloud_network($conf);
+my ($user_data, $network_data) = get_custom_cloudinit_files($conf);
+$user_data = cloudinit_userdata($conf, $vmid) if !defined($user_data);
+$network_data = nocloud_network($conf) if !defined($network_data);

why not extend cloudinit_userdata and nocloud_network so that they merge
an external script into the generated data? if you pull out the repeating
parts (see below) this could be relatively straightforward and a nicer
method interface; the cloudinit_userdata and nocloud_network get all info
needed ($conf) as parameter, so this could be a bit more transparent,
a hunk above it looks the same.

Merging may not be completely easy

[pve-devel] [PATCH qemu-server] cloud-init: allow custom network/user data files via snippets

2019-02-06 Thread David Limbeck
Adds the 'cicustom' option to specify either or both network_data and
user_data options as property strings. Their parameters are files
in a snippets storage (e.g. local:snippets/network.yaml). If one or both
are specified they are used instead of their respective generated
configuration.
This allows the use of completely custom configurations and is also a
possible solution for bug #2038 by specifying a custom user_data file
that contains package_upgrade: false.

Tested with Ubuntu 18.10

Signed-off-by: David Limbeck 
---
 PVE/API2/Qemu.pm|  1 +
 PVE/QemuServer.pm   | 24 
 PVE/QemuServer/Cloudinit.pm | 37 +
 3 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 22f9f6a..49aaa48 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -292,6 +292,7 @@ my $diskoptions = {
 };
 
 my $cloudinitoptions = {
+cicustom => 1,
 cipassword => 1,
 citype => 1,
 ciuser => 1,
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4a903a6..7c39b97 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -623,6 +623,24 @@ EODESCR
 },
 };
 
+my $cicustom_fmt = {
+network_data => {
+   type => 'string',
+   optional => 1,
+   description => 'Specify a custom file containing all network data passed to the VM via cloud-init.',
+   format => 'pve-volume-id',
+   format_description => 'volume',
+},
+user_data => {
+   type => 'string',
+   optional => 1,
+   description => 'Specify a custom file containing all user data passed to the VM via cloud-init.',
+   format => 'pve-volume-id',
+   format_description => 'volume',
+},
+};
+PVE::JSONSchema::register_format('pve-qm-cicustom', $cicustom_fmt);
+
 my $confdesc_cloudinit = {
 citype => {
optional => 1,
@@ -640,6 +658,12 @@ my $confdesc_cloudinit = {
type => 'string',
description => 'cloud-init: Password to assign the user. Using this is generally not recommended. Use ssh keys instead. Also note that older cloud-init versions do not support hashed passwords.',
 },
+cicustom => {
+   optional => 1,
+   type => 'string',
+   description => 'cloud-init: Specify custom files to replace the automatically generated ones at start.',
+   format => 'pve-qm-cicustom',
+},
 searchdomain => {
optional => 1,
type => 'string',
diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
index 5be820c..9f36744 100644
--- a/PVE/QemuServer/Cloudinit.pm
+++ b/PVE/QemuServer/Cloudinit.pm
@@ -208,8 +208,9 @@ EOF
 sub generate_configdrive2 {
 my ($conf, $vmid, $drive, $volname, $storeid) = @_;
 
-my $user_data = cloudinit_userdata($conf, $vmid);
-my $network_data = configdrive2_network($conf);
+my ($user_data, $network_data) = get_custom_cloudinit_files($conf);
+$user_data = cloudinit_userdata($conf, $vmid) if !defined($user_data);
+$network_data = configdrive2_network($conf) if !defined($network_data);
 
 my $digest_data = $user_data . $network_data;
 my $uuid_str = Digest::SHA::sha1_hex($digest_data);
@@ -378,8 +379,9 @@ sub nocloud_metadata {
 sub generate_nocloud {
 my ($conf, $vmid, $drive, $volname, $storeid) = @_;
 
-my $user_data = cloudinit_userdata($conf, $vmid);
-my $network_data = nocloud_network($conf);
+my ($user_data, $network_data) = get_custom_cloudinit_files($conf);
+$user_data = cloudinit_userdata($conf, $vmid) if !defined($user_data);
+$network_data = nocloud_network($conf) if !defined($network_data);
 
 my $digest_data = $user_data . $network_data;
 my $uuid_str = Digest::SHA::sha1_hex($digest_data);
@@ -394,6 +396,33 @@ sub generate_nocloud {
 commit_cloudinit_disk($conf, $vmid, $drive, $volname, $storeid, $files, 'cidata');
 }
 
+sub get_custom_cloudinit_files {
+my ($conf) = @_;
+
+my $cicustom = $conf->{cicustom};
+my $files = $cicustom ? PVE::JSONSchema::parse_property_string('pve-qm-cicustom', $cicustom) : {};
+
+my $network_data_file = $files->{network_data};
+my $user_data_file = $files->{user_data};
+
+my $storage_conf = PVE::Storage::config();
+
+my $network_data;
+if ($network_data_file) {
+   my ($full_path, undef, $type) = PVE::Storage::path($storage_conf, $network_data_file);
+   die "$network_data_file is not in the snippets directory\n" if $type ne 'snippets';
+   $network_data = PVE::Tools::file_get_contents($full_path);
+}
+my $user_data;
+if ($user_data_file) {
+   my ($full_path, undef, $type) = PVE::Storage::path($storage_conf, 
$user

Re: [pve-devel] [PATCH v5 qemu-server 0/3] online vm migration to external cluster

2019-02-05 Thread David Limbeck

Not yet, sorry. Will get to it as soon as possible.

On 2/5/19 6:21 AM, Alexandre DERUMIER wrote:

Hi,

any comments on the v5?


- Original Message -
From: "Alexandre Derumier" 
To: "pve-devel" 
Cc: "Alexandre Derumier" 
Sent: Tuesday, January 29, 2019 02:20:37
Subject: [PATCH v5 qemu-server 0/3] online vm migration to external cluster

This adds support for migrating a VM online to a different external cluster.
(This is a rework of a previous patch series sent 2 years ago.)


qm migrate_external <vmid> <target> [--targetstorage otherstorage] [--targetvmid] [--targetkey]
--net[n] [,bridge=<bridge>] [,firewall=<1|0>] [,link_down=<1|0>] [,rate=<mbps>] [,tag=<vlanid>] [,trunks=<vlanid[;vlanid...]>]


- node is an ip or fqdn host from another cluster.


OPTIONS:
- targetstorage : allows choosing a different storage name than the source.

- targetvmid : allows choosing a specific vmid. (default is auto/first available)

- targetkey : allows choosing a specific ssh key located in /etc/pve/priv/migrate_external/

- net[n] : allow to override net config




Migration is done through an ssh tunnel, and one private ssh key needs to be created for each targetremotenode in:
/etc/pve/priv/migrate_external/id_rsa_mytargetnode

The source VM is currently not deleted for safety; it is just stopped and the
migrate lock is kept to avoid restarting it.
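
A hypothetical invocation (host name, storage and vmids are illustrative),
assuming the matching key exists under /etc/pve/priv/migrate_external/:

    qm migrate_external 100 node2.example.com --targetstorage local-lvm --targetvmid 200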


Changelog v5:
- api : add experimental in desc
- api : add -net[n] instead --targetbridge. (Cleanme: I'm allowing all options 
from $netdesc, but filtering them later, to avoid model/mac change)
- api : don't use noerr for ip/fqdn check.
- QemuMigrateExternal.pm : bugfix : dclone $conf, to avoid writing the source VM config if we change the net config
- QemuMigrateExternal.pm : use net[n] instead targetbridge, skip 
model/mac/queues.


Changelog v4:
- fix targetremotenode : can be fqdn or ip address
- add optional targetkey && targetvmid
- migrate : add a new QemuMigrationExternal.pm with all migration code + remove some unused parts, like sync_disk and zfs replication.
- migrate : force ssh tunnel for migration
- vm_start : put code in patch3 + remove the old, no longer used migration_type=external option


Alexandre Derumier (3):
api2 : add migrate_vm_external
add QemuMigrateExternal.pm
qemu : vm_start : add external_migration

PVE/API2/Qemu.pm | 106 +-
PVE/CLI/qm.pm | 2 +
PVE/Makefile | 1 +
PVE/QemuMigrateExternal.pm | 872 +
PVE/QemuServer.pm | 32 +-
5 files changed, 1007 insertions(+), 6 deletions(-)
create mode 100644 PVE/QemuMigrateExternal.pm





Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

2019-02-04 Thread David Limbeck

Hi,

Did you test it without modifying the patch or only with your syslog change?

On 1/30/19 2:31 PM, Alexandre DERUMIER wrote:

Hi,

I have done some tests, and can't reproduce it.

I wonder if it could be related to syslog; the only thing I changed is
dropping the pve-firewall log in rsyslog.

  218 // also log to syslog
  219
  220 vsyslog(loglevel, fmt, ap2);


It's quite possible that /dev/log was overloaded with the rate and rsyslog was
not able to spool it. (I also forward logs to a central syslog with tcp, could
be related.)
I know that if the /dev/log buffer is full, syslog calls are blocking.

I don't know how vsyslog() works in this case.

Could it be possible to have an option to disable syslog logging? (Or maybe
add an option to use udp to send mail.)


Also, I have noticed that we don't have timestamps in pve-firewall.log for
conntrack logs. And maybe we could log them to a separate file? (Not sure how
the GUI will react if we need to filter a VM's log, with the rate of new logs
coming in.)



- Original Message -
From: "aderumier" 
To: "David Limbeck" 
Cc: "pve-devel" 
Sent: Saturday, January 26, 2019 08:07:43
Subject: Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

Thanks !

I'll test it Monday.

- Original Message -
From: "David Limbeck" 
To: "aderumier" , "Wolfgang Bumiller" 
Cc: "pve-devel" 
Sent: Friday, January 25, 2019 14:31:30
Subject: Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

Hi,

A new commit was pushed that enables building of debug symbols for
pve-firewall. Please build and install it again with that commit
included and run it again.

This might help narrow it down some more.

On 1/14/19 11:42 AM, Alexandre DERUMIER wrote:

Hi,

I was able to reproduce it after 1 hour.

I enabled debug to run it in the foreground.

This time, the process had not crashed, but was hanging.

The output was simply hanging, with no more writes to /var/log/pve-firewall.log.

Also, memory usage was pretty huge, and still increasing during the hang (not
sure if it's related to debug mode).


ps -aux|grep logger
root 19434 26.2 0.4 1770688 1679136 pts/1 Rl+ 10:44 11:27 ./pvefw-logger

after some minutes

root 19434 24.8 0.8 3625024 3533496 pts/1 Sl+ 10:44 12:20 ./pvefw-logger


I was able to do a coredump with gdb
http://odisoweb1.odiso.net/core.19434.gz

Hope it helps.


- Original Message -
From: "Wolfgang Bumiller" 
To: "aderumier" 
Cc: "David Limbeck" , "pve-devel" 
Sent: Monday, January 14, 2019 08:01:54
Subject: Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

On Fri, Jan 11, 2019 at 06:05:36PM +0100, Alexandre DERUMIER wrote:

Do you have any additional information as to why it stopped?

no sorry.


Maybe we could increase the buffer size via nfnl_set_rcv_buffer_size by
default and continue to ignore ENOBUFS?

I'll try next week. Maybe doing an strace on the process to get some clues?
(It's crashing after 30min-1h.)

A coredump should work and produce less noise, perhaps?









[pve-devel] [PATCH qemu-server] fix #2068: allow disabling of package_upgrade for cloud-init

2019-01-29 Thread David Limbeck
The package_upgrade default is still 'true'. Adds a new generic option
'cioptions' that takes a property string of key=value pairs and is easy to
extend. For now only 'package_upgrade=<true|false>' is supported. Registers a
new format 'pve-qm-cioptions' for parsing in different files.

Tested with Ubuntu 18.10.
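
A hypothetical usage sketch of the new option:

    qm set 100 --cioptions package_upgrade=false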

Signed-off-by: David Limbeck 
---
 PVE/API2/Qemu.pm|  1 +
 PVE/QemuServer.pm   | 18 ++
 PVE/QemuServer/Cloudinit.pm |  4 +++-
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 148ea1a..7fee88e 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -295,6 +295,7 @@ my $cloudinitoptions = {
 cipassword => 1,
 citype => 1,
 ciuser => 1,
+cioptions => 1,
 nameserver => 1,
 searchdomain => 1,
 sshkeys => 1,
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index bc3bb1d..c5e2309 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -616,6 +616,18 @@ EODESCR
 },
 };
 
+my $cioptions_fmt = {
+package_upgrade => {
+   type => 'string',
+   optional => 1,
+   description => 'cloud-init: Enable/Disable package upgrade on boot.',
+   default => 'true',
+   enum => [qw(true false)]
+}
+};
+
+PVE::JSONSchema::register_format('pve-qm-cioptions', $cioptions_fmt);
+
 my $confdesc_cloudinit = {
 citype => {
optional => 1,
@@ -633,6 +645,12 @@ my $confdesc_cloudinit = {
type => 'string',
description => 'cloud-init: Password to assign the user. Using this is generally not recommended. Use ssh keys instead. Also note that older cloud-init versions do not support hashed passwords.',
 },
+cioptions => {
+   optional => 1,
+   type => 'string',
+   format => 'pve-qm-cioptions',
+   description => 'cloud-init: specify other options that are supported as a list of key=value pairs (e.g. package_upgrade=false,...).'
+},
 searchdomain => {
optional => 1,
type => 'string',
diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
index 5be820c..5f777a1 100644
--- a/PVE/QemuServer/Cloudinit.pm
+++ b/PVE/QemuServer/Cloudinit.pm
@@ -132,7 +132,9 @@ sub cloudinit_userdata {
$content .= "  - default\n";
 }
 
-$content .= "package_upgrade: true\n";
+my $cioptions = $conf->{cioptions} ? PVE::JSONSchema::parse_property_string('pve-qm-cioptions', $conf->{cioptions}) : {};
+my $package_upgrade = $cioptions->{package_upgrade} // 'true'; # default
+$content .= "package_upgrade: $package_upgrade\n";
 
 return $content;
 }
-- 
2.11.0




Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

2019-01-25 Thread David Limbeck

Hi,

A new commit was pushed that enables building of debug symbols for 
pve-firewall. Please build and install it again with that commit 
included and run it again.


This might help narrow it down some more.

On 1/14/19 11:42 AM, Alexandre DERUMIER wrote:

Hi,

I was able to reproduce it after 1 hour.

I enabled debug to run it in the foreground.

This time, the process had not crashed, but was hanging.

The output was simply hanging, with no more writes to /var/log/pve-firewall.log.

Also, memory usage was pretty huge, and still increasing during the hang (not
sure if it's related to debug mode).


ps -aux|grep logger
root 19434 26.2  0.4 1770688 1679136 pts/1 Rl+  10:44  11:27 ./pvefw-logger

after some minutes

root 19434 24.8  0.8 3625024 3533496 pts/1 Sl+  10:44  12:20 ./pvefw-logger


I was able to do a coredump with gdb
http://odisoweb1.odiso.net/core.19434.gz

Hope it helps.


- Original Message -
From: "Wolfgang Bumiller" 
To: "aderumier" 
Cc: "David Limbeck" , "pve-devel" 
Sent: Monday, January 14, 2019 08:01:54
Subject: Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

On Fri, Jan 11, 2019 at 06:05:36PM +0100, Alexandre DERUMIER wrote:

Do you have any additional information as to why it stopped?

no sorry.


Maybe we could increase the buffer size via nfnl_set_rcv_buffer_size by
default and continue to ignore ENOBUFS?

I'll try next week. Maybe doing an strace on the process to get some clues?
(It's crashing after 30min-1h.)

A coredump should work and produce less noise, perhaps?






Re: [pve-devel] [PATCH v4 qemu-server 1/3] api2 : add migrate_vm_external

2019-01-25 Thread David Limbeck

The patches look good to me, some non-blockers inline.

On 1/8/19 2:00 AM, Alexandre Derumier wrote:

qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] [--targetbridge otherbridge] [--targetvmid] [--targetkey]


targetvmid is optional; if not specified, the next available vmid will be used.

targetkey is optional; if not specified, the ssh private key should be
/etc/pve/priv/external_migration/id_rsa_targetremotenode_fqdn_or_ip


---
  PVE/API2/Qemu.pm | 96 
  PVE/CLI/qm.pm|  2 ++
  2 files changed, 98 insertions(+)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index b55fd13..b74f111 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -21,6 +21,7 @@ use PVE::GuestHelpers;
  use PVE::QemuConfig;
  use PVE::QemuServer;
  use PVE::QemuMigrate;
+use PVE::QemuMigrateExternal;
  use PVE::RPCEnvironment;
  use PVE::AccessControl;
  use PVE::INotify;
@@ -3164,6 +3165,101 @@ __PACKAGE__->register_method({
  }});
  
  __PACKAGE__->register_method({

+name => 'migrate_vm_external',
+path => '{vmid}/migrate_external',
+method => 'POST',
+protected => 1,
+proxyto => 'node',
+description => "Migrate virtual machine to an external cluster. Creates a new 
migration task.",
The description should perhaps contain something like 'Experimental! Use 
at your own risk'.

+permissions => {
+   check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+},
+parameters => {
+   additionalProperties => 0,
+   properties => {
+   node => get_standard_option('pve-node'),
+   vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+   target => {
+   type => 'string',
+   description => "Target node fqdn address.",
description should mention target ip and fqdn, basically anything that 
can be resolved by get_ip_from_hostname is allowed.

+},
+   targetstorage => get_standard_option('pve-storage-id', {
+   description => "Target remote storage.",
+   optional => 1,
+   }),
+   targetbridge => {
+   type => 'string',
+   description => "Target remote bridge.",
+   format_description => 'bridge',
+   optional => 1,
+   },
This would probably be more useful if it supported multiple different 
bridges instead of just a single one.

+   targetvmid => get_standard_option('pve-vmid', {
+   description => "Target vmid. If not specified the next available 
vmid will be used.",
+   optional => 1,
+   }),
+   targetkey => {
+   type => 'string',
+   description => "Ssh private key file located in 
/etc/pve/priv/migrate_external/.",
+   optional => 1,
+   },
+   },
+},
+returns => {
+   type => 'string',
+   description => "the task ID.",
+},
+code => sub {
+   my ($param) = @_;
+
+   my $rpcenv = PVE::RPCEnvironment::get();
+
+   my $authuser = $rpcenv->get_user();
+
+   die "Only root can do external migration." if $authuser ne 'root@pam';
+
+   my $target = extract_param($param, 'target');
+
+   my $vmid = extract_param($param, 'vmid');
+
+   my $targetkey = extract_param($param, 'targetkey');
+
+   PVE::Cluster::check_cfs_quorum();
+
+   raise_param_exc({ target => "target is member of local cluster."}) if PVE::Cluster::check_node_exists($target, 1);
+
+die "HA must be disable for external migration." if 
PVE::HA::Config::vm_is_ha_managed($vmid);
+
+   my $migration_external_sshkey = $targetkey ? "/etc/pve/priv/migrate_external/$targetkey" : "/etc/pve/priv/migrate_external/id_rsa_$target";
+
+   die "ssh privatekey is missing for $target" if !-e 
$migration_external_sshkey;
+
+   my $targetip = PVE::Network::get_ip_from_hostname($target, 1);


This should probably be 0 instead of 1 ('noerr') as we don't have any 
check for validity before using $target. Just use it to error out with a 
nice message if $target can't be resolved.
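
Something like this, as a sketch of the suggested change:

    # dies with a resolver error if $target cannot be resolved
    my $targetip = PVE::Network::get_ip_from_hostname($target);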



+
+   # test if VM exists
+   my $conf = PVE::QemuConfig->load_config($vmid);
+
+   # try to detect errors early
+
+   PVE::QemuConfig->check_lock($conf);
+
+   die "VM need to be online for external migration" if 
!PVE::QemuServer::check_running($vmid);
+
+   $param->{online} = 1;
+   $param->{migration_external_sshkey} = $migration_external_sshkey;
+
+   my $realcmd = sub {
+   PVE::QemuMigrateExternal->migrate($target, $targetip, $vmid, $param);
+   };
+
+   my $worker = sub {
+   return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+   };
+
+   return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
+
+}});
+
+__PACKAGE__->register_method({
  name => 'monitor',
  path => '{vmid}/monitor',
  method => 'POST',
diff -

Re: [pve-devel] [PATCH v4 qemu-server 0/3] online vm migration to external cluster

2019-01-24 Thread David Limbeck
Sorry for the delay, wasn't at work the last 1 1/2 weeks. Will look 
through it in detail tomorrow.


On 1/11/19 4:33 PM, David Limbeck wrote:

Thank you for the patch.

I didn't have time yet to go through it but will do so early next week.

On 1/8/19 2:00 AM, Alexandre Derumier wrote:
This adds support for migrating a VM online to a different external
cluster.

(This a rework of a previous patch series sent 2years ago)


qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] [--targetbridge otherbridge] [--targetvmid] [--targetkey]



targetstorage && targetbridge are optional; if not defined, the same name as
the source is used


targetremotenode is a fqdn host from another cluster.
(source node must be able to root ssh to target node with public key)

The source VM is currently not deleted for safety; it is just stopped and the
migrate lock is kept to avoid restarting it.

One private ssh key needs to be created for each targetremotenode in:
/etc/pve/priv/migrate_external/id_rsa_mytargetnode


Changelog v4:
  - fix targetremotenode : can be fqdn or ip address
  - add optional targetkey && targetvmid
  - migrate : add a new QemuMigrationExternal.pm with all migration
code + remove some unused parts, like sync_disk and zfs replication.

  - migrate : force ssh tunnel for migration
  - vm_start : put code in patch3 + remove the old, no longer used
migration_type=external option



*** BLURB HERE ***

Alexandre Derumier (3):
   api2 : add migrate_vm_external
   add QemuMigrateExternal.pm
   qemu : vm_start : add external_migration

  PVE/API2/Qemu.pm   | 112 +-
  PVE/CLI/qm.pm  |   2 +
  PVE/Makefile   |   1 +
  PVE/QemuMigrateExternal.pm | 866 
+

  PVE/QemuServer.pm  |  20 +-
  5 files changed, 995 insertions(+), 6 deletions(-)
  create mode 100644 PVE/QemuMigrateExternal.pm







Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

2019-01-11 Thread David Limbeck

Do you have any additional information as to why it stopped?

Maybe we could increase the buffer size via nfnl_set_rcv_buffer_size by 
default and continue to ignore ENOBUFS?


On 1/10/19 4:32 PM, Alexandre DERUMIER wrote:

Just tested, no difference. (But I don't see ENOBUFS since I increased
net.ipv4.tcp_rmem.)

But I have reproduced my new hang,
and it seems that the pvefw-logger process was not running anymore. (Seems to be
a crash; I didn't see any out-of-memory.)


- Original Message -
From: "Thomas Lamprecht" 
To: "pve-devel" , "David Limbeck" , "Wolfgang Bumiller" 
Sent: Thursday, January 10, 2019 14:53:11
Subject: Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

On 1/10/19 1:51 PM, David Limbeck wrote:

On 1/10/19 1:49 PM, Wolfgang Bumiller wrote:

On Thu, Jan 10, 2019 at 12:08:28PM +0100, David Limbeck wrote:

nfct_catch blocks if the callback always returns NFCT_CB_CONTINUE. this
works around the problem by setting the underlying file descriptor to
O_NONBLOCK. this should allow the callback to run multiple times and
catch as many events as possible before nfct_catch returns.

Signed-off-by: David Limbeck 
---
maybe this improves the ENOBUFS situation? it should result in equal or
more messages though as the callback is run multiple times before
nfct_catch returns.

I wouldn't expect a change in the ENOBUFS situation but rather just more
output happening which may have previously been lost from already-read
packet parts.

@Alexandre, could you give this a try?

For ENOBUFS we could try setting NETLINK_NO_ENOBUFS with setsockopt as 
mentioned by @Thomas.

together with NETLINK_BROADCAST_SEND_ERROR[0], ulogd uses this[1] too.

[0]: https://patchwork.ozlabs.org/patch/24919/ (second b) bullet point)
[1]: https://git.netfilter.org/ulogd2/tree/input/flow/ulogd_inpflow_NFCT.c#n1322








Re: [pve-devel] [PATCH v4 qemu-server 0/3] online vm migration to external cluster

2019-01-11 Thread David Limbeck

Thank you for the patch.

I didn't have time yet to go through it but will do so early next week.

On 1/8/19 2:00 AM, Alexandre Derumier wrote:

This adds support for migrating a VM online to a different external cluster.
(This is a rework of a previous patch series sent 2 years ago.)


qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] [--targetbridge otherbridge] [--targetvmid] [--targetkey]


targetstorage && targetbridge are optional; if not defined, the same name as
the source is used

targetremotenode is a fqdn host from another cluster.
(source node must be able to root ssh to target node with public key)

The source VM is currently not deleted for safety; it is just stopped and the
migrate lock is kept to avoid restarting it.

One private ssh key needs to be created for each targetremotenode in:
/etc/pve/priv/migrate_external/id_rsa_mytargetnode


Changelog v4:
  - fix targetremotenode: can be an FQDN or IP address
  - add optional targetkey && targetvmid
  - migrate : add a new QemuMigrateExternal.pm with all migration code + 
remove some parts that are not used, like sync_disk and ZFS replication.
  - migrate : force ssh tunnel for migration
  - vm_start : put code in patch 3 + remove the old, no longer used 
migration_type=external option
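For reference, an example invocation with the new v4 options (node name, VMIDs
and key name below are made up):

qm migrate_external 100 node1.cluster2.example --targetstorage local-lvm \
    --targetbridge vmbr1 --targetvmid 120 --targetkey id_rsa_node1.cluster2.example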


*** BLURB HERE ***

Alexandre Derumier (3):
   api2 : add migrate_vm_external
   add QemuMigrateExternal.pm
   qemu : vm_start : add external_migration

  PVE/API2/Qemu.pm   | 112 +-
  PVE/CLI/qm.pm  |   2 +
  PVE/Makefile   |   1 +
  PVE/QemuMigrateExternal.pm | 866 +
  PVE/QemuServer.pm  |  20 +-
  5 files changed, 995 insertions(+), 6 deletions(-)
  create mode 100644 PVE/QemuMigrateExternal.pm



___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH firewall] make nfct_catch non-blocking

2019-01-10 Thread David Limbeck


On 1/10/19 1:49 PM, Wolfgang Bumiller wrote:

On Thu, Jan 10, 2019 at 12:08:28PM +0100, David Limbeck wrote:

nfct_catch blocks if the callback always returns NFCT_CB_CONTINUE. this
works around the problem by setting the underlying file descriptor to
O_NONBLOCK. this should allow the callback to run multiple times and
catch as many events as possible before nfct_catch returns.

Signed-off-by: David Limbeck 
---
maybe this improves the ENOBUFS situation? it should result in equal or
more messages though as the callback is run multiple times before
nfct_catch returns.

I wouldn't expect a change in the ENOBUFS situation but rather just more
output happening which may have previously been lost from already-read
packet parts.

@Alexandre, could you give this a try?
For ENOBUFS we could try setting NETLINK_NO_ENOBUFS with setsockopt as 
mentioned by @Thomas.


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH firewall] make nfct_catch non-blocking

2019-01-10 Thread David Limbeck
nfct_catch blocks if the callback always returns NFCT_CB_CONTINUE. this
works around the problem by setting the underlying file descriptor to
O_NONBLOCK. this should allow the callback to run multiple times and
catch as many events as possible before nfct_catch returns.

Signed-off-by: David Limbeck 
---
maybe this improves the ENOBUFS situation? it should result in equal or
more messages though as the callback is run multiple times before
nfct_catch returns.

 src/pvefw-logger.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index f77f56f..89c9635 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -944,7 +944,7 @@ nfct_cb(const struct nlmsghdr *nlh,
 
 queue_log_entry(le);
 
-return NFCT_CB_STOP;
+return NFCT_CB_CONTINUE;
 }
 
 static gboolean
@@ -956,7 +956,7 @@ nfct_read_cb(GIOChannel *source,
 if ((res = nfct_catch(nfcth)) < 0) {
 if (errno == ENOBUFS) {
 log_status_message(3, "nfct_catch returned ENOBUFS: conntrack 
information may be incomplete");
-} else {
+} else if (errno != EAGAIN) {
 log_status_message(3, "error catching nfct: %s", strerror(errno));
 return FALSE;
 }
@@ -1151,6 +1151,8 @@ main(int argc, char *argv[])
 if (conntrack) {
 nfct_callback_register2(nfcth, NFCT_T_NEW|NFCT_T_DESTROY, &nfct_cb, 
NULL);
 int nfctfd = nfct_fd(nfcth);
+int status_flags = fcntl(nfctfd, F_GETFL);
+fcntl(nfctfd, F_SETFL, status_flags | O_NONBLOCK);
 GIOChannel *nfct_ch = g_io_channel_unix_new(nfctfd);
 g_io_add_watch(nfct_ch, G_IO_IN, nfct_read_cb, NULL);
 }
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH firewall] log and ignore ENOBUFS in nfct_catch

2019-01-09 Thread David Limbeck
nfct_catch sets ENOBUFS if not enough buffer space is available. log
and continue operation instead of stopping. in addition log possible
other errors set by nfct_catch

Signed-off-by: David Limbeck 
---
 src/pvefw-logger.c | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 9c6fe4a..f77f56f 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -954,8 +954,12 @@ nfct_read_cb(GIOChannel *source,
 {
 int res;
 if ((res = nfct_catch(nfcth)) < 0) {
-log_status_message(3, "error catching nfct");
-return FALSE;
+if (errno == ENOBUFS) {
+log_status_message(3, "nfct_catch returned ENOBUFS: conntrack 
information may be incomplete");
+} else {
+log_status_message(3, "error catching nfct: %s", strerror(errno));
+return FALSE;
+}
 }
 return TRUE;
 }
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH firewall] print error message in case of 'error catching nfct'

2019-01-09 Thread David Limbeck

Thank you.

On 1/9/19 3:08 PM, Alexandre DERUMIER wrote:

error catching nfct: No buffer space available

- Original message -
From: "David Limbeck" 
To: "pve-devel" 
Sent: Wednesday, 9 January 2019 14:37:27
Subject: [pve-devel] [PATCH firewall] print error message in case of 'error 
catching nfct'

Signed-off-by: David Limbeck 
---
I couldn't reproduce it here locally. Could you try it with this patch
applied? This should print the error message (You will have to pull the
latest changes from our repository though, as a bug in
log_status_message was fixed).

src/pvefw-logger.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 9c6fe4a..79d4d57 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -954,7 +954,7 @@ nfct_read_cb(GIOChannel *source,
{
int res;
if ((res = nfct_catch(nfcth)) < 0) {
- log_status_message(3, "error catching nfct");
+ log_status_message(3, "error catching nfct: %s", strerror(errno));
return FALSE;
}
return TRUE;


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH firewall] print error message in case of 'error catching nfct'

2019-01-09 Thread David Limbeck
Signed-off-by: David Limbeck 
---
I couldn't reproduce it here locally. Could you try it with this patch
applied? This should print the error message (You will have to pull the
latest changes from our repository though, as a bug in
log_status_message was fixed).

 src/pvefw-logger.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 9c6fe4a..79d4d57 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -954,7 +954,7 @@ nfct_read_cb(GIOChannel *source,
 {
 int res;
 if ((res = nfct_catch(nfcth)) < 0) {
-log_status_message(3, "error catching nfct");
+log_status_message(3, "error catching nfct: %s", strerror(errno));
 return FALSE;
 }
 return TRUE;
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v2 storage] fix use of uninitialized value in parse_ceph_config

2019-01-03 Thread David Limbeck
Signed-off-by: David Limbeck 
---
v2:
  early return as Wolfgang suggested

 PVE/CephConfig.pm | 1 +
 1 file changed, 1 insertion(+)

diff --git a/PVE/CephConfig.pm b/PVE/CephConfig.pm
index 5b2d19e..b420fcc 100644
--- a/PVE/CephConfig.pm
+++ b/PVE/CephConfig.pm
@@ -14,6 +14,7 @@ sub parse_ceph_config {
 my ($filename, $raw) = @_;
 
 my $cfg = {};
+return $cfg if !defined($raw);
 
 my @lines = split /\n/, $raw;
 
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH storage] fix use of uninitialized value in parse_ceph_config

2019-01-03 Thread David Limbeck


On 1/3/19 9:26 AM, Wolfgang Bumiller wrote:

On Wed, Jan 02, 2019 at 12:17:35PM +0100, David Limbeck wrote:

Signed-off-by: David Limbeck 
---
  PVE/CephConfig.pm | 1 +
  1 file changed, 1 insertion(+)

diff --git a/PVE/CephConfig.pm b/PVE/CephConfig.pm
index 5b2d19e..3f06242 100644
--- a/PVE/CephConfig.pm
+++ b/PVE/CephConfig.pm
@@ -15,6 +15,7 @@ sub parse_ceph_config {
  
  my $cfg = {};
  
+$raw = '' if !defined($raw);

Since we're then effectively parsing an empty string, this could just
return the empty $cfg from above directly, no?


Yes it could return $cfg immediately. Will send a v2.




  my @lines = split /\n/, $raw;
  
  my $section;

--
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage] fix use of uninitialized value in parse_ceph_config

2019-01-02 Thread David Limbeck
Signed-off-by: David Limbeck 
---
 PVE/CephConfig.pm | 1 +
 1 file changed, 1 insertion(+)

diff --git a/PVE/CephConfig.pm b/PVE/CephConfig.pm
index 5b2d19e..3f06242 100644
--- a/PVE/CephConfig.pm
+++ b/PVE/CephConfig.pm
@@ -15,6 +15,7 @@ sub parse_ceph_config {
 
 my $cfg = {};
 
+$raw = '' if !defined($raw);
 my @lines = split /\n/, $raw;
 
 my $section;
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH v3 qemu-server 0/7] online vm migration to external cluster

2018-12-19 Thread David Limbeck

On 12/19/18 11:07 AM, Alexandre DERUMIER wrote:

hat do you think about splitting the code path completely from the
current migration path? This would allow us to work on it without any
modification to the current migration code. Makes testing easier and we
can refactor the code later on (and combine common functionality again)

Yes, I can do it if you want.

with a full /usr/share/perl5/PVE/QemuMigrateExternal.pm, for example (with all 
phases)?

Should be fine, yes.



- Original message -
From: "David Limbeck" 
To: "pve-devel" 
Sent: Wednesday, 19 December 2018 10:32:37
Subject: Re: [pve-devel] [PATCH v3 qemu-server 0/7] online vm migration to 
external cluster

What do you think about splitting the code path completely from the
current migration path? This would allow us to work on it without any
modification to the current migration code. Makes testing easier and we
can refactor the code later on (and combine common functionality again)


some more comments follow on the respective patches

On 11/27/18 4:38 PM, Alexandre Derumier wrote:

This adds support for migrating a VM online to a different external cluster.
(This is a rework of a previous patch series sent 2 years ago.)


qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] 
[--targetbridge otherbridge]


targetstorage && targetbridge are optional; if not defined, the same names as 
on the source are used

targetremotenode is an FQDN host from another cluster.
(the source node must be able to SSH as root to the target node with a public key)

the source VM is currently not deleted for safety; it is just stopped, and the 
migrate lock is kept to avoid restarting it.

One private SSH key needs to be created for each targetremotenode in:
/etc/pve/priv/migrate_external/id_rsa_mytargetnode

Changelog v3:
- prepare : shell_quote arguments in vm create (to escape options like -cpu 
+spectre;+ssbd;)
- phase2 : bugfix for --targetstorage option

Changelog v2:
- add migrate_external api2 (please check the api root permission)
- add targetbridge option
- fix targetremotenode fqdn resolution
- use pvesh to get nextvmid on remote cluster
- add sshkey
- add missing "\n" in some die messages.

Alexandre Derumier (7):
api2 : add migrate_vm_external
migrate : prepare : add create_vm for external migration
migrate : phase1 : skip sync_disk for external migration
migrate : phase2 : migrate external
migrate : phase2_cleanup : migrate_external
migrate : phase3_cleanup : migrate_external
migrate: add sshkey in /etc/pve/priv/migrate_external/

PVE/API2/Qemu.pm | 102 -
PVE/CLI/qm.pm | 2 +
PVE/QemuMigrate.pm | 164 -
PVE/QemuServer.pm | 20 +--
4 files changed, 253 insertions(+), 35 deletions(-)


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH v3 qemu-server 1/7] api2 : add migrate_vm_external

2018-12-19 Thread David Limbeck


On 11/27/18 4:38 PM, Alexandre Derumier wrote:

qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] 
[--targetbridge otherbridge]
---
  PVE/API2/Qemu.pm | 79 
  PVE/CLI/qm.pm|  2 ++
  2 files changed, 81 insertions(+)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index c82e2fa..b23db56 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -3162,6 +3162,85 @@ __PACKAGE__->register_method({
  }});
  
  __PACKAGE__->register_method({

+name => 'migrate_vm_external',
+path => '{vmid}/migrate_external',
+method => 'POST',
+protected => 1,
+proxyto => 'node',
+description => "Migrate virtual machine to an external cluster. Creates a new 
migration task.",
+permissions => {
+   check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+},
+parameters => {
+   additionalProperties => 0,
+   properties => {
+   node => get_standard_option('pve-node'),
+   vmid => get_standard_option('pve-vmid', { completion => 
\&PVE::QemuServer::complete_vmid }),
+   target => get_standard_option('pve-node', {
+   description => "Target node.",
+}),
+targetstorage => get_standard_option('pve-storage-id', {
+   description => "Target remote storage.",
+   optional => 1,
+}),
+   targetbridge => {
+   type => 'string',
+   description => "Target remote bridge.",
+   format_description => 'bridge',
+   optional => 1,
+   },


optional targetvmid with the current fallback of /cluster/nextid if not 
specified would be a great addition.
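The fallback could be as simple as (a sketch, assuming the root SSH access 
described in the cover letter):

ssh root@mytargetnode pvesh get /cluster/nextid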


an override for the private key name might be nice to have as well, e.g. 
--targetkey 'external.cluster2' which uses 
/etc/pve/priv/migrate_external/external.cluster2 with the fallback to 
the current /etc/pve/priv/migrate_external/id_rsa_external.cluster2


What do you think?


+   },
+},
+returns => {
+   type => 'string',
+   description => "the task ID.",
+},
+code => sub {
+   my ($param) = @_;
+
+   my $rpcenv = PVE::RPCEnvironment::get();
+
+   my $authuser = $rpcenv->get_user();
+
+   die "Only root can do external migration." if $authuser ne 'root@pam';
+
+   my $target = extract_param($param, 'target');
+
+   my $vmid = extract_param($param, 'vmid');
+
+   PVE::Cluster::check_cfs_quorum();
+
+   raise_param_exc({ target => "target is member of local cluster."}) if 
PVE::Cluster::check_node_exists($target, 1);
+
+die "HA must be disable for external migration." if 
PVE::HA::Config::vm_is_ha_managed($vmid);
+
+   my $targetip = PVE::Network::get_ip_from_hostname($target, 1);
+
+   # test if VM exists
+   my $conf = PVE::QemuConfig->load_config($vmid);
+
+   # try to detect errors early
+
+   PVE::QemuConfig->check_lock($conf);
+
+   die "VM need to be online for external migration" if 
!PVE::QemuServer::check_running($vmid);
+
+   $param->{online} = 1;
+   $param->{migration_external} = 1;
+
+   my $realcmd = sub {
+   PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
+   };
+
+   my $worker = sub {
+   return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+   };
+
+   return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
+
+}});
+
+__PACKAGE__->register_method({
  name => 'monitor',
  path => '{vmid}/monitor',
  method => 'POST',
diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index eceb9b3..5aa1d48 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -850,6 +850,8 @@ our $cmddef = {
  
  migrate => [ "PVE::API2::Qemu", 'migrate_vm', ['vmid', 'target'], { node => $nodename }, $upid_exit ],
  
+migrate_external => [ "PVE::API2::Qemu", 'migrate_vm_external', ['vmid', 'target'], { node => $nodename }, $upid_exit ],

+
  set => [ "PVE::API2::Qemu", 'update_vm', ['vmid'], { node => $nodename } 
],
  
  resize => [ "PVE::API2::Qemu", 'resize_vm', ['vmid', 'disk', 'size'], { node => $nodename } ],


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH v3 qemu-server 4/7] migrate : phase2 : migrate external

2018-12-19 Thread David Limbeck

comments inline

On 11/27/18 4:38 PM, Alexandre Derumier wrote:

---
  PVE/API2/Qemu.pm   | 18 +++---
  PVE/QemuMigrate.pm | 21 ++---
  PVE/QemuServer.pm  | 20 
  3 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index b23db56..b85fd6d 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -1932,7 +1932,7 @@ __PACKAGE__->register_method({
migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
migration_type => {
type => 'string',
-   enum => ['secure', 'insecure'],
+   enum => ['secure', 'insecure', 'external'],

unnecessary? migration_type 'external' is not used anywhere in the code

description => "Migration traffic is encrypted using an SSH " .
  "tunnel by default. On secure, completely private networks " .
  "this can be disabled to increase performance.",
@@ -1948,7 +1948,12 @@ __PACKAGE__->register_method({
description => "Target storage for the migration. (Can be '1' to use 
the same storage id as on the source node.)",
type => 'string',
optional => 1
-   }
+   },
+   external_migration => {
+   description => "Enable external migration.",
+   type => 'boolean',
+   optional => 1,
+   },
},
  },
  returns => {
@@ -1994,6 +1999,13 @@ __PACKAGE__->register_method({
raise_param_exc({ targetstorage => "targetstorage can only by used with 
migratedfrom." })
if $targetstorage && !$migratedfrom;
  
+	my $external_migration = extract_param($param, 'external_migration');

+   raise_param_exc({ external_migration => "Only root may use this 
option." })
+   if $external_migration && $authuser ne 'root@pam';
+
+   raise_param_exc({ external_migration => "targetstorage can't be used with 
external_migration." })
+   if ($targetstorage && $external_migration);
+
maybe add an additional check that, if migration_type is set, it is set to 
'secure', as external migration should always be secure?

# read spice ticket from STDIN
my $spice_ticket;
if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && 
($rpcenv->{type} eq 'cli')) {
@@ -2034,7 +2046,7 @@ __PACKAGE__->register_method({
syslog('info', "start VM $vmid: $upid\n");
  
  		PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,

- $machine, $spice_ticket, 
$migration_network, $migration_type, $targetstorage);
+ $machine, $spice_ticket, 
$migration_network, $migration_type, $targetstorage, $external_migration);
  
  		return;

};
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 1dea286..b4dc8f7 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -597,7 +597,9 @@ sub phase2 {
  
  my $conf = $self->{vmconf};
  
-$self->log('info', "starting VM $vmid on remote node '$self->{node}'");

+my $targetvmid = $self->{opts}->{targetvmid} ? $self->{opts}->{targetvmid} 
: $vmid;
+
+$self->log('info', "starting VM $targetvmid on remote node 
'$self->{node}'");
  
  my $raddr;

  my $rport;
@@ -613,10 +615,14 @@ sub phase2 {
$spice_ticket = $res->{ticket};
  }
  
-push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

-
  my $migration_type = $self->{opts}->{migration_type};
  
+push @$cmd , 'qm', 'start', $targetvmid, '--skiplock';

+
+push @$cmd, '--migratedfrom', $nodename if 
!$self->{opts}->{migration_external};
+
+push @$cmd, '--external_migration' if $self->{opts}->{migration_external};
+
  push @$cmd, '--migration_type', $migration_type;
  
  push @$cmd, '--migration_network', $self->{opts}->{migration_network}

@@ -633,7 +639,7 @@ sub phase2 {
  }
  
  if ($self->{opts}->{targetstorage}) {

-   push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
+   push @$cmd, '--targetstorage', $self->{opts}->{targetstorage} if 
!$self->{opts}->{migration_external};
  }
  
  my $spice_port;

@@ -650,7 +656,7 @@ sub phase2 {
}
elsif ($line =~ m!^migration listens on 
unix:(/run/qemu-server/(\d+)\.migrate)$!) {
$raddr = $1;
-   die "Destination UNIX sockets VMID does not match source VMID" if 
$vmid ne $2;
+   die "Destination UNIX sockets VMID does not match source VMID" if 
$targetvmid ne $2;
$ruri = "unix:$raddr";
}
elsif ($line =~ m/^migration listens on port (\d+)$/) {
@@ -720,13 +726,14 @@ sub phase2 {
  
  my $start = time();
  
-if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {

+if (($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) 
|| $self->

Re: [pve-devel] [PATCH v3 qemu-server 0/7] online vm migration to external cluster

2018-12-19 Thread David Limbeck
What do you think about splitting the code path completely from the 
current migration path? This would allow us to work on it without any 
modification to the current migration code. Makes testing easier and we 
can refactor the code later on (and combine common functionality again)



some more comments follow on the respective patches

On 11/27/18 4:38 PM, Alexandre Derumier wrote:

This adds support for migrating a VM online to a different external cluster.
(This is a rework of a previous patch series sent 2 years ago.)


qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] 
[--targetbridge otherbridge]


targetstorage && targetbridge are optional; if not defined, the same names as 
on the source are used

targetremotenode is an FQDN host from another cluster.
(the source node must be able to SSH as root to the target node with a public key)

the source VM is currently not deleted for safety; it is just stopped, and the 
migrate lock is kept to avoid restarting it.

One private SSH key needs to be created for each targetremotenode in:
/etc/pve/priv/migrate_external/id_rsa_mytargetnode

Changelog v3:
  - prepare : shell_quote arguments in vm create (to escape options like -cpu 
+spectre;+ssbd;)
  - phase2 : bugfix for --targetstorage option

Changelog v2:
  - add migrate_external api2 (please check the api root permission)
  - add targetbridge option
  - fix targetremotenode fqdn resolution
  - use pvesh to get nextvmid on remote cluster
  - add sshkey
  - add missing "\n" in some die messages.

Alexandre Derumier (7):
   api2 : add migrate_vm_external
   migrate : prepare : add create_vm for external migration
   migrate : phase1 : skip sync_disk for external migration
   migrate : phase2 : migrate external
   migrate : phase2_cleanup : migrate_external
   migrate : phase3_cleanup : migrate_external
   migrate: add sshkey in /etc/pve/priv/migrate_external/

  PVE/API2/Qemu.pm   | 102 -
  PVE/CLI/qm.pm  |   2 +
  PVE/QemuMigrate.pm | 164 -
  PVE/QemuServer.pm  |  20 +--
  4 files changed, 253 insertions(+), 35 deletions(-)



___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v4 firewall 1/2] add conntrack logging via libnetfilter_conntrack

2018-12-13 Thread David Limbeck
add conntrack logging to pvefw-logger including timestamps (requires
/proc/sys/net/netfilter/nf_conntrack_timestamp to be 1).
this allows the tracking of sessions (start, end timestamps with
nf_conntrack_timestamp on [DESTROY] messages). commit includes
Build-Depends inclusion of libnetfilter-conntrack-dev and
libnetfilter_conntrack library in the Makefile.

Signed-off-by: David Limbeck 
---
 debian/control |  1 +
 src/Makefile   |  2 +-
 src/pvefw-logger.c | 77 ++
 3 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/debian/control b/debian/control
index a68a81c..2a92b16 100644
--- a/debian/control
+++ b/debian/control
@@ -5,6 +5,7 @@ Maintainer: Proxmox Support Team 
 Build-Depends: debhelper (>= 7.0.50~),
dh-systemd,
libglib2.0-dev,
+   libnetfilter-conntrack-dev,
libnetfilter-log-dev,
libpve-common-perl,
pve-cluster,
diff --git a/src/Makefile b/src/Makefile
index ed74393..a35e53d 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -25,7 +25,7 @@ LDFLAGS:=$(shell dpkg-buildflags --get LDFLAGS)
 pvefw-logger: pvefw-logger.c
gcc -Wall -Werror pvefw-logger.c -o pvefw-logger -std=gnu99 \
$(CPPFLAGS) $(CFLAGS) $(LDFLAGS) \
-   $(shell pkg-config libnetfilter_log glib-2.0 --libs --cflags)
+   $(shell pkg-config libnetfilter_log libnetfilter_conntrack glib-2.0 
--libs --cflags)
 
 .PHONY: install
 install: pve-firewall pve-firewall.8 pve-firewall.bash-completion pvefw-logger
diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 2bd869c..506568c 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -53,10 +54,12 @@
 
 static struct nflog_handle *logh = NULL;
 static struct nlif_handle *nlifh = NULL;
+static struct nfct_handle *nfcth = NULL;
 GMainLoop *main_loop;
 
 gboolean foreground = FALSE;
 gboolean debug = FALSE;
+gboolean conntrack = FALSE;
 
 /*
 
@@ -76,6 +79,7 @@ Example:
 
 #define LOCKFILE "/var/lock/pvefw-logger.lck"
 #define PIDFILE "/var/run/pvefw-logger.pid"
+#define LOG_CONNTRACK_FILE "/var/lib/pve-firewall/log_nf_conntrack"
 
 #define LQ_LEN 512
 #define LE_MAX (512 - 4) // try to fit into 512 bytes
@@ -917,6 +921,42 @@ signal_read_cb(GIOChannel *source,
 return TRUE;
 }
 
+static int
+nfct_cb(const struct nlmsghdr *nlh,
+enum nf_conntrack_msg_type type,
+struct nf_conntrack *ct,
+void *data)
+{
+struct log_entry *le = g_new0(struct log_entry, 1);
+int len = nfct_snprintf(&le->buf[le->len], LE_MAX - le->len,
+ct, type, NFCT_O_DEFAULT,
+NFCT_OF_SHOW_LAYER3|NFCT_OF_TIMESTAMP);
+le->len += len;
+
+if (le->len == LE_MAX) {
+le->buf[le->len-1] = '\n';
+} else { // le->len < LE_MAX
+le->buf[le->len++] = '\n';
+}
+
+queue_log_entry(le);
+
+return NFCT_CB_STOP;
+}
+
+static gboolean
+nfct_read_cb(GIOChannel *source,
+ GIOCondition condition,
+ gpointer data)
+{
+int res;
+if ((res = nfct_catch(nfcth)) < 0) {
+log_status_message(3, "error catching nfct");
+return FALSE;
+}
+return TRUE;
+}
+
 int
 main(int argc, char *argv[])
 {
@@ -932,6 +972,7 @@ main(int argc, char *argv[])
 GOptionEntry entries[] = {
 { "debug", 'd', 0, G_OPTION_ARG_NONE, &debug, "Turn on debug 
messages", NULL },
 { "foreground", 'f', 0, G_OPTION_ARG_NONE, &foreground, "Do not 
daemonize server", NULL },
+{ "conntrack", 0, 0, G_OPTION_ARG_NONE, &conntrack, "Add conntrack 
logging", NULL },
 { NULL },
 };
 
@@ -954,6 +995,23 @@ main(int argc, char *argv[])
 
 g_option_context_free(context);
 
+if (!conntrack) {
+int log_nf_conntrackfd = open(LOG_CONNTRACK_FILE, O_RDONLY);
+if (log_nf_conntrackfd == -1) {
+if (errno != ENOENT) {
+fprintf(stderr, "error: failed to open "LOG_CONNTRACK_FILE": 
%s\n", strerror(errno));
+}
+} else {
+char c = '0';
+ssize_t bytes = read(log_nf_conntrackfd, &c, sizeof(c));
+if (bytes < 0) {
+fprintf(stderr, "error: failed to read value in 
log_nf_conntrack: %s\n", strerror(errno));
+} else {
+conntrack = (c == '1');
+}
+}
+}
+
 if (debug) foreground = TRUE;
 
 if ((lockfd = open(LOCKFILE, O_RDWR|O_CREAT|O_APPEND, 0644)) == -1) {
@@ -1017,6 +1075,13 @@ main(int argc, char *argv[])
 exit(-1);
 }
 
+if (conntrack) {
+if ((nfcth = nfct_open(CONNTRACK, NF_NETLINK_CONNTRACK_NEW|NF_NETLINK_CONNTRACK_DESTROY)) == NULL) {

[pve-devel] [PATCH v4 firewall 0/2] firewall conntrack logging

2018-12-13 Thread David Limbeck
Adds optional conntrack logging. pvefw-logger is restarted whenever the
config changes.

To enable conntrack logging set 'log_nf_conntrack: 1' in
/etc/pve/nodes/{node}/host.fw
To enable timestamps (start and end time in [DESTROY] messages) set
/proc/sys/net/netfilter/nf_conntrack_timestamp to 1
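For example (a sketch; the [OPTIONS] section name is assumed to follow the
usual host.fw layout):

# /etc/pve/nodes/{node}/host.fw
[OPTIONS]
log_nf_conntrack: 1

# enable conntrack timestamps until the next reboot
echo 1 > /proc/sys/net/netfilter/nf_conntrack_timestamp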

v3->v4:
  fixed cover letter version
  fixed check for ENOENT

v2->v3:
  incorporated Wolfgang's suggestions
  pvefw-logger:
  - file path as DEFINE
  - check for ENOENT
  - conntrack: everything other than '1' is false

  Firewall.pm:
  - changed command to 'try-reload-or-restart'
  - separated parts of command
  - brace placement

David Limbeck (2):
  add conntrack logging via libnetfilter_conntrack
  add log_nf_conntrack host firewall option

 debian/control  |  1 +
 src/Makefile|  2 +-
 src/PVE/Firewall.pm | 20 +-
 src/pvefw-logger.c  | 77 +
 4 files changed, 98 insertions(+), 2 deletions(-)

-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v4 firewall 2/2] add log_nf_conntrack host firewall option

2018-12-13 Thread David Limbeck
add log_nf_conntrack host firewall option to enable or disable logging
of connections. restarts pvefw-logger if the option changes in the
config. the pvefw-logger is always restarted in the beginning to make
sure the current config is applied.

Signed-off-by: David Limbeck 
---
 src/PVE/Firewall.pm | 20 +++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/src/PVE/Firewall.pm b/src/PVE/Firewall.pm
index db1eae3..39f79d4 100644
--- a/src/PVE/Firewall.pm
+++ b/src/PVE/Firewall.pm
@@ -2638,7 +2638,7 @@ sub parse_hostfw_option {
 
 my $loglevels = "emerg|alert|crit|err|warning|notice|info|debug|nolog";
 
-if ($line =~ m/^(enable|nosmurfs|tcpflags|ndp):\s*(0|1)\s*$/i) {
+if ($line =~ 
m/^(enable|nosmurfs|tcpflags|ndp|log_nf_conntrack):\s*(0|1)\s*$/i) {
$opt = lc($1);
$value = int($2);
 } elsif ($line =~ 
m/^(log_level_in|log_level_out|tcp_flags_log_level|smurf_log_level):\s*(($loglevels)\s*)?$/i)
 {
@@ -4069,6 +4069,7 @@ sub apply_ruleset {
 
 update_nf_conntrack_tcp_timeout_established($hostfw_conf);
 
+update_nf_conntrack_logging($hostfw_conf);
 }
 
 sub update_nf_conntrack_max {
@@ -4105,6 +4106,23 @@ sub update_nf_conntrack_tcp_timeout_established {
 
PVE::ProcFSTools::write_proc_entry("/proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established",
 $value);
 }
 
+my $log_nf_conntrack_enabled = undef;
+sub update_nf_conntrack_logging {
+my ($hostfw_conf) = @_;
+
+my $options = $hostfw_conf->{options} || {};
+my $value = $options->{log_nf_conntrack} || 0;
+if (!defined($log_nf_conntrack_enabled)
+   || $value != $log_nf_conntrack_enabled)
+{
+   my $tmpfile = "$pve_fw_status_dir/log_nf_conntrack";
+   PVE::Tools::file_set_contents($tmpfile, $value);
+
+   PVE::Tools::run_command([qw(systemctl try-reload-or-restart 
pvefw-logger.service)]);
+   $log_nf_conntrack_enabled = $value;
+}
+}
+
 sub remove_pvefw_chains {
 
 PVE::Firewall::remove_pvefw_chains_iptables("iptables");
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH v2 firewall 0/2] firewall conntrack logging

2018-12-11 Thread David Limbeck

v3, not v2, typo


On 12/11/18 3:09 PM, David Limbeck wrote:

Adds optional conntrack logging. pvefw-logger is restarted whenever the
config changes.

To enable conntrack logging set 'log_nf_conntrack: 1' in
/etc/pve/nodes/{node}/host.fw
To enable timestamps (start and end time in [DESTROY] messages) set
/proc/sys/net/netfilter/nf_conntrack_timestamp to 1

v2->v3:
   incorporated Wolfgang's suggestions
   pvefw-logger:
   - file path as DEFINE
   - check for ENOENT
   - conntrack: everything other than '1' is false

   Firewall.pm:
   - changed command to 'try-reload-or-restart'
   - separated parts of command
   - brace placement

David Limbeck (2):
   add conntrack logging via libnetfilter_conntrack
   add log_nf_conntrack host firewall option

  debian/control  |  1 +
  src/Makefile|  2 +-
  src/PVE/Firewall.pm | 19 +-
  src/pvefw-logger.c  | 75 +
  4 files changed, 95 insertions(+), 2 deletions(-)



___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v3 firewall 1/2] add conntrack logging via libnetfilter_conntrack

2018-12-11 Thread David Limbeck
add conntrack logging to pvefw-logger including timestamps (requires
/proc/sys/net/netfilter/nf_conntrack_timestamp to be 1).
this allows the tracking of sessions (start, end timestamps with
nf_conntrack_timestamp on [DESTROY] messages). commit includes
Build-Depends inclusion of libnetfilter-conntrack-dev and
libnetfilter_conntrack library in the Makefile.

Signed-off-by: David Limbeck 
---
 debian/control |  1 +
 src/Makefile   |  2 +-
 src/pvefw-logger.c | 75 ++
 3 files changed, 77 insertions(+), 1 deletion(-)

diff --git a/debian/control b/debian/control
index a68a81c..2a92b16 100644
--- a/debian/control
+++ b/debian/control
@@ -5,6 +5,7 @@ Maintainer: Proxmox Support Team 
 Build-Depends: debhelper (>= 7.0.50~),
dh-systemd,
libglib2.0-dev,
+   libnetfilter-conntrack-dev,
libnetfilter-log-dev,
libpve-common-perl,
pve-cluster,
diff --git a/src/Makefile b/src/Makefile
index ed74393..a35e53d 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -25,7 +25,7 @@ LDFLAGS:=$(shell dpkg-buildflags --get LDFLAGS)
 pvefw-logger: pvefw-logger.c
gcc -Wall -Werror pvefw-logger.c -o pvefw-logger -std=gnu99 \
$(CPPFLAGS) $(CFLAGS) $(LDFLAGS) \
-   $(shell pkg-config libnetfilter_log glib-2.0 --libs --cflags)
+   $(shell pkg-config libnetfilter_log libnetfilter_conntrack glib-2.0 
--libs --cflags)
 
 .PHONY: install
 install: pve-firewall pve-firewall.8 pve-firewall.bash-completion pvefw-logger
diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 2bd869c..808 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -53,10 +54,12 @@
 
 static struct nflog_handle *logh = NULL;
 static struct nlif_handle *nlifh = NULL;
+static struct nfct_handle *nfcth = NULL;
 GMainLoop *main_loop;
 
 gboolean foreground = FALSE;
 gboolean debug = FALSE;
+gboolean conntrack = FALSE;
 
 /*
 
@@ -76,6 +79,7 @@ Example:
 
 #define LOCKFILE "/var/lock/pvefw-logger.lck"
 #define PIDFILE "/var/run/pvefw-logger.pid"
+#define LOG_CONNTRACK_FILE "/var/lib/pve-firewall/log_nf_conntrack"
 
 #define LQ_LEN 512
 #define LE_MAX (512 - 4) // try to fit into 512 bytes
@@ -917,6 +921,42 @@ signal_read_cb(GIOChannel *source,
 return TRUE;
 }
 
+static int
+nfct_cb(const struct nlmsghdr *nlh,
+enum nf_conntrack_msg_type type,
+struct nf_conntrack *ct,
+void *data)
+{
+struct log_entry *le = g_new0(struct log_entry, 1);
+int len = nfct_snprintf(&le->buf[le->len], LE_MAX - le->len,
+ct, type, NFCT_O_DEFAULT,
+NFCT_OF_SHOW_LAYER3|NFCT_OF_TIMESTAMP);
+le->len += len;
+
+if (le->len == LE_MAX) {
+le->buf[le->len-1] = '\n';
+} else { // le->len < LE_MAX
+le->buf[le->len++] = '\n';
+}
+
+queue_log_entry(le);
+
+return NFCT_CB_STOP;
+}
+
+static gboolean
+nfct_read_cb(GIOChannel *source,
+ GIOCondition condition,
+ gpointer data)
+{
+int res;
+if ((res = nfct_catch(nfcth)) < 0) {
+log_status_message(3, "error catching nfct");
+return FALSE;
+}
+return TRUE;
+}
+
 int
 main(int argc, char *argv[])
 {
@@ -932,6 +972,7 @@ main(int argc, char *argv[])
 GOptionEntry entries[] = {
 { "debug", 'd', 0, G_OPTION_ARG_NONE, &debug, "Turn on debug 
messages", NULL },
 { "foreground", 'f', 0, G_OPTION_ARG_NONE, &foreground, "Do not 
daemonize server", NULL },
+{ "conntrack", 0, 0, G_OPTION_ARG_NONE, &conntrack, "Add conntrack 
logging", NULL },
 { NULL },
 };
 
@@ -954,6 +995,21 @@ main(int argc, char *argv[])
 
 g_option_context_free(context);
 
+if (!conntrack) {
+int log_nf_conntrackfd = open(LOG_CONNTRACK_FILE, O_RDONLY);
+if (log_nf_conntrackfd == -1 && errno != ENOENT) {
+fprintf(stderr, "error: failed to open "LOG_CONNTRACK_FILE": 
%s\n", strerror(errno));
+} else {
+char c = '0';
+ssize_t bytes = read(log_nf_conntrackfd, &c, sizeof(c));
+if (bytes < 0) {
+fprintf(stderr, "error: failed to read value in 
log_nf_conntrack: %s\n", strerror(errno));
+} else {
+conntrack = (c == '1');
+}
+}
+}
+
 if (debug) foreground = TRUE;
 
 if ((lockfd = open(LOCKFILE, O_RDWR|O_CREAT|O_APPEND, 0644)) == -1) {
@@ -1017,6 +1073,13 @@ main(int argc, char *argv[])
 exit(-1);
 }
 
+if (conntrack) {
+if ((nfcth = nfct_open(CONNTRACK, 
NF_NETLINK_CONNTRACK_NEW|NF_NETLINK_CONNTRACK_DESTROY)) == NULL) {

[pve-devel] [PATCH v3 firewall 2/2] add log_nf_conntrack host firewall option

2018-12-11 Thread David Limbeck
add log_nf_conntrack host firewall option to enable or disable logging
of connections. restarts pvefw-logger if the option changes in the
config. the pvefw-logger is always restarted in the beginning to make
sure the current config is applied.

Signed-off-by: David Limbeck 
---
 src/PVE/Firewall.pm | 20 +++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/src/PVE/Firewall.pm b/src/PVE/Firewall.pm
index db1eae3..39f79d4 100644
--- a/src/PVE/Firewall.pm
+++ b/src/PVE/Firewall.pm
@@ -2638,7 +2638,7 @@ sub parse_hostfw_option {
 
 my $loglevels = "emerg|alert|crit|err|warning|notice|info|debug|nolog";
 
-if ($line =~ m/^(enable|nosmurfs|tcpflags|ndp):\s*(0|1)\s*$/i) {
+if ($line =~ 
m/^(enable|nosmurfs|tcpflags|ndp|log_nf_conntrack):\s*(0|1)\s*$/i) {
$opt = lc($1);
$value = int($2);
 } elsif ($line =~ 
m/^(log_level_in|log_level_out|tcp_flags_log_level|smurf_log_level):\s*(($loglevels)\s*)?$/i)
 {
@@ -4069,6 +4069,7 @@ sub apply_ruleset {
 
 update_nf_conntrack_tcp_timeout_established($hostfw_conf);
 
+update_nf_conntrack_logging($hostfw_conf);
 }
 
 sub update_nf_conntrack_max {
@@ -4105,6 +4106,23 @@ sub update_nf_conntrack_tcp_timeout_established {
 
PVE::ProcFSTools::write_proc_entry("/proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established",
 $value);
 }
 
+my $log_nf_conntrack_enabled = undef;
+sub update_nf_conntrack_logging {
+my ($hostfw_conf) = @_;
+
+my $options = $hostfw_conf->{options} || {};
+my $value = $options->{log_nf_conntrack} || 0;
+if (!defined($log_nf_conntrack_enabled)
+   || $value != $log_nf_conntrack_enabled)
+{
+   my $tmpfile = "$pve_fw_status_dir/log_nf_conntrack";
+   PVE::Tools::file_set_contents($tmpfile, $value);
+
+   PVE::Tools::run_command([qw(systemctl try-reload-or-restart 
pvefw-logger.service)]);
+   $log_nf_conntrack_enabled = $value;
+}
+}
+
 sub remove_pvefw_chains {
 
 PVE::Firewall::remove_pvefw_chains_iptables("iptables");
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v2 firewall 0/2] firewall conntrack logging

2018-12-11 Thread David Limbeck
Adds optional conntrack logging. pvefw-logger is restarted whenever the
config changes.

To enable conntrack logging set 'log_nf_conntrack: 1' in
/etc/pve/nodes/{node}/host.fw
To enable timestamps (start and end time in [DESTROY] messages) set
/proc/sys/net/netfilter/nf_conntrack_timestamp to 1

v2->v3:
  incorporated Wolfgang's suggestions
  pvefw-logger:
  - file path as DEFINE
  - check for ENOENT
  - conntrack: everything other than '1' is false

  Firewall.pm:
  - changed command to 'try-reload-or-restart'
  - separated parts of command
  - brace placement

David Limbeck (2):
  add conntrack logging via libnetfilter_conntrack
  add log_nf_conntrack host firewall option

 debian/control  |  1 +
 src/Makefile|  2 +-
 src/PVE/Firewall.pm | 19 +-
 src/pvefw-logger.c  | 75 +
 4 files changed, 95 insertions(+), 2 deletions(-)

-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH v3 qemu-server 1/7] api2 : add migrate_vm_external

2018-12-07 Thread David Limbeck

Sorry for the delay, firewall conntrack logging took longer than expected.

Still need some time to go through your patches in detail, but one thing 
inline:


On 11/27/18 4:38 PM, Alexandre Derumier wrote:

qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] 
[--targetbridge otherbridge]
---
  PVE/API2/Qemu.pm | 79 
  PVE/CLI/qm.pm|  2 ++
  2 files changed, 81 insertions(+)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index c82e2fa..b23db56 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -3162,6 +3162,85 @@ __PACKAGE__->register_method({
  }});
  
  __PACKAGE__->register_method({

+name => 'migrate_vm_external',
+path => '{vmid}/migrate_external',
+method => 'POST',
+protected => 1,
+proxyto => 'node',
+description => "Migrate virtual machine to an external cluster. Creates a new 
migration task.",
+permissions => {
+   check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+},
+parameters => {
+   additionalProperties => 0,
+   properties => {
+   node => get_standard_option('pve-node'),
+   vmid => get_standard_option('pve-vmid', { completion => 
\&PVE::QemuServer::complete_vmid }),
+   target => get_standard_option('pve-node', {
+   description => "Target node.",
+}),
'pve-node' does not support fully qualified domain names, only 
[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])? (copied from 
pve-common/src/PVE/JSONSchema.pm).

+targetstorage => get_standard_option('pve-storage-id', {
+   description => "Target remote storage.",
+   optional => 1,
+}),
+   targetbridge => {
+   type => 'string',
+   description => "Target remote bridge.",
+   format_description => 'bridge',
+   optional => 1,
+   },
+   },
+},
+returns => {
+   type => 'string',
+   description => "the task ID.",
+},
+code => sub {
+   my ($param) = @_;
+
+   my $rpcenv = PVE::RPCEnvironment::get();
+
+   my $authuser = $rpcenv->get_user();
+
+   die "Only root can do external migration." if $authuser ne 'root@pam';
+
+   my $target = extract_param($param, 'target');
+
+   my $vmid = extract_param($param, 'vmid');
+
+   PVE::Cluster::check_cfs_quorum();
+
+   raise_param_exc({ target => "target is member of local cluster."}) if 
PVE::Cluster::check_node_exists($target, 1);
+
+die "HA must be disable for external migration." if 
PVE::HA::Config::vm_is_ha_managed($vmid);
+
+   my $targetip = PVE::Network::get_ip_from_hostname($target, 1);
+
+   # test if VM exists
+   my $conf = PVE::QemuConfig->load_config($vmid);
+
+   # try to detect errors early
+
+   PVE::QemuConfig->check_lock($conf);
+
+   die "VM need to be online for external migration" if 
!PVE::QemuServer::check_running($vmid);
+
+   $param->{online} = 1;
+   $param->{migration_external} = 1;
+
+   my $realcmd = sub {
+   PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
+   };
+
+   my $worker = sub {
+   return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+   };
+
+   return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
+
+}});
+
+__PACKAGE__->register_method({
  name => 'monitor',
  path => '{vmid}/monitor',
  method => 'POST',
diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index eceb9b3..5aa1d48 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -850,6 +850,8 @@ our $cmddef = {
  
  migrate => [ "PVE::API2::Qemu", 'migrate_vm', ['vmid', 'target'], { node => $nodename }, $upid_exit ],
  
+migrate_external => [ "PVE::API2::Qemu", 'migrate_vm_external', ['vmid', 'target'], { node => $nodename }, $upid_exit ],

+
  set => [ "PVE::API2::Qemu", 'update_vm', ['vmid'], { node => $nodename } 
],
  
  resize => [ "PVE::API2::Qemu", 'resize_vm', ['vmid', 'disk', 'size'], { node => $nodename } ],


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v2 firewall 1/2] add conntrack logging via libnetfilter_conntrack

2018-12-07 Thread David Limbeck
add conntrack logging to pvefw-logger including timestamps (requires
/proc/sys/net/netfilter/nf_conntrack_timestamp to be 1).
this allows the tracking of sessions (start, end timestamps with
nf_conntrack_timestamp on [DESTROY] messages). commit includes
Build-Depends inclusion of libnetfilter-conntrack-dev and
libnetfilter_conntrack library in the Makefile.
conntrack logging can also be enabled by passing --conntrack on the command line.

Signed-off-by: David Limbeck 
---
 debian/control |  1 +
 src/Makefile   |  2 +-
 src/pvefw-logger.c | 75 ++
 3 files changed, 77 insertions(+), 1 deletion(-)

diff --git a/debian/control b/debian/control
index a68a81c..2a92b16 100644
--- a/debian/control
+++ b/debian/control
@@ -5,6 +5,7 @@ Maintainer: Proxmox Support Team 
 Build-Depends: debhelper (>= 7.0.50~),
dh-systemd,
libglib2.0-dev,
+   libnetfilter-conntrack-dev,
libnetfilter-log-dev,
libpve-common-perl,
pve-cluster,
diff --git a/src/Makefile b/src/Makefile
index ed74393..a35e53d 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -25,7 +25,7 @@ LDFLAGS:=$(shell dpkg-buildflags --get LDFLAGS)
 pvefw-logger: pvefw-logger.c
gcc -Wall -Werror pvefw-logger.c -o pvefw-logger -std=gnu99 \
$(CPPFLAGS) $(CFLAGS) $(LDFLAGS) \
-   $(shell pkg-config libnetfilter_log glib-2.0 --libs --cflags)
+   $(shell pkg-config libnetfilter_log libnetfilter_conntrack glib-2.0 
--libs --cflags)
 
 .PHONY: install
 install: pve-firewall pve-firewall.8 pve-firewall.bash-completion pvefw-logger
diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 2bd869c..bea7e5a 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -53,10 +54,12 @@
 
 static struct nflog_handle *logh = NULL;
 static struct nlif_handle *nlifh = NULL;
+static struct nfct_handle *nfcth = NULL;
 GMainLoop *main_loop;
 
 gboolean foreground = FALSE;
 gboolean debug = FALSE;
+gboolean conntrack = FALSE;
 
 /*
 
@@ -917,6 +920,42 @@ signal_read_cb(GIOChannel *source,
 return TRUE;
 }
 
+static int
+nfct_cb(const struct nlmsghdr *nlh,
+enum nf_conntrack_msg_type type,
+struct nf_conntrack *ct,
+void *data)
+{
+struct log_entry *le = g_new0(struct log_entry, 1);
+int len = nfct_snprintf(&le->buf[le->len], LE_MAX - le->len,
+ct, type, NFCT_O_DEFAULT,
+NFCT_OF_SHOW_LAYER3|NFCT_OF_TIMESTAMP);
+le->len += len;
+
+if (le->len == LE_MAX) {
+le->buf[le->len-1] = '\n';
+} else { // le->len < LE_MAX
+le->buf[le->len++] = '\n';
+}
+
+queue_log_entry(le);
+
+return NFCT_CB_STOP;
+}
+
+static gboolean
+nfct_read_cb(GIOChannel *source,
+ GIOCondition condition,
+ gpointer data)
+{
+int res;
+if ((res = nfct_catch(nfcth)) < 0) {
+log_status_message(3, "error catching nfct");
+return FALSE;
+}
+return TRUE;
+}
+
 int
 main(int argc, char *argv[])
 {
@@ -932,6 +971,7 @@ main(int argc, char *argv[])
 GOptionEntry entries[] = {
 { "debug", 'd', 0, G_OPTION_ARG_NONE, &debug, "Turn on debug 
messages", NULL },
 { "foreground", 'f', 0, G_OPTION_ARG_NONE, &foreground, "Do not 
daemonize server", NULL },
+{ "conntrack", 0, 0, G_OPTION_ARG_NONE, &conntrack, "Add conntrack 
logging", NULL },
 { NULL },
 };
 
@@ -954,6 +994,22 @@ main(int argc, char *argv[])
 
 g_option_context_free(context);
 
+if (!conntrack) {
+int log_nf_conntrackfd = 
open("/var/lib/pve-firewall/log_nf_conntrack", O_RDONLY);
+if (log_nf_conntrackfd == -1) {
+fprintf(stderr, "error: failed to open 
/var/lib/pve-firewall/log_nf_conntrack: %s\n", strerror(errno));
+} else {
+char c = '0';
+ssize_t bytes = read(log_nf_conntrackfd, &c, sizeof(c));
+if (bytes < 0) {
+fprintf(stderr, "error: failed to read value in 
log_nf_conntrack: %s\n", strerror(errno));
+} else {
+// should be either '0' or '1' so subtract '0' for either 0 or 
1
+conntrack = c-'0';
+}
+}
+}
+
 if (debug) foreground = TRUE;
 
 if ((lockfd = open(LOCKFILE, O_RDWR|O_CREAT|O_APPEND, 0644)) == -1) {
@@ -1017,6 +1073,13 @@ main(int argc, char *argv[])
 exit(-1);
 }
 
+if (conntrack) {
+if ((nfcth = nfct_open(CONNTRACK, 
NF_NETLINK_CONNTRACK_NEW|NF_NETLINK_CONNTRACK_DESTROY)) == NULL) {
+fprintf(stderr, "unable to open netfilter conntrack\n");

[pve-devel] [PATCH v2 firewall 0/2] firewall conntrack logging

2018-12-07 Thread David Limbeck
Adds optional conntrack logging. pvefw-logger is restarted whenever the
config changes.

To enable conntrack logging set 'log_nf_conntrack: 1' in
/etc/pve/nodes/{node}/host.fw
To enable timestamps (start and end time in [DESTROY] messages) set
/proc/sys/net/netfilter/nf_conntrack_timestamp to 1


David Limbeck (2):
  add conntrack logging via libnetfilter_conntrack
  add log_nf_conntrack host firewall option

 debian/control  |  1 +
 src/Makefile|  2 +-
 src/PVE/Firewall.pm | 19 +-
 src/pvefw-logger.c  | 75 +
 4 files changed, 95 insertions(+), 2 deletions(-)

-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v2 firewall 2/2] add log_nf_conntrack host firewall option

2018-12-07 Thread David Limbeck
add log_nf_conntrack host firewall option to enable or disable logging
of connections. restarts pvefw-logger if the option changes in the
config. the pvefw-logger is always restarted in the beginning to make
sure the current config is applied.

Signed-off-by: David Limbeck 
---
 src/PVE/Firewall.pm | 19 ++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/src/PVE/Firewall.pm b/src/PVE/Firewall.pm
index db1eae3..d9d8e26 100644
--- a/src/PVE/Firewall.pm
+++ b/src/PVE/Firewall.pm
@@ -2638,7 +2638,7 @@ sub parse_hostfw_option {
 
 my $loglevels = "emerg|alert|crit|err|warning|notice|info|debug|nolog";
 
-if ($line =~ m/^(enable|nosmurfs|tcpflags|ndp):\s*(0|1)\s*$/i) {
+if ($line =~ 
m/^(enable|nosmurfs|tcpflags|ndp|log_nf_conntrack):\s*(0|1)\s*$/i) {
$opt = lc($1);
$value = int($2);
 } elsif ($line =~ 
m/^(log_level_in|log_level_out|tcp_flags_log_level|smurf_log_level):\s*(($loglevels)\s*)?$/i)
 {
@@ -4069,6 +4069,7 @@ sub apply_ruleset {
 
 update_nf_conntrack_tcp_timeout_established($hostfw_conf);
 
+update_nf_conntrack_logging($hostfw_conf);
 }
 
 sub update_nf_conntrack_max {
@@ -4105,6 +4106,22 @@ sub update_nf_conntrack_tcp_timeout_established {
 
PVE::ProcFSTools::write_proc_entry("/proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established",
 $value);
 }
 
+my $log_nf_conntrack_enabled = undef;
+sub update_nf_conntrack_logging {
+my ($hostfw_conf) = @_;
+
+my $options = $hostfw_conf->{options} || {};
+my $value = $options->{log_nf_conntrack} || 0;
+if (!defined($log_nf_conntrack_enabled)
+   || $value != $log_nf_conntrack_enabled) {
+   my $tmpfile = "$pve_fw_status_dir/log_nf_conntrack";
+   PVE::Tools::file_set_contents($tmpfile, $value);
+
+   PVE::Tools::run_command(['systemctl restart pvefw-logger.service']);
+   $log_nf_conntrack_enabled = $value;
+}
+}
+
 sub remove_pvefw_chains {
 
 PVE::Firewall::remove_pvefw_chains_iptables("iptables");
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH qemu-server] fix #1811: allow VM.Config.HWType to add serial socket

2018-11-30 Thread David Limbeck
allow serial sockets to be added if VM.Config.HWType permission is
satisfied but deny serial devices for anyone other than root
(raise_perm_exc). this allows PVEVMAdmins to add serial consoles.

Signed-off-by: David Limbeck 
---
 PVE/API2/Qemu.pm | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index c82e2fa..8ae4da2 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -310,6 +310,7 @@ my $check_vm_modify_config_perm = sub {
next if PVE::QemuServer::is_valid_drivename($opt);
next if $opt eq 'cdrom';
next if $opt =~ m/^unused\d+$/;
+   next if $opt =~ m/^serial[0-3]$/;
 
if ($cpuoptions->{$opt} || $opt =~ m/^numa\d+$/) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
@@ -1108,6 +1109,16 @@ my $update_vm_api  = sub {
 
 &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [keys 
%$param]);
 
+foreach my $opt (keys %$param) {
+   if ($opt =~ m/serial[0-3]/) {
+   if ($param->{$opt} eq 'socket') {
+   $rpcenv->check_vm_perm($authuser, $vmid, undef, 
['VM.Config.HWType']);
+   } else {
+   raise_perm_exc('user root@pam required for serial devices') if 
($authuser ne 'root@pam');
+   }
+   }
+}
+
 &$check_storage_access($rpcenv, $authuser, $storecfg, $vmid, $param);
 
 my $updatefn =  sub {
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH i18n] update german translation

2018-11-29 Thread David Limbeck


  
  #: pve-manager/www/manager6/node/LVMThin.js:109

  msgid "Metadata Used"
-msgstr ""
+msgstr "Metadata Verbraucht"

maybe lowercase 'v' -> 'verbraucht'?
  
  #: proxmox-widget-toolkit/node/Tasks.js:133

  #: proxmox-widget-toolkit/window/TaskViewer.js:133
@@ -2902,7 +2872,7 @@ msgstr "Keine gültige Host-Liste"
  
  #: pve-manager/www/manager6/lxc/SnapshotTree.js:169

  msgid "Note: Rollback stops CT"
-msgstr ""
+msgstr "Achtung: Rollback stoppt CT"

'Hinweis'?
  
  #: proxmox-widget-toolkit/Toolkit.js:93

  msgid "letter"
@@ -5423,9 +5380,8 @@ msgid "unlimited"
  msgstr "unbegrenzt"
  
  #: pve-manager/www/manager6/lxc/FeaturesEdit.js:60

-#, fuzzy
  msgid "unprivileged only"
-msgstr "Unprivilegierter Container"
+msgstr "nur unpriviligiert"

'unprivilegiert'


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH v2 firewall 1/2] add connection tracking via libnetfilter_conntrack

2018-11-28 Thread David Limbeck
For timestamps to show, '/proc/sys/net/netfilter/nf_conntrack_timestamp' 
has to be set to 1. (This adds start and end timestamps when a connection is 
destroyed.)


On 11/28/18 12:05 PM, David Limbeck wrote:

adds connection tracking (NEW, DESTROY) to the pvefw-logger so beginning
and end of sessions can be tracked

Signed-off-by: David Limbeck 
---
  src/pvefw-logger.c | 45 +
  1 file changed, 45 insertions(+)

diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 2bd869c..c8693c8 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -40,6 +40,7 @@
  #include 
  #include 
  #include 
+#include 
  #include 
  #include 
  #include 
@@ -53,6 +54,7 @@
  
  static struct nflog_handle *logh = NULL;

  static struct nlif_handle *nlifh = NULL;
+static struct nfct_handle *nfcth = NULL;
  GMainLoop *main_loop;
  
  gboolean foreground = FALSE;

@@ -917,6 +919,36 @@ signal_read_cb(GIOChannel *source,
  return TRUE;
  }
  
+static int

+nfct_cb(const struct nlmsghdr *nlh,
+enum nf_conntrack_msg_type type,
+struct nf_conntrack *ct,
+void *data)
+{
+char buf[LE_MAX];
+nfct_snprintf(buf, LE_MAX, ct, type, NFCT_O_DEFAULT, 
NFCT_OF_SHOW_LAYER3|NFCT_OF_TIMESTAMP);
+
+struct log_entry *le = g_new0(struct log_entry, 1);
+LEPRINTF("%s\n", &buf[0]);
+
+queue_log_entry(le);
+
+return NFCT_CB_STOP;
+}
+
+static gboolean
+nfct_read_cb(GIOChannel *source,
+ GIOCondition condition,
+ gpointer data)
+{
+int res;
+if ((res = nfct_catch(nfcth)) < 0) {
+log_status_message(3, "error catching nfct");
+return FALSE;
+}
+return TRUE;
+}
+
  int
  main(int argc, char *argv[])
  {
@@ -1017,6 +1049,11 @@ main(int argc, char *argv[])
  exit(-1);
  }
  
+if ((nfcth = nfct_open(CONNTRACK, NF_NETLINK_CONNTRACK_NEW|NF_NETLINK_CONNTRACK_DESTROY)) == NULL) {

+   fprintf(stderr, "unable to open netfilter conntrack\n");
+   exit(-1);
+}
+
  sigset_t mask;
  sigemptyset(&mask);
  sigaddset(&mask, SIGINT);
@@ -1076,6 +1113,11 @@ main(int argc, char *argv[])
  
  g_io_add_watch(nflog_ch, G_IO_IN, nflog_read_cb, NULL);
  
+nfct_callback_register2(nfcth, NFCT_T_NEW|NFCT_T_DESTROY, &nfct_cb, NULL);

+int nfctfd = nfct_fd(nfcth);
+GIOChannel *nfct_ch = g_io_channel_unix_new(nfctfd);
+g_io_add_watch(nfct_ch, G_IO_IN, nfct_read_cb, NULL);
+
  GIOChannel *sig_ch = g_io_channel_unix_new(sigfd);
  if (!g_io_add_watch(sig_ch, G_IO_IN, signal_read_cb, NULL)) {
  exit(-1);
@@ -1093,6 +1135,9 @@ main(int argc, char *argv[])
  
  close(outfd);
  
+nfct_callback_unregister2(nfcth);

+nfct_close(nfcth);
+
  nflog_close(logh);
  
  if (wrote_pidfile)


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH v2 firewall 1/2] add connection tracking via libnetfilter_conntrack

2018-11-28 Thread David Limbeck

Ah yes, I forgot: simply the 2nd patch (build-depends) was added

On 11/28/18 12:12 PM, Thomas Lamprecht wrote:

On 11/28/18 12:05 PM, David Limbeck wrote:

adds connection tracking (NEW, DESTROY) to the pvefw-logger so beginning
and end of sessions can be tracked

Signed-off-by: David Limbeck 
---

please document the changes from the last version here; I don't see anything
here or in the new 2/2 patch...


  src/pvefw-logger.c | 45 +
  1 file changed, 45 insertions(+)



___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v2 firewall 2/2] add libnetfilter-conntrack-dev to Build-Depends

2018-11-28 Thread David Limbeck
Signed-off-by: David Limbeck 
---
 debian/control | 1 +
 1 file changed, 1 insertion(+)

diff --git a/debian/control b/debian/control
index a68a81c..2a92b16 100644
--- a/debian/control
+++ b/debian/control
@@ -5,6 +5,7 @@ Maintainer: Proxmox Support Team 
 Build-Depends: debhelper (>= 7.0.50~),
dh-systemd,
libglib2.0-dev,
+   libnetfilter-conntrack-dev,
libnetfilter-log-dev,
libpve-common-perl,
pve-cluster,
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH v2 firewall 1/2] add connection tracking via libnetfilter_conntrack

2018-11-28 Thread David Limbeck
adds connection tracking (NEW, DESTROY) to the pvefw-logger so beginning
and end of sessions can be tracked

Signed-off-by: David Limbeck 
---
 src/pvefw-logger.c | 45 +
 1 file changed, 45 insertions(+)

diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 2bd869c..c8693c8 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -53,6 +54,7 @@
 
 static struct nflog_handle *logh = NULL;
 static struct nlif_handle *nlifh = NULL;
+static struct nfct_handle *nfcth = NULL;
 GMainLoop *main_loop;
 
 gboolean foreground = FALSE;
@@ -917,6 +919,36 @@ signal_read_cb(GIOChannel *source,
 return TRUE;
 }
 
+static int
+nfct_cb(const struct nlmsghdr *nlh,
+enum nf_conntrack_msg_type type,
+struct nf_conntrack *ct,
+void *data)
+{
+char buf[LE_MAX];
+nfct_snprintf(buf, LE_MAX, ct, type, NFCT_O_DEFAULT, NFCT_OF_SHOW_LAYER3|NFCT_OF_TIMESTAMP);
+
+struct log_entry *le = g_new0(struct log_entry, 1);
+LEPRINTF("%s\n", &buf[0]);
+
+queue_log_entry(le);
+
+return NFCT_CB_STOP;
+}
+
+static gboolean
+nfct_read_cb(GIOChannel *source,
+ GIOCondition condition,
+ gpointer data)
+{
+int res;
+if ((res = nfct_catch(nfcth)) < 0) {
+log_status_message(3, "error catching nfct");
+return FALSE;
+}
+return TRUE;
+}
+
 int
 main(int argc, char *argv[])
 {
@@ -1017,6 +1049,11 @@ main(int argc, char *argv[])
 exit(-1);
 }
 
+if ((nfcth = nfct_open(CONNTRACK, NF_NETLINK_CONNTRACK_NEW|NF_NETLINK_CONNTRACK_DESTROY)) == NULL) {
+   fprintf(stderr, "unable to open netfilter conntrack\n");
+   exit(-1);
+}
+
 sigset_t mask;
 sigemptyset(&mask);
 sigaddset(&mask, SIGINT);
@@ -1076,6 +1113,11 @@ main(int argc, char *argv[])
 
 g_io_add_watch(nflog_ch, G_IO_IN, nflog_read_cb, NULL);
 
+nfct_callback_register2(nfcth, NFCT_T_NEW|NFCT_T_DESTROY, &nfct_cb, NULL);
+int nfctfd = nfct_fd(nfcth);
+GIOChannel *nfct_ch = g_io_channel_unix_new(nfctfd);
+g_io_add_watch(nfct_ch, G_IO_IN, nfct_read_cb, NULL);
+
 GIOChannel *sig_ch = g_io_channel_unix_new(sigfd);
 if (!g_io_add_watch(sig_ch, G_IO_IN, signal_read_cb, NULL)) {
 exit(-1);
@@ -1093,6 +1135,9 @@ main(int argc, char *argv[])
 
 close(outfd);
 
+nfct_callback_unregister2(nfcth);
+nfct_close(nfcth);
+
 nflog_close(logh);
 
 if (wrote_pidfile)
-- 
2.11.0




[pve-devel] [PATCH firewall] add connection tracking via libnetfilter_conntrack

2018-11-28 Thread David Limbeck
adds connection tracking (NEW, DESTROY) to the pvefw-logger so beginning
and end of sessions can be tracked

Signed-off-by: David Limbeck 
---
 src/pvefw-logger.c | 45 +
 1 file changed, 45 insertions(+)

diff --git a/src/pvefw-logger.c b/src/pvefw-logger.c
index 2bd869c..c8693c8 100644
--- a/src/pvefw-logger.c
+++ b/src/pvefw-logger.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -53,6 +54,7 @@
 
 static struct nflog_handle *logh = NULL;
 static struct nlif_handle *nlifh = NULL;
+static struct nfct_handle *nfcth = NULL;
 GMainLoop *main_loop;
 
 gboolean foreground = FALSE;
@@ -917,6 +919,36 @@ signal_read_cb(GIOChannel *source,
 return TRUE;
 }
 
+static int
+nfct_cb(const struct nlmsghdr *nlh,
+enum nf_conntrack_msg_type type,
+struct nf_conntrack *ct,
+void *data)
+{
+char buf[LE_MAX];
+nfct_snprintf(buf, LE_MAX, ct, type, NFCT_O_DEFAULT, NFCT_OF_SHOW_LAYER3|NFCT_OF_TIMESTAMP);
+
+struct log_entry *le = g_new0(struct log_entry, 1);
+LEPRINTF("%s\n", &buf[0]);
+
+queue_log_entry(le);
+
+return NFCT_CB_STOP;
+}
+
+static gboolean
+nfct_read_cb(GIOChannel *source,
+ GIOCondition condition,
+ gpointer data)
+{
+int res;
+if ((res = nfct_catch(nfcth)) < 0) {
+log_status_message(3, "error catching nfct");
+return FALSE;
+}
+return TRUE;
+}
+
 int
 main(int argc, char *argv[])
 {
@@ -1017,6 +1049,11 @@ main(int argc, char *argv[])
 exit(-1);
 }
 
+if ((nfcth = nfct_open(CONNTRACK, NF_NETLINK_CONNTRACK_NEW|NF_NETLINK_CONNTRACK_DESTROY)) == NULL) {
+   fprintf(stderr, "unable to open netfilter conntrack\n");
+   exit(-1);
+}
+
 sigset_t mask;
 sigemptyset(&mask);
 sigaddset(&mask, SIGINT);
@@ -1076,6 +1113,11 @@ main(int argc, char *argv[])
 
 g_io_add_watch(nflog_ch, G_IO_IN, nflog_read_cb, NULL);
 
+nfct_callback_register2(nfcth, NFCT_T_NEW|NFCT_T_DESTROY, &nfct_cb, NULL);
+int nfctfd = nfct_fd(nfcth);
+GIOChannel *nfct_ch = g_io_channel_unix_new(nfctfd);
+g_io_add_watch(nfct_ch, G_IO_IN, nfct_read_cb, NULL);
+
 GIOChannel *sig_ch = g_io_channel_unix_new(sigfd);
 if (!g_io_add_watch(sig_ch, G_IO_IN, signal_read_cb, NULL)) {
 exit(-1);
@@ -1093,6 +1135,9 @@ main(int argc, char *argv[])
 
 close(outfd);
 
+nfct_callback_unregister2(nfcth);
+nfct_close(nfcth);
+
 nflog_close(logh);
 
 if (wrote_pidfile)
-- 
2.11.0




Re: [pve-devel] [PATCH v2 qemu-server 0/7] online vm migration to external cluster

2018-11-23 Thread David Limbeck
Sorry for taking so long. I will look through the patch series in detail 
after I'm done with the firewall logger. Should be sometime next week.


On 11/20/18 1:23 AM, Alexandre Derumier wrote:

This add support to migrate a vm online to an different external cluster.
(This a rework of a previous patch series sent 2years ago)


qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] [--targetbridge otherbridge]


targetstorage and targetbridge are optional; if not defined, the same name as on
the source is used.

targetremotenode is an FQDN host from another cluster.
(The source node must be able to root ssh to the target node with a public key.)

The source VM is currently not deleted, for safety; it is just stopped and the
migrate lock is kept to avoid restarting it.

One private SSH key needs to be created for each targetremotenode in:
/etc/pve/priv/migrate_external/id_rsa_mytargetnode
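
For example, such a key could be generated and installed roughly like this
(illustrative host name, not part of the patch series itself):

    ssh-keygen -t rsa -f /etc/pve/priv/migrate_external/id_rsa_mytargetnode -N ''
    ssh-copy-id -i /etc/pve/priv/migrate_external/id_rsa_mytargetnode.pub root@mytargetnode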


Changelog v2:
  - add migrate_external api2 (please check the api root permission)
  - add targetbridge option
  - fix targetremotenode fqdn resolution
  - use pvesh to get nextvmid on remote cluster
  - add sshkey
  - add missing "\n" in some die messages.

  




Alexandre Derumier (7):
   api2 : add migrate_vm_external
   migrate : prepare : add create_vm for external migration
   migrate : phase1 : skip sync_disk for external migration
   migrate : phase2 : migrate external
   migrate : phase2_cleanup : migrate_external
   migrate : phase3_cleanup : migrate_external
   migrate: add sshkey in /etc/pve/priv/migrate_external/

  PVE/API2/Qemu.pm   | 104 --
  PVE/CLI/qm.pm  |   2 +
  PVE/QemuMigrate.pm | 160 -
  PVE/QemuServer.pm  |  20 +--
  4 files changed, 251 insertions(+), 35 deletions(-)





Re: [pve-devel] pve-firewall : log conntrack sessions ?

2018-11-21 Thread David Limbeck

Will look into it.

On 11/21/18 7:50 AM, Alexandre DERUMIER wrote:

Hi,

I'm currently planning to finally use the Proxmox firewall in production next year,

and the missing piece is session logging (creation in conntrack, end in conntrack).

It's currently possible with ulogd2, but ulogd2 doesn't start while the pve fw
logger is running.


I have found a blog about it:

https://home.regit.org/2014/02/logging-connection-tracking-event-with-ulogd/


You need to enable:

echo "1"> /proc/sys/net/netfilter/nf_conntrack_acct
echo "1"> /proc/sys/net/netfilter/nf_conntrack_timestamp

then ulogd2 listens for 2 netlink events:

NF_NETLINK_CONNTRACK_NEW: 0x0001
NF_NETLINK_CONNTRACK_DESTROY: 0x0004

https://git.netfilter.org/ulogd2/tree/input/flow/ulogd_inpflow_NFCT.c


I'm pretty poor in C; do you know whether it would be difficult to port this
ulogd code into the pve fw logger?
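
For reference, the core event loop is quite small. A minimal standalone sketch
using libnetfilter_conntrack's plain callback API (buffer size, names and
output flags are illustrative; error handling kept minimal):

    #include <stdio.h>
    #include <libnetfilter_conntrack/libnetfilter_conntrack.h>

    /* called once per conntrack event; print one line per event */
    static int event_cb(enum nf_conntrack_msg_type type,
                        struct nf_conntrack *ct, void *data)
    {
        char buf[1024];
        nfct_snprintf(buf, sizeof(buf), ct, type, NFCT_O_DEFAULT, NFCT_OF_TIMESTAMP);
        printf("%s\n", buf);
        return NFCT_CB_CONTINUE;
    }

    int main(void)
    {
        /* subscribe to NEW and DESTROY conntrack events only */
        struct nfct_handle *h = nfct_open(CONNTRACK,
                NF_NETLINK_CONNTRACK_NEW | NF_NETLINK_CONNTRACK_DESTROY);
        if (!h) {
            perror("nfct_open");
            return 1;
        }
        nfct_callback_register(h, NFCT_T_NEW | NFCT_T_DESTROY, event_cb, NULL);
        nfct_catch(h);   /* blocks; dispatches every event to event_cb */
        nfct_close(h);
        return 0;
    }

Built with 'gcc ctmon.c -lnetfilter_conntrack' and run as root, this prints one
line per new/ended flow; the pvefw-logger patches in this thread do essentially
the same, but integrated into the existing GLib main loop and log queue.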



[pve-devel] [RFC docs] fix #1975: extend 'My Settings' documentation

2018-11-13 Thread David Limbeck
add a section for 'My Settings' documenting Dashboard Storages, clear
user name, reset layout and xterm.js settings

Signed-off-by: David Limbeck 
---
 pve-gui.adoc | 31 +--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/pve-gui.adoc b/pve-gui.adoc
index 76262bf..78a2294 100644
--- a/pve-gui.adoc
+++ b/pve-gui.adoc
@@ -101,8 +101,6 @@ search bar nearside you can search for specific objects (VMs,
 containers, nodes, ...). This is sometimes faster than selecting an
 object in the resource tree.
 
-[thumbnail="screenshot/gui-my-settings.png"]
-
 To the right of the search bar we see the identity (login name). The
 gear symbol is a button opening the 'My Settings' dialog. There you
 can customize some client side user interface setting (reset the saved
@@ -120,6 +118,35 @@ Create CT :: Open the container creation wizard.
 Logout :: Logout, and show the login dialog again.
 
 
+[[gui_my_settings]]
+My Settings
+~~~
+
+[thumbnail="screenshot/gui-my-settings.png"]
+
+The 'My Settings' window allows you to set locally stored settings. These
+include the 'Dashboard Storages' which allow you to enable or disable specific
+storages to be counted towards the total amount visible in the datacenter
+summary. If no storage is checked the total is the sum of all storages, same
+as enabling every single one.
+
+Below the dashboard settings you find the stored user name and a button to
+clear it as well as a button to reset every layout in the GUI to its default.
+
+On the right side there are 'xterm.js Settings'. These contain the following
+options:
+
+[horizontal]
+Font-Family :: The font to be used in xterm.js (e.g. Arial).
+
+Font-Size :: The preferred font size to be used.
+
+Letter Spacing :: Increases or decreases spacing between letters in text.
+
+Line Height :: Specify the absolute height of a line.
+
+
+
 Resource Tree
 ~
 
-- 
2.11.0




[pve-devel] [RFC docs/manager 0/2] fix #1975: extend 'My Settings' docs

2018-11-13 Thread David Limbeck
RFC for following reasons:
  1. unsure about wording
  2. better name for [[gui_my_settings]]?
  3. thumbnail out of date
  4. anything missing?

David Limbeck (1):
  fix #1975: extend 'My Settings' documentation

 pve-gui.adoc | 31 +--
 1 file changed, 29 insertions(+), 2 deletions(-)

David Limbeck (1):
  add help button to 'My Settings' window

 www/manager6/window/Settings.js | 19 +--
 1 file changed, 13 insertions(+), 6 deletions(-)

-- 
2.11.0




[pve-devel] [RFC manager] add help button to 'My Settings' window

2018-11-13 Thread David Limbeck
link to 'gui_my_settings' in docs

Signed-off-by: David Limbeck 
---
 www/manager6/window/Settings.js | 19 +--
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/www/manager6/window/Settings.js b/www/manager6/window/Settings.js
index ef9dc021..1a4d8599 100644
--- a/www/manager6/window/Settings.js
+++ b/www/manager6/window/Settings.js
@@ -8,12 +8,20 @@ Ext.define('PVE.window.Settings', {
 bodyPadding: 10,
 resizable: false,
 
-buttons: [{
-   text: gettext('Close'),
-   handler: function() {
-   this.up('window').close();
+buttons: [
+   {
+   xtype: 'proxmoxHelpButton',
+   onlineHelp: 'gui_my_settings',
+   hidden: false
+   },
+   '->',
+   {
+   text: gettext('Close'),
+   handler: function() {
+   this.up('window').close();
+   }
}
-}],
+],
 
 layout: {
type: 'hbox',
@@ -317,6 +325,5 @@ Ext.define('PVE.window.Settings', {
 onShow: function() {
var me = this;
me.callParent();
-
 }
 });
-- 
2.11.0




[pve-devel] [PATCH container] allow fedora 29

2018-11-09 Thread David Limbeck
tested start/stop of container as well as ipv4 and ipv6 static addresses

Signed-off-by: David Limbeck 
---
 src/PVE/LXC/Setup/Fedora.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/PVE/LXC/Setup/Fedora.pm b/src/PVE/LXC/Setup/Fedora.pm
index 5262347..ed2476b 100644
--- a/src/PVE/LXC/Setup/Fedora.pm
+++ b/src/PVE/LXC/Setup/Fedora.pm
@@ -11,7 +11,7 @@ sub new {
 my ($class, $conf, $rootdir, $os_release) = @_;
 
 my $version = $os_release->{VERSION_ID};
-die "unsupported fedora release\n" if !($version >= 22 && $version <= 28);
+die "unsupported fedora release\n" if !($version >= 22 && $version <= 29);
 
 my $self = { conf => $conf, rootdir => $rootdir, version => $version };
 
-- 
2.11.0




[pve-devel] [PATCH manager 1/2] fix sorting for unused disks in HardwareView.js

2018-11-09 Thread David Limbeck
sort everything based on group and order for those with multiple numbered
entries

Signed-off-by: David Limbeck 
---
Refactoring the sort function into Utils.js is not easily possible, as we're
using a closure. Maybe add it to ObjectGrid so it is inherited?

 www/manager6/qemu/HardwareView.js | 40 +++
 1 file changed, 32 insertions(+), 8 deletions(-)

diff --git a/www/manager6/qemu/HardwareView.js b/www/manager6/qemu/HardwareView.js
index f2a3e244..814f67c4 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -53,6 +53,7 @@ Ext.define('PVE.qemu.HardwareView', {
never_delete: true,
defaultValue: '512',
tdCls: 'pve-itype-icon-memory',
+   group: 2,
multiKey: ['memory', 'balloon', 'shares'],
	renderer: function(value, metaData, record, ri, ci, store, pending) {
	    var res = '';
@@ -81,6 +82,7 @@ Ext.define('PVE.qemu.HardwareView', {
	    editor: (caps.vms['VM.Config.CPU'] || caps.vms['VM.Config.HWType']) ? 'PVE.qemu.ProcessorEdit' : undefined,
	    tdCls: 'pve-itype-icon-processor',
+	    group: 3,
	    defaultValue: '1',
	    multiKey: ['sockets', 'cpu', 'cores', 'numa', 'vcpus', 'cpulimit', 'cpuunits'],
	    renderer: function(value, metaData, record, rowIndex, colIndex, store, pending) {
@@ -124,6 +126,7 @@ Ext.define('PVE.qemu.HardwareView', {
	    never_delete: true,
	    editor: caps.vms['VM.Config.Options'] ? 'PVE.qemu.KeyboardEdit' : undefined,
	    tdCls: 'pve-itype-icon-keyboard',
+	    group: 1,
	    defaultValue: '',
	    renderer: PVE.Utils.render_kvm_language
	},
@@ -132,6 +135,7 @@ Ext.define('PVE.qemu.HardwareView', {
	    editor: caps.vms['VM.Config.HWType'] ? 'PVE.qemu.DisplayEdit' : undefined,
never_delete: true,
tdCls: 'pve-itype-icon-display',
+   group:4,
defaultValue: '',
renderer: PVE.Utils.render_kvm_vga_driver   
},
@@ -170,7 +174,7 @@ Ext.define('PVE.qemu.HardwareView', {
PVE.Utils.forEachBus(undefined, function(type, id) {
var confid = type + id;
rows[confid] = {
-   group: 1,
+   group: 5,
tdCls: 'pve-itype-icon-storage',
editor: 'PVE.qemu.HDEdit',
never_delete: caps.vms['VM.Config.Disk'] ? false : true,
@@ -182,7 +186,8 @@ Ext.define('PVE.qemu.HardwareView', {
for (i = 0; i < 32; i++) {
confid = "net" + i.toString();
rows[confid] = {
-   group: 2,
+   group: 6,
+   order: i,
tdCls: 'pve-itype-icon-network',
	editor: caps.vms['VM.Config.Network'] ? 'PVE.qemu.NetworkEdit' : undefined,
never_delete: caps.vms['VM.Config.Network'] ? false : true,
@@ -190,7 +195,7 @@ Ext.define('PVE.qemu.HardwareView', {
};
}
rows.efidisk0 = {
-   group: 3,
+   group: 7,
tdCls: 'pve-itype-icon-storage',
editor: null,
never_delete: caps.vms['VM.Config.Disk'] ? false : true,
@@ -199,7 +204,8 @@ Ext.define('PVE.qemu.HardwareView', {
for (i = 0; i < 5; i++) {
confid = "usb" + i.toString();
rows[confid] = {
-   group: 4,
+   group: 8,
+   order: i,
tdCls: 'pve-itype-icon-usb',
	editor: caps.nodes['Sys.Console'] ? 'PVE.qemu.USBEdit' : undefined,
never_delete: caps.nodes['Sys.Console'] ? false : true,
@@ -209,7 +215,8 @@ Ext.define('PVE.qemu.HardwareView', {
for (i = 0; i < 4; i++) {
confid = "hostpci" + i.toString();
rows[confid] = {
-   group: 5,
+   group: 9,
+   order: i,
tdCls: 'pve-itype-icon-pci',
never_delete: caps.nodes['Sys.Console'] ? false : true,
header: gettext('PCI Device') + ' (' + confid + ')'
@@ -218,7 +225,8 @@ Ext.define('PVE.qemu.HardwareView', {
for (i = 0; i < 4; i++) {
confid = "serial" + i.

[pve-devel] [PATCH manager 2/2] fix mount point/unused disk sorting in Resources.js

2018-11-09 Thread David Limbeck
use the same sort function as for VMs based on group and order

Signed-off-by: David Limbeck 
---
 www/manager6/lxc/Resources.js | 39 ---
 1 file changed, 36 insertions(+), 3 deletions(-)

diff --git a/www/manager6/lxc/Resources.js b/www/manager6/lxc/Resources.js
index 4e38042a..bfdc3dca 100644
--- a/www/manager6/lxc/Resources.js
+++ b/www/manager6/lxc/Resources.js
@@ -40,6 +40,7 @@ Ext.define('PVE.lxc.RessourceView', {
	editor: caps.vms['VM.Config.Memory'] ? 'PVE.lxc.MemoryEdit' : undefined,
defaultValue: 512,
tdCls: 'pve-itype-icon-memory',
+   group: 1,
renderer: function(value) {
return Proxmox.Utils.format_size(value*1024*1024);
}
@@ -49,6 +50,7 @@ Ext.define('PVE.lxc.RessourceView', {
	editor: caps.vms['VM.Config.Memory'] ? 'PVE.lxc.MemoryEdit' : undefined,
defaultValue: 512,
tdCls: 'pve-itype-icon-swap',
+   group: 2,
renderer: function(value) {
return Proxmox.Utils.format_size(value*1024*1024);
}
@@ -58,6 +60,7 @@ Ext.define('PVE.lxc.RessourceView', {
	editor: caps.vms['VM.Config.CPU'] ? 'PVE.lxc.CPUEdit' : undefined,
defaultValue: '',
tdCls: 'pve-itype-icon-processor',
+   group: 3,
renderer: function(value) {
var cpulimit = me.getObjectValue('cpulimit');
var cpuunits = me.getObjectValue('cpuunits');
@@ -82,7 +85,8 @@ Ext.define('PVE.lxc.RessourceView', {
header: gettext('Root Disk'),
defaultValue: Proxmox.Utils.noneText,
editor: mpeditor,
-   tdCls: 'pve-itype-icon-storage'
+   tdCls: 'pve-itype-icon-storage',
+   group: 4
},
cpulimit: {
visible: false
@@ -97,14 +101,17 @@ Ext.define('PVE.lxc.RessourceView', {
 
PVE.Utils.forEachMP(function(bus, i) {
confid = bus + i;
-   var  header;
+   var group = 5;
+   var header;
if (bus === 'mp') {
header = gettext('Mount Point') + ' (' + confid + ')';
} else {
header = gettext('Unused Disk') + ' ' + i;
+   group += 1;
}
rows[confid] = {
-   group: 1,
+   group: group,
+   order: i,
tdCls: 'pve-itype-icon-storage',
editor: mpeditor,
header: header
@@ -237,6 +244,31 @@ Ext.define('PVE.lxc.RessourceView', {
 
};

+   var sorterFn = function(rec1, rec2) {
+   var v1 = rec1.data.key;
+   var v2 = rec2.data.key;
+   var g1 = rows[v1].group || 0;
+   var g2 = rows[v2].group || 0;
+   var order1 = rows[v1].order || 0;
+   var order2 = rows[v2].order || 0;
+
+   if ((g1 - g2) !== 0) {
+   return g1 - g2;
+   }
+
+   if ((order1 - order2) !== 0) {
+   return order1 - order2;
+   }
+
+   if (v1 > v2) {
+   return 1;
+   } else if (v1 < v2) {
+   return -1;
+   } else {
+   return 0;
+   }
+   }
+
Ext.apply(me, {
url: '/api2/json/' + baseurl,
selModel: me.selModel,
@@ -269,6 +301,7 @@ Ext.define('PVE.lxc.RessourceView', {
move_btn
],
rows: rows,
+   sorterFn: sorterFn,
editorConfig: {
pveSelNode: me.pveSelNode,
url: '/api2/extjs/' + baseurl
-- 
2.11.0




Re: [pve-devel] [PATCH qemu-server 5/7] migrate : phase2 : migrate external

2018-11-07 Thread David Limbeck

one more thing inline

On 11/7/18 4:29 PM, David Limbeck wrote:

comments inline

On 10/29/18 4:38 PM, Alexandre Derumier wrote:

---
  PVE/API2/Qemu.pm   |  4 ++--
  PVE/QemuMigrate.pm | 21 +
  PVE/QemuServer.pm  | 24 +---
  3 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index ac8b907..509747c 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -1927,7 +1927,7 @@ __PACKAGE__->register_method({
  migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
  migration_type => {
  type => 'string',
-    enum => ['secure', 'insecure'],
+    enum => ['secure', 'insecure', 'external'],
  description => "Migration traffic is encrypted using an SSH " .
    "tunnel by default. On secure, completely private networks " .
    "this can be disabled to increase performance.",
@@ -1987,7 +1987,7 @@ __PACKAGE__->register_method({
  if $targetstorage && $authuser ne 'root@pam';
    raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
-    if $targetstorage && !$migratedfrom;
+    if $targetstorage && !$migratedfrom && !($migration_type && $migration_type eq 'external');

    # read spice ticket from STDIN
  my $spice_ticket;
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 3e50f07..6eb629b 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -591,7 +591,9 @@ sub phase2 {
    my $conf = $self->{vmconf};
-    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");
+    my $targetvmid = $self->{opts}->{targetvmid} ? $self->{opts}->{targetvmid} : $vmid;
+
+    $self->log('info', "starting VM $targetvmid on remote node '$self->{node}'");
    my $raddr;
  my $rport;
@@ -607,11 +609,13 @@ sub phase2 {
  $spice_ticket = $res->{ticket};
  }
-    push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;
-
  my $migration_type = $self->{opts}->{migration_type};
-    push @$cmd, '--migration_type', $migration_type;
+    push @$cmd , 'qm', 'start', $targetvmid, '--skiplock';
+
+    push @$cmd, '--migratedfrom', $nodename if !$self->{migration_external};
+
+    push @$cmd, '--migration_type', $self->{opts}->{migration_type};

use $migration_type that's already defined instead of $self->{opts}->{migration_type}

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
    if $self->{opts}->{migration_network};
@@ -644,7 +648,7 @@ sub phase2 {
  }
  elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
  $raddr = $1;
-    die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
+    die "Destination UNIX sockets VMID does not match source VMID" if $targetvmid ne $2;

missing '\n'

  $ruri = "unix:$raddr";
  }
  elsif ($line =~ m/^migration listens on port (\d+)$/) {
@@ -674,7 +678,7 @@ sub phase2 {
    $self->log('info', "start remote tunnel");
-    if ($migration_type eq 'secure') {
+    if ($migration_type eq 'secure' || $migration_type eq 'external') {
    if ($ruri =~ /^unix:/) {
  unlink $raddr;
@@ -714,13 +718,14 @@ sub phase2 {
    my $start = time();
-    if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
+    if (($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) || $self->{migration_external}) {
  $self->{storage_migration} = 1;
  $self->{storage_migration_jobs} = {};
  $self->log('info', "starting storage migration");
    die "The number of local disks does not match between the source and the destination.\n"
-    if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
+    if !$self->{migration_external} && (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
+
  foreach my $drive (keys %{$self->{target_drive}}){
  my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
  $self->log('info', "$drive: start migration to $nbd_uri");
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 933f54f..37dcf5f 100644
--- a/PVE/QemuServer.p

Re: [pve-devel] [PATCH qemu-server 5/7] migrate : phase2 : migrate external

2018-11-07 Thread David Limbeck

comments inline

On 10/29/18 4:38 PM, Alexandre Derumier wrote:

---
  PVE/API2/Qemu.pm   |  4 ++--
  PVE/QemuMigrate.pm | 21 +
  PVE/QemuServer.pm  | 24 +---
  3 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index ac8b907..509747c 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -1927,7 +1927,7 @@ __PACKAGE__->register_method({
migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
migration_type => {
type => 'string',
-   enum => ['secure', 'insecure'],
+   enum => ['secure', 'insecure', 'external'],
description => "Migration traffic is encrypted using an SSH " .
  "tunnel by default. On secure, completely private networks " .
  "this can be disabled to increase performance.",
@@ -1987,7 +1987,7 @@ __PACKAGE__->register_method({
if $targetstorage && $authuser ne 'root@pam';
  
  	raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })

-   if $targetstorage && !$migratedfrom;
+   if $targetstorage && !$migratedfrom && !($migration_type && 
$migration_type eq 'external');
  
  	# read spice ticket from STDIN

my $spice_ticket;
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 3e50f07..6eb629b 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -591,7 +591,9 @@ sub phase2 {
  
  my $conf = $self->{vmconf};
  
-$self->log('info', "starting VM $vmid on remote node '$self->{node}'");

+my $targetvmid = $self->{opts}->{targetvmid} ? $self->{opts}->{targetvmid} 
: $vmid;
+
+$self->log('info', "starting VM $targetvmid on remote node 
'$self->{node}'");
  
  my $raddr;

  my $rport;
@@ -607,11 +609,13 @@ sub phase2 {
$spice_ticket = $res->{ticket};
  }
  
-push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;
-
  my $migration_type = $self->{opts}->{migration_type};
  
-push @$cmd, '--migration_type', $migration_type;
+push @$cmd , 'qm', 'start', $targetvmid, '--skiplock';
+
+push @$cmd, '--migratedfrom', $nodename if !$self->{migration_external};
+
+push @$cmd, '--migration_type', $self->{opts}->{migration_type};
  
  push @$cmd, '--migration_network', $self->{opts}->{migration_network}
	if $self->{opts}->{migration_network};
@@ -644,7 +648,7 @@ sub phase2 {
}
	elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
$raddr = $1;
-   die "Destination UNIX sockets VMID does not match source VMID" if 
$vmid ne $2;
+   die "Destination UNIX sockets VMID does not match source VMID" if 
$targetvmid ne $2;

missing '\n'

$ruri = "unix:$raddr";
}
elsif ($line =~ m/^migration listens on port (\d+)$/) {
@@ -674,7 +678,7 @@ sub phase2 {
  
  $self->log('info', "start remote tunnel");
  
-if ($migration_type eq 'secure') {
+if ($migration_type eq 'secure' || $migration_type eq 'external') {
  
  	if ($ruri =~ /^unix:/) {

unlink $raddr;
@@ -714,13 +718,14 @@ sub phase2 {
  
  my $start = time();
  
-if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
+if (($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) || $self->{migration_external}) {
$self->{storage_migration} = 1;
$self->{storage_migration_jobs} = {};
$self->log('info', "starting storage migration");
  
  	die "The number of local disks does not match between the source and the destination.\n"

-   if (scalar(keys %{$self->{target_drive}}) != scalar 
@{$self->{online_local_volumes}});
+   if !$self->{migration_external} && (scalar(keys 
%{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
+
foreach my $drive (keys %{$self->{target_drive}}){
my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
$self->log('info', "$drive: start migration to $nbd_uri");
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 933f54f..37dcf5f 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -4820,8 +4820,26 @@ sub vm_start {
$ENV{PVE_MIGRATED_FROM} = $migratedfrom if $migratedfrom;
  
  	my $local_volumes = {};
+   my $external_migration = undef;
  
-	if ($targetstorage) {
+   if ($migration_type && $migration_type eq 'external') {
+   $migration_type = 'secure';
+   $external_migration = 1;
+   }
+
+   if ($external_migration) {
+   foreach_drive($conf, sub {
+   my ($ds, $drive) = @_;
+
+   return if drive_is_cdrom($drive);
+
+   my $volid = $drive->{file};
+
+   return if !$volid;
+
+   $local_volumes->{$ds} = $volid;
+   });
+   } elsif ($targetstorage) {
forea

Re: [pve-devel] [PATCH qemu-server 3/7] migrate : prepare : add create_vm for external migration

2018-11-07 Thread David Limbeck

comments inline

On 10/29/18 4:38 PM, Alexandre Derumier wrote:

Create the VM on the target cluster with the same options. Disks are created
with the same size and options as on the source.
---
  PVE/QemuMigrate.pm | 82 ++
  1 file changed, 77 insertions(+), 5 deletions(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index e9e9075..ce43fc9 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -227,6 +227,83 @@ sub prepare {
}
  }
  
+# test ssh connection

+my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
+eval { $self->cmd_quiet($cmd); };
+die "Can't connect to destination address using public key\n" if $@;
+
+$self->{migration_external} = 1 if $self->{opts}->{migration_type} && $self->{opts}->{migration_type} eq 'external';
+
+if($self->{migration_external}) {
+
+   #get remote nextvmid
+   eval {
+   my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nextvmid'];
+   PVE::Tools::run_command($cmd, outfunc => sub {
+   my $line = shift;
+   if ($line =~ m/^(\d+)/) {
+   $self->{opts}->{targetvmid} = $line;
+   }
+   });
+   };
+if (my $err = $@) {
+$self->log('err', $err);
+$self->{errors} = 1;
+die $err;
+}
+
+   die "can't find the next free vmid on remote cluster" if 
!$self->{opts}->{targetvmid};

missing '\n'

+
+   #create vm
+   my $cmd = [@{$self->{rem_ssh}}, 'qm', 'create', $self->{opts}->{targetvmid}];
+
+   foreach my $opt (keys %{$conf}) {
+   next if $opt =~ m/^(pending|snapshots|digest|parent)/;
+   next if $opt =~ m/^(ide|scsi|virtio)(\d+)/;
+   die "can't migrate unused disk. please remove it before migrate" if 
$opt =~ m/^(unused)(\d+)/;

missing '\n'

+   push @$cmd , "-$opt", $conf->{$opt};
+   }
+
+   PVE::QemuServer::foreach_drive($conf, sub {
+   my ($ds, $drive) = @_;
+
+   if (PVE::QemuServer::drive_is_cdrom($drive, 1)) {
+   push @$cmd , "-$ds", $conf->{$ds};
+   return;
+   }
+
+   my $volid = $drive->{file};
+   return if !$volid;
+
+   my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+   return if !$sid;
+   my $size = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 5);
+   die "can't get size" if !$size;

missing '\n'

+   $size = $size/1024/1024/1024;
+   my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+
+   my $data = { %$drive };
+   delete $data->{$_} for qw(index interface file size);
+   my $drive_conf = "$targetsid:$size";
+foreach my $drive_opt (keys %{$data}) {
+   $drive_conf .= ",$drive_opt=$data->{$drive_opt}";
+   }
+
+   push @$cmd , "-$ds", $drive_conf;
+   });
+
+   push @$cmd , '-lock', 'migrate';
+
+   eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+   if (my $err = $@) {
+   $self->log('err', $err);
+   $self->{errors} = 1;
+   die $err;
+   }
+
+   return 1;
+}
+
  my $vollist = PVE::QemuServer::get_vm_volumes($conf);
  
  my $need_activate = [];

@@ -253,11 +330,6 @@ sub prepare {
  # activate volumes
  PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);
  
-# test ssh connection

-my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
-eval { $self->cmd_quiet($cmd); };
-die "Can't connect to destination address using public key\n" if $@;
-
  return $running;
  }
  




Re: [pve-devel] [PATCH qemu-server 2/7] qm: add nextvmid

2018-11-07 Thread David Limbeck
Why not use pvesh to get the next free id in the cluster instead of adding a
new command?
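
For reference, the cluster API already exposes a free-VMID lookup that pvesh
can query from the command line, along the lines of:

    pvesh get /cluster/nextid

which prints the next free VMID, so no new qm subcommand would be needed.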


On 10/29/18 4:38 PM, Alexandre Derumier wrote:

return the next free vmid
---
  PVE/API2/Qemu.pm |  1 -
  PVE/CLI/qm.pm| 19 +++
  2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 64eaa0e..ac8b907 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -389,7 +389,6 @@ __PACKAGE__->register_method({
  }});
  
  
-

  __PACKAGE__->register_method({
  name => 'create_vm',
  path => '',
diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index 46a7e2f..b1f146d 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -144,6 +144,23 @@ __PACKAGE__->register_method ({
  }});
  
  __PACKAGE__->register_method ({

+name => 'nextvmid',
+path => 'nextvmid',
+method => 'GET',
+description => "Return next free vmid.",
+parameters => {
+   additionalProperties => 0,
+},
+returns => { type => 'null'},
+code => sub {
+   my ($param) = @_;
+
+   my $nextvmid = PVE::Cluster::complete_next_vmid;
+   print $nextvmid->[0];
+   return undef;
+}});
+
+__PACKAGE__->register_method ({
  name => 'status',
  path => 'status',
  method => 'GET',
@@ -896,6 +913,8 @@ our $cmddef = {
  
  mtunnel => [ __PACKAGE__, 'mtunnel', []],
  
+nextvmid => [ __PACKAGE__, 'nextvmid', []],

+
  nbdstop => [ __PACKAGE__, 'nbdstop', ['vmid']],
  
  terminal => [ __PACKAGE__, 'terminal', ['vmid']],




Re: [pve-devel] [PATCH qemu-server 1/7] api2 : migrate_vm : add migration_type "external"

2018-11-07 Thread David Limbeck

Comments inline

On 10/29/18 4:38 PM, Alexandre Derumier wrote:

qm migrate <vmid> <targetremotenode> --migration_type external [--targetstorage monpoolceph_vm] --online


<targetremotenode> still has to obey the format rules of 'pve-node', so no
FQDN or IP is possible. Might need an additional change in pve-common.




Allow to migrate to an external cluster node.
--targetstorage is optionnal, if not defined it's used same storagename than 
source
---
  PVE/API2/Qemu.pm | 26 +++---
  1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 52f4a5f..64eaa0e 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -3046,7 +3046,7 @@ __PACKAGE__->register_method({
},
migration_type => {
type => 'string',
-   enum => ['secure', 'insecure'],
+   enum => ['secure', 'insecure', 'external'],
this seems to conflate 2 different things. 'secure'/'insecure' describe 
how something is transported (encrypted or not) while 'external' 
specifies that it's outside of the cluster.

description => "Migration traffic is encrypted using an SSH tunnel 
by default. On secure, completely private networks this can be disabled to increase 
performance.",
optional => 1,
},
@@ -3085,7 +3085,7 @@ __PACKAGE__->register_method({
  
  	PVE::Cluster::check_cfs_quorum();
  
-	PVE::Cluster::check_node_exists($target);

+   PVE::Cluster::check_node_exists($target) if $param->{migration_type} ne 'external';
  
  	my $targetip = PVE::Cluster::remote_node_ip($target);
  
@@ -3094,6 +3094,13 @@ __PACKAGE__->register_method({

raise_param_exc({ targetstorage => "Live storage migration can only be done 
online." })
if !$param->{online} && $param->{targetstorage};
  
+	raise_param_exc({ migration_type => "external migration can only be done online." })

+   if !$param->{online} && $param->{migration_type} eq 'external';
+
+   raise_param_exc({ migration_type => "HA must be disable for external 
migration." })
+   if PVE::HA::Config::vm_is_ha_managed($vmid) && 
$param->{migration_type} eq 'external';
+
+
raise_param_exc({ force => "Only root may use this option." })
if $param->{force} && $authuser ne 'root@pam';
  
@@ -3114,17 +3121,22 @@ __PACKAGE__->register_method({

if (PVE::QemuServer::check_running($vmid)) {
die "cant migrate running VM without --online\n"
if !$param->{online};
+   } else {
+   die "vm need to be online for external migration" if 
$param->{migration_type} eq 'external';

missing '\n' at the end of the string

}
  
  	my $storecfg = PVE::Storage::config();
  
-	if( $param->{targetstorage}) {
-   PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
-} else {
-   PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+   if ($param->{migration_type} ne 'external') {
+
+   if( $param->{targetstorage} ) {
+   PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+   } else {
+   PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+   }
	}
  
-	if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+   if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha' && $param->{migration_type} ne 'external') {
  
  	my $hacmd = sub {

my $upid = shift;




Re: [pve-devel] [PATCH qemu-server 0/7] online vm migration to external cluster

2018-11-07 Thread David Limbeck

Tested it, and overall it seems to work.

One thing we're still discussing internally is the SSH tunnel; we're not
sure how we want to go forward (maybe a different solution). We'll let
you know once we've discussed this further.


On 10/29/18 4:38 PM, Alexandre Derumier wrote:

This add support to migrate a vm online to an different external cluster.
(This a rework of a previous patch series sent 2years ago)


qm migrate <vmid> <targetremotenode> --migration_type external [--targetstorage monpoolceph_vm] --online


targetstorage is optional; if not defined, the same storage name as on the source is used.
targetremotenode is an FQDN host from another cluster.
(The source node must be able to root ssh to the target node with a public key.)

The source VM is currently not deleted, for safety; it is just stopped and the
migrate lock is kept to avoid restarting it.


Please review and comment

Alexandre

Alexandre Derumier (7):
   api2 : migrate_vm : add migration_type "external"
   qm: add nextvmid
   migrate : prepare : add create_vm for external migration
   migrate : phase1 : skip sync_disk for external migration
   migrate : phase2 : migrate external
   migrate : phase2_cleanup : migrate_external
   migrate : phase3_cleanup : migrate_external

  PVE/API2/Qemu.pm   |  31 +++
  PVE/CLI/qm.pm  |  19 +++
  PVE/QemuMigrate.pm | 156 +++--
  PVE/QemuServer.pm  |  24 +++--
  4 files changed, 188 insertions(+), 42 deletions(-)





[pve-devel] [PATCH manager 1/2] fix sorting for unused disks in HardwareView.js

2018-11-07 Thread David Limbeck
sort based on group for everything and unused disks based on their
number (unused2 before unused10) as well as add groups to every item and
keep original order

Signed-off-by: David Limbeck 
---
 www/manager6/qemu/HardwareView.js | 36 
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/www/manager6/qemu/HardwareView.js b/www/manager6/qemu/HardwareView.js
index f2a3e244..3a17751d 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -53,6 +53,7 @@ Ext.define('PVE.qemu.HardwareView', {
never_delete: true,
defaultValue: '512',
tdCls: 'pve-itype-icon-memory',
+   group: 2,
multiKey: ['memory', 'balloon', 'shares'],
	renderer: function(value, metaData, record, ri, ci, store, pending) {
	    var res = '';
@@ -81,6 +82,7 @@ Ext.define('PVE.qemu.HardwareView', {
	    editor: (caps.vms['VM.Config.CPU'] || caps.vms['VM.Config.HWType']) ? 'PVE.qemu.ProcessorEdit' : undefined,
	    tdCls: 'pve-itype-icon-processor',
+	    group: 3,
	    defaultValue: '1',
	    multiKey: ['sockets', 'cpu', 'cores', 'numa', 'vcpus', 'cpulimit', 'cpuunits'],
	    renderer: function(value, metaData, record, rowIndex, colIndex, store, pending) {
@@ -124,6 +126,7 @@ Ext.define('PVE.qemu.HardwareView', {
	    never_delete: true,
	    editor: caps.vms['VM.Config.Options'] ? 'PVE.qemu.KeyboardEdit' : undefined,
	    tdCls: 'pve-itype-icon-keyboard',
+	    group: 1,
	    defaultValue: '',
	    renderer: PVE.Utils.render_kvm_language
	},
@@ -132,6 +135,7 @@ Ext.define('PVE.qemu.HardwareView', {
	    editor: caps.vms['VM.Config.HWType'] ? 'PVE.qemu.DisplayEdit' : undefined,
never_delete: true,
tdCls: 'pve-itype-icon-display',
+   group:4,
defaultValue: '',
renderer: PVE.Utils.render_kvm_vga_driver   
},
@@ -170,7 +174,7 @@ Ext.define('PVE.qemu.HardwareView', {
PVE.Utils.forEachBus(undefined, function(type, id) {
var confid = type + id;
rows[confid] = {
-   group: 1,
+   group: 5,
tdCls: 'pve-itype-icon-storage',
editor: 'PVE.qemu.HDEdit',
never_delete: caps.vms['VM.Config.Disk'] ? false : true,
@@ -182,7 +186,7 @@ Ext.define('PVE.qemu.HardwareView', {
for (i = 0; i < 32; i++) {
confid = "net" + i.toString();
rows[confid] = {
-   group: 2,
+   group: 6,
tdCls: 'pve-itype-icon-network',
	editor: caps.vms['VM.Config.Network'] ? 'PVE.qemu.NetworkEdit' : undefined,
never_delete: caps.vms['VM.Config.Network'] ? false : true,
@@ -190,7 +194,7 @@ Ext.define('PVE.qemu.HardwareView', {
};
}
rows.efidisk0 = {
-   group: 3,
+   group: 7,
tdCls: 'pve-itype-icon-storage',
editor: null,
never_delete: caps.vms['VM.Config.Disk'] ? false : true,
@@ -199,7 +203,7 @@ Ext.define('PVE.qemu.HardwareView', {
for (i = 0; i < 5; i++) {
confid = "usb" + i.toString();
rows[confid] = {
-   group: 4,
+   group: 8,
tdCls: 'pve-itype-icon-usb',
	editor: caps.nodes['Sys.Console'] ? 'PVE.qemu.USBEdit' : undefined,
never_delete: caps.nodes['Sys.Console'] ? false : true,
@@ -209,7 +213,7 @@ Ext.define('PVE.qemu.HardwareView', {
for (i = 0; i < 4; i++) {
confid = "hostpci" + i.toString();
rows[confid] = {
-   group: 5,
+   group: 9,
tdCls: 'pve-itype-icon-pci',
never_delete: caps.nodes['Sys.Console'] ? false : true,
header: gettext('PCI Device') + ' (' + confid + ')'
@@ -218,7 +222,7 @@ Ext.define('PVE.qemu.HardwareView', {
for (i = 0; i < 4; i++) {
confid = "serial" + i.toString();
rows[confid] = {
-   group: 6,
+   group: 10,
tdCls: 'pve-itype-icon-serial',
 

[pve-devel] [PATCH manager 2/2] fix disk sorting in Resources.js

2018-11-07 Thread David Limbeck
use the same sorting as in HardwareView.js based on group as well as add
groups to every item to keep original order

Signed-off-by: David Limbeck 
---
 www/manager6/lxc/Resources.js | 39 ---
 1 file changed, 36 insertions(+), 3 deletions(-)

diff --git a/www/manager6/lxc/Resources.js b/www/manager6/lxc/Resources.js
index 4e38042a..bfdc3dca 100644
--- a/www/manager6/lxc/Resources.js
+++ b/www/manager6/lxc/Resources.js
@@ -40,6 +40,7 @@ Ext.define('PVE.lxc.RessourceView', {
	editor: caps.vms['VM.Config.Memory'] ? 'PVE.lxc.MemoryEdit' : undefined,
defaultValue: 512,
tdCls: 'pve-itype-icon-memory',
+   group: 1,
renderer: function(value) {
return Proxmox.Utils.format_size(value*1024*1024);
}
@@ -49,6 +50,7 @@ Ext.define('PVE.lxc.RessourceView', {
	editor: caps.vms['VM.Config.Memory'] ? 'PVE.lxc.MemoryEdit' : undefined,
defaultValue: 512,
tdCls: 'pve-itype-icon-swap',
+   group: 2,
renderer: function(value) {
return Proxmox.Utils.format_size(value*1024*1024);
}
@@ -58,6 +60,7 @@ Ext.define('PVE.lxc.RessourceView', {
	editor: caps.vms['VM.Config.CPU'] ? 'PVE.lxc.CPUEdit' : undefined,
defaultValue: '',
tdCls: 'pve-itype-icon-processor',
+   group: 3,
renderer: function(value) {
var cpulimit = me.getObjectValue('cpulimit');
var cpuunits = me.getObjectValue('cpuunits');
@@ -82,7 +85,8 @@ Ext.define('PVE.lxc.RessourceView', {
header: gettext('Root Disk'),
defaultValue: Proxmox.Utils.noneText,
editor: mpeditor,
-   tdCls: 'pve-itype-icon-storage'
+   tdCls: 'pve-itype-icon-storage',
+   group: 4
},
cpulimit: {
visible: false
@@ -97,14 +101,17 @@ Ext.define('PVE.lxc.RessourceView', {
 
PVE.Utils.forEachMP(function(bus, i) {
confid = bus + i;
-   var  header;
+   var group = 5;
+   var header;
if (bus === 'mp') {
header = gettext('Mount Point') + ' (' + confid + ')';
} else {
header = gettext('Unused Disk') + ' ' + i;
+   group += 1;
}
rows[confid] = {
-   group: 1,
+   group: group,
+   order: i,
tdCls: 'pve-itype-icon-storage',
editor: mpeditor,
header: header
@@ -237,6 +244,31 @@ Ext.define('PVE.lxc.RessourceView', {
 
};

+   var sorterFn = function(rec1, rec2) {
+   var v1 = rec1.data.key;
+   var v2 = rec2.data.key;
+   var g1 = rows[v1].group || 0;
+   var g2 = rows[v2].group || 0;
+   var order1 = rows[v1].order || 0;
+   var order2 = rows[v2].order || 0;
+
+   if ((g1 - g2) !== 0) {
+   return g1 - g2;
+   }
+
+   if ((order1 - order2) !== 0) {
+   return order1 - order2;
+   }
+
+   if (v1 > v2) {
+   return 1;
+   } else if (v1 < v2) {
+   return -1;
+   } else {
+   return 0;
+   }
+   }
+
Ext.apply(me, {
url: '/api2/json/' + baseurl,
selModel: me.selModel,
@@ -269,6 +301,7 @@ Ext.define('PVE.lxc.RessourceView', {
move_btn
],
rows: rows,
+   sorterFn: sorterFn,
editorConfig: {
pveSelNode: me.pveSelNode,
url: '/api2/extjs/' + baseurl
-- 
2.11.0




Re: [pve-devel] [PATCH manager] remove unnecessary sort function in HardwareView.js

2018-11-07 Thread David Limbeck

On 11/7/18 10:23 AM, Dominik Csapak wrote:

On 11/7/18 9:14 AM, Thomas Lamprecht wrote:

On 11/6/18 12:14 PM, David Limbeck wrote:
we're adding everything in the right order already so there's no need to
sort them based on groups


Hmm,

Memory, Processor and Keyboard switches order with this.

Also, if I add a serial port and then a USB one (e.g., spice), the order is
now switched until reload (as it does not get ordered on change).

Not a really big issue, but you should at least mention that...

@Dominik do we really want this, or do we want a better sorter for the
increase of unused disks?

Because if I have holes in the unused disk id allocation the next detach
also results in a wrong order with this, until reopening of component or
page...



Without looking at it: how does it work with containers?
Does the wrong order until reload also exist there?

If yes, I would like to have it fixed in both so that the order is
always the same -> fix the sorter and add one for containers.

If not, what is different there (since we do not define a sorter there
either) -> find out why it works there and do that also for VMs.


Order for resources is wrong as well. I will send another patch adding 
custom sorting for unused disks for both VMs and CTs.


Signed-off-by: David Limbeck 
---
this is in preparation for the increased number of unused disks, as unused
disks 10-19 would appear before disk 2

  www/manager6/qemu/HardwareView.js | 11 ---
  1 file changed, 11 deletions(-)

diff --git a/www/manager6/qemu/HardwareView.js b/www/manager6/qemu/HardwareView.js
index f2a3e244..2a86e461 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -233,16 +233,6 @@ Ext.define('PVE.qemu.HardwareView', {
  };
  }
  -    var sorterFn = function(rec1, rec2) {
-    var v1 = rec1.data.key;
-    var v2 = rec2.data.key;
-    var g1 = rows[v1].group || 0;
-    var g2 = rows[v2].group || 0;
-
-    return (g1 !== g2) ?
-    (g1 > g2 ? 1 : -1) : (v1 > v2 ? 1 : (v1 < v2 ? -1 : 0));
-    };
-
  var reload = function() {
  me.rstore.load();
  };
@@ -627,7 +617,6 @@ Ext.define('PVE.qemu.HardwareView', {
  revert_btn
  ],
  rows: rows,
-    sorterFn: sorterFn,
  listeners: {
  itemdblclick: run_editor,
  selectionchange: set_button_status










Re: [pve-devel] [PATCH v2 manager 0/4] implemented suggested changes

2018-11-06 Thread David Limbeck
Looking through the patches I saw that the commit messages are missing
(compared to v1). They might still be useful, especially for the ZFSStatus ->
ZFSConfig change, as they explain a bit why it was changed.


On 11/6/18 1:48 PM, Tim Marx wrote:

changes since v1:
* changed if statement in Utils.js
* changed new component to proxmox.grid.objectgrid

Tim Marx (4):
   node zfs: added check for undefined & new case AVAIL
   node zfs: added new component to display additional zfs details
   node zfs: renamed & revised tree component to display more information
   node zfs: added panel to window as container for new components

  www/manager6/Utils.js|   5 +++
  www/manager6/node/ZFS.js | 109 +++
  2 files changed, 87 insertions(+), 27 deletions(-)





[pve-devel] [PATCH manager] remove unnecessary sort function in HardwareView.js

2018-11-06 Thread David Limbeck
we're adding everything in the right order already so there's no need to
sort them based on groups

Signed-off-by: David Limbeck 
---
this is in preparation for the increased number of unused disks, as unused
disks 10-19 would appear before disk 2

 www/manager6/qemu/HardwareView.js | 11 ---
 1 file changed, 11 deletions(-)

diff --git a/www/manager6/qemu/HardwareView.js b/www/manager6/qemu/HardwareView.js
index f2a3e244..2a86e461 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -233,16 +233,6 @@ Ext.define('PVE.qemu.HardwareView', {
};
}
 
-   var sorterFn = function(rec1, rec2) {
-   var v1 = rec1.data.key;
-   var v2 = rec2.data.key;
-   var g1 = rows[v1].group || 0;
-   var g2 = rows[v2].group || 0;
-   
-   return (g1 !== g2) ? 
-   (g1 > g2 ? 1 : -1) : (v1 > v2 ? 1 : (v1 < v2 ? -1 : 0));
-   };
-
var reload = function() {
me.rstore.load();
};
@@ -627,7 +617,6 @@ Ext.define('PVE.qemu.HardwareView', {
revert_btn
],
rows: rows,
-   sorterFn: sorterFn,
listeners: {
itemdblclick: run_editor,
selectionchange: set_button_status
-- 
2.11.0




[pve-devel] [PATCH qemu-server] fix #1969: increase max unused disks

2018-11-05 Thread David Limbeck
increase to 256, same as containers

Signed-off-by: David Limbeck 
---
 PVE/QemuConfig.pm | 2 +-
 PVE/QemuServer.pm | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/PVE/QemuConfig.pm b/PVE/QemuConfig.pm
index c2192cf..6ba0cc3 100644
--- a/PVE/QemuConfig.pm
+++ b/PVE/QemuConfig.pm
@@ -20,7 +20,7 @@ mkdir $confdir;
 my $lock_dir = "/var/lock/qemu-server";
 mkdir $lock_dir;
 
-my $MAX_UNUSED_DISKS = 8;
+my $MAX_UNUSED_DISKS = 256;
 
 # BEGIN implemented abstract methods from PVE::AbstractConfig
 
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 82c9b96..311a2d1 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -641,7 +641,7 @@ my $MAX_VIRTIO_DISKS = 16;
 my $MAX_SATA_DISKS = 6;
 my $MAX_USB_DEVICES = 5;
 my $MAX_NETS = 32;
-my $MAX_UNUSED_DISKS = 8;
+my $MAX_UNUSED_DISKS = 256;
 my $MAX_HOSTPCI_DEVICES = 4;
 my $MAX_SERIAL_PORTS = 4;
 my $MAX_PARALLEL_PORTS = 3;
-- 
2.11.0




[pve-devel] [PATCH manager] fix #1969: increase max unused disks

2018-11-05 Thread David Limbeck
Signed-off-by: David Limbeck 
---
 www/manager6/qemu/HardwareView.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/manager6/qemu/HardwareView.js b/www/manager6/qemu/HardwareView.js
index f2a3e244..1fb6a44b 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -224,7 +224,7 @@ Ext.define('PVE.qemu.HardwareView', {
header: gettext('Serial Port') + ' (' + confid + ')'
};
}
-   for (i = 0; i < 8; i++) {
+   for (i = 0; i < 256; i++) {
rows["unused" + i.toString()] = {
group: 99,
tdCls: 'pve-itype-icon-storage',
-- 
2.11.0




[pve-devel] [PATCH qemu-server/manager 0/2] fix #1969: increase max unused

2018-11-05 Thread David Limbeck
increase $MAX_UNUSED_DISKS in both qemu-server and pve-manager to 256

David Limbeck (1):
  fix #1969: increase max unused disks

 PVE/QemuConfig.pm | 2 +-
 PVE/QemuServer.pm | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

David Limbeck (1):
  fix #1969: increase max unused disks

 www/manager6/qemu/HardwareView.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


-- 
2.11.0




Re: [pve-devel] [PATCH qemu-server 0/7] online vm migration to external cluster

2018-11-05 Thread David Limbeck
I'm currently looking through the patches and will provide you with 
feedback sometime this week.


On 10/29/18 4:38 PM, Alexandre Derumier wrote:

This add support to migrate a vm online to an different external cluster.
(This a rework of a previous patch series sent 2years ago)


qm migrate <vmid> <targetremotenode> --migration_type external [--targetstorage monpoolceph_vm] --online


targetstorage is optional; if not defined, the same storage name as on the source is used.
targetremotenode is an FQDN host from another cluster.
(The source node must be able to root ssh to the target node with a public key.)

The source VM is currently not deleted, for safety; it is just stopped and the
migrate lock is kept to avoid restarting it.


Please review and comment

Alexandre

Alexandre Derumier (7):
   api2 : migrate_vm : add migration_type "external"
   qm: add nextvmid
   migrate : prepare : add create_vm for external migration
   migrate : phase1 : skip sync_disk for external migration
   migrate : phase2 : migrate external
   migrate : phase2_cleanup : migrate_external
   migrate : phase3_cleanup : migrate_external

  PVE/API2/Qemu.pm   |  31 +++
  PVE/CLI/qm.pm  |  19 +++
  PVE/QemuMigrate.pm | 156 +++--
  PVE/QemuServer.pm  |  24 +++--
  4 files changed, 188 insertions(+), 42 deletions(-)





[pve-devel] [PATCH container] fix #1808: readonly mount source disk

2018-10-30 Thread David Limbeck
Always readonly mount the source disk so a full clone still works with
an immutable base disk. Applies to every disk copy.

Signed-off-by: David Limbeck 
---
 src/PVE/LXC.pm | 17 -
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 0f2aa5c..1e47faf 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -1200,7 +1200,7 @@ sub query_loopdev {
 # The loop device is always detached afterwards (or set to autoclear).
 # Returns the loop device.
 sub run_with_loopdev {
-my ($func, $file) = @_;
+my ($func, $file, $readonly) = @_;
 my $device = query_loopdev($file);
 # Try to reuse an existing device
 if ($device) {
@@ -1216,7 +1216,14 @@ sub run_with_loopdev {
$device = $1;
}
 };
-PVE::Tools::run_command(['losetup', '--show', '-f', $file], outfunc => $parser);
+my $losetup_cmd = [
+   'losetup',
+   '--show',
+   '-f',
+   $file,
+];
+push @$losetup_cmd, '-r' if $readonly;
+PVE::Tools::run_command($losetup_cmd, outfunc => $parser);
 die "failed to setup loop device for $file\n" if !$device;
 eval { &$func($device); };
 my $err = $@;
@@ -1462,7 +1469,7 @@ sub mountpoint_mount {
};
my $use_loopdev = 0;
if ($scfg->{path}) {
-   $mounted_dev = run_with_loopdev($domount, $path);
+   $mounted_dev = run_with_loopdev($domount, $path, $readonly);
$use_loopdev = 1;
} elsif ($scfg->{type} eq 'drbd' || $scfg->{type} eq 'lvm' ||
 $scfg->{type} eq 'rbd' || $scfg->{type} eq 'lvmthin') {
@@ -1825,10 +1832,10 @@ sub run_unshared {
 my $copy_volume = sub {
 my ($src_volid, $src, $dst_volid, $dest, $storage_cfg, $snapname) = @_;
 
-my $src_mp = { volume => $src_volid, mp => '/' };
+my $src_mp = { volume => $src_volid, mp => '/', ro => 1 };
 $src_mp->{type} = PVE::LXC::Config->classify_mountpoint($src_volid);
 
-my $dst_mp = { volume => $dst_volid, mp => '/' };
+my $dst_mp = { volume => $dst_volid, mp => '/', ro => 0 };
 $dst_mp->{type} = PVE::LXC::Config->classify_mountpoint($dst_volid);
 
 my @mounted;
-- 
2.11.0
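
For reference, the '-r' flag added above corresponds to setting up the loop
device read-only on the command line, roughly (illustrative image path):

    losetup --show -f -r /var/lib/vz/images/100/vm-100-disk-0.raw

This prints the allocated /dev/loopN; writes to that device are then refused,
so the immutable base image cannot be modified while it is copied.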




[pve-devel] [PATCH manager] fix wrong permissions for subscription info

2018-10-30 Thread David Limbeck
workaround to keep the subscription popup on login even without 'Sys.Audit'
permissions but remove the subscription menu in the GUI for unauthorized
users

Signed-off-by: David Limbeck 
---
 PVE/API2/Subscription.pm| 20 +++-
 www/manager6/node/Config.js | 19 ---
 2 files changed, 27 insertions(+), 12 deletions(-)

diff --git a/PVE/API2/Subscription.pm b/PVE/API2/Subscription.pm
index 9d24dce8..efbe70c2 100644
--- a/PVE/API2/Subscription.pm
+++ b/PVE/API2/Subscription.pm
@@ -91,9 +91,6 @@ __PACKAGE__->register_method ({
 name => 'get',
 path => '',
 method => 'GET',
-permissions => {
-   check => ['perm', '/nodes/{node}', [ 'Sys.Audit' ]],
-},
 description => "Read subscription info.",
 proxyto => 'node',
 permissions => { user => 'all' },
@@ -110,12 +107,25 @@ __PACKAGE__->register_method ({
my $server_id = PVE::API2Tools::get_hwaddress();
	my $url = "http://www.proxmox.com/products/proxmox-ve/subscription-service-plans";
 
+   my $rpcenv = PVE::RPCEnvironment::get();
+   my $authuser = $rpcenv->get_user();
+   my $has_permission = PVE::AccessControl::check_permissions($authuser, '/nodes/{node}', 'Sys.Audit');
+
my $info = PVE::INotify::read_file('subscription');
if (!$info) {
-   return {
+   my $no_subscription_info = {
status => "NotFound",
message => "There is no subscription key",
-   serverid => $server_id,
+   url => $url,
+   };
+   $no_subscription_info->{serverid} = $server_id if $has_permission;
+   return $no_subscription_info;
+   }
+
+   if (!$has_permission) {
+   return {
+   status => $info->{status},
+   message => $info->{message},
url => $url,
}
}
diff --git a/www/manager6/node/Config.js b/www/manager6/node/Config.js
index e7a38296..37863f09 100644
--- a/www/manager6/node/Config.js
+++ b/www/manager6/node/Config.js
@@ -380,15 +380,20 @@ Ext.define('PVE.node.Config', {
nodename: nodename,
xtype: 'proxmoxNodeTasks'
},
-   {
-   title: gettext('Subscription'),
-   iconCls: 'fa fa-support',
-   itemId: 'support',
-   xtype: 'pveNodeSubscription',
-   nodename: nodename
-   }
);
 
+   if (caps.nodes['Sys.Audit']) {
+   me.items.push(
+   {
+   title: gettext('Subscription'),
+   iconCls: 'fa fa-support',
+   itemId: 'support',
+   xtype: 'pveNodeSubscription',
+   nodename: nodename
+   }
+   );
+   }
+
me.callParent();
 
me.mon(me.statusStore, 'load', function(s, records, success) {
-- 
2.11.0
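
To illustrate the effect (field values are made up, only the shape follows
the code above): a user without 'Sys.Audit' now receives a reduced hash,
without 'serverid' or any key material:

    my $reduced_info = {
        status  => 'Active',                      # illustrative
        message => 'subscription set and valid',  # illustrative
        url     => $url,
    };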




[pve-devel] [PATCH qemu-server/manager/docs] fix #1959: remove cloud-init SLAAC option

2018-10-25 Thread David Limbeck
This series removes the SLAAC option from the GUI, adds a fallback for
'auto' found in the config (set by the SLAAC option in GUI or via the CLI) to
'dhcp' and removes any information regarding the 'auto' option from the
docs.

qemu-server:

David Limbeck (1):
  fix #1959: add fallback for 'auto' previously set by SLAAC

 PVE/QemuServer/Cloudinit.pm | 3 +++
 1 file changed, 3 insertions(+)

manager:

David Limbeck (1):
  fix #1959: remove invalid SLAAC option

 www/manager6/qemu/IPConfigEdit.js | 8 
 1 file changed, 8 deletions(-)

docs:

David Limbeck (1):
  fix #1959: remove any information regarding 'auto'

 qm-cloud-init-opts.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

-- 
2.11.0




[pve-devel] [PATCH qemu-server] fix #1959: add fallback for 'auto' previously set by SLAAC

2018-10-25 Thread David Limbeck
SLAAC previously set 'auto', which is not supported by the nocloud network
config. On an up-to-date Ubuntu the fallback should work, as netplan uses
'dhcp' for both DHCP and SLAAC. For others it was invalid anyway.

Signed-off-by: David Limbeck 
---
 PVE/QemuServer/Cloudinit.pm | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
index 53f1de9..5be820c 100644
--- a/PVE/QemuServer/Cloudinit.pm
+++ b/PVE/QemuServer/Cloudinit.pm
@@ -340,6 +340,9 @@ sub nocloud_network {
if (defined(my $ip = $ipconfig->{ip6})) {
if ($ip eq 'dhcp') {
$content .= "${i}- type: dhcp6\n";
+   } elsif ($ip eq 'auto') {
+   # SLAAC is not supported by cloud-init, this fallback should work with an up-to-date netplan at least
+   $content .= "${i}- type: dhcp6\n";
} else {
$content .= "${i}- type: static\n"
   . "${i}  address: $ip\n";
-- 
2.11.0
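
For reference, a sketch of the nocloud network config (cloud-init network
config v1) this now generates for an interface configured with ip6=auto;
the MAC address is made up and the exact indentation may differ:

    version: 1
    config:
        - type: physical
          name: eth0
          mac_address: aa:bb:cc:dd:ee:ff
          subnets:
          - type: dhcp6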




[pve-devel] [PATCH manager] fix #1959: remove invalid SLAAC option

2018-10-25 Thread David Limbeck
the nocloud configuration does not support the SLAAC option, only static or
dhcp, so this option shouldn't be available

Signed-off-by: David Limbeck 
---
 www/manager6/qemu/IPConfigEdit.js | 8 
 1 file changed, 8 deletions(-)

diff --git a/www/manager6/qemu/IPConfigEdit.js 
b/www/manager6/qemu/IPConfigEdit.js
index e185f1a3..961e5032 100644
--- a/www/manager6/qemu/IPConfigEdit.js
+++ b/www/manager6/qemu/IPConfigEdit.js
@@ -157,14 +157,6 @@ Ext.define('PVE.qemu.IPConfigPanel', {
inputValue: 'dhcp',
checked: false,
margin: '0 0 0 10'
-   },
-   {
-   xtype: 'radiofield',
-   boxLabel: gettext('SLAAC'),
-   name: 'ipv6mode',
-   inputValue: 'auto',
-   checked: false,
-   margin: '0 0 0 10'
}
]
},
-- 
2.11.0




[pve-devel] [PATCH docs] fix #1959: remove any information regarding 'auto'

2018-10-25 Thread David Limbeck
Remove any information regarding 'auto' but mention the netplan workaround for 
SLAAC

Signed-off-by: David Limbeck 
---
 qm-cloud-init-opts.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/qm-cloud-init-opts.adoc b/qm-cloud-init-opts.adoc
index 705a8c5..64b7670 100644
--- a/qm-cloud-init-opts.adoc
+++ b/qm-cloud-init-opts.adoc
@@ -17,7 +17,7 @@ Specify IP addresses and gateways for the corresponding 
interface.
 IP addresses use CIDR notation, gateways are optional but need an IP of the 
same type specified.
 +
 The special string 'dhcp' can be used for IP addresses to use DHCP, in which 
case no explicit gateway should be provided.
-For IPv6 the special string 'auto' can be used to use stateless 
autoconfiguration.
+SLAAC is not supported by cloud-init but at least netplan activates SLAAC when 
selecting 'dhcp' so it should work with up-to-date netplan versions.
 +
 If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, 
it defaults to using dhcp on IPv4.
 
-- 
2.11.0




[pve-devel] [PATCH v4 manager] add wipe_disk option when destroying ceph disk

2018-10-24 Thread David Limbeck
this allows the disk to be reused as ceph disk by zeroing the first 200M
of the destroyed disk. disks are iterated separately from partitions to
prevent duplicate wipes.

Signed-off-by: David Limbeck 
---
since v3:
$disks_to_wipe is captured in $remove_partition instead of being
passed 

 PVE/API2/Ceph.pm | 9 +
 1 file changed, 9 insertions(+)

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 69489a70..ac277fdf 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -397,6 +397,7 @@ __PACKAGE__->register_method ({
# try to unmount from standard mount point
my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
 
+   my $disks_to_wipe = {};
my $remove_partition = sub {
my ($part) = @_;
 
@@ -407,6 +408,8 @@ __PACKAGE__->register_method ({
print "remove partition $part (disk '${devpath}', partnum 
$partnum)\n";
eval { run_command(['/sbin/sgdisk', '-d', $partnum, 
"${devpath}"]); };
warn $@ if $@;
+
+   $disks_to_wipe->{$devpath} = 1;
};
 
my $partitions_to_remove = [];
@@ -434,6 +437,7 @@ __PACKAGE__->register_method ({
}
}
 
+
print "Unmount OSD $osdsection from  $mountpoint\n";
eval { run_command(['/bin/umount', $mountpoint]); };
if (my $err = $@) {
@@ -443,6 +447,11 @@ __PACKAGE__->register_method ({
foreach my $part (@$partitions_to_remove) {
$remove_partition->($part);
}
+   foreach my $devpath (keys %$disks_to_wipe) {
+   print "wipe disk: $devpath\n";
+   eval { run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'count=200', 'conv=fdatasync']); };
+   warn $@ if $@;
+   }
}
};
 
-- 
2.11.0
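
A note on why a hash is used here: an OSD usually has several partitions
(data and journal) on the same physical disk, and keying by device path
collapses them into a single wipe. Minimal sketch of the idea (device names
made up):

    my $disks_to_wipe = {};
    $disks_to_wipe->{'/dev/sdb'} = 1;   # from partition /dev/sdb1
    $disks_to_wipe->{'/dev/sdb'} = 1;   # from partition /dev/sdb2, same key
    # keys %$disks_to_wipe -> ('/dev/sdb'), so dd runs only once per disk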




Re: [pve-devel] [PATCH v3 manager] add wipe_disk option when destroying ceph disk

2018-10-24 Thread David Limbeck

On 10/24/18 11:36 AM, Thomas Lamprecht wrote:

On 10/24/18 11:13 AM, David Limbeck wrote:

this allows the disk to be reused as ceph disk by zeroing the first 200M
of the destroyed disk. disks are iterated separately from partitions to
prevent duplicate wipes.

Signed-off-by: David Limbeck 
---
since v2:
 incorporated Thomas' suggestions combining remove_partition and the
 generation of the disk set

  PVE/API2/Ceph.pm | 13 +++--
  1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 69489a70..493131e6 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -398,7 +398,7 @@ __PACKAGE__->register_method ({
my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
  
  	my $remove_partition = sub {

-   my ($part) = @_;
+   my ($part, $disks_to_wipe) = @_;
  
  		return if !$part || (! -b $part );

my $partnum = PVE::Diskmanage::get_partnum($part);
@@ -407,6 +407,8 @@ __PACKAGE__->register_method ({
print "remove partition $part (disk '${devpath}', partnum 
$partnum)\n";
eval { run_command(['/sbin/sgdisk', '-d', $partnum, 
"${devpath}"]); };
warn $@ if $@;
+
+   $disks_to_wipe->{$devpath} = 1;
};
  
  	my $partitions_to_remove = [];

@@ -434,14 +436,21 @@ __PACKAGE__->register_method ({
}
}
  
+

print "Unmount OSD $osdsection from  $mountpoint\n";
eval { run_command(['/bin/umount', $mountpoint]); };
if (my $err = $@) {
warn $err;
} elsif ($param->{cleanup}) {
#be aware of the ceph udev rules which can remount.
+   my $disks_to_wipe = {};
foreach my $part (@$partitions_to_remove) {
-   $remove_partition->($part);
+   $remove_partition->($part, $disks_to_wipe);
+   }
+   foreach my $devpath (keys %$disks_to_wipe) {
+   print "wipe disk: $devpath\n";
+   eval { run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'count=200', 'conv=fdatasync']); };
+   warn $@ if $@;
}
}
};


No need to change the closures signature, I envisioned something like:

Ahh, right, it's a closure. Will send it again.


8<
diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 69489a70..b26a7343 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -397,6 +397,7 @@ __PACKAGE__->register_method ({
 # try to unmount from standard mount point
 my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";

+   my $disks_to_wipe = {};
 my $remove_partition = sub {
 my ($part) = @_;

@@ -407,6 +408,8 @@ __PACKAGE__->register_method ({
 print "remove partition $part (disk '${devpath}', partnum 
$partnum)\n";
 eval { run_command(['/sbin/sgdisk', '-d', $partnum, 
"${devpath}"]); };
 warn $@ if $@;
+
+   $disks_to_wipe->{$devpath} = 1;
 };

 my $partitions_to_remove = [];
@@ -443,6 +446,11 @@ __PACKAGE__->register_method ({
 foreach my $part (@$partitions_to_remove) {
 $remove_partition->($part);
 }
+   foreach my $devpath (keys %$disks_to_wipe) {
+   print "wipe disk: $devpath\n";
+   eval { run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'count=200', 'conv=fdatasync']); };
+   warn $@ if $@;
+   }
 }
 };





[pve-devel] [PATCH v3 manager] add wipe_disk option when destroying ceph disk

2018-10-24 Thread David Limbeck
this allows the disk to be reused as ceph disk by zeroing the first 200M
of the destroyed disk. disks are iterated separately from partitions to
prevent duplicate wipes.

Signed-off-by: David Limbeck 
---
since v2:
incorporated Thomas' suggestions combining remove_partition and the
generation of the disk set

 PVE/API2/Ceph.pm | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 69489a70..493131e6 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -398,7 +398,7 @@ __PACKAGE__->register_method ({
my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
 
my $remove_partition = sub {
-   my ($part) = @_;
+   my ($part, $disks_to_wipe) = @_;
 
return if !$part || (! -b $part );
my $partnum = PVE::Diskmanage::get_partnum($part);
@@ -407,6 +407,8 @@ __PACKAGE__->register_method ({
print "remove partition $part (disk '${devpath}', partnum 
$partnum)\n";
eval { run_command(['/sbin/sgdisk', '-d', $partnum, 
"${devpath}"]); };
warn $@ if $@;
+
+   $disks_to_wipe->{$devpath} = 1;
};
 
my $partitions_to_remove = [];
@@ -434,14 +436,21 @@ __PACKAGE__->register_method ({
}
}
 
+
print "Unmount OSD $osdsection from  $mountpoint\n";
eval { run_command(['/bin/umount', $mountpoint]); };
if (my $err = $@) {
warn $err;
} elsif ($param->{cleanup}) {
#be aware of the ceph udev rules which can remount.
+   my $disks_to_wipe = {};
foreach my $part (@$partitions_to_remove) {
-   $remove_partition->($part);
+   $remove_partition->($part, $disks_to_wipe);
+   }
+   foreach my $devpath (keys %$disks_to_wipe) {
+   print "wipe disk: $devpath\n";
+   eval { run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'count=200', 'conv=fdatasync']); };
+   warn $@ if $@;
}
}
};
-- 
2.11.0




Re: [pve-devel] [PATCH v2 manager] add wipe_disk option when destroying ceph disk

2018-10-24 Thread David Limbeck

On 10/24/18 10:31 AM, Thomas Lamprecht wrote:

On 10/24/18 9:32 AM, David Limbeck wrote:

this allows the disk to be reused as ceph disk by zeroing the first 200M
of the destroyed disk

Signed-off-by: David Limbeck 
---
since v1:
 wipe is always done after remove_partition
 fdatasync is used to make sure data is synced on some disks
 (as proposed by Alwin)

  PVE/API2/Ceph.pm | 12 
  1 file changed, 12 insertions(+)

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 69489a70..a0b5042d 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -434,6 +434,13 @@ __PACKAGE__->register_method ({
}
}
  
+	my $disks_to_wipe = {};

+   foreach my $part (@$partitions_to_remove) {
+   next if !$part || (! -b $part );
+   my $devpath = PVE::Diskmanage::get_blockdev($part);
+   $disks_to_wipe->{$devpath} = 1;
+   }

Could be done in single line with:

my $disks_to_wipe = {
     map { PVE::Diskmanage::get_blockdev($_) => 1 } grep { -b } @partitions_to_remove
};

I find it much nicer, as more concise and using what perl provides,
but I don't think a lot of others here think the same way, so take with
caution ;-)


Same style as $remove_partition.



But I have another question


+
print "Unmount OSD $osdsection from  $mountpoint\n";
eval { run_command(['/bin/umount', $mountpoint]); };
if (my $err = $@) {
@@ -443,6 +450,11 @@ __PACKAGE__->register_method ({
foreach my $part (@$partitions_to_remove) {
$remove_partition->($part);

...why don't you do the wiping in the $remove_partition closure?

Because your code seems to introduce two additional and unnecessary for
loops over  $partitions_to_remove

Just doing the checks in the aforementioned closure, if those checks are
needed at all, and run the command there? Should be much shorter (almost
a one-line only change)?

Only 1 additional loop over $partitions_to_remove to create a hash set 
for the disks. This was done so partitions could be deleted first before 
wiping, and so each disk is wiped only once.

}
+   foreach my $devpath (keys %$disks_to_wipe) {
+   print "wipe disk: $devpath\n";
+   eval { run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'count=200', 'conv=fdatasync']); };
+   warn $@ if $@;
+   }
}
};
  





[pve-devel] [PATCH v2 manager] add wipe_disk option when destroying ceph disk

2018-10-24 Thread David Limbeck
this allows the disk to be reused as ceph disk by zeroing the first 200M
of the destroyed disk

Signed-off-by: David Limbeck 
---
since v1:
wipe is always done after remove_partition
fdatasync is used to make sure data is synced on some disks
(as proposed by Alwin)

 PVE/API2/Ceph.pm | 12 
 1 file changed, 12 insertions(+)

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 69489a70..a0b5042d 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -434,6 +434,13 @@ __PACKAGE__->register_method ({
}
}
 
+   my $disks_to_wipe = {};
+   foreach my $part (@$partitions_to_remove) {
+   next if !$part || (! -b $part );
+   my $devpath = PVE::Diskmanage::get_blockdev($part);
+   $disks_to_wipe->{$devpath} = 1;
+   }
+
print "Unmount OSD $osdsection from  $mountpoint\n";
eval { run_command(['/bin/umount', $mountpoint]); };
if (my $err = $@) {
@@ -443,6 +450,11 @@ __PACKAGE__->register_method ({
foreach my $part (@$partitions_to_remove) {
$remove_partition->($part);
}
+   foreach my $devpath (keys %$disks_to_wipe) {
+   print "wipe disk: $devpath\n";
+   eval { run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'count=200', 'conv=fdatasync']); };
+   warn $@ if $@;
+   }
}
};
 
-- 
2.11.0




[pve-devel] [PATCH manager] add wipe_disk option when destroying ceph disk

2018-10-23 Thread David Limbeck
this allows the disk to be reused as ceph disk by zeroing the first 200M
of the destroyed disk

Signed-off-by: David Limbeck 
---
 PVE/API2/Ceph.pm | 22 ++
 www/manager6/ceph/OSD.js | 18 +-
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 69489a70..6dce2f01 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -347,6 +347,12 @@ __PACKAGE__->register_method ({
optional => 1,
default => 0,
},
+   wipe_disk => {
+   description => 'Wipe first 200M of disk to make it reusable as a ceph OSD.',
+   type => 'boolean',
+   optional => 1,
+   default => 0,
+   },
},
 },
 returns => { type => 'string' },
@@ -434,6 +440,15 @@ __PACKAGE__->register_method ({
}
}
 
+   my $disks_to_wipe = {};
+   if ($param->{wipe_disk}) {
+   foreach my $part (@$partitions_to_remove) {
+   next if !$part || (! -b $part );
+   my $devpath = PVE::Diskmanage::get_blockdev($part);
+   $disks_to_wipe->{$devpath} = 1;
+   }
+   }
+
print "Unmount OSD $osdsection from  $mountpoint\n";
eval { run_command(['/bin/umount', $mountpoint]); };
if (my $err = $@) {
@@ -443,6 +458,13 @@ __PACKAGE__->register_method ({
foreach my $part (@$partitions_to_remove) {
$remove_partition->($part);
}
+   if ($param->{wipe_disk}) {
+   foreach my $devpath (keys %$disks_to_wipe) {
+   print "wipe disk: $devpath\n";
+   eval { run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'count=200']); };
+   warn $@ if $@;
+   }
+   }
}
};
 
diff --git a/www/manager6/ceph/OSD.js b/www/manager6/ceph/OSD.js
index 8fe7e794..6c8a5aaf 100644
--- a/www/manager6/ceph/OSD.js
+++ b/www/manager6/ceph/OSD.js
@@ -68,7 +68,23 @@ Ext.define('PVE.CephRemoveOsd', {
name: 'cleanup',
checked: true,
labelWidth: 130,
-   fieldLabel: gettext('Remove Partitions')
+   fieldLabel: gettext('Remove Partitions'),
+   handler: function(value) {
+   var wipe_disk_checkbox = Ext.getCmp('wipe_disk_checkbox');
+   if (value.checked) {
+   wipe_disk_checkbox.setDisabled(false);
+   } else {
+   wipe_disk_checkbox.setDisabled(true);
+   }
+   }
+   },
+   {
+   xtype: 'proxmoxcheckbox',
+   name: 'wipe_disk',
+   checked: false,
+   disabled: false,
+   fieldLabel: gettext('Wipe first 200M of disk'),
+   id: 'wipe_disk_checkbox'
}
 ],
 initComponent : function() {
-- 
2.11.0




Re: [pve-devel] [PATCH pve-manager] pvesh: fix bug #1942 - add standard options conditional

2018-10-16 Thread David Limbeck
I'd prefer to have at least a warning if there's a conflict and the 
standard output options are not added, instead of silently dismissing all 
of them.


As a quick workaround to get pvesh backups working again it's fine, but 
we really should work on a better solution/redesign.


Some comments on the code inline.

On 10/10/18 11:36 AM, Dietmar Maurer wrote:

Do not add/extract standard options if the method itself defined properties
using the same names (like 'quiet').

Signed-off-by: Dietmar Maurer 
---
  PVE/CLI/pvesh.pm | 31 +--
  1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/PVE/CLI/pvesh.pm b/PVE/CLI/pvesh.pm
index ccfb5c20..03222445 100755
--- a/PVE/CLI/pvesh.pm
+++ b/PVE/CLI/pvesh.pm
@@ -263,6 +263,24 @@ $path_properties->{noproxy} = {
  optional => 1,
  };
  
+my $extract_std_options = 1;

+
+my $cond_add_standard_output_properties = sub {
+my ($props) = @_;
+
+my $optlist = [];

$optlist is never used

+foreach my $opt (keys %$PVE::RESTHandler::standard_output_options) {
+   if (defined($props->{$opt})) {
maybe add a simple warn "conflicting option $opt: ignoring any standard output options"; or something like that? this shouldn't be silent at all

+   $extract_std_options = 0;
+   return $props;
+   }
+}
+
+return $props if $props->{quiet};
this already happens in the loop above, so it's unnecessary. and should 
'quiet' ever be removed from the standard output options, we'd no longer 
have a conflict and could add them, so why return here instead of adding 
them then?

+
+return PVE::RESTHandler::add_standard_output_properties($props);
+};
+
  sub call_api_method {
  my ($cmd, $param) = @_;
  
@@ -271,7 +289,8 @@ sub call_api_method {

  my $path = PVE::Tools::extract_param($param, 'api_path');
  die "missing API path\n" if !defined($path);
  
-my $stdopts =  PVE::RESTHandler::extract_standard_output_properties($param);

+my $stdopts =  $extract_std_options ?
+   PVE::RESTHandler::extract_standard_output_properties($param) : {};
  
  $opt_nooutput = 1 if $stdopts->{quiet};
  
@@ -305,7 +324,7 @@ __PACKAGE__->register_method ({

  description => "List child objects on .",
  parameters => {
additionalProperties => 0,
-   properties => PVE::RESTHandler::add_standard_output_properties($path_properties),
+   properties => $cond_add_standard_output_properties->($path_properties),
  },
  returns => { type => 'null' },
  code => sub {
@@ -361,7 +380,7 @@ __PACKAGE__->register_method ({
  description => "Call API GET on .",
  parameters => {
additionalProperties => 0,
-   properties => PVE::RESTHandler::add_standard_output_properties($path_properties),
+   properties => $cond_add_standard_output_properties->($path_properties),
  },
  returns => { type => 'null' },
  code => sub {
@@ -379,7 +398,7 @@ __PACKAGE__->register_method ({
  description => "Call API PUT on .",
  parameters => {
additionalProperties => 0,
-   properties => PVE::RESTHandler::add_standard_output_properties($path_properties),
+   properties => $cond_add_standard_output_properties->($path_properties),
  },
  returns => { type => 'null' },
  code => sub {
@@ -397,7 +416,7 @@ __PACKAGE__->register_method ({
  description => "Call API POST on .",
  parameters => {
additionalProperties => 0,
-   properties => PVE::RESTHandler::add_standard_output_properties($path_properties),
+   properties => $cond_add_standard_output_properties->($path_properties),
  },
  returns => { type => 'null' },
  code => sub {
@@ -415,7 +434,7 @@ __PACKAGE__->register_method ({
  description => "Call API DELETE on .",
  parameters => {
additionalProperties => 0,
-   properties => PVE::RESTHandler::add_standard_output_properties($path_properties),
+   properties => $cond_add_standard_output_properties->($path_properties),
  },
  returns => { type => 'null' },
  code => sub {




Re: [pve-devel] [RFC PATCH qemu-server] add qemumonitor.c

2018-10-09 Thread David Limbeck

One socket is exclusive to one client, so it needs another qmp socket.
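
A hypothetical sketch of what that could look like when assembling the VM
command line ('-chardev'/'-mon' are standard QEMU options; the socket path,
id and the @$cmd/$vmid variables are made up):

    # dedicated event socket, separate from the QMP socket used for commands
    push @$cmd, '-chardev', "socket,id=qmp-event,path=/var/run/qemu-server/${vmid}.qmp-event,server,nowait";
    push @$cmd, '-mon', 'chardev=qmp-event,mode=control';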

On 10/9/18 4:50 PM, Alexandre DERUMIER wrote:

this adds a program that can listen to qemu qmp events on a given socket

Does it work in parallel with sending qmp commands?

As far as I remember, some years ago it was not possible to have 2 qmp clients
at the same time (it needed some kind of proxy between clients and qemu).

Maybe more complex, but it could be great to be able to catch any events,
and use them in QemuServer.pm for example.


- Mail original -
De: "Dominik Csapak" 
À: "pve-devel" 
Envoyé: Mardi 9 Octobre 2018 14:49:22
Objet: [pve-devel] [RFC PATCH qemu-server] add qemumonitor.c

this adds a program that can listen to qemu qmp events on a given socket
and if a shutdown event followed by a disconnected socket occurs,
execute the given script with the given and additional arguments

this is useful if we want to cleanup after the qemu process exited,
e.g. tap devices, vgpus, etc.

also we could implement a 'proper' reboot with applying pending changes
and a stop/reboot hook

for now, this needs a not-yet applied patch[1] to qemu
but this should be trivial to backport

1: https://lists.gnu.org/archive/html/qemu-devel/2018-10/msg01271.html

Signed-off-by: Dominik Csapak 
---
sending this as rfc, without makefile/manpage/inclusion in the package/use/
build-dependencies/etc.

location and name of it are ofc subject to change :)
i just want a general feedback of the code and the interface

i had imagined starting this tool after a qemu start
with a 'qm cleanup ID' tool to do the general cleanup

the program links against libjansson4, a ~75k library with only libc6 as
dependency, and the program uses about 100k RSS memory,
so i think this is an acceptable overhead
for a vm (with possibly multiple gbs of ram)
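
a hypothetical invocation, matching the 'qm cleanup' idea above (socket path
and subcommand are made up):

    qemumonitor /var/run/qemu-server/100.qmp /usr/sbin/qm cleanup 100
    # on shutdown event + socket close the script then runs as:
    #   /usr/sbin/qm cleanup 100 <graceful> <guest> <was_reset>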

qemumonitor.c | 166 ++
1 file changed, 166 insertions(+)
create mode 100644 qemumonitor.c

diff --git a/qemumonitor.c b/qemumonitor.c
new file mode 100644
index 000..13dcfa2
--- /dev/null
+++ b/qemumonitor.c
@@ -0,0 +1,166 @@
+/*
+
+ Copyright (C) 2018 Proxmox Server Solutions GmbH
+
+ Copyright: qemumonitor is under GNU GPL, the GNU General Public License.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 dated June, 1991.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ Author: Dominik Csapak 
+
+ qemumonitor connects to a given qmp socket, and waits for a
+ shutdown event followed by the closing of the socket,
+ it then calls the given script with following arguments
+
+ SCRIPT [ARGUMENTS] <graceful> <guest> <was_reset>
+
+ parameter explanation:
+
+ graceful:
+ 1|0 depending if it saw a shutdown event before the socket closed
+
+ guest:
+ 1|0 depending if the shutdown was requested from the guest
+
+ was_reset:
+ 1|0 depending if the shutdown was actually a reset
+
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include <jansson.h>
+
+typedef enum { false, true } bool;
+typedef enum { STATE_PRECONNECTING, STATE_CONNECTING, STATE_CONNECTED } state_t;
+
+#define QMP_ANSWER "{ \"execute\":\"qmp_capabilities\" }\n"
+
+void usage(char *name);
+
+void usage(char *name)
+{
+ fprintf(stderr, "Usage: %s SOCKET SCRIPT [ARGUMENTS..]\n", name);
+}
+
+int main(int argc, char *argv[])
+{
+ if (argc < 3) {
+ usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+
+ ssize_t len;
+ bool graceful_shutdown = false;
+ bool guest_requested = false;
+ bool was_reset = false;
+
+ struct sockaddr_un serv_addr;
+ int sock;
+ FILE *socketfile;
+
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock == -1) {
+ fprintf(stderr, "cannot create unix socket: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ memset(&serv_addr, 0, sizeof(serv_addr));
+ serv_addr.sun_family = AF_UNIX;
+ memcpy(&(serv_addr.sun_path), argv[1], strlen(argv[1]));
+
+ if (connect(sock, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) < 0) {
+ close(sock);
+ fprintf(stderr, "error connecting to %s: %s\n", argv[1], strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ socketfile = fdopen(sock, "r");
+ if (socketfile == NULL) {
+ fclose(socketfile);
+ fprintf(stderr, "error opening %s: %s\n", argv[1], strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ json_t *json;
+ json_error_t err;
+ bool guest;
+ bool reset;
+ const char *event;
+ state_t qmp_state = STATE_PRECONNECTING;
+
+ while (!feof(socketfile)) {
+ json = json_loadf(socketfile, JSON_DISABLE_EOF_CHECK, &err);
+ if (json == NULL) {
+ // ignore

[pve-devel] [PATCH v2 common] fix #1942: workaround for 'quiet' option conflict

2018-10-08 Thread David Limbeck
workaround for option 'quiet' in VZDump conflicting with
standard_output_options in RESTHandler.pm. this change is not limited to
'quiet' but also allows other options to be overridden as long as their
type property matches.

Signed-off-by: David Limbeck 
---
This workaround was proposed by Dietmar. Any better solution?

 src/PVE/RESTHandler.pm | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/PVE/RESTHandler.pm b/src/PVE/RESTHandler.pm
index f645f1b..92a0c9e 100644
--- a/src/PVE/RESTHandler.pm
+++ b/src/PVE/RESTHandler.pm
@@ -810,7 +810,13 @@ sub add_standard_output_properties {
 
 foreach my $opt (@$list) {
die "no such standard output option '$opt'\n" if 
!defined($standard_output_options->{$opt});
-   die "detected overwriten standard CLI parameter '$opt'\n" if 
defined($res->{$opt});
+   if (defined($res->{$opt})) {
+   my $opt_type = $standard_output_options->{$opt}->{type};
+   my $res_opt_type = $res->{$opt}->{type};
+   if ($res_opt_type ne $opt_type) {
+   die "detected overridden standard CLI parameter '$opt'\n";
+   }
+   }
$res->{$opt} = $standard_output_options->{$opt};
 }
 
-- 
2.11.0
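
Sketch of the case this permits (property shapes assumed from the bug
report): both VZDump and the standard output options declare 'quiet' as a
boolean, so the override no longer dies:

    my $standard_output_options = { quiet => { type => 'boolean', optional => 1 } };
    my $res = { quiet => { type => 'boolean', optional => 1, description => 'Be quiet.' } };
    # types match -> no die; the standard definition then replaces the method's own.
    # a differing 'type' would still abort with "detected overridden standard CLI parameter"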




Re: [pve-devel] [PATCH manager] fix #1942: workaround for vzdump conflicting 'quiet' parameter

2018-10-08 Thread David Limbeck

Sent it too soon, does not work.

On 10/8/18 11:46 AM, David Limbeck wrote:

workaround for 'quiet' parameter conflicting with standard_output_parameters
in PVE/RESTHandler.pm as proposed by Dietmar

Signed-off-by: David Limbeck 
---
  PVE/VZDump.pm | 2 ++
  1 file changed, 2 insertions(+)

diff --git a/PVE/VZDump.pm b/PVE/VZDump.pm
index 7fc69f98..57d371af 100644
--- a/PVE/VZDump.pm
+++ b/PVE/VZDump.pm
@@ -1166,6 +1166,8 @@ sub json_config_properties {
$prop->{$opt} = $confdesc->{$opt};
  }
  
+delete $prop->{quiet};

+
  return $prop;
  }
  




[pve-devel] [PATCH manager] fix #1942: workaround for vzdump conflicting 'quiet' parameter

2018-10-08 Thread David Limbeck
workaround for 'quiet' parameter conflicting with standard_output_parameters
in PVE/RESTHandler.pm as proposed by Dietmar

Signed-off-by: David Limbeck 
---
 PVE/VZDump.pm | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/PVE/VZDump.pm b/PVE/VZDump.pm
index 7fc69f98..57d371af 100644
--- a/PVE/VZDump.pm
+++ b/PVE/VZDump.pm
@@ -1166,6 +1166,8 @@ sub json_config_properties {
$prop->{$opt} = $confdesc->{$opt};
 }
 
+delete $prop->{quiet};
+
 return $prop;
 }
 
-- 
2.11.0




[pve-devel] [PATCH v2 common] fix #1938: increase filesize limit for /proc/mounts

2018-10-03 Thread David Limbeck
Signed-off-by: David Limbeck 
---
since v1:
changed commit message

 src/PVE/ProcFSTools.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/PVE/ProcFSTools.pm b/src/PVE/ProcFSTools.pm
index 80c0425..1b98b1e 100644
--- a/src/PVE/ProcFSTools.pm
+++ b/src/PVE/ProcFSTools.pm
@@ -292,7 +292,7 @@ sub read_proc_net_route {
 }
 
 sub read_proc_mounts {
-return PVE::Tools::file_get_contents("/proc/mounts", 128*1024);
+return PVE::Tools::file_get_contents("/proc/mounts", 512*1024);
 }
 
 # mounts encode spaces (\040), tabs (\011), newlines (\012), backslashes (\\ 
or \134)
-- 
2.11.0
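
A back-of-the-envelope sketch of what the two limits mean in practice (the
per-line size is a rough assumption):

    my $avg_line = 150;   # assumed bytes per /proc/mounts entry
    printf "old cap: ~%d mounts, new cap: ~%d mounts\n",
        128*1024/$avg_line, 512*1024/$avg_line;
    # -> roughly 870 vs. 3500 entries before file_get_contents rejects the file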




Re: [pve-devel] [PATCH common] fix #1938: /proc/mounts too long

2018-10-03 Thread David Limbeck
So something like: "fix #1938: increase maximum file size to accommodate 
a larger /proc/mounts"?


On 10/3/18 12:05 PM, Wolfgang Bumiller wrote:

I'd prefer a description of the code rather than the error message in
the commit message. (Ideally the error (and how to reach it) are added
as well, just not in the header line.)

On Tue, Oct 02, 2018 at 04:13:23PM +0200, David Limbeck wrote:

Signed-off-by: David Limbeck 
---
Would a bigger filesize (1M or more) be more appropriate?

Given that /proc/mounts is rather limited (usually... by design (sort
of)), I wouldn't object to any arbitrarily large number here (or
teaching file_get_contents an explicit 'unlimited' value), I think
Dietmar has a stronger opinion here?


  src/PVE/ProcFSTools.pm | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/PVE/ProcFSTools.pm b/src/PVE/ProcFSTools.pm
index 80c0425..1b98b1e 100644
--- a/src/PVE/ProcFSTools.pm
+++ b/src/PVE/ProcFSTools.pm
@@ -292,7 +292,7 @@ sub read_proc_net_route {
  }
  
  sub read_proc_mounts {

-return PVE::Tools::file_get_contents("/proc/mounts", 128*1024);
+return PVE::Tools::file_get_contents("/proc/mounts", 512*1024);
  }
  
  # mounts encode spaces (\040), tabs (\011), newlines (\012), backslashes (\\ or \134)

--
2.11.0




[pve-devel] [PATCH cluster] add 'for internal use' to description of addnode

2018-10-03 Thread David Limbeck
Signed-off-by: David Limbeck 
---
 data/PVE/API2/ClusterConfig.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/data/PVE/API2/ClusterConfig.pm b/data/PVE/API2/ClusterConfig.pm
index 9ea0341..75ec56a 100644
--- a/data/PVE/API2/ClusterConfig.pm
+++ b/data/PVE/API2/ClusterConfig.pm
@@ -209,7 +209,7 @@ __PACKAGE__->register_method ({
 path => 'nodes/{node}',
 method => 'POST',
 protected => 1,
-description => "Adds a node to the cluster configuration.",
+description => "Adds a node to the cluster configuration. This call is for internal use.",
 parameters => {
additionalProperties => 0,
properties => {
-- 
2.11.0




[pve-devel] [PATCH common] fix #1938: /proc/mounts too long

2018-10-02 Thread David Limbeck
Signed-off-by: David Limbeck 
---
Would a bigger filesize (1M or more) be more appropriate?
 src/PVE/ProcFSTools.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/PVE/ProcFSTools.pm b/src/PVE/ProcFSTools.pm
index 80c0425..1b98b1e 100644
--- a/src/PVE/ProcFSTools.pm
+++ b/src/PVE/ProcFSTools.pm
@@ -292,7 +292,7 @@ sub read_proc_net_route {
 }
 
 sub read_proc_mounts {
-return PVE::Tools::file_get_contents("/proc/mounts", 128*1024);
+return PVE::Tools::file_get_contents("/proc/mounts", 512*1024);
 }
 
 # mounts encode spaces (\040), tabs (\011), newlines (\012), backslashes (\\ 
or \134)
-- 
2.11.0




[pve-devel] [PATCH v3 container] fix #1885: delete old route when changing gateway

2018-09-04 Thread David Limbeck
if the gateway is not in the subnet of the ip, a route is
added. this change enables the deletion of that old route when it
is no longer needed.

Signed-off-by: David Limbeck 
---
changes since v2:
 - added $oldip check
 - changed comment
 - changed commit message

 src/PVE/LXC.pm | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 1504bd0..0b57ae9 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -791,9 +791,10 @@ sub update_ipconfig {
my $newip = $newnet->{$ip};
my $newgw = $newnet->{$gw};
my $oldip = $optdata->{$ip};
+   my $oldgw = $optdata->{$gw};
 
my $change_ip = &$safe_string_ne($oldip, $newip);
-   my $change_gw = &$safe_string_ne($optdata->{$gw}, $newgw);
+   my $change_gw = &$safe_string_ne($oldgw, $newgw);
 
return if !$change_ip && !$change_gw;
 
@@ -836,6 +837,11 @@ sub update_ipconfig {
# warn and continue
warn $@ if $@;
}
+	if ($oldgw && $oldip && !PVE::Network::is_ip_in_cidr($oldgw, $oldip)) {
+	    eval { &$ipcmd($family_opt, 'route', 'del', $oldgw, 'dev', $eth); };
+   # warn if the route was deleted manually
+   warn $@ if $@;
+   }
}
 
# from this point on we save the configuration
-- 
2.11.0
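
For illustration, the sequence inside the container for an out-of-subnet
gateway (addresses made up; $ipcmd wraps 'ip' as in update_ipconfig):

    &$ipcmd('-4', 'addr', 'add', '192.0.2.10/32', 'dev', 'eth0');
    &$ipcmd('-4', 'route', 'add', '198.51.100.1', 'dev', 'eth0');    # host route, gateway outside the subnet
    &$ipcmd('-4', 'route', 'add', 'default', 'via', '198.51.100.1');
    # on a gateway change, this patch now also removes the stale host route:
    &$ipcmd('-4', 'route', 'del', '198.51.100.1', 'dev', 'eth0');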




[pve-devel] [PATCH container v2] fix #1885: delete old route after changing gateway

2018-08-29 Thread David Limbeck
changing a gateway can fail if there is an old conflicting route. this
can happen when changing gateways back and forth.
with this change all old routes that are no longer relevant are deleted
after a change.

Signed-off-by: David Limbeck 
---
changes since v1:
  using $oldgw everywhere
  check for an out-of-subnet route

 src/PVE/LXC.pm | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 1504bd0..f685279 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -791,9 +791,10 @@ sub update_ipconfig {
my $newip = $newnet->{$ip};
my $newgw = $newnet->{$gw};
my $oldip = $optdata->{$ip};
+   my $oldgw = $optdata->{$gw};
 
my $change_ip = &$safe_string_ne($oldip, $newip);
-   my $change_gw = &$safe_string_ne($optdata->{$gw}, $newgw);
+   my $change_gw = &$safe_string_ne($oldgw, $newgw);
 
return if !$change_ip && !$change_gw;
 
@@ -836,6 +837,11 @@ sub update_ipconfig {
# warn and continue
warn $@ if $@;
}
+   if ($oldgw && !PVE::Network::is_ip_in_cidr($oldgw, $oldip)) {
+	    eval { &$ipcmd($family_opt, 'route', 'del', $oldgw, 'dev', $eth); };
+   # keep warning in case route was deleted manually
+   warn $@ if $@;
+   }
}
 
# from this point on we save the configuration
-- 
2.11.0




[pve-devel] [PATCH container] fix #1885: delete old route when changing gateway

2018-08-24 Thread David Limbeck
changing a gateway can fail if there is an old conflicting route. this
can happen when changing gateways back and forth.
with this change all old routes that are no longer relevant are deleted
after a change.

Signed-off-by: David Limbeck 
---
 src/PVE/LXC.pm | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 1504bd0..4d9f5b0 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -791,6 +791,7 @@ sub update_ipconfig {
my $newip = $newnet->{$ip};
my $newgw = $newnet->{$gw};
my $oldip = $optdata->{$ip};
+   my $oldgw = $optdata->{$gw};
 
my $change_ip = &$safe_string_ne($oldip, $newip);
my $change_gw = &$safe_string_ne($optdata->{$gw}, $newgw);
@@ -836,6 +837,12 @@ sub update_ipconfig {
# warn and continue
warn $@ if $@;
}
+   if ($oldgw) {
+	    eval { &$ipcmd($family_opt, 'route', 'del', $oldgw, 'dev', $eth); };
+	    # if the route was not deleted, the guest might have deleted it manually
+   # warn and continue
+   warn $@ if $@;
+   }
}
 
# from this point on we save the configuration
-- 
2.11.0




[pve-devel] [PATCH manager v2] fix #1884: qemu vm: pending deletion of cdrom/dvd drive shows as hard disk in hardware tab

2018-08-24 Thread David Limbeck
Signed-off-by: David Limbeck 
---
changes since v1:
cleanup
code simplification

www/manager6/qemu/HardwareView.js | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/www/manager6/qemu/HardwareView.js 
b/www/manager6/qemu/HardwareView.js
index a87a9df1..a1bccc3c 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -14,7 +14,10 @@ Ext.define('PVE.qemu.HardwareView', {
if (rowdef.tdCls) {
metaData.tdCls = rowdef.tdCls;
if (rowdef.tdCls == 'pve-itype-icon-storage') { 
-   var value = me.getObjectValue(key, '', true);
+   var value = me.getObjectValue(key, '', false);
+   if (value === '') {
+   value = me.getObjectValue(key, '', true);
+   }
if (value.match(/vm-.*-cloudinit/)) {
metaData.tdCls = 'pve-itype-icon-cloud';
return rowdef.cloudheader;
-- 
2.11.0




Re: [pve-devel] [PATCH manager] fix #1884: qemu vm: pending deletion of cdrom/dvd drive shows as hard disk in hardware tab

2018-08-24 Thread David Limbeck

On 08/24/2018 10:51 AM, Thomas Lamprecht wrote:

On 8/23/18 3:50 PM, David Limbeck wrote:

Signed-off-by: David Limbeck 
---
  www/manager6/qemu/HardwareView.js | 10 +-
  1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/www/manager6/qemu/HardwareView.js 
b/www/manager6/qemu/HardwareView.js
index a87a9df1..9b99f77e 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -14,7 +14,15 @@ Ext.define('PVE.qemu.HardwareView', {
if (rowdef.tdCls) {
metaData.tdCls = rowdef.tdCls;
if (rowdef.tdCls == 'pve-itype-icon-storage') {
-   var value = me.getObjectValue(key, '', true);
+   var oldValue = me.getObjectValue(key, '', false);
+   var newValue = me.getObjectValue(key, '', true);
+   var value;
+   if(oldValue === '') {
+   value = newValue;
+   }
+   else {

coding style nit: we always place else on the same line as the closing bracket.

 [...]
} else {
 [...]
Still getting used to this kind of style, sorry. Should have looked over 
it a few more times.

Will send v2 in a few minutes.

but why so complicated and not just something like:

8<
diff --git a/www/manager6/qemu/HardwareView.js 
b/www/manager6/qemu/HardwareView.js
index a87a9df1..df9847da 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -14,7 +14,10 @@ Ext.define('PVE.qemu.HardwareView', {
 if (rowdef.tdCls) {
 metaData.tdCls = rowdef.tdCls;
 if (rowdef.tdCls == 'pve-itype-icon-storage') {
-   var value = me.getObjectValue(key, '', true);
+   var value = me.getObjectValue(key, '', false);
+   if(value === '') {
+   value = me.getObjectValue(key, '', true);
+   }
 if (value.match(/vm-.*-cloudinit/)) {
 metaData.tdCls = 'pve-itype-icon-cloud';
 return rowdef.cloudheader;
>8

?



+   value = oldValue;
+   }
if (value.match(/vm-.*-cloudinit/)) {
metaData.tdCls = 'pve-itype-icon-cloud';
return rowdef.cloudheader;






[pve-devel] [PATCH manager] fix #1884: qemu vm: pending deletion of cdrom/dvd drive shows as hard disk in hardware tab

2018-08-23 Thread David Limbeck
Signed-off-by: David Limbeck 
---
 www/manager6/qemu/HardwareView.js | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/www/manager6/qemu/HardwareView.js 
b/www/manager6/qemu/HardwareView.js
index a87a9df1..9b99f77e 100644
--- a/www/manager6/qemu/HardwareView.js
+++ b/www/manager6/qemu/HardwareView.js
@@ -14,7 +14,15 @@ Ext.define('PVE.qemu.HardwareView', {
if (rowdef.tdCls) {
metaData.tdCls = rowdef.tdCls;
if (rowdef.tdCls == 'pve-itype-icon-storage') { 
-   var value = me.getObjectValue(key, '', true);
+   var oldValue = me.getObjectValue(key, '', false);
+   var newValue = me.getObjectValue(key, '', true);
+   var value;
+   if(oldValue === '') {
+   value = newValue;
+   }
+   else {
+   value = oldValue;
+   }
if (value.match(/vm-.*-cloudinit/)) {
metaData.tdCls = 'pve-itype-icon-cloud';
return rowdef.cloudheader;
-- 
2.11.0




[pve-devel] [PATCH pve-docs] fix typos/wording

2018-08-23 Thread David Limbeck
Signed-off-by: David Limbeck 
---
 ha-manager.adoc | 28 ++--
 pveum.adoc  |  2 +-
 qm.adoc |  8 
 vzdump.adoc |  2 +-
 4 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/ha-manager.adoc b/ha-manager.adoc
index 3edbc50..4d38583 100644
--- a/ha-manager.adoc
+++ b/ha-manager.adoc
@@ -137,7 +137,7 @@ resource of type `vm` (virtual machine) with the ID 100.
 
 For now we have two important resources types - virtual machines and
 containers. One basic idea here is that we can bundle related software
-into such VM or container, so there is no need to compose one big
+into such a VM or container, so there is no need to compose one big
 service from other services, like it was done with `rgmanager`. In
 general, a HA managed resource should not depend on other resources.
 
@@ -156,7 +156,7 @@ GUI, or simply use the command line tool, for example:
 
 The HA stack now tries to start the resources and keeps it
 running. Please note that you can configure the ``requested''
-resources state. For example you may want that the HA stack stops the
+resources state. For example you may want the HA stack to stop the
 resource:
 
 
@@ -225,7 +225,7 @@ the following command:
 
 NOTE: This does not start or stop the resource.
 
-But all HA related task can be done on the GUI, so there is no need to
+But all HA related tasks can be done in the GUI, so there is no need to
 use the command line at all.
 
 
@@ -253,7 +253,7 @@ handles node fencing.
 .Locks in the LRM & CRM
 [NOTE]
 Locks are provided by our distributed configuration file system (pmxcfs).
-They are used to guarantee that each LRM is active once and working. As a
+They are used to guarantee that each LRM is active once and working. As an
 LRM only executes actions when it holds its lock, we can mark a failed node
 as fenced if we can acquire its lock. This lets us then recover any failed
 HA services securely without any interference from the now unknown failed node.
@@ -369,7 +369,7 @@ The LRM lost its lock, this means a failure happened and 
quorum was lost.
 After the LRM gets in the active state it reads the manager status
 file in `/etc/pve/ha/manager_status` and determines the commands it
 has to execute for the services it owns.
-For each command a worker gets started, this workers are running in
+For each command a worker gets started, these workers are running in
 parallel and are limited to at most 4 by default. This default setting
 may be changed through the datacenter configuration key `max_worker`.
 When finished the worker process gets collected and its result saved for
@@ -381,19 +381,19 @@ The default value of at most 4 concurrent workers may be 
unsuited for
 a specific setup. For example may 4 live migrations happen at the same
 time, which can lead to network congestions with slower networks and/or
 big (memory wise) services. Ensure that also in the worst case no congestion
-happens and lower the `max_worker` value if needed. In the contrary, if you
+happens and lower the `max_worker` value if needed. On the contrary, if you
 have a particularly powerful high end setup you may also want to increase it.
 
-Each command requested by the CRM is uniquely identifiable by an UID, when
-the worker finished its result will be processed and written in the LRM
+Each command requested by the CRM is uniquely identifiable by a UID, when
+the worker finishes its result will be processed and written in the LRM
 status file `/etc/pve/nodes//lrm_status`. There the CRM may collect
 it and let its state machine - respective the commands output - act on it.
 
 The actions on each service between CRM and LRM are normally always synced.
-This means that the CRM requests a state uniquely marked by an UID, the LRM
+This means that the CRM requests a state uniquely marked by a UID, the LRM
 then executes this action *one time* and writes back the result, also
 identifiable by the same UID. This is needed so that the LRM does not
-executes an outdated command.
+execute an outdated command.
 With the exception of the `stop` and the `error` command,
 those two do not depend on the result produced and are executed
 always in the case of the stopped state and once in the case of
@@ -430,11 +430,11 @@ lost agent lock::
 
 The CRM lost its lock, this means a failure happened and quorum was lost.
 
-It main task is to manage the services which are configured to be highly
+Its main task is to manage the services which are configured to be highly
 available and try to always enforce the requested state. For example, a
 service with the requested state 'started' will be started if its not
 already running. If it crashes it will be automatically started again.
-Thus the CRM dictates the actions which the LRM needs to execute.
+Thus the CRM dictates the actions the LRM needs to execute.
 
 When an node leaves the cluster quorum, its state changes to unknown.
 If the current CRM then can secu

[pve-devel] [qemu-server v2] fix #1865: CloudInit doesn't add IPv6

2018-08-22 Thread David Limbeck
changes since v1:
added description in subject 

---
 PVE/QemuServer/Cloudinit.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
index e90e1c0..53f1de9 100644
--- a/PVE/QemuServer/Cloudinit.pm
+++ b/PVE/QemuServer/Cloudinit.pm
@@ -341,7 +341,7 @@ sub nocloud_network {
if ($ip eq 'dhcp') {
$content .= "${i}- type: dhcp6\n";
} else {
-   $content .= "${i}- type: static6\n"
+   $content .= "${i}- type: static\n"
   . "${i}  address: $ip\n";
if (defined(my $gw = $ipconfig->{gw6})) {
$content .= "${i}  gateway: $gw\n";
-- 
2.11.0
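
For reference, the subnet stanza now emitted for a static IPv6 config
(addresses made up); the fix relies on cloud-init accepting plain 'static'
entries with an IPv6 address, deriving the address family from the address
itself:

    - type: static
      address: 2001:db8::10/64
      gateway: 2001:db8::1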




[pve-devel] [PATCH manager v2] fix #1872 Move button stays on the screen after closing

2018-08-20 Thread David Limbeck
---
 www/manager6/qemu/HDMove.js | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/www/manager6/qemu/HDMove.js b/www/manager6/qemu/HDMove.js
index f08fd316..df325f75 100644
--- a/www/manager6/qemu/HDMove.js
+++ b/www/manager6/qemu/HDMove.js
@@ -33,12 +33,10 @@ Ext.define('PVE.window.HDMove', {
success: function(response, options) {
var upid = response.result.data;
var win = Ext.create('Proxmox.window.TaskViewer', {
-   upid: upid,
-   taskDone: function(success) {
-   me.close();
-   }
+   upid: upid
});
win.show();
+   win.on('destroy', function() { me.close(); });
}
});
 
-- 
2.11.0
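
Presumably the relevant difference: 'taskDone' only fires once the move task
finishes, while the viewer window's 'destroy' event fires whenever the user
closes it, so the dialog with the Move button can no longer linger behind an
already dismissed task viewer.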



