[pve-devel] [PATCH] Add the NIC flag link_down=[0|1] to the config, and set the NIC link state accordingly when the flag is present. To verify the result we use the QEMU extension get_link_status from our patches.

2014-12-22 Thread Wolfgang Link

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/API2/Qemu.pm  |   57 +++--
 PVE/QemuServer.pm |   22 -
 2 files changed, 76 insertions(+), 3 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 066726d..f3fe3e6 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -849,9 +849,36 @@ my $vmconfig_update_net = sub {
 
 	if(($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag}) || ($newnet->{firewall} ne $oldnet->{firewall})){
 	    PVE::Network::tap_unplug($iface);
-	    PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag}, $newnet->{firewall});
+	    PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag});
 	}
 
+	my $verify_link_status = sub {
+	    my ($expected, $vmid) = @_;
+	    my $nic_status;
+	    eval {
+		my %param = (name => $opt);
+		$nic_status = PVE::QemuServer::vm_mon_cmd($vmid, "get_link_status", %param);
+	    };
+	    die $@ if $@;
+	    die "changing nic status doesn't work!" if $expected != $nic_status;
+	};
+
+	if($newnet->{link_down}){
+	    eval {
+		my %param = (name => $opt, up => \0);
+		PVE::QemuServer::vm_mon_cmd($vmid, "set_link", %param);
+	    };
+	    die $@ if $@;
+	    &$verify_link_status(0, $vmid);
+	}
+	if($oldnet->{link_down} && !$newnet->{link_down}){
+	    eval {
+		my %param = (name => $opt, up => \1);
+		PVE::QemuServer::vm_mon_cmd($vmid, "set_link", %param);
+	    };
+	    die $@ if $@;
+	    &$verify_link_status(1, $vmid);
+	}
 	}else{
 	    #if bridge/nat mode change, we try to hot-unplug
 	    die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
@@ -982,7 +1009,7 @@ my $update_vm_api  = sub {
my $running = PVE::QemuServer::check_running($vmid);
 
foreach my $opt (keys %$param) { # add/change
-
+   
$conf = PVE::QemuServer::load_config($vmid); # update/reload
 
 	next if $conf->{$opt} && ($param->{$opt} eq $conf->{$opt}); # skip if nothing changed
@@ -1646,11 +1673,37 @@ __PACKAGE__->register_method({
 	my $realcmd = sub {
 	    my $upid = shift;
 
+	    my $conf = PVE::QemuServer::load_config($vmid);
+
 	    syslog('info', "start VM $vmid: $upid\n");
 
 	    PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
 				      $machine, $spice_ticket);
 
+	    foreach my $nic (keys %$conf){
+
+		if($nic =~ m/^net\d+$/){
+
+		    my $nicconf = PVE::QemuServer::parse_net($conf->{$nic});
+
+		    if($nicconf->{link_down}){
+
+			eval {
+			    my %param = (name => $nic, up => \0);
+			    PVE::QemuServer::vm_mon_cmd($vmid, "set_link", %param);
+			};
+			die $@ if $@;
+
+			my $nic_status;
+			eval {
+			    my %param = (name => $nic);
+			    $nic_status = PVE::QemuServer::vm_mon_cmd($vmid, "get_link_status", %param);
+			};
+			warn $@ if $@;
+			warn "Error: link from $nic is not down" if $nic_status != 0;
+		    }
+		}
+	    }
 	    return;
 	};
 
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4840c73..7d8d863 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -510,7 +510,7 @@ my $nic_model_list_txt = join(' ', sort @$nic_model_list);
 my $netdesc = {
     optional => 1,
     type => 'string', format => 'pve-qm-net',
-    typetext => "MODEL=XX:XX:XX:XX:XX:XX [,bridge=<dev>][,queues=<nbqueues>][,rate=<mbps>][,tag=<vlanid>][,firewall=0|1]",
+    typetext => "MODEL=XX:XX:XX:XX:XX:XX [,bridge=<dev>][,queues=<nbqueues>][,rate=<mbps>][,tag=<vlanid>][,firewall=0|1][,link_down=0|1]",
     description => <<EODESCR,
Specify network devices.
 
@@ -1378,6 +1378,8 @@ sub parse_net {
 	    $res->{tag} = $1;
 	} elsif ($kvp =~ m/^firewall=(\d+)$/) {
 	    $res->{firewall} = $1;
+	} elsif ($kvp =~ m/^link_down=(\d+)$/) {
+	    $res->{link_down} = $1;
 	} else {
 	    return undef;
 	}
@@ -1398,6 +1400,7 @@ sub print_net {
     $res .= ",rate=$net->{rate}" if $net->{rate};
     $res .= ",tag=$net->{tag}" if $net->{tag};
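For illustration, a guest config line using the new flag might look like this (hypothetical MAC and bridge):

    net0: virtio=DE:AD:BE:EF:00:01,bridge=vmbr0,link_down=1

The flag maps onto QEMU's set_link command, so the same toggle can be tried by hand via the monitor, e.g. (hypothetical VM id):

    qm monitor 100
    qm> set_link net0 off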

[pve-devel] [PATCH] Implement the disconnect NIC checkbox on the network edit panel.

2014-12-22 Thread Wolfgang Link

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 www/manager/Parser.js   |5 +
 www/manager/qemu/NetworkEdit.js |8 +++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/www/manager/Parser.js b/www/manager/Parser.js
index 11fbe49..3494637 100644
--- a/www/manager/Parser.js
+++ b/www/manager/Parser.js
@@ -33,6 +33,8 @@ Ext.define('PVE.Parser', { statics: {
 res.tag = match_res[1];
} else if ((match_res = p.match(/^firewall=(\d+)$/)) !== null) {
 res.firewall = match_res[1];
+   } else if ((match_res = p.match(/^link_down=(\d+)$/)) !== null) {
+res.disconnect = match_res[1];
} else {
errors = true;
return false; // break
@@ -64,6 +66,9 @@ Ext.define('PVE.Parser', { statics: {
 	if (net.rate) {
 	    netstr += ",rate=" + net.rate;
 	}
+	if (net.disconnect) {
+	    netstr += ",link_down=" + net.disconnect;
+	}
return netstr;
 },
 
diff --git a/www/manager/qemu/NetworkEdit.js b/www/manager/qemu/NetworkEdit.js
index 1e5b97b..ae3147e 100644
--- a/www/manager/qemu/NetworkEdit.js
+++ b/www/manager/qemu/NetworkEdit.js
@@ -18,6 +18,7 @@ Ext.define('PVE.qemu.NetworkInputPanel', {
me.network.bridge = undefined;
}
me.network.macaddr = values.macaddr;
+   me.network.disconnect = values.disconnect;
 
if (values.rate) {
me.network.rate = values.rate;
@@ -147,7 +148,12 @@ Ext.define('PVE.qemu.NetworkInputPanel', {
value: '',
emptyText: 'unlimited',
allowBlank: true
-   }
+   },
+   {
+   xtype: 'pvecheckbox',
+   fieldLabel: gettext('Disconnect'),
+   name: 'disconnect'
+   },
];
 
me.callParent();
-- 
1.7.10.4
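As a sketch of the round trip: ticking the Disconnect checkbox sets net.disconnect, which the print helper above serializes into the net string as ",link_down=1", giving something like (hypothetical values):

    virtio=DE:AD:BE:EF:00:01,bridge=vmbr0,link_down=1

and the parser maps link_down back to res.disconnect when the panel is reopened.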




Re: [pve-devel] [PATCH] this commit:

2014-12-22 Thread Dietmar Maurer

> 	$timeout = 60 if !defined($timeout);
> -	my $config = load_config($vmid);

Oh, we already have $conf loaded - good catch!

> 	eval {
> 	    if ($shutdown) {
> -		if ($config->{agent}) {
> +		if (!$nocheck && $conf->{agent}) {
> 		    vm_qmp_command($vmid, { execute => "guest-shutdown" }, $nocheck);

we pass $nocheck to vm_qmp_command, so the following should be good enough:

-	if ($config->{agent}) {
+	if ($conf->{agent}) {


Would you mind cleaning up the patch? Or can I simply commit a modified version?



Re: [pve-devel] [PATCH] this commit:

2014-12-22 Thread Stefan Priebe - Profihost AG

> On 22.12.2014 at 16:36, Dietmar Maurer diet...@proxmox.com wrote:
>
>> 	$timeout = 60 if !defined($timeout);
>> -	my $config = load_config($vmid);
>
> Oh, we already have $conf loaded - good catch!
>
>> 	eval {
>> 	    if ($shutdown) {
>> -		if ($config->{agent}) {
>> +		if (!$nocheck && $conf->{agent}) {
>> 		    vm_qmp_command($vmid, { execute => "guest-shutdown" }, $nocheck);
>
> we pass $nocheck to vm_qmp_command, so the following should be good enough:
>
> -	if ($config->{agent}) {
> +	if ($conf->{agent}) {

But $conf may be undefined if we do not load the config, so checking for a key
might issue a warning.

> Would you mind cleaning up the patch? Or can I simply commit a modified
> version?
 


Re: [pve-devel] [PATCH] this commit:

2014-12-22 Thread Dietmar Maurer
>> we pass $nocheck to vm_qmp_command, so the following should be good enough:
>>
>> -	if ($config->{agent}) {
>> +	if ($conf->{agent}) {
>
> But $conf may be undefined if we do not load the config, so checking for a key
> might issue a warning.

OK, you are right. I just applied your patch - thanks!



Re: [pve-devel] Quorum problems with Intel 10 Gb/s NICs and VMs turning off

2014-12-22 Thread Alexandre DERUMIER
> After several checks, I found the problem in these two servers: a
> configuration in the hardware BIOS that isn't compatible with
> pve-kernel-3.10.0-5, and my NICs were taking the link down and then up again.
> (I guess that soon I will communicate my BIOS setup for the Dell R720.)
> ... :-)

I'm interested to know what this option is ;)



> The strange behaviour is that when I run pvecm status, I get this message:
> Version: 6.2.0
> Config Version: 41
> Cluster Name: ptrading
> Cluster Id: 28503
> Cluster Member: Yes
> Cluster Generation: 8360
> Membership state: Cluster-Member
> Nodes: 8
> Expected votes: 8
> Total votes: 8
> Node votes: 1
> Quorum: 5
> Active subsystems: 6
> Flags:
> Ports Bound: 0 177
> Node name: pve5
> Node ID: 5
> Multicast addresses: 239.192.111.198
> Node addresses: 192.100.100.50

So you have quorum here. All nodes are OK. I don't see any problem.


> And in the PVE GUI I see the red light on all the other nodes.

That means the pvestatd daemon is hanging/crashed.


Can you check that you can write to /etc/pve?
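A quick sketch of that check (any scratch file name works):

    touch /etc/pve/.writetest && rm /etc/pve/.writetest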

if not, try to restart

/etc/init.d/pve-cluster restart

then 

/etc/init.d/pvedaemon restart
/etc/init.d/pvestatd restart



- Original Mail -
From: Cesar Peschiera br...@click.com.py
To: aderumier aderum...@odiso.com, pve-devel pve-devel@pve.proxmox.com
Sent: Monday, December 22, 2014 04:01:31
Subject: Re: [pve-devel] Quorum problems with Intel 10 Gb/s NICs and VMs turning off

After several checks, I found the problem in these two servers: a
configuration in the hardware BIOS that isn't compatible with
pve-kernel-3.10.0-5, and my NICs were taking the link down and then up again.
(I guess that soon I will communicate my BIOS setup for the Dell R720.)
... :-)

But now I have another problem, with the mix of PVE-manager 3.3-5 and 2.3-13
versions in a PVE cluster of 8 nodes: I am losing quorum on several nodes
very often.

Moreover, for now I cannot upgrade my old PVE nodes, so for the moment I
would like to know if it is possible to make a quick configuration so that
all my nodes always have quorum.

The strange behaviour is that when I run pvecm status, I get this message:
Version: 6.2.0
Config Version: 41
Cluster Name: ptrading
Cluster Id: 28503
Cluster Member: Yes
Cluster Generation: 8360
Membership state: Cluster-Member
Nodes: 8
Expected votes: 8
Total votes: 8
Node votes: 1
Quorum: 5
Active subsystems: 6
Flags:
Ports Bound: 0 177
Node name: pve5
Node ID: 5
Multicast addresses: 239.192.111.198
Node addresses: 192.100.100.50

And in the PVE GUI I see the red light on all the other nodes.

Can I apply some kind of temporary solution, such as "Quorum: 1", so that my
nodes can work well and don't show this strange behaviour? (Only until I
perform the updates.)
Or what would be the simplest and quickest temporary solution to avoid
upgrading my nodes?
(Something like adding a line to the rc.local file that says: pvecm expected 1)

Note about the quorum: I don't have any hardware fence device enabled, so I
don't care whether each node always has quorum (I can always turn the server
off manually and brutally if necessary).
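(A minimal sketch of that rc.local workaround, assuming the stock pvecm CLI;
note that forcing expected votes to 1 removes the protection quorum provides:)

    # /etc/rc.local (hypothetical placement, before the final "exit 0")
    pvecm expected 1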

- Original Message -
From: Cesar Peschiera br...@click.com.py
To: Alexandre DERUMIER aderum...@odiso.com
Cc: pve-devel pve-devel@pve.proxmox.com
Sent: Saturday, December 20, 2014 9:30 AM
Subject: Re: [pve-devel] Quorum problems with Intel 10 Gb/s NICs and VMs turning off


> Hi Alexandre
>
> I put the 192.100.100.51 IP address directly on bond0, and I don't have the
> network enabled (as if the node were totally isolated).
>
> This was my setup:
> ---
> auto bond0
> iface bond0 inet static
>     address 192.100.100.51
>     netmask 255.255.255.0
>     gateway 192.100.100.4
>     slaves eth0 eth2
>     bond_miimon 100
>     bond_mode 802.3ad
>     bond_xmit_hash_policy layer2
>
> auto vmbr0
> iface vmbr0 inet manual
>     bridge_ports bond0
>     bridge_stp off
>     bridge_fd 0
>     post-up echo 0 > /sys/devices/virtual/net/vmbr0/bridge/multicast_snooping
>     post-up echo 1 > /sys/class/net/vmbr0/bridge/multicast_querier
>
> .. :-(
>
> Some other suggestion?
 
> - Original Message -
> From: Alexandre DERUMIER aderum...@odiso.com
> To: Cesar Peschiera br...@click.com.py
> Cc: pve-devel pve-devel@pve.proxmox.com
> Sent: Friday, December 19, 2014 7:59 AM
> Subject: Re: [pve-devel] Quorum problems with Intel 10 Gb/s NICs and VMs turning off
 
 
> maybe you can try to put the 192.100.100.51 IP address directly on bond0,
>
> to avoid corosync traffic going through vmbr0.
>
> (I remember some old offloading bugs with 10GbE NICs and the Linux bridge)
 
 
> - Original Mail -
> From: Cesar Peschiera br...@click.com.py
> To: aderumier aderum...@odiso.com
> Cc: pve-devel pve-devel@pve.proxmox.com
> Sent: Friday, December 19, 2014 11:08:33
> Subject: Re: [pve-devel] Quorum problems with Intel 10 Gb/s NICs and VMs turning off
 
>> can you post your /etc/network/interfaces of these 10 Gb/s nodes?
>
> This is my configuration:
 

[pve-devel] backup lock

2014-12-22 Thread lyt_yudi
hi, all

INFO: starting new backup job: vzdump 121 --remove 0 --mode snapshot --compress lzo --storage local --node t3
INFO: Starting Backup of VM 121 (qemu)
INFO: status = running
INFO: VM is locked (backup)
ERROR: Backup of VM 121 failed - command 'qm set 121 --lock backup' failed: exit code 25
INFO: Backup job finished with errors
TASK ERROR: job errors

In this case you must run qm unlock 121 on the CLI to unlock VM 121.

Maybe this is a bug :(

Could this feature be added to the API? Or could the VM be unlocked before a new backup job?


lyt_yudi
lyt_y...@icloud.com







Re: [pve-devel] backup lock

2014-12-22 Thread Dietmar Maurer

On 12/23/2014 02:58 AM, lyt_yudi wrote:

> hi, all
>
> INFO: starting new backup job: vzdump 121 --remove 0 --mode snapshot --compress lzo --storage local --node t3
> INFO: Starting Backup of VM 121 (qemu)
> INFO: status = running
> INFO: VM is locked (backup)
> ERROR: Backup of VM 121 failed - command 'qm set 121 --lock backup' failed: exit code 25
> INFO: Backup job finished with errors
> TASK ERROR: job errors
>
> In this case you must run qm unlock 121 on the CLI to unlock VM 121.
>
> Maybe this is a bug :(
>
> Could this feature be added to the API? Or could the VM be unlocked before a new backup job?

This indicates that something is wrong, maybe a crashed backup job?
You should check if there is an old backup task still running before
using unlock.
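A minimal sketch of that check (assuming a standard PVE 3.x node; the [v] pattern keeps grep from matching itself):

    ps aux | grep [v]zdump    # any output means a backup task is still alive
    qm unlock 121             # only once nothing is running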





Re: [pve-devel] backup lock

2014-12-22 Thread lyt_yudi
sorry, forgot to send to pve-devel :(

> On December 23, 2014, at 12:59, Dietmar Maurer diet...@proxmox.com wrote:
>
> This indicates that something is wrong, maybe a crashed backup job?

No, the backup job was cancelled manually, and then a new backup job was started.

> You should check if there is an old backup task still running before using
> unlock.

There is no old backup task.

thanks.



Re: [pve-devel] backup lock

2014-12-22 Thread lyt_yudi

> On December 23, 2014, at 13:20, lyt_yudi lyt_y...@icloud.com wrote:
>
>> This indicates that something is wrong, maybe a crashed backup job?

INFO: starting new backup job: vzdump 121 --remove 0 --mode snapshot --compress lzo --storage local --node t3
INFO: Starting Backup of VM 121 (qemu)
INFO: status = running
INFO: update VM 121: -lock backup
INFO: exclude disk 'virtio1' (backup=no)
INFO: backup mode: snapshot
INFO: ionice priority: 7
INFO: snapshots found (not included into backup)
INFO: creating archive '/var/lib/vz/dump/vzdump-qemu-121-2014_12_23-13_22_52.vma.lzo'
INFO: started backup task 'd7330854-dbb6-4461-aba5-f3b604bcfa34'
INFO: status: 0% (88211456/34359738368), sparse 0% (83898368), duration 3, 29/1 MB/s
ERROR: interrupted by signal
INFO: aborting backup job

This ERROR is caused by manual cancellation.

Maybe this case could be integrated into the unlock operation.

thanks.



Re: [pve-devel] backup lock

2014-12-22 Thread Wolfgang Link
Can you please post your version of PVE:

pveversion -v

because I can't reproduce it on my machine, neither in the GUI nor on the CLI.

How do you cancel the job manually?

Regards

Wolfgang

> On December 23, 2014 at 6:36 AM lyt_yudi lyt_y...@icloud.com wrote:
>
>> On December 23, 2014, at 13:20, lyt_yudi lyt_y...@icloud.com wrote:
>>
>>> This indicates that something is wrong, maybe a crashed backup job?
>
> INFO: starting new backup job: vzdump 121 --remove 0 --mode snapshot
> --compress lzo --storage local --node t3
> INFO: Starting Backup of VM 121 (qemu)
> INFO: status = running
> INFO: update VM 121: -lock backup
> INFO: exclude disk 'virtio1' (backup=no)
> INFO: backup mode: snapshot
> INFO: ionice priority: 7
> INFO: snapshots found (not included into backup)
> INFO: creating archive
> '/var/lib/vz/dump/vzdump-qemu-121-2014_12_23-13_22_52.vma.lzo'
> INFO: started backup task 'd7330854-dbb6-4461-aba5-f3b604bcfa34'
> INFO: status: 0% (88211456/34359738368), sparse 0% (83898368), duration 3,
> 29/1 MB/s
> ERROR: interrupted by signal
> INFO: aborting backup job
>
> This ERROR is caused by manual cancellation.
>
> Maybe this case could be integrated into the unlock operation.
>
> thanks.



Re: [pve-devel] backup lock

2014-12-22 Thread Gökalp Çakıcı
You can cancel the job via the web interface while the backup job is running.
I tried that scenario and reproduced the situation: when you use the stop
button and then try to start the VM, it says it is locked. First you unlock it
from the command line with qm unlock <vmid>, and then you can start the VM.
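A minimal recovery sequence for that situation (VM id from this thread):

    qm unlock 121    # clear the stale backup lock
    qm start 121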


On 12/23/14 9:13 AM, Wolfgang Link wrote:

> Can you please post your version of PVE:
>
> pveversion -v
>
> because I can't reproduce it on my machine, neither in the GUI nor on the CLI.
>
> How do you cancel the job manually?
>
> Regards
>
> Wolfgang
>
>> On December 23, 2014 at 6:36 AM lyt_yudi lyt_y...@icloud.com wrote:
>>
>>> On December 23, 2014, at 13:20, lyt_yudi lyt_y...@icloud.com wrote:
>>>
>>>> This indicates that something is wrong, maybe a crashed backup job?
>>
>> INFO: starting new backup job: vzdump 121 --remove 0 --mode snapshot
>> --compress lzo --storage local --node t3
>> INFO: Starting Backup of VM 121 (qemu)
>> INFO: status = running
>> INFO: update VM 121: -lock backup
>> INFO: exclude disk 'virtio1' (backup=no)
>> INFO: backup mode: snapshot
>> INFO: ionice priority: 7
>> INFO: snapshots found (not included into backup)
>> INFO: creating archive
>> '/var/lib/vz/dump/vzdump-qemu-121-2014_12_23-13_22_52.vma.lzo'
>> INFO: started backup task 'd7330854-dbb6-4461-aba5-f3b604bcfa34'
>> INFO: status: 0% (88211456/34359738368), sparse 0% (83898368), duration 3,
>> 29/1 MB/s
>> ERROR: interrupted by signal
>> INFO: aborting backup job
>>
>> This ERROR is caused by manual cancellation.
>>
>> Maybe this case could be integrated into the unlock operation.
>>
>> thanks.


--
Gokalp Cakici
Pusula Iletisim Hizmetleri
T: +90-212-2134142
F: +90-212-2135958
http://www.pusula.net.tr
http://www.uZmanPosta.com
