I just realized that the attachments weren't getting through. Here's
the patch to make Xen support work:
Index: trunk/src/tashi/nodemanager/vmcontrol/__init__.py
===================================================================
--- trunk/src/tashi/nodemanager/vmcontrol/__init__.py (revision 734779)
+++ trunk/src/tashi/nodemanager/vmcontrol/__init__.py (working copy)
@@ -18,4 +18,3 @@
from vmcontrolinterface import VmControlInterface
from qemu import Qemu
from xenpv import XenPV
-from newxen import NewXen
Index: trunk/src/tashi/nodemanager/vmcontrol/xenpv.py
===================================================================
--- trunk/src/tashi/nodemanager/vmcontrol/xenpv.py (revision 734779)
+++ trunk/src/tashi/nodemanager/vmcontrol/xenpv.py (working copy)
@@ -25,7 +25,7 @@
from vmcontrolinterface import VmControlInterface
from tashi.services.ttypes import Errors, InstanceState, TashiException
-from tashi.services.ttypes import Instance, MachineType
+from tashi.services.ttypes import Instance, MachineType, Host
from tashi import boolean, convertExceptions, ConnectionManager
from tashi.util import isolatedRPC
@@ -90,7 +90,7 @@
instance.typeObj = MachineType()
instance.typeObj.memory = int(vminfo['memory'])
instance.typeObj.cores = int(vminfo['cores'])
-
+ instance.disks = []
r[instance.vmId] = instance
return r
@@ -99,7 +99,7 @@
-class XenPV(threading.Thread):
+class XenPV(VmControlInterface, threading.Thread):
def __init__(self, config, dfs, cm):
threading.Thread.__init__(self)
if self.__class__ is VmControlInterface:
@@ -120,7 +120,7 @@
# invoked every (self.sleeptime) seconds
@synchronizedmethod
def cron(self):
- print 'xenpv cron woke up'
+# print 'xenpv cron woke up'
vmlist = listVms(self.vmNamePrefix)
# If we are supposed to be managing a VM that is not
# in the list, tell the CM
@@ -138,14 +138,7 @@
os.unlink(diskname)
except:
print 'WARNING could not delete transient disk %s' % diskname
- try:
- isolatedRPC(self.cm, 'vmExited', self.hostId, vmId)
- except Exception, e:
- print "RPC failed for vmExited on CM"
- print e
- raise e
- # FIXME: send this to the cm later
- # self.exitedVms[vmId] = child
+ self.nm.vmStateChange(a.vmId, a.state, InstanceState.Exited)
for vmId in vmlist.keys():
if not self.newvms.has_key(vmId):
print 'WARNING: found vm that should be managed, but is not'
@@ -164,12 +157,13 @@
image, macAddr, memory, cores):
fn = os.path.join("/tmp", vmName)
cfgstr = """
-# kernel="/boot/vmlinuz-2.6.24-19-xen"
-# ramdisk="/boot/initrd.img-2.6.24-19-xen"
+# kernel="/boot/vmlinuz-2.6.24-17-xen"
+# ramdisk="/boot/initrd.img-2.6.24-17-xen"
bootloader="/usr/bin/pygrub"
disk=['tap:qcow:%s,xvda1,w']
# vif = [ 'mac=%s' ]
-vif = ['ip=172.19.158.1']
+# vif = ['ip=172.19.158.1']
+vif = ['']
memory=%i
#cpus is a list of cpus for pinning, this is not what we want
#cpus=%i
@@ -181,7 +175,8 @@
f.close()
return fn
def deleteXenConfig(self, vmName):
- os.unlink(os.path.join("/tmp", vmName))
+ pass
+# os.unlink(os.path.join("/tmp", vmName))
########################################
def vmName(self, instanceId):
@@ -222,7 +217,7 @@
instance.typeObj.cores)
cmd = "xm create %s"%fn
r = os.system(cmd)
- # self.deleteXenConfig(name)
+# self.deleteXenConfig(name)
if r != 0:
print 'WARNING: "%s" returned %i' % ( cmd, r)
raise Exception, 'WARNING: "%s" returned %i' % ( cmd, r)
@@ -238,7 +233,7 @@
# suspend/resume. save/restore allow you to specify the state
# file, suspend/resume do not.
@synchronizedmethod
- def suspendVM(self, vmId, target, suspendCookie=None):
+ def suspendVm(self, vmId, target, suspendCookie=None):
# FIXME: don't use hardcoded /tmp for temporary data.
# Get tmp location from config
infofile = target + ".info"
@@ -267,7 +262,7 @@
return vmId
@synchronizedmethod
- def resumeVM(self, source):
+ def resumeVm(self, source):
infofile = source + ".info"
source = source + ".dat"
tmpfile = os.path.join("/tmp", source)
@@ -310,7 +305,7 @@
@synchronizedmethod
- def pauseVM(self, vmId):
+ def pauseVm(self, vmId):
r = os.system("xm pause %i"%vmId)
if r != 0:
print "xm pause failed for VM %i"%vmId
@@ -319,7 +314,7 @@
return vmId
@synchronizedmethod
- def unpauseVM(self, VMId):
+ def unpauseVm(self, VMId):
r = os.system("xm unpause %i"%VMId)
if r != 0:
print "xm unpause failed for VM %i"%VMId
@@ -328,7 +323,7 @@
return VMId
@synchronizedmethod
- def shutdownVM(self, vmId):
+ def shutdownVm(self, vmId):
r = os.system("xm shutdown %i"%vmId)
if r != 0:
print "xm shutdown failed for VM %i"%vmId
@@ -336,7 +331,7 @@
return vmId
@synchronizedmethod
- def destroyVM(self, vmId):
+ def destroyVm(self, vmId):
r = os.system("xm destroy %i"%vmId)
if r != 0:
print "xm destroy failed for VM %i"%vmId
@@ -345,10 +340,28 @@
@synchronizedmethod
- def getVMInfo(self, vmId):
+ def getVmInfo(self, vmId):
return self.newvms[vmId]
@synchronizedmethod
- def listVMs(self):
+ def listVms(self):
# On init, this should get a list from listVMs
return self.newvms.keys()
+
+
+ @synchronizedmethod
+ def getHostInfo(self):
+ host = Host()
+ memp = subprocess.Popen("xm info | awk '/^total_memory/ { print $3 }' ",
+ shell = True,
+ stdout = subprocess.PIPE)
+ mems = memp.stdout.readline()
+ host.memory = int(mems)
+ corep = subprocess.Popen("xm info | awk '/^nr_cpus/ { print $3 }' ",
+ shell = True,
+ stdout = subprocess.PIPE)
+ cores = corep.stdout.readline()
+ host.cores = int(cores)
+ return host
+
+
Index: trunk/src/tashi/nodemanager/vmcontrol/newxen.py
===================================================================
--- trunk/src/tashi/nodemanager/vmcontrol/newxen.py (revision 734779)
+++ trunk/src/tashi/nodemanager/vmcontrol/newxen.py (working copy)
@@ -1,130 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import cPickle
-import logging
-import os
-import threading
-import random
-import select
-import signal
-import socket
-import subprocess
-import sys
-import time
-
-import inspect # used to get current function
-def currentFunction(n=1):
- # get the name of our caller, e.g. the requesting function
- return inspect.stack()[n][3]
-
-from tashi.services.ttypes import *
-from tashi.util import broken, isolatedRPC
-from vmcontrolinterface import VmControlInterface
-
-log = logging.getLogger(__file__)
-
-
-import xenpv
-
-class NewXen(VmControlInterface):
- """VM Control for Paravirtualized Xen"""
-
- def __init__(self, config, dfs, cm):
- """Base init function -- it handles inserting config and dfs
- into the object as well as checking that the class type is
- not VmControlInterface"""
- print 'NewXen::init called'
- if self.__class__ is VmControlInterface:
- raise NotImplementedError
- self.config = config
- self.dfs = dfs
- self.cm = cm
- self.xenpv = xenpv.XenPV(self.config, self.dfs, self.cm)
-
- def instantiateVm(self, instance):
- """Takes an InstanceConfiguration, creates a VM based on it,
- and returns the vmId"""
- print 'XenPV::%s called' % currentFunction()
- # FIXME: this is NOT the right way to get out hostId
- self.hostId = instance.hostId
- return self.xenpv.instantiateVm(instance)
-
-
- def suspendVm(self, vmId, target, suspendCookie=None):
- """Suspends a vm to the target on the dfs, including the
- suspendCookie"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.suspendVM(vmId, target, suspendCookie)
-
-
- def resumeVm(self, source):
- """Resumes a vm from the dfs and returns the newly created
- vmId as well as the suspendCookie in a tuple"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.resumeVM(source)
-
- def prepReceiveVm(self, instance, source):
- """First call made as part of vm migration -- it is made to
- the target machine and it returns a transportCookie"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.prepReceiveVm(instance, source)
-
- def migrateVm(self, vmId, target, transportCookie):
- """Second call made as part of a vm migration -- it is made
- to the source machine and it does not return until the
- migration is complete"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.migrateVm(vmId, target,transportCookie)
-
- def receiveVm(self, transportCookie):
- """Third call made as part of a vm migration -- it is made to
- the target machine and it does not return until the
- migration is complete, it returns the new vmId"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.receiveVm(transportCookie)
-
- def pauseVm(self, vmId):
- """Pauses a vm and returns nothing"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.pauseVM(vmId)
-
- def unpauseVm(self, vmId):
- """Unpauses a vm and returns nothing"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.unpauseVM(vmId)
-
- def shutdownVm(self, vmId):
- """Performs a clean shutdown on a vm and returns nothing"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.shutdownVM(vmId)
-
-
- def destroyVm(self, vmId):
- """Forces the exit of a vm and returns nothing"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.destroyVM(vmId)
-
- def getVmInfo(self, vmId):
- """Returns the InstanceConfiguration for the given vmId"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.getVMInfo(vmId)
-
- def listVms(self):
- """Returns a list of vmIds to the caller"""
- print 'XenPV::%s called' % currentFunction()
- return self.xenpv.listVMs()