Noam Slomianko has uploaded a new change for review.

Change subject: Load Balancing: never return null in rpc
......................................................................

Load Balancing: never return null in rpc

Because load balancing returned the results directly from
user code, it may try to send null/nil/none via RPC (not possible)

Change-Id: Iaf572035a5c44c521525090156921e12d6eabe67
Signed-off-by: Noam Slomianko <[email protected]>
---
A plugins/host_memory_balance.py
M src/ovirtscheduler/request_handler.py
2 files changed, 167 insertions(+), 1 deletion(-)


  git pull ssh://gerrit.ovirt.org:29418/ovirt-scheduler-proxy 
refs/changes/26/20026/1

diff --git a/plugins/host_memory_balance.py b/plugins/host_memory_balance.py
new file mode 100644
index 0000000..84a03a0
--- /dev/null
+++ b/plugins/host_memory_balance.py
@@ -0,0 +1,163 @@
+from ovirtsdk.xml import params
+from ovirtsdk.api import API
+import sys
+
+
class host_memory_balance():
    '''Migrate a VM away from the most over-utilized host.

    Picks the host with the least free memory below the
    ``minimum_host_memoryMB`` threshold, then selects one of its VMs to
    migrate to an under-utilized host.  Always prints a result tuple
    (never None), so the scheduler proxy can forward it over rpc.
    '''

    # Accepted plugin properties, used to present the user with options.
    properties_validation = \
        'minimum_host_memoryMB=[0-9]*;safe_selection=True|False'
    TO_BYTES = 1024 * 1024          # MB -> bytes multiplier
    MINIMUM_MEMORY_DEFAULT = 500    # MB
    SAFE_SELECTION_DEFAULT = 'True'

    def __init__(self):
        # Per-instance cache of host free memory (bytes).  Was a class
        # attribute, which would share stale values between instances.
        self.free_memory_cache = {}

    def quit(self):
        '''Print an empty result and terminate; never return None via rpc.'''
        print(['', []])
        sys.exit()

    def _get_connection(self):
        '''Open a connection to the engine REST API; None on failure.'''
        try:
            return API(url='http://localhost:8080',
                       username='admin@internal', password='1')
        except BaseException as ex:
            # letting the external proxy know there was an error
            sys.stderr.write('%s\n' % (ex,))
            return None

    def _get_hosts(self, host_ids, connection):
        '''Fetch all engine hosts with the given ids.'''
        return connection.hosts.list(
            query=" or ".join(["id=%s" % u for u in host_ids]))

    def getFreeMemory(self, host):
        '''Return the host's free memory in bytes, or -1 when unavailable.

        Getting free memory requires a REST call, so results are cached
        per host id.
        '''
        if host.id not in self.free_memory_cache:
            try:
                self.free_memory_cache[host.id] = host.get_statistics().get(
                    'memory.free').get_values().get_value()[0].get_datum()
            except Exception:
                # statistic missing/unreadable -- mark host as unusable
                self.free_memory_cache[host.id] = -1

        return self.free_memory_cache[host.id]

    def getOverUtilizedHostAndUnderUtilizedList(self, engine_hosts,
                                                minimum_host_memory):
        '''Return (most over-utilized host, list of under-utilized hosts).

        A host is under-utilized when its free memory exceeds the
        threshold; among the remaining hosts the one with the least
        free memory is the most over-utilized.  Hosts whose free memory
        is unknown (<= 0) are skipped.
        '''
        over_utilized_host = None
        under_utilized_hosts = []
        for host in engine_hosts:
            if not host:
                continue
            free_memory = self.getFreeMemory(host)
            if free_memory <= 0:
                continue
            if free_memory > minimum_host_memory:
                under_utilized_hosts.append(host)
                continue
            # take the host with the least amount of free memory
            if (over_utilized_host is None or
                    self.getFreeMemory(over_utilized_host) > free_memory):
                over_utilized_host = host
        return over_utilized_host, under_utilized_hosts

    def getMaximumVmMemory(self, hosts, minimum_host_memory):
        '''The maximum amount of memory that a migrated vm can have
        without sending any of the given hosts over the threshold.'''
        maximum_vm_memory = 0
        for host in hosts:
            available_memory = self.getFreeMemory(host) - minimum_host_memory
            # never exceed what the host can actually schedule
            available_memory = min(available_memory,
                                   host.get_max_scheduling_memory())
            if available_memory > maximum_vm_memory:
                maximum_vm_memory = available_memory

        return maximum_vm_memory

    def getBestVmForMigration(self, vms, maximum_vm_memory, memory_delta,
                              safe):
        '''Pick the vm to migrate, or None when no vm fits.

        safe     -> select the smallest vm.
        not safe -> select the smallest vm larger than the delta; if no
                    such vm exists take the largest one that still fits.

        Migrating a small vm is more likely to succeed and puts less
        strain on the network.
        '''
        selected_vm = None
        best_effort_vm = None
        for vm in vms:
            if vm.memory > maximum_vm_memory:
                # never select a vm that would send all the under
                # utilized hosts over the threshold
                continue
            if safe:
                if selected_vm is None or vm.memory < selected_vm.memory:
                    selected_vm = vm
            elif vm.memory > memory_delta:
                # BUGFIX: original compared against selected_vm.memmory
                # (typo), raising AttributeError on the second candidate
                if selected_vm is None or vm.memory < selected_vm.memory:
                    selected_vm = vm
            if best_effort_vm is None or vm.memory > best_effort_vm.memory:
                best_effort_vm = vm

        if not safe and selected_vm is None:
            selected_vm = best_effort_vm

        return selected_vm

    def do_balance(self, hosts_ids, args_map):
        '''Select a vm from the most over-utilized host to migrate.

        If safe_selection is true, select the smallest vm from the host;
        if false, try to take a vm larger than the amount of memory the
        host is missing.  Prints (vm id, candidate destination host ids);
        prints an empty result via quit() on any dead end.
        '''
        conn = self._get_connection()
        if conn is None:
            self.quit()

        # get our parameters from the map
        minimum_host_memory = int(args_map.get('minimum_host_memoryMB',
                                               self.MINIMUM_MEMORY_DEFAULT))
        minimum_host_memory = minimum_host_memory * self.TO_BYTES
        # BUGFIX: bool('False') is True -- any non-empty string is truthy,
        # so compare the string value explicitly
        safe = (str(args_map.get('safe_selection',
                                 self.SAFE_SELECTION_DEFAULT)) == 'True')

        # get all the hosts with the given ids
        engine_hosts = self._get_hosts(hosts_ids, conn)

        over_utilized_host, under_utilized_hosts = (
            self.getOverUtilizedHostAndUnderUtilizedList(engine_hosts,
                                                         minimum_host_memory))

        if over_utilized_host is None:
            self.quit()

        maximum_vm_memory = self.getMaximumVmMemory(under_utilized_hosts,
                                                    minimum_host_memory)

        # amount of memory the host is missing
        memory_delta = (minimum_host_memory -
                        self.getFreeMemory(over_utilized_host))

        host_vms = conn.vms.list('host=' + over_utilized_host.name)
        if host_vms is None:
            self.quit()

        # get the largest/smallest vm that fits the destination hosts
        selected_vm = self.getBestVmForMigration(host_vms, maximum_vm_memory,
                                                 memory_delta, safe)
        if selected_vm is None:
            # try another host?
            self.quit()

        under_utilized_hosts_ids = [host.id for host in under_utilized_hosts]
        print((selected_vm.id, under_utilized_hosts_ids))
diff --git a/src/ovirtscheduler/request_handler.py 
b/src/ovirtscheduler/request_handler.py
index ed0c793..6c44ba4 100644
--- a/src/ovirtscheduler/request_handler.py
+++ b/src/ovirtscheduler/request_handler.py
@@ -272,4 +272,7 @@
 
         log_adapter.info('returning: %s' % str(runner.getResults()))
 
-        return runner.getResults()
+        if runner.getResults() is None:
+            return ['', []]
+        else:
+            return runner.getResults()


-- 
To view, visit http://gerrit.ovirt.org/20026
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Iaf572035a5c44c521525090156921e12d6eabe67
Gerrit-PatchSet: 1
Gerrit-Project: ovirt-scheduler-proxy
Gerrit-Branch: master
Gerrit-Owner: Noam Slomianko <[email protected]>
_______________________________________________
Engine-patches mailing list
[email protected]
http://lists.ovirt.org/mailman/listinfo/engine-patches

Reply via email to