Updated Branches:
  refs/heads/master 94d8eb71f -> a50cf618e

bug CS-15278: For removing clusters crossing the threshold, find the list of 
clusters through the db instead of iterating over clusters one by one in the java code.


Project: http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/repo
Commit: 
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/commit/a50cf618
Tree: http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/tree/a50cf618
Diff: http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/diff/a50cf618

Branch: refs/heads/master
Commit: a50cf618ec444459af9fc55f04c8f4df0a0b6188
Parents: 94d8eb7
Author: Nitin Mehta <[email protected]>
Authored: Mon Aug 13 16:20:57 2012 +0530
Committer: Nitin Mehta <[email protected]>
Committed: Mon Aug 13 16:20:57 2012 +0530

----------------------------------------------------------------------
 api/src/com/cloud/deploy/DeploymentPlanner.java    |    8 ++
 server/src/com/cloud/capacity/dao/CapacityDao.java |    3 +-
 .../com/cloud/capacity/dao/CapacityDaoImpl.java    |   54 ++++++++++++-
 server/src/com/cloud/deploy/FirstFitPlanner.java   |   63 +++++++--------
 4 files changed, 93 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/a50cf618/api/src/com/cloud/deploy/DeploymentPlanner.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/deploy/DeploymentPlanner.java 
b/api/src/com/cloud/deploy/DeploymentPlanner.java
index cb99a76..e5dcff1 100644
--- a/api/src/com/cloud/deploy/DeploymentPlanner.java
+++ b/api/src/com/cloud/deploy/DeploymentPlanner.java
@@ -16,6 +16,7 @@
 // under the License.
 package com.cloud.deploy;
 
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -180,6 +181,13 @@ public interface DeploymentPlanner extends Adapter {
             _clusterIds.add(clusterId);
         }
 
+        public void addClusterList(Collection<Long> clusterList) {
+            if (_clusterIds == null) {
+                _clusterIds = new HashSet<Long>();
+            }
+            _clusterIds.addAll(clusterList);
+        }
+        
         public void addHost(long hostId) {
             if (_hostIds == null) {
                 _hostIds = new HashSet<Long>();

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/a50cf618/server/src/com/cloud/capacity/dao/CapacityDao.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/capacity/dao/CapacityDao.java 
b/server/src/com/cloud/capacity/dao/CapacityDao.java
index 360c863..0c0723b 100755
--- a/server/src/com/cloud/capacity/dao/CapacityDao.java
+++ b/server/src/com/cloud/capacity/dao/CapacityDao.java
@@ -41,4 +41,5 @@ public interface CapacityDao extends GenericDao<CapacityVO, 
Long> {
     List<SummedCapacity> listCapacitiesGroupedByLevelAndType(Integer 
capacityType, Long zoneId, Long podId, Long clusterId, int level, Long limit);  
     void updateCapacityState(Long dcId, Long podId, Long clusterId,
             Long hostId, String capacityState);
-}
+       List<Long> listClustersCrossingThreshold(short capacityType, Long 
zoneId, Float disableThreshold, long computeRequested, Float overProvFactor);
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/a50cf618/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java 
b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java
index 0c2ca0f..a2df475 100755
--- a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java
+++ b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java
@@ -34,6 +34,7 @@ import com.cloud.storage.Storage;
 import com.cloud.storage.StoragePoolVO;
 import com.cloud.storage.dao.StoragePoolDaoImpl;
 import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
 import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.db.Filter;
 import com.cloud.utils.db.GenericDaoBase;
@@ -108,7 +109,12 @@ public class CapacityDaoImpl extends 
GenericDaoBase<CapacityVO, Long> implements
                                                                             
"WHERE  total_capacity > 0 AND cluster_id is not null AND 
capacity_state='Enabled'";
     private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2 = " 
GROUP BY cluster_id, capacity_type order by percent desc limit ";
     private static final String UPDATE_CAPACITY_STATE = "UPDATE 
`cloud`.`op_host_capacity` SET capacity_state = ? WHERE ";
-    
+    private static final String LIST_CLUSTERS_CROSSING_THRESHOLD = "SELECT 
cluster_id " +
+               "FROM (SELECT cluster_id, ( (sum(capacity.used_capacity) + 
sum(capacity.reserved_capacity) + ?)/sum(total_capacity) ) ratio "+
+               "FROM `cloud`.`op_host_capacity` capacity "+
+               "WHERE capacity.data_center_id = ? AND capacity.capacity_type = 
? AND capacity.total_capacity > 0 "+                                    
+               "GROUP BY cluster_id) tmp " +
+               "WHERE tmp.ratio > ? ";
     
     
     public CapacityDaoImpl() {
@@ -132,6 +138,52 @@ public class CapacityDaoImpl extends 
GenericDaoBase<CapacityVO, Long> implements
        
        _allFieldsSearch.done();
     }
+          
+    @Override
+    public  List<Long> listClustersCrossingThreshold(short capacityType, Long 
zoneId, Float disableThreshold, long compute_requested, Float overProvFactor){
+ 
+         Transaction txn = Transaction.currentTxn();
+         PreparedStatement pstmt = null;
+         List<Long> result = new ArrayList<Long>();         
+         StringBuilder sql = new 
StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD);
+         
+ 
+         try {
+             pstmt = txn.prepareAutoCloseStatement(sql.toString());
+             pstmt.setLong(1, compute_requested);
+             pstmt.setLong(2, zoneId);
+             pstmt.setShort(3, capacityType);                          
+             pstmt.setFloat(4, disableThreshold*overProvFactor);             
+ 
+             ResultSet rs = pstmt.executeQuery();
+             while (rs.next()) {
+                 result.add(rs.getLong(1));
+             }
+             return result;
+         } catch (SQLException e) {
+             throw new CloudRuntimeException("DB Exception on: " + sql, e);
+         } catch (Throwable e) {
+                throw new CloudRuntimeException("Caught: " + sql, e);
+         } 
+     }
+
+    /*public static String preparePlaceHolders(int length) {
+        StringBuilder builder = new StringBuilder();
+        for (int i = 0; i < length;) {
+            builder.append("?");
+            if (++i < length) {
+                builder.append(",");
+            }
+        }
+        return builder.toString();
+    }
+
+    public static void setValues(PreparedStatement preparedStatement, 
Object... values) throws SQLException {
+        for (int i = 0; i < values.length; i++) {
+            preparedStatement.setObject(i + 1, values[i]);
+        }
+    }*/
+
     
     @Override
     public  List<SummedCapacity> findCapacityBy(Integer capacityType, Long 
zoneId, Long podId, Long clusterId, String resource_state){

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/a50cf618/server/src/com/cloud/deploy/FirstFitPlanner.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java 
b/server/src/com/cloud/deploy/FirstFitPlanner.java
index d22b55f..88b0cbd 100755
--- a/server/src/com/cloud/deploy/FirstFitPlanner.java
+++ b/server/src/com/cloud/deploy/FirstFitPlanner.java
@@ -73,6 +73,7 @@ import com.cloud.storage.dao.VolumeDao;
 import com.cloud.user.AccountManager;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
 import com.cloud.utils.component.Adapters;
 import com.cloud.utils.component.Inject;
 import com.cloud.vm.DiskProfile;
@@ -457,7 +458,7 @@ public class FirstFitPlanner extends PlannerBase implements 
DeploymentPlanner {
        return capacityList;
     }
     
-    private void removeClustersCrossingThreshold(List<Long> clusterList, 
ExcludeList avoid, VirtualMachineProfile<? extends VirtualMachine> vmProfile){
+    private void removeClustersCrossingThreshold(List<Long> 
clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile<? extends 
VirtualMachine> vmProfile, DeploymentPlan plan){
                
        Map<Short,Float> capacityThresholdMap = getCapacityThresholdMap();
        List<Short> capacityList = getCapacitiesForCheckingThreshold();
@@ -467,37 +468,33 @@ public class FirstFitPlanner extends PlannerBase 
implements DeploymentPlanner {
         int cpu_requested = offering.getCpu() * offering.getSpeed();
         long ram_requested = offering.getRamSize() * 1024L * 1024L;
        
-       // Iterate over the cluster List and check for each cluster whether it 
breaks disable threshold for any of the capacity types
-       for (Long clusterId : clusterList){
-               for(short capacity : capacityList){
-                       
-                       List<SummedCapacity> summedCapacityList = 
_capacityDao.findCapacityBy(new Integer(capacity), null, null, clusterId);      
                      
-               if (summedCapacityList != null && summedCapacityList.size() != 
0  && summedCapacityList.get(0).getTotalCapacity() != 0){
-                       
-                       double used = 
(double)(summedCapacityList.get(0).getUsedCapacity() + 
summedCapacityList.get(0).getReservedCapacity());
-                       double total = 
summedCapacityList.get(0).getTotalCapacity();
-                       
-                       if (capacity == Capacity.CAPACITY_TYPE_CPU){
-                               total = total * 
ApiDBUtils.getCpuOverprovisioningFactor();
-                               used = used + cpu_requested;
-                       }else{
-                               used = used + ram_requested;
-                       }
-                       
-                       double usedPercentage = used/total;
-                       if ( usedPercentage > 
capacityThresholdMap.get(capacity)){                              
-                               avoid.addCluster(clusterId);
-                               clustersCrossingThreshold.add(clusterId);
-                                               s_logger.debug("Cannot allocate 
cluster " + clusterId + " for vm creation since its allocated percentage: " 
+usedPercentage + 
-                                                               " will cross 
the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " for 
capacity Type : " + capacity + ", skipping this cluster");                      
            
-                               break;
-                       }                                       
-               }               
-               }                               
-       }
-       
-       clusterList.removeAll(clustersCrossingThreshold);               
-       
+        //     For each capacity get the cluster list crossing the threshold 
and remove it from the clusterList that will be used for vm allocation.
+        for(short capacity : capacityList){
+               
+               if (clusterListForVmAllocation == null || 
clusterListForVmAllocation.size() == 0){
+                       return;
+               }
+               
+               if (capacity == Capacity.CAPACITY_TYPE_CPU){
+                       clustersCrossingThreshold = 
_capacityDao.listClustersCrossingThreshold(Capacity.CAPACITY_TYPE_CPU, 
plan.getDataCenterId(),
+                                       capacityThresholdMap.get(capacity), 
cpu_requested, ApiDBUtils.getCpuOverprovisioningFactor());
+               }else{
+                       clustersCrossingThreshold = 
_capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
+                                       capacityThresholdMap.get(capacity), 
ram_requested, 1.0f);//Mem overprov not supported yet
+               }
+
+               
+               if (clustersCrossingThreshold != null && 
clustersCrossingThreshold.size() != 0){
+                       // addToAvoid Set
+                       avoid.addClusterList(clustersCrossingThreshold);
+                       // Remove clusters crossing disabled threshold
+                       
clusterListForVmAllocation.removeAll(clustersCrossingThreshold);
+                       
+                       s_logger.debug("Cannot allocate cluster list " + 
clustersCrossingThreshold.toString() + " for vm creation since their allocated 
percentage" +
+                                       " crosses the disable capacity 
threshold: " + capacityThresholdMap.get(capacity) + " for capacity Type : " + 
capacity + ", skipping these clusters");                           
+               }
+                                               
+        }
     }
 
     private DeployDestination checkClustersforDestination(List<Long> 
clusterList, VirtualMachineProfile<? extends VirtualMachine> vmProfile,
@@ -507,7 +504,7 @@ public class FirstFitPlanner extends PlannerBase implements 
DeploymentPlanner {
             s_logger.trace("ClusterId List to consider: " + clusterList);
         }
 
-        removeClustersCrossingThreshold(clusterList, avoid, vmProfile);
+        removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
         
         for(Long clusterId : clusterList){
             Cluster clusterVO = _clusterDao.findById(clusterId);

Reply via email to