Repository: stratos
Updated Branches:
  refs/heads/4.1.0-test d7627b81c -> 5e3663561


Update scaling rule to keep the max


Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/5e366356
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/5e366356
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/5e366356

Branch: refs/heads/4.1.0-test
Commit: 5e36635616f5dca18402d82289718e16dc21e99a
Parents: d7627b8
Author: Lahiru Sandaruwan <[email protected]>
Authored: Wed Dec 10 19:09:16 2014 +0530
Committer: Lahiru Sandaruwan <[email protected]>
Committed: Wed Dec 10 19:09:23 2014 +0530

----------------------------------------------------------------------
 .../src/main/conf/drools/scaling.drl            | 181 ++++++++++---------
 1 file changed, 95 insertions(+), 86 deletions(-)
----------------------------------------------------------------------
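
In short: the scale-up path now runs only while the cluster's non-terminated
member count is below the configured maximum, and the scale-down path only
while it is above the configured minimum; otherwise the rule logs the fact and
does nothing. A minimal Java sketch of the new guard structure, using
hypothetical names (ClusterContext, spawnAdditionalInstances,
terminateOrCountScaleDownRequest) rather than the rule's real bindings:

    // Sketch only; in scaling.drl these checks run against clusterInstanceContext.
    interface ClusterContext {
        int getNonTerminatedMemberCount();
        int getMaxInstanceCount();
        int getMinInstanceCount();
    }

    void applyScalingDecision(ClusterContext ctx, boolean scaleUp, boolean scaleDown) {
        if (scaleUp) {
            if (ctx.getNonTerminatedMemberCount() < ctx.getMaxInstanceCount()) {
                spawnAdditionalInstances(ctx);         // existing scale-up path, unchanged
            }
            // else: rule only logs "Max is reached, hence not scaling up"
        } else if (scaleDown) {
            if (ctx.getNonTerminatedMemberCount() > ctx.getMinInstanceCount()) {
                terminateOrCountScaleDownRequest(ctx); // existing scale-down path, unchanged
            }
            // else: rule only logs "Min is reached, hence not scaling down"
        }
    }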


http://git-wip-us.apache.org/repos/asf/stratos/blob/5e366356/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 35adcc5..03dd925 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -99,8 +99,8 @@ dialect "mvel"
 
 
 
-        scaleUp : Boolean() from (activeInstancesCount < numberOfRequiredInstances )
-        scaleDown : Boolean() from (activeInstancesCount > numberOfRequiredInstances )
+        scaleUp : Boolean() from (activeInstancesCount < numberOfRequiredInstances)
+        scaleDown : Boolean() from (activeInstancesCount > numberOfRequiredInstances)

         eval(log.debug("[scaling] " + " [cluster] " + clusterId + " RIF Resetted?: " + rifReset))
         eval(log.debug("[scaling] " + " [cluster] " + clusterId + " RIF predicted value: " + rifPredictedValue))
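
(The hunk above only removes a stray space before each closing parenthesis;
the decision logic is unchanged. Restated as plain Java for readability, with
numberOfRequiredInstances presumably computed earlier in the rule from the
predicted load statistics:

    // Equivalent plain-Java form of the two Drools conditions above.
    boolean scaleUp   = activeInstancesCount < numberOfRequiredInstances;
    boolean scaleDown = activeInstancesCount > numberOfRequiredInstances;
    // When the counts are equal, neither flag is set and the rule takes no action.
)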
@@ -120,104 +120,113 @@ dialect "mvel"
           log.debug("Number Of Required Instances " + numberOfRequiredInstances + " Instances Count " + activeInstancesCount);

         if(scaleUp){
+            if (clusterInstanceContext.getNonTerminatedMemberCount() < clusterInstanceContext.getMaxInstanceCount()) {
+                int additionalInstances = numberOfRequiredInstances - activeInstancesCount ;
+                clusterInstanceContext.resetScaleDownRequestsCount();
+                int count = 0;

-            int additionalInstances = numberOfRequiredInstances - activeInstancesCount ;
-            clusterInstanceContext.resetScaleDownRequestsCount();
-            int count = 0;
+                //Calculating the factor scaling
+                float factor = numberOfRequiredInstances / clusterInstanceContext.getMinInstanceCount();
+    //            delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getId(), factor);

-            //Calculating the factor scaling
-            float factor = numberOfRequiredInstances / clusterInstanceContext.getMinInstanceCount();
-//            delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getId(), factor);
+                boolean partitionsAvailable = true;

-            boolean partitionsAvailable = true;
+                while(count != additionalInstances && partitionsAvailable){

-            while(count != additionalInstances && partitionsAvailable){
+                    ClusterLevelPartitionContext partitionContext =  (ClusterLevelPartitionContext)autoscaleAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
+                    if(partitionContext != null){

-                ClusterLevelPartitionContext partitionContext =  (ClusterLevelPartitionContext)autoscaleAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
-                if(partitionContext != null){
+                        log.info("[scale-up] Partition available, hence trying to spawn an instance to scale up!" );
+                        log.debug("[scale-up] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] " + clusterId );
+                        delegator.delegateSpawn(partitionContext, clusterId, clusterInstanceContext.getId(), isPrimary);
+                        count++;
+                    } else {

-                    log.info("[scale-up] Partition available, hence trying to spawn an instance to scale up!" );
-                    log.debug("[scale-up] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] " + clusterId );
-                    delegator.delegateSpawn(partitionContext, clusterId, clusterInstanceContext.getId(), isPrimary);
-                    count++;
-                } else {
-
-                    partitionsAvailable = false;
+                        partitionsAvailable = false;
+                    }
                 }
+            } else{
+                log.info("[scale-up] Max is reached, hence not scaling up [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId());
             }
         } else if(scaleDown){
 
-            float factor = numberOfRequiredInstances / clusterInstanceContext.getMinInstanceCount();
-//            delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), factor);
-
-            log.debug("[scale-down] Decided to Scale down [cluster] " + clusterId);
-            if(clusterInstanceContext.getScaleDownRequestsCount() > 5 ){
-
-                log.debug("[scale-down] Reached scale down requests threshold [cluster] " + clusterId + " Count " + clusterInstanceContext.getScaleDownRequestsCount());
-                MemberStatsContext selectedMemberStatsContext = null;
-                double lowestOverallLoad = 0.0;
-                boolean foundAValue = false;
-                ClusterLevelPartitionContext partitionContext =  (ClusterLevelPartitionContext) autoscaleAlgorithm.getNextScaleDownPartitionContext((clusterInstanceContext.getPartitionCtxtsAsAnArray()));
-                if(partitionContext != null){
-                    log.info("[scale-down] Partition available to scale down ");
-//                    log.debug("[scale-down] " + " [partition] " + partition.getId() + " [cluster] " + clusterId);
-//                    partitionContext = clusterInstanceContext.getPartitionCtxt(partition.getId());
-//
-
-                                       // In partition context member stat context, all the primary members need to be
-                                       // avoided being selected as the member to terminated
-
-
-                    for(MemberStatsContext memberStatsContext: partitionContext.getMemberStatsContexts().values()){
-
-                                               if( !primaryMembers.contains(memberStatsContext.getMemberId()) ) {
-
-                        LoadAverage loadAverage = memberStatsContext.getLoadAverage();
-                        log.debug("[scale-down] " + " [cluster] "
-                            + clusterId + " [member] " + memberStatsContext.getMemberId() + " Load average: " + loadAverage);
-
-                        MemoryConsumption memoryConsumption = memberStatsContext.getMemoryConsumption();
-                        log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                            + clusterId + " [member] " + memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
-
-                        double predictedCpu = delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(), 1);
-                        log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                            + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
-
-                        double predictedMemoryConsumption = delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(), 1);
-                        log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                            + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted memory consumption: " + predictedMemoryConsumption);
-
-                        double overallLoad = (predictedCpu + predictedMemoryConsumption) / 2;
-                        log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                            + clusterId + " [member] " + memberStatsContext.getMemberId() + " Overall load: " + overallLoad);
-
-                        if(!foundAValue){
-                            foundAValue = true;
-                            selectedMemberStatsContext = memberStatsContext;
-                            lowestOverallLoad = overallLoad;
-                        } else if(overallLoad < lowestOverallLoad){
-                            selectedMemberStatsContext = memberStatsContext;
-                            lowestOverallLoad = overallLoad;
+            if(clusterInstanceContext.getNonTerminatedMemberCount() > clusterInstanceContext.getMinInstanceCount()){
+
+
+                float factor = numberOfRequiredInstances / clusterInstanceContext.getMinInstanceCount();
+    //            delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), factor);
+
+                log.debug("[scale-down] Decided to Scale down [cluster] " + clusterId);
+                if(clusterInstanceContext.getScaleDownRequestsCount() > 5 ){
+
+                    log.debug("[scale-down] Reached scale down requests threshold [cluster] " + clusterId + " Count " + clusterInstanceContext.getScaleDownRequestsCount());
+                    MemberStatsContext selectedMemberStatsContext = null;
+                    double lowestOverallLoad = 0.0;
+                    boolean foundAValue = false;
+                    ClusterLevelPartitionContext partitionContext =  (ClusterLevelPartitionContext) autoscaleAlgorithm.getNextScaleDownPartitionContext((clusterInstanceContext.getPartitionCtxtsAsAnArray()));
+                    if(partitionContext != null){
+                        log.info("[scale-down] Partition available to scale down ");
+    //                    log.debug("[scale-down] " + " [partition] " + partition.getId() + " [cluster] " + clusterId);
+    //                    partitionContext = clusterInstanceContext.getPartitionCtxt(partition.getId());
+    //
+
+                        // In the partition context's member stats contexts, primary members
+                        // must not be selected as the member to terminate.
+
+
+                        for(MemberStatsContext memberStatsContext: partitionContext.getMemberStatsContexts().values()){
+
+                            if( !primaryMembers.contains(memberStatsContext.getMemberId()) ) {
+
+                            LoadAverage loadAverage = memberStatsContext.getLoadAverage();
+                            log.debug("[scale-down] " + " [cluster] "
+                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Load average: " + loadAverage);
+
+                            MemoryConsumption memoryConsumption = memberStatsContext.getMemoryConsumption();
+                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
+
+                            double predictedCpu = delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(), 1);
+                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
+
+                            double predictedMemoryConsumption = delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(), 1);
+                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted memory consumption: " + predictedMemoryConsumption);
+
+                            double overallLoad = (predictedCpu + predictedMemoryConsumption) / 2;
+                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Overall load: " + overallLoad);
+
+                            if(!foundAValue){
+                                foundAValue = true;
+                                selectedMemberStatsContext = memberStatsContext;
+                                lowestOverallLoad = overallLoad;
+                            } else if(overallLoad < lowestOverallLoad){
+                                selectedMemberStatsContext = memberStatsContext;
+                                lowestOverallLoad = overallLoad;
+                            }
+
+
+                          }
+
                         }
-
-
-                                         }
-
-                    }
-                    if(selectedMemberStatsContext != null) {
-                        log.info("[scale-down] Trying to terminating an instace to scale down!" );
-                        log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                            + clusterId + " Member with lowest overall load: " + selectedMemberStatsContext.getMemberId());
+                        if(selectedMemberStatsContext != null) {
+                            log.info("[scale-down] Trying to terminate an instance to scale down!" );
+                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                + clusterId + " Member with lowest overall load: " + selectedMemberStatsContext.getMemberId());

-                        delegator.delegateTerminate(partitionContext, selectedMemberStatsContext.getMemberId());
+                            delegator.delegateTerminate(partitionContext, selectedMemberStatsContext.getMemberId());
+                        }
                     }
-                }
-            } else{
-                 log.debug("[scale-down] Not reached scale down requests threshold. " + clusterId + " Count " + clusterInstanceContext.getScaleDownRequestsCount());
-                 clusterInstanceContext.increaseScaleDownRequestsCount();
+                } else{
+                     log.debug("[scale-down] Scale down requests threshold not reached. " + clusterId + " Count " + clusterInstanceContext.getScaleDownRequestsCount());
+                     clusterInstanceContext.increaseScaleDownRequestsCount();

-             }
+                }
+            } else {
+                log.info("[scale-down] Min is reached, hence not scaling down [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId());
+            }
         }  else{
             log.debug("[scaling] No decision made to either scale up or scale down ... ");
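
For readers skimming the scale-down branch: it predicts each member's CPU and
memory for the next minute, averages the two into an overall load, skips
primary members, and terminates the member with the lowest overall load. A
compact restatement in plain Java; the member-stats types are the
autoscaler's, while predict() is a hypothetical stand-in for
delegator.getPredictedValueForNextMinute(), whose second-order extrapolation
shown here is an assumption, not something this diff confirms:

    // Hypothetical stand-in for delegator.getPredictedValueForNextMinute();
    // assumes a second-order extrapolation: x(t) = x + x'*t + 0.5 * x'' * t^2.
    static double predict(double average, double gradient, double secondDerivative, double t) {
        return average + gradient * t + 0.5 * secondDerivative * t * t;
    }

    // Mirrors the selection loop in the rule: lowest mean of predicted CPU and
    // predicted memory wins; primary members are never candidates.
    static MemberStatsContext selectLowestLoaded(java.util.Collection<MemberStatsContext> members,
                                                 java.util.Set<String> primaryMembers) {
        MemberStatsContext selected = null;
        double lowestOverallLoad = Double.POSITIVE_INFINITY;
        for (MemberStatsContext m : members) {
            if (primaryMembers.contains(m.getMemberId())) {
                continue; // primary members must not be terminated
            }
            LoadAverage la = m.getLoadAverage();
            MemoryConsumption mc = m.getMemoryConsumption();
            double cpu = predict(la.getAverage(), la.getGradient(), la.getSecondDerivative(), 1);
            double mem = predict(mc.getAverage(), mc.getGradient(), mc.getSecondDerivative(), 1);
            double overall = (cpu + mem) / 2; // same 50/50 blend as the rule
            if (selected == null || overall < lowestOverallLoad) {
                selected = m;
                lowestOverallLoad = overall;
            }
        }
        return selected; // null when every member is primary
    }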
 
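Note also that termination is debounced: the rule terminates only after more
than five consecutive scale-down evaluations (getScaleDownRequestsCount() > 5),
and every scale-up decision resets the counter, which damps flapping around
the threshold. The shape of that logic, as a sketch using the counter methods
the rule itself calls:

    if (scaleUp) {
        clusterInstanceContext.resetScaleDownRequestsCount(); // forget earlier scale-down votes
        // ... spawn instances, subject to the max guard ...
    } else if (scaleDown) {
        if (clusterInstanceContext.getScaleDownRequestsCount() > 5) {
            // ... terminate the lowest-loaded non-primary member ...
        } else {
            clusterInstanceContext.increaseScaleDownRequestsCount(); // keep counting
        }
    }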
