We have moved this discussion to WSO2 Dev since the problem is related to
WSO2 Private PaaS.

Thanks

On Tue, Sep 8, 2015 at 7:06 PM, Monaco Marco <ma.mon...@almaviva.it> wrote:

> Hi,
>
> We have successfully installed WSO2 PPaaS 4.1.2 on our OpenStack IaaS
> environment.
>
> After following this procedure
> (https://docs.wso2.com/display/PP410/Deploy+Private+PaaS+in+OpenStack), we are
> able to open the Private PaaS console and configure Network Partitions,
> Autoscaling Policies, Deployment Policies, Cartridges, etc.
>
> However, we have problems when trying to deploy applications. We tested both
> PHP and WSO2 ESB applications, also using the Mock IaaS, but we always receive
> the same error:
>
> ERROR {org.apache.stratos.autoscaler.rule.AutoscalerRuleEvaluator} - Unable to Analyse Expression
>
> log.debug("[scaling] Number of required instances based on stats: " + numberOfRequiredInstances + " " +
>         "[active instances count] " + activeInstancesCount + " [network-partition] " +
>         clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId);
> int nonTerminatedMembers = clusterInstanceContext.getNonTerminatedMemberCount();
> if(scaleUp){
>     int clusterMaxMembers = clusterInstanceContext.getMaxInstanceCount();
>     if (nonTerminatedMembers < clusterMaxMembers) {
>         int additionalInstances = 0;
>         if(clusterMaxMembers < numberOfRequiredInstances){
>             additionalInstances = clusterMaxMembers - nonTerminatedMembers;
>             log.info("[scale-up] Required member count based on stat based scaling is higher than max, hence"
>                     + " notifying to parent for possible group scaling or app bursting. [cluster] " + clusterId
>                     + " [instance id]" + clusterInstanceContext.getId() + " [max] " + clusterMaxMembers
>                     + " [number of required instances] " + numberOfRequiredInstances
>                     + " [additional instances to be created] " + additionalInstances);
>             delegator.delegateScalingOverMaxNotification(clusterId,
>                     clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
>         } else {
>             additionalInstances = numberOfRequiredInstances - nonTerminatedMembers;
>         }
>         clusterInstanceContext.resetScaleDownRequestsCount();
>         log.debug("[scale-up] " + " [has scaling dependents] " + clusterInstanceContext.hasScalingDependants() +
>                 " [cluster] " + clusterId );
>         if(clusterInstanceContext.hasScalingDependants()) {
>             log.debug("[scale-up] Notifying dependencies [cluster] " + clusterId);
>             delegator.delegateScalingDependencyNotification(clusterId,
>                     clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId(),
>                     numberOfRequiredInstances, clusterInstanceContext.getMinInstanceCount());
>         } else {
>             boolean partitionsAvailable = true;
>             int count = 0;
>             String autoscalingReason = (numberOfRequiredInstances == numberOfInstancesReuquiredBasedOnRif)
>                     ? "Scaling up due to RIF, [Predicted Value] " + rifPredictedValue + " [Threshold] " + rifThreshold
>                     : (numberOfRequiredInstances == numberOfInstancesReuquiredBasedOnMemoryConsumption)
>                     ? "Scaling up due to MC, [Predicted Value] " + mcPredictedValue + " [Threshold] " + mcThreshold
>                     : "Scaling up due to LA, [Predicted Value] " + laPredictedValue + " [Threshold] " + laThreshold;
>             autoscalingReason += " [Number of required instances] " + numberOfRequiredInstances
>                     + " [Cluster Max Members] " + clusterMaxMembers
>                     + " [Additional instances to be created] " + additionalInstances;
>             while(count != additionalInstances && partitionsAvailable){
>                 ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext)
>                         partitionAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
>                 if(partitionContext != null){
>                     log.info("[scale-up] Partition available, hence trying to spawn an instance to scale up! " +
>                             " [application id] " + applicationId +
>                             " [cluster] " + clusterId + " [instance id] " + clusterInstanceContext.getId() +
>                             " [network-partition] " + clusterInstanceContext.getNetworkPartitionId() +
>                             " [partition] " + partitionContext.getPartitionId() +
>                             " scaleup due to RIF: " + (rifReset && (rifPredictedValue > rifThreshold)) +
>                             " [rifPredictedValue] " + rifPredictedValue + " [rifThreshold] " + rifThreshold +
>                             " scaleup due to MC: " + (mcReset && (mcPredictedValue > mcThreshold)) +
>                             " [mcPredictedValue] " + mcPredictedValue + " [mcThreshold] " + mcThreshold +
>                             " scaleup due to LA: " + (laReset && (laPredictedValue > laThreshold)) +
>                             " [laPredictedValue] " + laPredictedValue + " [laThreshold] " + laThreshold);
>                     log.debug("[scale-up] " + " [partition] " + partitionContext.getPartitionId() +
>                             " [cluster] " + clusterId );
>                     long scalingTime = System.currentTimeMillis();
>                     delegator.delegateSpawn(partitionContext, clusterId, clusterInstanceContext.getId(),
>                             isPrimary, autoscalingReason, scalingTime);
>                     count++;
>                 } else {
>                     log.warn("[scale-up] No more partition available even though " +
>                             "cartridge-max is not reached!, [cluster] " + clusterId +
>                             " Please update deployment-policy with new partitions or with higher " +
>                             "partition-max");
>                     partitionsAvailable = false;
>                 }
>             }
>         }
>     } else {
>         log.info("[scale-up] Trying to scale up over max, hence not scaling up cluster itself and
>                 notifying to parent for possible group scaling or app bursting.
>                 [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
>                 " [max] " + clusterMaxMembers);
>         delegator.delegateScalingOverMaxNotification(clusterId,
>                 clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
>     }
> } else if(scaleDown){
>     if(nonTerminatedMembers > clusterInstanceContext.getMinInstanceCount){
>         log.debug("[scale-down] Decided to Scale down [cluster] " + clusterId);
>         if(clusterInstanceContext.getScaleDownRequestsCount() > 2 ){
>             log.debug("[scale-down] Reached scale down requests threshold [cluster] " + clusterId +
>                     " Count " + clusterInstanceContext.getScaleDownRequestsCount());
>             if(clusterInstanceContext.hasScalingDependants()) {
>                 log.debug("[scale-up] Notifying dependencies [cluster] " + clusterId);
>                 delegator.delegateScalingDependencyNotification(clusterId,
>                         clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId(),
>                         numberOfRequiredInstances, clusterInstanceContext.getMinInstanceCount());
>             } else{
>                 MemberStatsContext selectedMemberStatsContext = null;
>                 double lowestOverallLoad = 0.0;
>                 boolean foundAValue = false;
>                 ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext)
>                         partitionAlgorithm.getNextScaleDownPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
>                 if(partitionContext != null){
>                     log.info("[scale-down] Partition available to scale down " +
>                             " [application id] " + applicationId +
>                             " [cluster] " + clusterId + " [instance id] " + clusterInstanceContext.getId() +
>                             " [network-partition] " + clusterInstanceContext.getNetworkPartitionId() +
>                             " [partition] " + partitionContext.getPartitionId() +
>                             " scaledown due to RIF: " + (rifReset && (rifPredictedValue < rifThreshold)) +
>                             " [rifPredictedValue] " + rifPredictedValue + " [rifThreshold] " + rifThreshold +
>                             " scaledown due to MC: " + (mcReset && (mcPredictedValue < mcThreshold)) +
>                             " [mcPredictedValue] " + mcPredictedValue + " [mcThreshold] " + mcThreshold +
>                             " scaledown due to LA: " + (laReset && (laPredictedValue < laThreshold)) +
>                             " [laPredictedValue] " + laPredictedValue + " [laThreshold] " + laThreshold
>                     );
>                     // In partition context member stat context, all the primary members need to be
>                     // avoided being selected as the member to terminated
>                     for(MemberStatsContext memberStatsContext: partitionContext.getMemberStatsContexts().values()){
>                         if( !primaryMembers.contains(memberStatsContext.getMemberId()) ) {
>                             LoadAverage loadAverage = memberStatsContext.getLoadAverage();
>                             log.debug("[scale-down] " + " [cluster] " + clusterId + " [member] " +
>                                     memberStatsContext.getMemberId() + " Load average: " + loadAverage);
>                             MemoryConsumption memoryConsumption = memberStatsContext.getMemoryConsumption();
>                             log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() +
>                                     " [cluster] " + clusterId + " [member] " + memberStatsContext.getMemberId() +
>                                     " Memory consumption: " + memoryConsumption);
>                             double predictedCpu = delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),
>                                     loadAverage.getGradient(), loadAverage.getSecondDerivative(), 1);
>                             log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() +
>                                     " [cluster] " + clusterId + " [member] " + memberStatsContext.getMemberId() +
>                                     " Predicted CPU: " + predictedCpu);
>                             double predictedMemoryConsumption = delegator.getPredictedValueForNextMinute(
>                                     memoryConsumption.getAverage(), memoryConsumption.getGradient(),
>                                     memoryConsumption.getSecondDerivative(), 1);
>                             log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() +
>                                     " [cluster] " + clusterId + " [member] " + memberStatsContext.getMemberId() +
>                                     " Predicted memory consumption: " + predictedMemoryConsumption);
>                             double overallLoad = (predictedCpu + predictedMemoryConsumption) / 2;
>                             log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() +
>                                     " [cluster] " + clusterId + " [member] " + memberStatsContext.getMemberId() +
>                                     " Overall load: " + overallLoad);
>                             if(!foundAValue){
>                                 foundAValue = true;
>                                 selectedMemberStatsContext = memberStatsContext;
>                                 lowestOverallLoad = overallLoad;
>                             } else if(overallLoad < lowestOverallLoad){
>                                 selectedMemberStatsContext = memberStatsContext;
>                                 lowestOverallLoad = overallLoad;
>                             }
>                         }
>                     }
>                     if(selectedMemberStatsContext != null) {
>                         log.info("[scale-down] Trying to terminating an instace to scale down!" );
>                         log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() +
>                                 " [cluster] " + clusterId + " Member with lowest overall load: " +
>                                 selectedMemberStatsContext.getMemberId());
>                         delegator.delegateTerminate(partitionContext, selectedMemberStatsContext.getMemberId());
>                     }
>                 }
>             }
>         } else{
>             log.debug("[scale-down] Not reached scale down requests threshold. " + clusterId +
>                     " Count " + clusterInstanceContext.getScaleDownRequestsCount());
>             clusterInstanceContext.increaseScaleDownRequestsCount();
>         }
>     } else {
>         log.debug("[scale-down] Min is reached, hence not scaling down [cluster] " + clusterId +
>                 " [instance id]" + clusterInstanceContext.getId());
>         //if(clusterInstanceContext.isInGroupScalingEnabledSubtree()){
>             delegator.delegateScalingDownBeyondMinNotification(clusterId,
>                     clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
>         //}
>     }
> }  else{
>     log.debug("[scaling] No decision made to either scale up or scale down ... [cluster] " + clusterId +
>             " [instance id]" + clusterInstanceContext.getId());
> };
>
> [Error: unable to resolve method using strict-mode:
>     org.apache.stratos.autoscaler.rule.RuleTasksDelegator.delegateSpawn(
>         org.apache.stratos.autoscaler.context.partition.ClusterLevelPartitionContext,
>         java.lang.String, java.lang.String, java.lang.Boolean, java.lang.String, long)]
> [Near : {... delegator.delegateSpawn(partitionContext ....}]
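>
> If we read the error correctly, the rule calls delegator.delegateSpawn() with six
> arguments, but MVEL cannot resolve a matching method on RuleTasksDelegator in
> strict mode; our guess (please correct us) is that the scaling rule file we have
> deployed is out of sync with the autoscaler jar bundled in PPaaS 4.1.2. Below is a
> minimal sketch of the method shape the rule seems to expect: only the parameter
> types come from the error above, the class and parameter names are our guesses.
>
>     // Hypothetical sketch only: the real class is
>     // org.apache.stratos.autoscaler.rule.RuleTasksDelegator; only the parameter
>     // types below are taken from the error message, everything else is assumed.
>     import org.apache.stratos.autoscaler.context.partition.ClusterLevelPartitionContext;
>
>     public class RuleTasksDelegatorSketch {
>
>         // The rule invokes:
>         //   delegator.delegateSpawn(partitionContext, clusterId,
>         //       clusterInstanceContext.getId(), isPrimary, autoscalingReason, scalingTime);
>         // For strict-mode resolution to succeed, a method with this shape must exist:
>         public void delegateSpawn(ClusterLevelPartitionContext partitionContext,
>                                   String clusterId,
>                                   String instanceId,
>                                   boolean isPrimary,        // the rule passes a Boolean
>                                   String autoscalingReason,
>                                   long scalingTime) {
>             // member-spawning logic lives in the real delegator; omitted here
>         }
>     }
>
> If the delegateSpawn() in the running autoscaler takes a different parameter list
> (for example, one without the autoscalingReason/scalingTime arguments), strict-mode
> resolution would fail exactly like this.
>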
> Once we try to deploy the application, it hangs and it is impossible to remove
> it unless we erase and repopulate the Stratos database. The error comes up every
> time we restart Stratos (if we do not recreate the database), and in any case it
> is impossible to deploy any application.
>
> These are the configurations that we used for WSO2 ESB (we configured the
> Puppet side correctly according to
> https://docs.wso2.com/display/PP410/Configuring+Puppet+Master and
> https://github.com/wso2/product-private-paas/tree/master/cartridges/templates-modules/wso2esb-4.8.1):
>
> AUTOSCALING POLICY:
>
> {
>     "id": "Autoscaling-WSO2",
>     "loadThresholds": {
>         "requestsInFlight": { "threshold": 20 },
>         "memoryConsumption": { "threshold": 80 },
>         "loadAverage": { "threshold": 120 }
>     }
> }
>
> DEPLOYMENT POLICY:
>
> {
>     "id": "Deployment-WSO2",
>     "networkPartitions": [
>         {
>             "id": "NP1",
>             "partitionAlgo": "round-robin",
>             "partitions": [
>                 {
>                     "id": "P1",
>                     "partitionMax": 5,
>                     "partitionMin": 1
>                 }
>             ]
>         }
>     ]
> }
>
> APPLICATION POLICY:
>
> {
>     "id": "Application-WSO2",
>     "algorithm": "one-after-another",
>     "networkPartitions": [ "NP1" ],
>     "properties": [ ]
> }
>
> CARTRIDGES:
>
> MANAGER:
>
> {
>     "type": "wso2esb-481-manager",
>     "category": "framework",
>     "provider": "wso2",
>     "host": "esb.alma.it",
>     "displayName": "WSO2 ESB 4.8.1 Manager",
>     "description": "WSO2 ESB 4.8.1 Manager Cartridge",
>     "version": "4.8.1",
>     "multiTenant": false,
>     "loadBalancingIPType": "public",
>     "portMapping": [
>         { "name": "mgt-http",  "protocol": "http",  "port": 9763, "proxyPort": 0 },
>         { "name": "mgt-https", "protocol": "https", "port": 9443, "proxyPort": 0 },
>         { "name": "pt-http",   "protocol": "http",  "port": 8280, "proxyPort": 0 },
>         { "name": "pt-https",  "protocol": "https", "port": 8243, "proxyPort": 0 }
>     ],
>     "iaasProvider": [
>         {
>             "type": "openstack",
>             "imageId": "RegionOne/c2951a15-47b7-4f9c-a6e0-d3b7a50bc9aa",
>             "property": [
>                 { "name": "instanceType",   "value": "RegionOne/3" },
>                 { "name": "keyPair",        "value": "alma_admin_keypair" },
>                 { "name": "securityGroups", "value": "default" }
>             ],
>             "networkInterfaces": [
>                 { "networkUuid": "ea0edbc6-6d6d-4efe-b11c-7cb3cb78256f" }
>             ]
>         }
>     ],
>     "property": [
>         { "name": "payload_parameter.CONFIG_PARAM_CLUSTERING", "value": "true" },
>         { "name": "payload_parameter.LB_IP", "value": "<LOAD_BALANCER_IP>" }
>     ]
> }
>
> WORKER:
>
> {
>     "type": "wso2esb-481-worker",
>     "category": "framework",
>     "provider": "wso2",
>     "host": "esb.alma.it",
>     "displayName": "WSO2 ESB 4.8.1 Worker",
>     "description": "WSO2 ESB 4.8.1 Worker Cartridge",
>     "version": "4.8.1",
>     "multiTenant": false,
>     "loadBalancingIPType": "public",
>     "portMapping": [
>         { "name": "pt-http",  "protocol": "http",  "port": 8280, "proxyPort": 0 },
>         { "name": "pt-https", "protocol": "https", "port": 8243, "proxyPort": 0 }
>     ],
>     "iaasProvider": [
>         {
>             "type": "openstack",
>             "imageId": "RegionOne/c2951a15-47b7-4f9c-a6e0-d3b7a50bc9aa",
>             "property": [
>                 { "name": "instanceType",   "value": "RegionOne/3" },
>                 { "name": "keyPair",        "value": "alma_admin_keypair" },
>                 { "name": "securityGroups", "value": "default" }
>             ],
>             "networkInterfaces": [
>                 { "networkUuid": "ea0edbc6-6d6d-4efe-b11c-7cb3cb78256f" }
>             ]
>         }
>     ],
>     "property": [
>         { "name": "payload_parameter.CONFIG_PARAM_CLUSTERING", "value": "true" },
>         { "name": "payload_parameter.LB_IP", "value": "<LOAD_BALANCER_IP>" }
>     ]
> }
>
> GROUPING:
>
> {
>     "name": "wso2esb-481-group",
>     "cartridges": [
>         "wso2esb-481-manager",
>         "wso2esb-481-worker"
>     ],
>     "dependencies": {
>         "startupOrders": [
>             {
>                 "aliases": [
>                     "cartridge.wso2esb-481-manager",
>                     "cartridge.wso2esb-481-worker"
>                 ]
>             }
>         ]
>     }
> }
>
> APPLICATION:
>
> {
>     "applicationId": "wso2esb-481-application",
>     "alias": "wso2esb-481-application",
>     "multiTenant": true,
>     "components": {
>         "groups": [
>             {
>                 "name": "wso2esb-481-group",
>                 "alias": "wso2esb-481-group",
>                 "deploymentPolicy": "Deployment-WSO2",
>                 "groupMinInstances": 1,
>                 "groupMaxInstances": 1,
>                 "cartridges": [
>                     {
>                         "type": "wso2esb-481-manager",
>                         "cartridgeMin": 1,
>                         "cartridgeMax": 1,
>                         "subscribableInfo": {
>                             "alias": "wso2esb-481-manager",
>                             "autoscalingPolicy": "Autoscaling-WSO2"
>                         }
>                     },
>                     {
>                         "type": "wso2esb-481-worker",
>                         "cartridgeMin": 2,
>                         "cartridgeMax": 5,
>                         "subscribableInfo": {
>                             "alias": "wso2esb-481-worker",
>                             "autoscalingPolicy": "Autoscaling-WSO2"
>                         }
>                     }
>                 ]
>             }
>         ]
>     }
> }
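>
> For reference, these artifacts can also be added over the REST API instead of the
> console; a rough sketch follows. The endpoint paths, port, and credentials are
> assumptions on our side, based on the Stratos 4.1 REST API that PPaaS 4.1.2
> bundles, and the JSON file names are only placeholders for the definitions above.
>
>     // Rough sketch only: assumes the default Stratos 4.1 REST endpoints on port
>     // 9443 and admin/admin credentials; the console's self-signed certificate
>     // must be trusted by the JVM for these calls to succeed.
>     import java.io.OutputStream;
>     import java.net.HttpURLConnection;
>     import java.net.URL;
>     import java.nio.charset.StandardCharsets;
>     import java.nio.file.Files;
>     import java.nio.file.Paths;
>     import java.util.Base64;
>
>     public class PpaasArtifactUploader {
>
>         private static final String BASE = "https://<PPAAS_HOST>:9443/api";
>         private static final String AUTH = Base64.getEncoder()
>                 .encodeToString("admin:admin".getBytes(StandardCharsets.UTF_8));
>
>         // POST a JSON file (or an empty body when jsonFile is null) to the given path.
>         static void post(String path, String jsonFile) throws Exception {
>             HttpURLConnection con = (HttpURLConnection) new URL(BASE + path).openConnection();
>             con.setRequestMethod("POST");
>             con.setRequestProperty("Authorization", "Basic " + AUTH);
>             con.setRequestProperty("Content-Type", "application/json");
>             con.setDoOutput(true);
>             try (OutputStream out = con.getOutputStream()) {
>                 if (jsonFile != null) {
>                     out.write(Files.readAllBytes(Paths.get(jsonFile)));
>                 }
>             }
>             System.out.println("POST " + path + " -> HTTP " + con.getResponseCode());
>         }
>
>         public static void main(String[] args) throws Exception {
>             post("/autoscalingPolicies", "autoscaling-policy.json");
>             post("/deploymentPolicies", "deployment-policy.json");
>             post("/applicationPolicies", "application-policy.json");
>             post("/cartridges", "wso2esb-481-manager.json");
>             post("/cartridges", "wso2esb-481-worker.json");
>             post("/cartridgeGroups", "wso2esb-481-group.json");
>             post("/applications", "wso2esb-481-application.json");
>             // Deploying binds the application to the application policy:
>             post("/applications/wso2esb-481-application/deploy/Application-WSO2", null);
>         }
>     }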
>
>
> Can you please help us? We are stuck at this point.
>
> Thank you all,
>
> Marco
>



-- 
Imesh Gunaratne

Senior Technical Lead, WSO2
Committer & PMC Member, Apache Stratos
