http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 4ba5427,b2993e3..d14bd33
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@@ -77,8 -63,8 +78,9 @@@ import java.util.Map
  import java.util.Map.Entry;
  import java.util.Set;
  import java.util.TreeMap;
+ import java.util.UUID;
  import java.util.concurrent.TimeUnit;
 +import java.util.stream.Collectors;
  
  import javax.persistence.RollbackException;
  
@@@ -228,8 -201,8 +227,9 @@@ import org.apache.ambari.server.topolog
  import org.apache.ambari.server.utils.SecretReference;
  import org.apache.ambari.server.utils.StageUtils;
  import org.apache.commons.collections.CollectionUtils;
 +import org.apache.commons.collections.MapUtils;
  import org.apache.commons.io.IOUtils;
+ import org.apache.commons.lang.BooleanUtils;
  import org.apache.commons.lang.StringUtils;
  import org.apache.commons.lang.math.NumberUtils;
  import org.apache.http.client.utils.URIBuilder;
@@@ -341,23 -313,12 +341,27 @@@ public class AmbariManagementController
    @Inject
    private ExtensionDAO extensionDAO;
    @Inject
+   private ExtensionLinkDAO linkDAO;
+   @Inject
    private StackDAO stackDAO;
+   @Inject
+   protected OsFamily osFamily;
  
 +  @Inject
 +  private TopologyDeleteFormer topologyDeleteFormer;
 +
 +  @Inject
 +  private AmbariCustomCommandExecutionHelper ambariCustomCommandExecutionHelper;
 +
 +  @Inject
 +  private Provider<TopologyHolder> m_topologyHolder;
 +
 +  @Inject
 +  private Provider<MetadataHolder> m_metadataHolder;
 +
 +  @Inject
 +  private Provider<AgentConfigsHolder> m_agentConfigsHolder;
 +
    /**
    * The KerberosHelper to help set up for enabling or disabling Kerberos
     */
@@@ -2447,57 -2452,37 +2548,25 @@@
      if (customCommandExecutionHelper.isTopologyRefreshRequired(roleCommand.name(), clusterName, serviceName)) {
        commandParams.put(ExecutionCommand.KeyNames.REFRESH_TOPOLOGY, "True");
      }
+     StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
  
-     String repoInfo = ambariMetaInfo.getRepoInfoString(cluster, hostEntity.getOsType(), osFamily, hostname);
 -    String repoInfo = customCommandExecutionHelper.getRepoInfo(cluster, component, host);
++    String repoInfo = ambariMetaInfo.getRepoInfoString(cluster, component, host);
      if (LOG.isDebugEnabled()) {
-       LOG.debug("Sending repo information to agent"
-         + ", hostname=" + scHost.getHostName()
-         + ", clusterName=" + clusterName
-         + ", stackInfo=" + stackId.getStackId()
-         + ", repoInfo=" + repoInfo);
+       LOG.debug("Sending repo information to agent, hostname={}, clusterName={}, stackInfo={}, repoInfo={}",
+         scHost.getHostName(), clusterName, stackId.getStackId(), repoInfo);
      }
  
      Map<String, String> hostParams = new TreeMap<>();
      hostParams.put(REPO_INFO, repoInfo);
      hostParams.putAll(getRcaParameters());
  
-     Map<String, String> roleParams = new TreeMap<>();
- 
-     // use the effective cluster version here since this command might happen
-     // in the context of an upgrade and we should send the repo ID which matches
-     // the version being send down
-     RepositoryVersionEntity repoVersion = null;
-     if (null != effectiveClusterVersion) {
-       repoVersion = effectiveClusterVersion.getRepositoryVersion();
-     } else {
-       List<ClusterVersionEntity> list = clusterVersionDAO.findByClusterAndState(cluster.getClusterName(),
-           RepositoryVersionState.INIT);
-       if (1 == list.size()) {
-         repoVersion = list.get(0).getRepositoryVersion();
-       }
-     }
- 
--    if (null != repoVersion) {
--      try {
--        VersionDefinitionXml xml = repoVersion.getRepositoryXml();
--        if (null != xml && !StringUtils.isBlank(xml.getPackageVersion(osFamily))) {
-           roleParams.put(PACKAGE_VERSION, xml.getPackageVersion(osFamily));
 -          hostParams.put(PACKAGE_VERSION, xml.getPackageVersion(osFamily));
--        }
--      } catch (Exception e) {
--        throw new AmbariException(String.format("Could not load version xml from repo version %s",
--            repoVersion.getVersion()), e);
--      }
- 
-       roleParams.put(KeyNames.REPO_VERSION_ID, repoVersion.getId().toString());
-     }
--
 -      hostParams.put(KeyNames.REPO_VERSION_ID, repoVersion.getId().toString());
 +    if (roleCommand.equals(RoleCommand.INSTALL)) {
 +      List<ServiceOsSpecific.Package> packages =
 +          getPackagesForServiceHost(serviceInfo, hostParams, osFamily);
 +      String packageList = gson.toJson(packages);
 +      commandParams.put(PACKAGE_LIST, packageList);
      }
  
 -    List<ServiceOsSpecific.Package> packages =
 -            getPackagesForServiceHost(serviceInfo, hostParams, osFamily);
 -    String packageList = gson.toJson(packages);
 -    hostParams.put(PACKAGE_LIST, packageList);
 -
      Set<PropertyInfo> stackProperties = ambariMetaInfo.getStackProperties(stackInfo.getName(), stackInfo.getVersion());
  
      Set<String> userSet = configHelper.getPropertyValuesWithPropertyType(PropertyType.USER, cluster, clusterDesiredConfigs, servicesMap, stackProperties);
@@@ -2548,11 -2552,10 +2620,9 @@@
      execCmd.setRoleParams(roleParams);
      execCmd.setCommandParams(commandParams);
  
-     execCmd.setAvailableServicesFromServiceInfoMap(ambariMetaInfo.getServices(stackId.getStackName(), stackId.getStackVersion()));
- 
 -    execCmd.setRepositoryFile(customCommandExecutionHelper.getCommandRepository(cluster, component, host));
 -    execCmdWrapper.setVersions(cluster);
++    execCmd.setRepositoryFile(ambariMetaInfo.getCommandRepository(cluster, component, host));
  
 -    if (execCmd.getConfigurationTags().containsKey("cluster-env")) {
 +    if ((execCmd != null) && (execCmd.getConfigurationTags().containsKey("cluster-env"))) {
        LOG.debug("AmbariManagementControllerImpl.createHostAction: created ExecutionCommand for host {}, role {}, roleCommand {}, and command ID {}, with cluster-env tags {}",
          execCmd.getHostname(), execCmd.getRole(), execCmd.getRoleCommand(), execCmd.getCommandId(), execCmd.getConfigurationTags().get("cluster-env").get("tag"));
      }
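
For readers skimming the hunk above: after the merge, the package list is serialized into commandParams only for INSTALL commands, instead of always being placed in hostParams. A minimal standalone sketch of that control flow (RoleCommand and Package here are simplified stand-ins, not the Ambari types):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    import com.google.gson.Gson;

    public class PackageListSketch {
      enum RoleCommand { INSTALL, START, STOP }

      // Simplified stand-in for ServiceOsSpecific.Package
      static class Package {
        String name;
        Package(String name) { this.name = name; }
      }

      public static void main(String[] args) {
        Gson gson = new Gson();
        Map<String, String> commandParams = new TreeMap<>();
        RoleCommand roleCommand = RoleCommand.INSTALL;

        // The package list is computed and sent only for INSTALL commands.
        if (roleCommand == RoleCommand.INSTALL) {
          List<Package> packages = Arrays.asList(new Package("hadoop"), new Package("hadoop-client"));
          commandParams.put("package_list", gson.toJson(packages));
        }
        System.out.println(commandParams);
      }
    }
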
@@@ -2941,18 -2957,25 +3024,18 @@@
                  }
                  break;
                case INIT:
 -                if (oldSchState == State.INSTALLED ||
 -                    oldSchState == State.INSTALL_FAILED ||
 -                    oldSchState == State.INIT) {
 -                  scHost.setState(State.INIT);
 -                  continue;
 -                } else  {
 -                  throw new AmbariException("Unsupported transition to INIT 
for"
 -                      + " servicecomponenthost"
 -                      + ", clusterName=" + cluster.getClusterName()
 -                      + ", clusterId=" + cluster.getClusterId()
 -                      + ", serviceName=" + scHost.getServiceName()
 -                      + ", componentName=" + scHost.getServiceComponentName()
 -                      + ", hostname=" + scHost.getHostName()
 -                      + ", currentState=" + oldSchState
 -                      + ", newDesiredState=" + newState);
 -                }
 +                throw new AmbariException("Unsupported transition to INIT for"
 +                    + " servicecomponenthost"
 +                    + ", clusterName=" + cluster.getClusterName()
 +                    + ", clusterId=" + cluster.getClusterId()
 +                    + ", serviceName=" + scHost.getServiceName()
 +                    + ", componentName=" + scHost.getServiceComponentName()
 +                    + ", hostname=" + scHost.getHostName()
 +                    + ", currentState=" + oldSchState
 +                    + ", newDesiredState=" + newState);
                default:
                  throw new AmbariException("Unsupported state change operation"
-                     + ", newState=" + newState.toString());
+                     + ", newState=" + newState);
              }
  
              if (LOG.isDebugEnabled()) {
@@@ -3550,21 -3624,51 +3684,51 @@@
      for (Entry<ServiceComponent, Set<ServiceComponentHost>> entry : safeToRemoveSCHs.entrySet()) {
        for (ServiceComponentHost componentHost : entry.getValue()) {
          try {
-           deleteHostComponent(entry.getKey(), componentHost, deleteMetaData);
+           //actually delete the component
 -          entry.getKey().deleteServiceComponentHosts(componentHost.getHostName());
++          entry.getKey().deleteServiceComponentHosts(componentHost.getHostName(), deleteMetaData);
+ 
+           //create cluster-master-service map to update all include/exclude files in one action
+           String componentName = componentHost.getServiceComponentName();
+           if (masterToSlaveMappingForDecom.containsValue(componentName)) {
+             String masterComponentName = null;
+             for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
+               if (entrySet.getValue().equals(componentName)) {
+                 masterComponentName = entrySet.getKey();
+               }
+             }
+             if (clusterServiceMasterForDecommissionMap.containsKey(componentHost.getClusterName())) {
+               clusterServiceMasterForDecommissionMap.get(componentHost.getClusterName()).put(componentHost.getServiceName(), masterComponentName);
+               Map<String, Set<String>> masterSlaveMap = clusterMasterSlaveHostsMap.get(componentHost.getClusterName());
+               masterSlaveMap.putIfAbsent(masterComponentName, new HashSet<>());
+               masterSlaveMap.get(masterComponentName).add(componentHost.getHostName());
+             } else {
+               Map<String, String> serviceMasterMap = new HashMap<>();
+               serviceMasterMap.put(componentHost.getServiceName(), masterComponentName);
+               clusterServiceMasterForDecommissionMap.put(componentHost.getClusterName(), serviceMasterMap);
+ 
+               Map<String, Set<String>> masterSlaveHostsMap = new HashMap<>();
+               masterSlaveHostsMap.put(masterComponentName, new HashSet<>(Collections.singletonList(componentHost.getHostName())));
+               clusterMasterSlaveHostsMap.put(componentHost.getClusterName(), masterSlaveHostsMap);
+             }
+           }
 -          deleteStatusMetaData.addDeletedKey(componentHost.getHostName() + "/" + componentHost.getServiceComponentName());
          } catch (Exception ex) {
 -          deleteStatusMetaData.addException(componentHost.getHostName() + "/" + componentHost.getServiceComponentName(), ex);
 +          deleteMetaData.addException(componentHost.getHostName() + "/" + componentHost.getServiceComponentName(), ex);
          }
        }
      }
  
+     for (String cluster : clusterServiceMasterForDecommissionMap.keySet()) {
+       createAndExecuteRefreshIncludeExcludeFilesActionForMasters(clusterServiceMasterForDecommissionMap.get(cluster), clusterMasterSlaveHostsMap.get(cluster), cluster, true);
+     }
+ 
      //Do not break behavior for existing clients where delete request contains only 1 host component.
      //Response for these requests will have empty body with appropriate error code.
 -    if (deleteStatusMetaData.getDeletedKeys().size() + deleteStatusMetaData.getExceptionForKeys().size() == 1) {
 -      if (deleteStatusMetaData.getDeletedKeys().size() == 1) {
 +    if (deleteMetaData.getDeletedKeys().size() + deleteMetaData.getExceptionForKeys().size() == 1) {
 +      if (deleteMetaData.getDeletedKeys().size() == 1) {
 +        topologyDeleteFormer.processDeleteMetaData(deleteMetaData);
          return null;
        }
 -      Exception ex = deleteStatusMetaData.getExceptionForKeys().values().iterator().next();
 +      Exception ex = deleteMetaData.getExceptionForKeys().values().iterator().next();
        if (ex instanceof AmbariException) {
          throw (AmbariException)ex;
        } else {
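
The master-component lookup in the hunk above inverts masterToSlaveMappingForDecom with a linear scan. An equivalent stream-based restatement of that reverse lookup (plain Java, illustrative component names):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    public class ReverseLookupSketch {
      public static void main(String[] args) {
        // master component -> slave component, as in masterToSlaveMappingForDecom
        Map<String, String> masterToSlave = new HashMap<>();
        masterToSlave.put("NAMENODE", "DATANODE");
        masterToSlave.put("RESOURCEMANAGER", "NODEMANAGER");

        String slave = "DATANODE";
        Optional<String> master = masterToSlave.entrySet().stream()
            .filter(e -> e.getValue().equals(slave))
            .map(Map.Entry::getKey)
            .findFirst();
        System.out.println(master.orElse("no master found")); // prints NAMENODE
      }
    }
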
@@@ -3576,65 -3680,9 +3740,10 @@@
      if (!safeToRemoveSCHs.isEmpty()) {
        setMonitoringServicesRestartRequired(requests);
      }
 -    return deleteStatusMetaData;
 +    topologyDeleteFormer.processDeleteMetaData(deleteMetaData);
 +    return deleteMetaData;
    }
  
-   private void deleteHostComponent(ServiceComponent serviceComponent, ServiceComponentHost componentHost,
-                                    DeleteHostComponentStatusMetaData deleteMetaData) throws AmbariException {
-     String included_hostname = componentHost.getHostName();
-     String serviceName = serviceComponent.getServiceName();
-     String master_component_name = null;
-     String slave_component_name = componentHost.getServiceComponentName();
-     HostComponentAdminState desiredAdminState = componentHost.getComponentAdminState();
-     State slaveState = componentHost.getState();
-     //Delete hostcomponents
-     serviceComponent.deleteServiceComponentHosts(componentHost.getHostName(), deleteMetaData);
-     // If deleted hostcomponents support decomission and were decommited and stopped
-     if (AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.containsValue(slave_component_name)
-             && desiredAdminState.equals(HostComponentAdminState.DECOMMISSIONED)
-             && slaveState.equals(State.INSTALLED)) {
- 
-       for (Entry<String, String> entrySet : AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.entrySet()) {
-         if (entrySet.getValue().equals(slave_component_name)) {
-           master_component_name = entrySet.getKey();
-         }
-       }
-       //Clear exclud file or draining list except HBASE
-       if (!serviceName.equals(Service.Type.HBASE.toString())) {
-         HashMap<String, String> requestProperties = new HashMap<>();
-         requestProperties.put("context", "Remove host " +
-                 included_hostname + " from exclude file");
-         requestProperties.put("exclusive", "true");
-         HashMap<String, String> params = new HashMap<>();
-         params.put("included_hosts", included_hostname);
-         params.put("slave_type", slave_component_name);
-         params.put(AmbariCustomCommandExecutionHelper.UPDATE_EXCLUDE_FILE_ONLY, "true");
- 
-         //Create filter for RECOMISSION command
-         RequestResourceFilter resourceFilter
-                 = new RequestResourceFilter(serviceName, master_component_name, null);
-         //Create request for RECOMISSION command
-         ExecuteActionRequest actionRequest = new ExecuteActionRequest(
-                 serviceComponent.getClusterName(), AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
-                 Collections.singletonList(resourceFilter), null, params, true);
-         //Send request
-         createAction(actionRequest, requestProperties);
-       }
- 
-       //Mark master component as needed to restart for remove host info from components UI
-       Cluster cluster = clusters.getCluster(serviceComponent.getClusterName());
-       Service service = cluster.getService(serviceName);
-       ServiceComponent sc = service.getServiceComponent(master_component_name);
- 
-       if (sc != null && sc.isMasterComponent()) {
-         for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-           sch.setRestartRequired(true);
-         }
-       }
-     }
-   }
- 
    @Override
    public void deleteUsers(Set<UserRequest> requests)
      throws AmbariException {
@@@ -3662,6 -3709,47 +3770,48 @@@
      }
    }
  
+   /**
+    * Creates and triggers an action to update the include and exclude files for the master components, depending on the current cluster topology and component states.
+    * @param serviceMasterMap
+    * @param masterSlaveHostsMap
+    * @param clusterName
+    * @param isDecommission
+    * @throws AmbariException
+    */
+   private void createAndExecuteRefreshIncludeExcludeFilesActionForMasters(Map<String, String> serviceMasterMap, Map<String, Set<String>> masterSlaveHostsMap, String clusterName, boolean isDecommission) throws AmbariException {
+     //Clear include/exclude files or draining list except HBASE
+     serviceMasterMap.remove(Service.Type.HBASE.toString());
+     //exit if empty
+     if (serviceMasterMap.isEmpty()) {
+       return;
+     }
+     LOG.debug("Refresh include/exclude files action will be executed for " + serviceMasterMap);
+     HashMap<String, String> requestProperties = new HashMap<>();
+     requestProperties.put("context", "Update Include/Exclude Files for " + serviceMasterMap.keySet().toString());
++    requestProperties.put("exclusive", "true");
+     HashMap<String, String> params = new HashMap<>();
+     params.put(AmbariCustomCommandExecutionHelper.UPDATE_FILES_ONLY, String.valueOf(isDecommission));
+ 
+     for (String masterName : masterSlaveHostsMap.keySet()) {
+       if (!isDecommission) {
+         params.put(masterName + "_" + AmbariCustomCommandExecutionHelper.DECOM_INCLUDED_HOSTS, StringUtils.join(masterSlaveHostsMap.get(masterName).toArray(), ","));
+       }
+     }
+ 
+     params.put(AmbariCustomCommandExecutionHelper.IS_ADD_OR_DELETE_SLAVE_REQUEST, "true");
+ 
+     //Create filter for command
+     List<RequestResourceFilter> resourceFilters = new ArrayList<>(serviceMasterMap.size());
+     for (String serviceName : serviceMasterMap.keySet()) {
+       resourceFilters.add(new RequestResourceFilter(serviceName, serviceMasterMap.get(serviceName), null));
+     }
+ 
+     //Create request for command
+     ExecuteActionRequest actionRequest = new ExecuteActionRequest(
+       clusterName, AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
 -      resourceFilters, null, params, false);
++      resourceFilters, null, params, true);
+     //Send action
+     createAction(actionRequest, requestProperties);
+   }
+ 
    @Override
    public void deleteMembers(java.util.Set<MemberRequest> requests) throws AmbariException {
      for (MemberRequest request : requests) {
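
For orientation, the two maps handed to createAndExecuteRefreshIncludeExcludeFilesActionForMasters have the shape sketched below; the hosts and the commented-out call are illustrative, not taken from the patch:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class RefreshFilesInputSketch {
      public static void main(String[] args) {
        // service name -> master component whose include/exclude files need a refresh
        Map<String, String> serviceMasterMap = new HashMap<>();
        serviceMasterMap.put("HDFS", "NAMENODE");
        serviceMasterMap.put("YARN", "RESOURCEMANAGER");

        // master component -> slave hosts that were just added or deleted
        Map<String, Set<String>> masterSlaveHostsMap = new HashMap<>();
        masterSlaveHostsMap.put("NAMENODE", new HashSet<>(Arrays.asList("host1.example.com")));
        masterSlaveHostsMap.put("RESOURCEMANAGER", new HashSet<>(Arrays.asList("host1.example.com")));

        // controller.createAndExecuteRefreshIncludeExcludeFilesActionForMasters(
        //     serviceMasterMap, masterSlaveHostsMap, "c1", true);
      }
    }
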
@@@ -4361,7 -4473,8 +4535,7 @@@
        for (OperatingSystemEntity operatingSystem: repositoryVersion.getOperatingSystems()) {
          if (operatingSystem.getOsType().equals(osType)) {
            for (RepositoryEntity repository: operatingSystem.getRepositories()) {
-               final RepositoryResponse response = new RepositoryResponse(repository.getBaseUrl(), osType, repository.getRepositoryId(), repository.getName(), "", "", "");
 -              final RepositoryResponse response = new RepositoryResponse(repository.getBaseUrl(), osType, repository.getRepositoryId(),
 -                      repository.getName(), repository.getDistribution(), repository.getComponents(), "", "");
++              final RepositoryResponse response = new RepositoryResponse(repository.getBaseUrl(), osType, repository.getRepositoryId(), repository.getName(), "", "");
                if (null != versionDefinitionId) {
                  response.setVersionDefinitionId(versionDefinitionId);
                } else {
@@@ -4389,8 -4502,8 +4563,8 @@@
  
          for (RepositoryXml.Repo repo : os.getRepos()) {
          RepositoryResponse resp = new RepositoryResponse(repo.getBaseUrl(), os.getFamily(),
 -              repo.getRepoId(), repo.getRepoName(), repo.getDistribution(), repo.getComponents(), repo.getMirrorsList(),
 +              repo.getRepoId(), repo.getRepoName(), repo.getMirrorsList(),
-               repo.getBaseUrl(), repo.getLatestUri());
+               repo.getBaseUrl());
  
            resp.setVersionDefinitionId(versionDefinitionId);
            resp.setStackName(stackId.getStackName());
@@@ -5652,253 -5684,4 +5745,245 @@@
      return QuickLinkVisibilityControllerFactory.get(quickLinkProfileJson);
    }
  
 +  /**
 +   * Collects metadata info about clusters for the agent.
 +   * @return metadata info about clusters
 +   * @throws AmbariException
 +   */
 +  public MetadataUpdateEvent getClustersMetadata() throws AmbariException {
 +    TreeMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
 +
 +    for (Cluster cl : clusters.getClusters().values()) {
 +      StackId stackId = cl.getDesiredStackVersion();
 +
 +      SecurityType securityType = cl.getSecurityType();
 +
 +      TreeMap<String, MetadataServiceInfo> serviceLevelParams = new TreeMap<>();
 +      Collection<ServiceInfo> servicesInfo = ambariMetaInfo.getServices(stackId.getStackName(),
 +          stackId.getStackVersion()).values();
 +      for (ServiceInfo serviceInfo : servicesInfo) {
 +        Long statusCommandTimeout = null;
 +        if (serviceInfo.getCommandScript() != null) {
 +          statusCommandTimeout = new Long(ambariCustomCommandExecutionHelper.getStatusCommandTimeout(serviceInfo));
 +        }
 +
 +        String servicePackageFolder = serviceInfo.getServicePackageFolder();
 +
 +        serviceLevelParams.put(serviceInfo.getName(),
 +            new MetadataServiceInfo(serviceInfo.getVersion(),
 +                serviceInfo.isCredentialStoreEnabled(),
 +                statusCommandTimeout,
 +                servicePackageFolder));
 +      }
 +
 +      MetadataCluster metadataCluster = new MetadataCluster(securityType,
 +          serviceLevelParams,
 +          getMetadataClusterLevelParams(cl, stackId));
 +      metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
 +    }
 +
 +    MetadataUpdateEvent metadataUpdateEvent = new MetadataUpdateEvent(metadataClusters,
 +        getMetadataAmbariLevelParams());
 +    return metadataUpdateEvent;
 +  }
 +
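
How this event reaches the agents is outside this diff; a hedged sketch of the intended wiring, where updateData is an assumed MetadataHolder method named only for illustration:

    // Inside AmbariManagementControllerImpl, using the Provider<MetadataHolder>
    // injected earlier in this patch. updateData(...) is an assumed holder API,
    // not shown here; it stands in for "publish the event to connected agents".
    MetadataUpdateEvent event = getClustersMetadata();
    // m_metadataHolder.get().updateData(event);
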
 +  public MetadataUpdateEvent getClusterMetadata(Cluster cl) throws AmbariException {
 +    TreeMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
 +    StackId stackId = cl.getDesiredStackVersion();
 +
 +    SecurityType securityType = cl.getSecurityType();
 +
 +    TreeMap<String, MetadataServiceInfo> serviceLevelParams = new TreeMap<>();
 +    Collection<ServiceInfo> servicesInfo = ambariMetaInfo.getServices(stackId.getStackName(),
 +        stackId.getStackVersion()).values();
 +    for (ServiceInfo serviceInfo : servicesInfo) {
 +      Long statusCommandTimeout = null;
 +      if (serviceInfo.getCommandScript() != null) {
 +        statusCommandTimeout = new Long(ambariCustomCommandExecutionHelper.getStatusCommandTimeout(serviceInfo));
 +      }
 +
 +      String servicePackageFolder = serviceInfo.getServicePackageFolder();
 +
 +      serviceLevelParams.put(serviceInfo.getName(),
 +          new MetadataServiceInfo(serviceInfo.getVersion(),
 +              serviceInfo.isCredentialStoreEnabled(),
 +              statusCommandTimeout,
 +              servicePackageFolder));
 +    }
 +
 +    MetadataCluster metadataCluster = new MetadataCluster(securityType,
 +        serviceLevelParams,
 +        getMetadataClusterLevelParams(cl, stackId));
 +    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
 +
 +    MetadataUpdateEvent metadataUpdateEvent = new MetadataUpdateEvent(metadataClusters,
 +        null);
 +    return metadataUpdateEvent;
 +  }
 +
 +  public MetadataUpdateEvent getClusterMetadataOnConfigsUpdate(Cluster cl) throws AmbariException {
 +    TreeMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
 +    StackId stackId = cl.getDesiredStackVersion();
 +
 +    MetadataCluster metadataCluster = new MetadataCluster(null,
 +        new TreeMap<>(),
 +        getMetadataClusterLevelConfigsParams(cl, stackId));
 +    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
 +
 +    MetadataUpdateEvent metadataUpdateEvent = new MetadataUpdateEvent(metadataClusters,
 +        null);
 +    return metadataUpdateEvent;
 +  }
 +
 +  private String getClientsToUpdateConfigs(ComponentInfo componentInfo) {
 +    List<String> clientsToUpdateConfigsList = componentInfo.getClientsToUpdateConfigs();
 +    if (clientsToUpdateConfigsList == null) {
 +      clientsToUpdateConfigsList = new ArrayList<>();
 +      clientsToUpdateConfigsList.add("*");
 +    }
 +    return gson.toJson(clientsToUpdateConfigsList);
 +  }
 +
 +  private Boolean getUnlimitedKeyJCERequirement(ComponentInfo componentInfo, SecurityType clusterSecurityType) {
 +    UnlimitedKeyJCERequirement unlimitedKeyJCERequirement = componentInfo.getUnlimitedKeyJCERequired();
 +    // Ensure that the unlimited key requirement is set. If null, the default value should be used.
 +    if(unlimitedKeyJCERequirement == null) {
 +      unlimitedKeyJCERequirement = UnlimitedKeyJCERequirement.DEFAULT;
 +    }
 +
 +    return (UnlimitedKeyJCERequirement.ALWAYS == unlimitedKeyJCERequirement) ||
 +        ((UnlimitedKeyJCERequirement.KERBEROS_ENABLED == unlimitedKeyJCERequirement) &&
 +            (clusterSecurityType == SecurityType.KERBEROS));
 +
 +  }
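
The check above reduces to a small truth table; a self-contained restatement with simplified stand-in enums (not the Ambari types):

    public class JceRequirementSketch {
      enum Requirement { ALWAYS, KERBEROS_ENABLED, NEVER, DEFAULT }
      enum SecurityType { NONE, KERBEROS }

      // Mirrors the logic of getUnlimitedKeyJCERequirement: unlimited-key JCE is
      // required when the component always needs it, or when it needs it for
      // Kerberos and the cluster is kerberized.
      static boolean unlimitedKeyJceRequired(Requirement req, SecurityType security) {
        if (req == null) {
          req = Requirement.DEFAULT; // same null fallback as the patch
        }
        return req == Requirement.ALWAYS
            || (req == Requirement.KERBEROS_ENABLED && security == SecurityType.KERBEROS);
      }

      public static void main(String[] args) {
        System.out.println(unlimitedKeyJceRequired(Requirement.KERBEROS_ENABLED, SecurityType.KERBEROS)); // true
        System.out.println(unlimitedKeyJceRequired(Requirement.KERBEROS_ENABLED, SecurityType.NONE));     // false
      }
    }
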
 +
 +  //TODO: this will need to change for multi-instance usage
 +  public TreeMap<String, String> getTopologyComponentLevelParams(StackId stackId, String serviceName, String componentName,
 +                                                             SecurityType clusterSecurityType) throws AmbariException {
 +    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
 +        stackId.getStackName(), stackId.getStackVersion(),
 +        serviceName, componentName);
 +
 +    TreeMap<String, String> statusCommandParams = new TreeMap<>();
 +    statusCommandParams.put(ExecutionCommand.KeyNames.CLIENTS_TO_UPDATE_CONFIGS,
 +        getClientsToUpdateConfigs(componentInfo));
 +    statusCommandParams.put(ExecutionCommand.KeyNames.UNLIMITED_KEY_JCE_REQUIRED,
 +         Boolean.toString(getUnlimitedKeyJCERequirement(componentInfo, clusterSecurityType)));
 +    return statusCommandParams;
 +  }
 +
 +  //TODO: this will need to change for multi-instance usage
 +  public TreeMap<String, String> getTopologyCommandParams(StackId stackId, String serviceName, String componentName) throws AmbariException {
 +    ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
 +        stackId.getStackVersion(), serviceName);
 +    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
 +        stackId.getStackName(), stackId.getStackVersion(),
 +        serviceName, componentName);
 +
 +    String scriptName = null;
 +    String scriptCommandTimeout = "";
 +    CommandScriptDefinition script = componentInfo.getCommandScript();
 +    if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
 +      if (script != null) {
 +        scriptName = script.getScript();
 +        if (script.getTimeout() > 0) {
 +          scriptCommandTimeout = String.valueOf(script.getTimeout());
 +        }
 +      } else {
 +        String message = String.format("Component %s of service %s has not " +
 +            "command script defined", componentName, serviceName);
 +        throw new AmbariException(message);
 +      }
 +    }
 +    String agentDefaultCommandTimeout = configs.getDefaultAgentTaskTimeout(false);
 +    String actualTimeout = (!scriptCommandTimeout.equals("") ? scriptCommandTimeout : agentDefaultCommandTimeout);
 +
 +    TreeMap<String, String> commandParams = new TreeMap<>();
 +    commandParams.put(COMMAND_TIMEOUT, actualTimeout);
 +    commandParams.put(SCRIPT, scriptName);
 +    commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
 +    return commandParams;
 +  }
 +
 +  public TreeMap<String, String> getMetadataClusterLevelParams(Cluster cluster, StackId stackId) throws AmbariException {
 +    TreeMap<String, String> clusterLevelParams = new TreeMap<>();
 +    clusterLevelParams.put(STACK_NAME, stackId.getStackName());
 +    clusterLevelParams.put(STACK_VERSION, stackId.getStackVersion());
 +
 +    Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
 +    if (MapUtils.isNotEmpty(desiredConfigs)) {
 +
 +      Set<String> userSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.USER, cluster, desiredConfigs);
 +      String userList = gson.toJson(userSet);
 +      clusterLevelParams.put(USER_LIST, userList);
 +
 +      //Create a user_group mapping and send it as part of the hostLevelParams
 +      Map<String, Set<String>> userGroupsMap = configHelper.createUserGroupsMap(
 +          stackId, cluster, desiredConfigs);
 +      String userGroups = gson.toJson(userGroupsMap);
 +      clusterLevelParams.put(USER_GROUPS, userGroups);
 +
 +      Set<String> groupSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.GROUP, cluster, desiredConfigs);
 +      String groupList = gson.toJson(groupSet);
 +      clusterLevelParams.put(GROUP_LIST, groupList);
 +    }
 +    Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId,
 +        PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
 +    String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
 +    clusterLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
 +
 +    clusterLevelParams.put(CLUSTER_NAME, cluster.getClusterName());
 +
 +    StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
 +    clusterLevelParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
 +
 +    return clusterLevelParams;
 +  }
 +
 +  public TreeMap<String, String> getMetadataClusterLevelConfigsParams(Cluster cluster, StackId stackId) throws AmbariException {
 +    TreeMap<String, String> clusterLevelParams = new TreeMap<>();
 +
 +    Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
 +    if (MapUtils.isNotEmpty(desiredConfigs)) {
 +
 +      Set<String> userSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.USER, cluster, desiredConfigs);
 +      String userList = gson.toJson(userSet);
 +      clusterLevelParams.put(USER_LIST, userList);
 +
 +      //Create a user_group mapping and send it as part of the hostLevelParams
 +      Map<String, Set<String>> userGroupsMap = configHelper.createUserGroupsMap(
 +          stackId, cluster, desiredConfigs);
 +      String userGroups = gson.toJson(userGroupsMap);
 +      clusterLevelParams.put(USER_GROUPS, userGroups);
 +
 +      Set<String> groupSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.GROUP, cluster, desiredConfigs);
 +      String groupList = gson.toJson(groupSet);
 +      clusterLevelParams.put(GROUP_LIST, groupList);
 +    }
 +    Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId,
 +        PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
 +    String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
 +    clusterLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
 +
 +    return clusterLevelParams;
 +  }
 +
 +  public TreeMap<String, String> getMetadataAmbariLevelParams() throws AmbariException {
 +    TreeMap<String, String> clusterLevelParams = new TreeMap<>();
 +    clusterLevelParams.put(JDK_LOCATION, getJdkResourceUrl());
 +    clusterLevelParams.put(JAVA_HOME, getJavaHome());
 +    clusterLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
 +    clusterLevelParams.put(JDK_NAME, getJDKName());
 +    clusterLevelParams.put(JCE_NAME, getJCEName());
 +    clusterLevelParams.put(DB_NAME, getServerDB());
 +    clusterLevelParams.put(MYSQL_JDBC_URL, getMysqljdbcUrl());
 +    clusterLevelParams.put(ORACLE_JDBC_URL, getOjdbcUrl());
 +    clusterLevelParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
 +    clusterLevelParams.putAll(getRcaParameters());
 +    clusterLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped());
 +    clusterLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled());
 +    clusterLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
 +
 +    return clusterLevelParams;
 +  }
- 
-   public TreeMap<String, String> getTopologyHostLevelParams(Cluster cluster, Host host) throws AmbariException {
-     TreeMap<String, String> hostLevelParams = new TreeMap<>();
-     String repoInfo = ambariMetaInfo.getRepoInfoString(cluster, host);
- 
-     hostLevelParams.put(REPO_INFO, repoInfo);
-     return hostLevelParams;
-   }
  }

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index 3a3908d,8988be0..4143beb
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@@ -598,30 -618,25 +603,31 @@@ public class AmbariServer 
  
      String srvrCrtPass = configsMap.get(Configuration.SRVR_CRT_PASS.getKey());
  
 +
 +      HttpConfiguration https_config = new HttpConfiguration();
 +      https_config.addCustomizer(new SecureRequestCustomizer());
 +      https_config.setRequestHeaderSize(configs.getHttpRequestHeaderSize());
 +      https_config.setResponseHeaderSize(configs.getHttpResponseHeaderSize());
++      https_config.setSendServerVersion(false);
 +
      // Secured connector - default constructor sets trustAll = true for certs
 -      SslContextFactory contextFactory = new SslContextFactory();
 -      disableInsecureProtocols(contextFactory);
 -
 -      SslSelectChannelConnector agentSslConnector = new SslSelectChannelConnector(contextFactory);
 -      agentSslConnector.setKeystore(keystore);
 -      agentSslConnector.setTruststore(truststore);
 -      agentSslConnector.setPassword(srvrCrtPass);
 -      agentSslConnector.setKeyPassword(srvrCrtPass);
 -      agentSslConnector.setTrustPassword(srvrCrtPass);
 -      agentSslConnector.setKeystoreType(configsMap.get(Configuration.KSTR_TYPE.getKey()));
 -      agentSslConnector.setTruststoreType(configsMap.get(Configuration.TSTR_TYPE.getKey()));
 -      agentSslConnector.setNeedClientAuth(needClientAuth);
 -      agentSslConnector.setRequestHeaderSize(configs.getHttpRequestHeaderSize());
 -      agentSslConnector.setResponseHeaderSize(configs.getHttpResponseHeaderSize());
 +      SslContextFactory sslContextFactory = new SslContextFactory();
 +      disableInsecureProtocols(sslContextFactory);
 +      sslContextFactory.setKeyStorePath(keystore);
 +      sslContextFactory.setTrustStorePath(truststore);
 +      sslContextFactory.setKeyStorePassword(srvrCrtPass);
 +      sslContextFactory.setKeyManagerPassword(srvrCrtPass);
 +      sslContextFactory.setTrustStorePassword(srvrCrtPass);
 +      sslContextFactory.setKeyStoreType(configsMap.get(Configuration.KSTR_TYPE.getKey()));
 +      sslContextFactory.setTrustStoreType(configsMap.get(Configuration.TSTR_TYPE.getKey()));
 +      sslContextFactory.setNeedClientAuth(needClientAuth);
 +      ServerConnector agentSslConnector = new ServerConnector(server, acceptors, -1,
 +        new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.toString()),
 +        new HttpConnectionFactory(https_config));
        agentConnector = agentSslConnector;
      } else {
 -      agentConnector = new SelectChannelConnector();
 -      agentConnector.setMaxIdleTime(configs.getConnectionMaxIdleTime());
 +      agentConnector = new ServerConnector(server, acceptors, -1);
 +      agentConnector.setIdleTimeout(configs.getConnectionMaxIdleTime());
      }
  
      agentConnector.setPort(port);
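
The hunk above migrates the agent connector from Jetty 8's SslSelectChannelConnector to Jetty 9's ServerConnector with explicit connection factories. A minimal self-contained sketch of the same Jetty 9 pattern, with placeholder keystore path, password, and port rather than Ambari's configuration:

    import org.eclipse.jetty.http.HttpVersion;
    import org.eclipse.jetty.server.HttpConfiguration;
    import org.eclipse.jetty.server.HttpConnectionFactory;
    import org.eclipse.jetty.server.SecureRequestCustomizer;
    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ServerConnector;
    import org.eclipse.jetty.server.SslConnectionFactory;
    import org.eclipse.jetty.util.ssl.SslContextFactory;

    public class JettySslSketch {
      public static void main(String[] args) throws Exception {
        Server server = new Server();

        HttpConfiguration httpsConfig = new HttpConfiguration();
        httpsConfig.addCustomizer(new SecureRequestCustomizer());
        httpsConfig.setSendServerVersion(false); // same header hardening as the patch

        SslContextFactory sslContextFactory = new SslContextFactory();
        sslContextFactory.setKeyStorePath("/path/to/keystore.jks"); // placeholder
        sslContextFactory.setKeyStorePassword("changeit");          // placeholder

        // Jetty 9 replaces SslSelectChannelConnector with a ServerConnector
        // composed of an SSL connection factory and an HTTP connection factory.
        ServerConnector sslConnector = new ServerConnector(server,
            new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.toString()),
            new HttpConnectionFactory(httpsConfig));
        sslConnector.setPort(8441);

        server.addConnector(sslConnector);
        server.start();
        server.join();
      }
    }
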
@@@ -630,13 -645,10 +636,14 @@@
    }
  
    @SuppressWarnings("deprecation")
 -  private SelectChannelConnector createSelectChannelConnectorForClient() {
 +  private ServerConnector createSelectChannelConnectorForClient(Server server, int acceptors) {
      Map<String, String> configsMap = configs.getConfigsMap();
 -    SelectChannelConnector apiConnector;
 +    ServerConnector apiConnector;
  
 +    HttpConfiguration http_config = new HttpConfiguration();
 +    http_config.setRequestHeaderSize(configs.getHttpRequestHeaderSize());
 +    http_config.setResponseHeaderSize(configs.getHttpResponseHeaderSize());
++    http_config.setSendServerVersion(false);
      if (configs.getApiSSLAuthentication()) {
        String httpsKeystore = configsMap.get(Configuration.CLIENT_API_SSL_KSTR_DIR_NAME.getKey()) +
          File.separator + configsMap.get(Configuration.CLIENT_API_SSL_KSTR_NAME.getKey());

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariSessionManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupRequest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
index 0000000,29f8e2a..a7b9d80
mode 000000,100644..100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
@@@ -1,0 -1,329 +1,328 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.ambari.server.controller;
+ 
+ import static java.util.Collections.singleton;
+ import static java.util.stream.Collectors.toSet;
+ import static org.apache.ambari.server.controller.KerberosHelperImpl.BASE_LOG_DIR;
+ 
+ import java.io.File;
+ import java.lang.reflect.Type;
+ import java.util.ArrayList;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ import java.util.concurrent.ConcurrentMap;
+ 
+ import org.apache.ambari.server.AmbariException;
+ import org.apache.ambari.server.Role;
+ import org.apache.ambari.server.RoleCommand;
+ import org.apache.ambari.server.actionmanager.HostRoleStatus;
+ import org.apache.ambari.server.actionmanager.Stage;
+ import org.apache.ambari.server.actionmanager.StageFactory;
+ import org.apache.ambari.server.agent.CommandReport;
+ import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+ import org.apache.ambari.server.serveraction.ServerAction;
+ import org.apache.ambari.server.serveraction.kerberos.AbstractPrepareKerberosServerAction;
+ import org.apache.ambari.server.serveraction.kerberos.Component;
+ import org.apache.ambari.server.serveraction.kerberos.DestroyPrincipalsServerAction;
+ import org.apache.ambari.server.serveraction.kerberos.KDCType;
+ import org.apache.ambari.server.serveraction.kerberos.KerberosOperationHandler;
+ import org.apache.ambari.server.serveraction.kerberos.KerberosServerAction;
+ import org.apache.ambari.server.state.Cluster;
+ import org.apache.ambari.server.state.Config;
+ import org.apache.ambari.server.state.StackId;
+ import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+ import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
+ import org.apache.ambari.server.utils.StageUtils;
+ 
+ import com.google.gson.reflect.TypeToken;
+ 
+ 
+ /**
+  * I delete kerberos identities (principals and keytabs) of a given component.
+  */
+ class DeleteIdentityHandler {
+   private final AmbariCustomCommandExecutionHelper customCommandExecutionHelper;
+   private final Integer taskTimeout;
+   private final StageFactory stageFactory;
+   private final AmbariManagementController ambariManagementController;
+ 
+   public DeleteIdentityHandler(AmbariCustomCommandExecutionHelper customCommandExecutionHelper, Integer taskTimeout, StageFactory stageFactory, AmbariManagementController ambariManagementController) {
+     this.customCommandExecutionHelper = customCommandExecutionHelper;
+     this.taskTimeout = taskTimeout;
+     this.stageFactory = stageFactory;
+     this.ambariManagementController = ambariManagementController;
+   }
+ 
+   /**
+    * Creates and adds stages to the given stage container for deleting kerberos identities.
+    * The service component that belongs to the identity doesn't need to be installed.
+    */
+   public void addDeleteIdentityStages(Cluster cluster, OrderedRequestStageContainer stageContainer, CommandParams commandParameters, boolean manageIdentities)
+     throws AmbariException
+   {
+     ServiceComponentHostServerActionEvent event = new ServiceComponentHostServerActionEvent("AMBARI_SERVER", StageUtils.getHostName(), System.currentTimeMillis());
+     String hostParamsJson = StageUtils.getGson().toJson(customCommandExecutionHelper.createDefaultHostParams(cluster, cluster.getDesiredStackVersion()));
+     stageContainer.setClusterHostInfo(StageUtils.getGson().toJson(StageUtils.getClusterHostInfo(cluster)));
+     if (manageIdentities) {
+       addPrepareDeleteIdentity(cluster, hostParamsJson, event, commandParameters, stageContainer);
+       addDestroyPrincipals(cluster, hostParamsJson, event, commandParameters, stageContainer);
+       addDeleteKeytab(cluster, commandParameters.getAffectedHostNames(), hostParamsJson, commandParameters, stageContainer);
+     }
+     addFinalize(cluster, hostParamsJson, event, stageContainer, commandParameters);
+   }
+ 
+   private void addPrepareDeleteIdentity(Cluster cluster,
+                                         String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                                         CommandParams commandParameters,
+                                         OrderedRequestStageContainer stageContainer)
+     throws AmbariException
+   {
+     Stage stage = createServerActionStage(stageContainer.getLastStageId(),
+       cluster,
+       stageContainer.getId(),
+       "Prepare delete identities",
+       "{}",
+       hostParamsJson,
+       PrepareDeleteIdentityServerAction.class,
+       event,
+       commandParameters.asMap(),
+       "Prepare delete identities",
+       taskTimeout);
+     stageContainer.addStage(stage);
+   }
+ 
+   private void addDestroyPrincipals(Cluster cluster,
+                                     String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                                     CommandParams commandParameters,
+                                     OrderedRequestStageContainer stageContainer)
+     throws AmbariException
+   {
+     Stage stage = createServerActionStage(stageContainer.getLastStageId(),
+       cluster,
+       stageContainer.getId(),
+       "Destroy Principals",
+       "{}",
+       hostParamsJson,
+       DestroyPrincipalsServerAction.class,
+       event,
+       commandParameters.asMap(),
+       "Destroy Principals",
+       Math.max(ServerAction.DEFAULT_LONG_RUNNING_TASK_TIMEOUT_SECONDS, taskTimeout));
+     stageContainer.addStage(stage);
+   }
+ 
+   private void addDeleteKeytab(Cluster cluster,
+                                Set<String> hostFilter,
+                                String hostParamsJson,
+                                CommandParams commandParameters,
+                                OrderedRequestStageContainer stageContainer)
+     throws AmbariException
+   {
+     Stage stage = createNewStage(stageContainer.getLastStageId(),
+       cluster,
+       stageContainer.getId(),
+       "Delete Keytabs",
+       commandParameters.asJson(),
+       hostParamsJson);
+ 
+     Map<String, String> requestParams = new HashMap<>();
+     List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
+     RequestResourceFilter reqResFilter = new RequestResourceFilter("KERBEROS", "KERBEROS_CLIENT", new ArrayList<>(hostFilter));
+     requestResourceFilters.add(reqResFilter);
+ 
+     ActionExecutionContext actionExecContext = new ActionExecutionContext(
+       cluster.getClusterName(),
+       "REMOVE_KEYTAB",
+       requestResourceFilters,
+       requestParams);
+     customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestParams, null);
+     stageContainer.addStage(stage);
+   }
+ 
+   private void addFinalize(Cluster cluster,
+                            String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                            OrderedRequestStageContainer requestStageContainer,
+                            CommandParams commandParameters)
+     throws AmbariException
+   {
+     Stage stage = createServerActionStage(requestStageContainer.getLastStageId(),
+       cluster,
+       requestStageContainer.getId(),
+       "Finalize Operations",
+       "{}",
+       hostParamsJson,
+       DeleteDataDirAction.class,
+       event,
+       commandParameters.asMap(),
+       "Finalize Operations", 300);
+     requestStageContainer.addStage(stage);
+   }
+ 
+ 
+   public static class CommandParams {
+     private final List<Component> components;
+     private final Set<String> identities;
+     private final String authName;
+     private final File dataDirectory;
+     private final String defaultRealm;
+     private final KDCType kdcType;
+ 
+     public CommandParams(List<Component> components, Set<String> identities, String authName, File dataDirectory, String defaultRealm, KDCType kdcType) {
+       this.components = components;
+       this.identities = identities;
+       this.authName = authName;
+       this.dataDirectory = dataDirectory;
+       this.defaultRealm = defaultRealm;
+       this.kdcType = kdcType;
+     }
+ 
+     public Map<String, String> asMap() {
+       Map<String, String> commandParameters = new HashMap<>();
+       commandParameters.put(KerberosServerAction.AUTHENTICATED_USER_NAME, authName);
+       commandParameters.put(KerberosServerAction.DEFAULT_REALM, defaultRealm);
+       commandParameters.put(KerberosServerAction.KDC_TYPE, kdcType.name());
+       commandParameters.put(KerberosServerAction.IDENTITY_FILTER, StageUtils.getGson().toJson(identities));
+       commandParameters.put(KerberosServerAction.COMPONENT_FILTER, StageUtils.getGson().toJson(components));
+       commandParameters.put(KerberosServerAction.DATA_DIRECTORY, dataDirectory.getAbsolutePath());
+       return commandParameters;
+     }
+ 
+     public Set<String> getAffectedHostNames() {
+       return components.stream().map(Component::getHostName).collect(toSet());
+     }
+ 
+     public String asJson() {
+       return StageUtils.getGson().toJson(asMap());
+     }
+   }
+ 
+   private static class PrepareDeleteIdentityServerAction extends AbstractPrepareKerberosServerAction {
+     @Override
+     public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws AmbariException, InterruptedException {
+       KerberosDescriptor kerberosDescriptor = getKerberosDescriptor();
+       processServiceComponents(
+         getCluster(),
+         kerberosDescriptor,
+         componentFilter(),
+         getIdentityFilter(),
+         dataDirectory(),
+         calculateConfig(kerberosDescriptor, serviceNames()),
+         new HashMap<>(),
+         false,
 -        new HashMap<>(),
 -          false);
++        new HashMap<>());
+       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
+     }
+ 
+     private Set<String> serviceNames() {
+       return componentFilter().stream().map(component -> component.getServiceName()).collect(toSet());
+     }
+ 
+     private List<Component> componentFilter() {
+       Type jsonType = new TypeToken<List<Component>>() {}.getType();
+       return StageUtils.getGson().fromJson(getCommandParameterValue(KerberosServerAction.COMPONENT_FILTER), jsonType);
+     }
+ 
+     /**
+      * Cleaning identities is asynchronous; the service and its configuration may already be deleted at this point.
+      * We extend the actual config with the properties of the latest deleted configuration of the service.
+      * The service configuration is needed because principal names may contain placeholder variables which are replaced based on the service configuration.
+      */
+     private Map<String, Map<String, String>> calculateConfig(KerberosDescriptor kerberosDescriptor, Set<String> serviceNames) throws AmbariException {
+       Map<String, Map<String, String>> actualConfig = getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor, false, false);
+       extendWithDeletedConfigOfService(actualConfig, serviceNames);
+       return actualConfig;
+     }
+ 
+     private void extendWithDeletedConfigOfService(Map<String, Map<String, String>> configToBeExtended, Set<String> serviceNames) throws AmbariException {
+       Set<String> deletedConfigTypes = serviceNames.stream()
+         .flatMap(serviceName -> configTypesOfService(serviceName).stream())
+         .collect(toSet());
+       for (Config deletedConfig : getCluster().getLatestConfigsWithTypes(deletedConfigTypes)) {
+         configToBeExtended.put(deletedConfig.getType(), deletedConfig.getProperties());
+       }
+     }
+ 
+     private Set<String> configTypesOfService(String serviceName) {
+       try {
+         StackId stackId = getCluster().getCurrentStackVersion();
+         StackServiceRequest stackServiceRequest = new StackServiceRequest(stackId.getStackName(), stackId.getStackVersion(), serviceName);
+         return AmbariServer.getController().getStackServices(singleton(stackServiceRequest)).stream()
+           .findFirst()
+           .orElseThrow(() -> new IllegalArgumentException("Could not find stack service " + serviceName))
+           .getConfigTypes()
+           .keySet();
+       } catch (AmbariException e) {
+         throw new RuntimeException(e);
+       }
+     }
+ 
+     private String dataDirectory() {
+       return getCommandParameterValue(getCommandParameters(), DATA_DIRECTORY);
+     }
+ 
+     private KerberosDescriptor getKerberosDescriptor() throws AmbariException {
+       return getKerberosHelper().getKerberosDescriptor(getCluster(), false);
+     }
+   }
+ 
+   private Stage createNewStage(long id, Cluster cluster, long requestId, String requestContext, String commandParams, String hostParams) {
+     Stage stage = stageFactory.createNew(requestId,
+       BASE_LOG_DIR + File.pathSeparator + requestId,
+       cluster.getClusterName(),
+       cluster.getClusterId(),
+       requestContext,
+       commandParams,
+       hostParams);
+     stage.setStageId(id);
+     return stage;
+   }
+ 
+   private Stage createServerActionStage(long id, Cluster cluster, long requestId,
+                                        String requestContext,
+                                        String commandParams, String hostParams,
+                                        Class<? extends ServerAction> actionClass,
+                                        ServiceComponentHostServerActionEvent event,
+                                        Map<String, String> commandParameters, String commandDetail,
+                                        Integer timeout) throws AmbariException {
+ 
+     Stage stage = createNewStage(id, cluster, requestId, requestContext, commandParams, hostParams);
+     stage.addServerActionCommand(actionClass.getName(), null, Role.AMBARI_SERVER_ACTION,
+       RoleCommand.EXECUTE, cluster.getClusterName(), event, commandParameters, commandDetail,
+       ambariManagementController.findConfigurationTagsWithOverrides(cluster, null), timeout,
+       false, false);
+ 
+     return stage;
+   }
+ 
+   private static class DeleteDataDirAction extends KerberosServerAction {
+ 
+     @Override
+     public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws AmbariException, InterruptedException {
+       deleteDataDirectory(getCommandParameterValue(DATA_DIRECTORY));
+       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
+     }
+ 
+     @Override
+     protected CommandReport processIdentity(Map<String, String> identityRecord, String evaluatedPrincipal, KerberosOperationHandler operationHandler, Map<String, String> kerberosConfiguration, Map<String, Object> requestSharedDataContext) throws AmbariException {
+       return null;
+     }
+   }
+ }
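
A usage sketch assembled only from the constructor and method signatures above; the timeout, realm, paths, and KDC type are placeholders, and the collaborators are assumed to come from the surrounding Ambari/Guice context:

    package org.apache.ambari.server.controller;

    import java.io.File;
    import java.util.List;
    import java.util.Set;

    import org.apache.ambari.server.AmbariException;
    import org.apache.ambari.server.actionmanager.StageFactory;
    import org.apache.ambari.server.serveraction.kerberos.Component;
    import org.apache.ambari.server.serveraction.kerberos.KDCType;
    import org.apache.ambari.server.state.Cluster;

    public class DeleteIdentityUsageSketch {
      // Collaborators are parameters because they come from injection in real code.
      static void deleteIdentities(AmbariCustomCommandExecutionHelper helper,
                                   StageFactory stageFactory,
                                   AmbariManagementController controller,
                                   Cluster cluster,
                                   OrderedRequestStageContainer stageContainer,
                                   List<Component> components,
                                   Set<String> identities) throws AmbariException {
        DeleteIdentityHandler handler = new DeleteIdentityHandler(helper, 1200, stageFactory, controller);
        DeleteIdentityHandler.CommandParams params = new DeleteIdentityHandler.CommandParams(
            components, identities,
            "admin",                                              // placeholder auth name
            new File("/var/lib/ambari-server/data/tmp/kerberos"), // placeholder data dir
            "EXAMPLE.COM",                                        // placeholder realm
            KDCType.MIT_KDC);                                     // assumed KDCType constant
        // Queues prepare -> destroy principals -> delete keytabs -> finalize stages.
        handler.addDeleteIdentityStages(cluster, stageContainer, params, true);
      }
    }
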

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index ca2dda5,20c5708..bb360b5
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@@ -49,6 -51,18 +51,14 @@@ public interface KerberosHelper 
     */
    String DIRECTIVE_REGENERATE_KEYTABS = "regenerate_keytabs";
    /**
+    * directive used to pass host list to regenerate keytabs on
+    */
+   String DIRECTIVE_HOSTS = "regenerate_hosts";
+   /**
+    * directive used to pass list of services and their components to regenerate keytabs for
+    */
+   String DIRECTIVE_COMPONENTS = "regenerate_components";
+   /**
 -   * directive used to pass host list to regenerate keytabs on
 -   */
 -  String DIRECTIVE_IGNORE_CONFIGS = "ignore_config_updates";
 -  /**
    * directive used to indicate that the enable Kerberos operation should proceed even if the
     * cluster's security type is not changing
     */
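
The two new directives travel as plain request properties; a sketch of a regenerate-keytabs request carrying them (the value formats are assumptions; the authoritative parsing is parseHostFilter/parseComponentFilter in KerberosHelperImpl below):

    import java.util.HashMap;
    import java.util.Map;

    public class RegenerateDirectivesSketch {
      public static void main(String[] args) {
        Map<String, String> requestProperties = new HashMap<>();
        requestProperties.put("regenerate_keytabs", "all");              // DIRECTIVE_REGENERATE_KEYTABS
        requestProperties.put("regenerate_hosts", "host1,host2");        // DIRECTIVE_HOSTS (format assumed)
        requestProperties.put("regenerate_components", "HDFS:NAMENODE"); // DIRECTIVE_COMPONENTS (format assumed)
        System.out.println(requestProperties);
      }
    }
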

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index 6687942,67b08fd..013a063
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@@ -248,10 -262,16 +262,13 @@@ public class KerberosHelperImpl impleme
  
                CreatePrincipalsAndKeytabsHandler handler = null;
  
+               Set<String> hostFilter = parseHostFilter(requestProperties);
+               Map<String, Set<String>> serviceComponentFilter = parseComponentFilter(requestProperties);
+ 
 -              boolean updateConfigurations = !requestProperties.containsKey(DIRECTIVE_IGNORE_CONFIGS)
 -                  || !"true".equalsIgnoreCase(requestProperties.get(DIRECTIVE_IGNORE_CONFIGS));
 -
                if ("true".equalsIgnoreCase(value) || "all".equalsIgnoreCase(value)) {
 -                handler = new CreatePrincipalsAndKeytabsHandler(true, updateConfigurations, true);
 +                handler = new CreatePrincipalsAndKeytabsHandler(true, true, true);
                } else if ("missing".equalsIgnoreCase(value)) {
 -                handler = new CreatePrincipalsAndKeytabsHandler(false, updateConfigurations, true);
 +                handler = new CreatePrincipalsAndKeytabsHandler(false, true, true);
                }
  
                if (handler != null) {
@@@ -1194,18 -1525,21 +1522,20 @@@
              }
  
              // Append an entry to the action data file builder...
-             kerberosIdentityDataFileWriter.writeRecord(
-                 hostname,
-                 serviceName,
-                 componentName,
-                 principal,
-                 principalType,
-                 keytabFilePath,
-                 keytabFileOwnerName,
-                 keytabFileOwnerAccess,
-                 keytabFileGroupName,
-                 keytabFileGroupAccess,
-                 (keytabIsCachable) ? "true" : "false");
+             if(kerberosIdentityDataFileWriter != null) {
+               kerberosIdentityDataFileWriter.writeRecord(
+                   hostname,
+                   serviceName,
+                   componentName,
+                   principal,
+                   principalType,
+                   keytabFilePath,
+                   keytabFileOwnerName,
+                   keytabFileOwnerAccess,
+                   keytabFileGroupName,
+                   keytabFileGroupAccess,
 -                  (keytabIsCachable) ? "true" : "false",
 -                  (ignoreHeadless && principalDescriptor.getType() == 
KerberosPrincipalType.USER) ? "true" : "false");
++                  (keytabIsCachable) ? "true" : "false");
+             }
  
              // Add the principal-related configuration to the map of 
configurations
              mergeConfiguration(kerberosConfigurations, 
principalConfiguration, principal, null);
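
parseHostFilter(...) and parseComponentFilter(...) above turn the new directives into a Set<String> of host names and a Map<String, Set<String>> of service-to-component names, which the handlers then use to narrow the regeneration. Their bodies are not shown in this hunk; a sketch of what the host-filter half could look like, assuming a simple comma-separated value (illustrative only):

    // Illustrative only; the patch defines the real directive format.
    static Set<String> parseHostFilter(Map<String, String> requestProperties) {
      String raw = requestProperties.get(KerberosHelper.DIRECTIVE_HOSTS);
      if (raw == null || raw.trim().isEmpty()) {
        return null; // no directive => no filter, regenerate on all hosts
      }
      return new HashSet<>(Arrays.asList(raw.split("\\s*,\\s*")));
    }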

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/RepositoryResponse.java
----------------------------------------------------------------------
diff --cc 
ambari-server/src/main/java/org/apache/ambari/server/controller/RepositoryResponse.java
index 0735e27,8c68f41..792a166
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/RepositoryResponse.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/RepositoryResponse.java
@@@ -26,23 -26,26 +26,21 @@@ public class RepositoryResponse 
    private String osType;
    private String repoId;
    private String repoName;
 -  private String distribution;
 -  private String components;
    private String mirrorsList;
    private String defaultBaseUrl;
-   private String latestBaseUrl;
    private Long repositoryVersionId;
    private String versionDefinitionId;
    private Long clusterVersionId;
    private boolean unique;
  
    public RepositoryResponse(String baseUrl, String osType, String repoId,
-       String repoName, String mirrorsList, String defaultBaseUrl, String 
latestBaseUrl) {
 -                            String repoName, String distribution, String 
components,
 -                            String mirrorsList, String defaultBaseUrl) {
++                            String repoName, String mirrorsList, String 
defaultBaseUrl) {
      setBaseUrl(baseUrl);
      setOsType(osType);
      setRepoId(repoId);
      setRepoName(repoName);
 -    setDistribution(distribution);
 -    setComponents(components);
      setMirrorsList(mirrorsList);
      setDefaultBaseUrl(defaultBaseUrl);
-     setLatestBaseUrl(latestBaseUrl);
    }
  
    public String getStackName() {
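
After the merge the constructor takes six arguments: latestBaseUrl is dropped from one parent and distribution/components from the other. A hypothetical call site, with placeholder values, would look like:

    // Placeholder values; shows the merged six-argument signature.
    RepositoryResponse response = new RepositoryResponse(
        "http://repo.example.com/hdp/",   // baseUrl
        "redhat7",                        // osType
        "HDP-2.6",                        // repoId
        "HDP",                            // repoName
        null,                             // mirrorsList
        "http://repo.example.com/hdp/");  // defaultBaseUrl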

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/ResourceProviderFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
----------------------------------------------------------------------
diff --cc 
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
index 6c4e096,1cd2d10..e0df487
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
@@@ -494,17 -524,17 +520,20 @@@ public abstract class AbstractProviderM
  
    @Override
    public String getPort(String clusterName, String componentName, String 
hostName, boolean httpsEnabled) throws SystemException {
 +    // Parent map need not be synchronized
-     ConcurrentMap<String, ConcurrentMap<String, String>> clusterJmxPorts = 
jmxPortMap.get(clusterName);
-     if (clusterJmxPorts == null) {
+     ConcurrentMap<String, ConcurrentMap<String, String>> clusterJmxPorts;
+     // Still need a double check inside the lock to ensure a single init
+     if (!jmxPortMap.containsKey(clusterName)) {
        synchronized (jmxPortMap) {
-         clusterJmxPorts = jmxPortMap.get(clusterName);
-         if (clusterJmxPorts == null) {
+         if (!jmxPortMap.containsKey(clusterName)) {
            clusterJmxPorts = new ConcurrentHashMap<>();
            jmxPortMap.put(clusterName, clusterJmxPorts);
          }
        }
      }
++
+     clusterJmxPorts = jmxPortMap.get(clusterName);
++
      Service.Type service = componentServiceMap.get(componentName);
  
      if (service != null) {
@@@ -828,49 -866,33 +865,34 @@@
      }
    }
  
+   // TODO: Fix for multi-service feature support (trunk)
+   // Called from a synchronized block!
    private void initProviderMaps() throws SystemException {
-     ResourceProvider provider = getResourceProvider(Resource.Type.Cluster);
- 
-     Set<String> propertyIds = new HashSet<>();
-     propertyIds.add(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID);
- 
-     Map<String, String> requestInfoProperties = new HashMap<>();
-     
requestInfoProperties.put(ClusterResourceProvider.GET_IGNORE_PERMISSIONS_PROPERTY_ID,
 "true");
- 
-     Request request = PropertyHelper.getReadRequest(propertyIds,
-         requestInfoProperties, null, null, null);
- 
-     try {
-       jmxPortMap.clear();
-       Set<Resource> clusters = provider.getResources(request, null);
- 
-       clusterHostComponentMap = new HashMap<>();
-       clusterGangliaCollectorMap = new HashMap<>();
- 
-       for (Resource cluster : clusters) {
  
-         String clusterName = (String) 
cluster.getPropertyValue(CLUSTER_NAME_PROPERTY_ID);
+     jmxPortMap.clear();
+     clusterHostComponentMap = new HashMap<>();
+     clusterGangliaCollectorMap = new HashMap<>();
  
-         // initialize the host component map and Ganglia server from the 
known hosts components...
-         provider = getResourceProvider(Resource.Type.HostComponent);
- 
-         request = 
PropertyHelper.getReadRequest(HOST_COMPONENT_HOST_NAME_PROPERTY_ID,
-             HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
+     Map<String, Cluster> clusterMap = clusters.getClusters();
+     if (MapUtils.isEmpty(clusterMap)) {
+       return;
+     }
  
-         Predicate predicate = new 
PredicateBuilder().property(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).
-             equals(clusterName).toPredicate();
+     for (Cluster cluster : clusterMap.values()) {
+       String clusterName = cluster.getClusterName();
 -      Map<String, String> hostComponentMap = 
clusterHostComponentMap.get(clusterName);
  
-         Set<Resource> hostComponents = provider.getResources(request, 
predicate);
-         Map<String, String> hostComponentMap = 
clusterHostComponentMap.get(clusterName);
++      Map<String, String> hostComponentMap = 
clusterHostComponentMap.get(clusterName);
+       if (hostComponentMap == null) {
+         hostComponentMap = new HashMap<>();
+         clusterHostComponentMap.put(clusterName, hostComponentMap);
+       }
  
-         if (hostComponentMap == null) {
-           hostComponentMap = new HashMap<>();
-           clusterHostComponentMap.put(clusterName, hostComponentMap);
-         }
+       List<ServiceComponentHost> serviceComponentHosts = 
cluster.getServiceComponentHosts();
 +
-         for (Resource hostComponent : hostComponents) {
-           String componentName = (String) 
hostComponent.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-           String hostName = (String) 
hostComponent.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
+       if (!CollectionUtils.isEmpty(serviceComponentHosts)) {
+         for (ServiceComponentHost sch : serviceComponentHosts) {
+           String componentName = sch.getServiceComponentName();
+           String hostName = sch.getHostName();
  
            hostComponentMap.put(componentName, hostName);
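
Two patterns are visible in this file's hunks: getPort() keeps a classic double-checked initialization for jmxPortMap (an unsynchronized containsKey probe, repeated inside the synchronized block so the per-cluster map is created exactly once), and initProviderMaps() now walks clusters.getClusters() directly instead of issuing internal ResourceProvider read requests. Since jmxPortMap is a ConcurrentMap, the first guarantee could be had more compactly; a sketch of the alternative, not what the patch does:

    // Equivalent single initialization via ConcurrentMap.computeIfAbsent
    // (JDK 8+), shown for comparison only.
    ConcurrentMap<String, ConcurrentMap<String, String>> clusterJmxPorts =
        jmxPortMap.computeIfAbsent(clusterName, k -> new ConcurrentHashMap<>());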
  

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
----------------------------------------------------------------------
diff --cc 
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
index 081737a,c7450b3..1d4c155
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
@@@ -427,36 -424,6 +427,44 @@@ public class CalculatedStatus 
    }
  
    /**
 +   * Calculates the status for the specified request by id.
 +   * @param s_hostRoleCommandDAO is used to retrieve the map of 
stage-to-summary value objects
 +   * @param topologyManager topology manager
 +   * @param requestId the request id
 +   * @return the calculated status
 +   */
 +  public static CalculatedStatus statusFromRequest(HostRoleCommandDAO 
s_hostRoleCommandDAO,
 +                                                   TopologyManager 
topologyManager, Long requestId) {
 +    Map<Long, HostRoleCommandStatusSummaryDTO> summary = 
s_hostRoleCommandDAO.findAggregateCounts(requestId);
 +
 +    // get summaries from TopologyManager for logical requests
 +    summary.putAll(topologyManager.getStageSummaries(requestId));
 +
 +    // summary might be empty because a host delete has cleared all
 +    // HostRoleCommands, or because hosts have not yet registered with the
 +    // cluster when it is provisioned with a Blueprint
++    final CalculatedStatus status;
 +    LogicalRequest logicalRequest = topologyManager.getRequest(requestId);
 +    if (summary.isEmpty() && null != logicalRequest) {
-       // in this case, it appears that there are no tasks but this is a 
logical
++      // In this case, it appears that there are no tasks but this is a 
logical
 +      // topology request, so it's a matter of hosts simply not registering 
yet
-       // for tasks to be created
-       return CalculatedStatus.PENDING;
++      // for tasks to be created ==> status = PENDING.
++      // For a new LogicalRequest there should be at least one HostRequest,
++      // whereas if they have already been removed ==> status = COMPLETED.
++      if (logicalRequest.getHostRequests().isEmpty()) {
++        status = CalculatedStatus.COMPLETED;
++      } else {
++        status = CalculatedStatus.PENDING;
++      }
 +    } else {
 +      // there are either tasks or this is not a logical request, so do normal
 +      // status calculations
-       return CalculatedStatus.statusFromStageSummary(summary, 
summary.keySet());
++      status = CalculatedStatus.statusFromStageSummary(summary, 
summary.keySet());
 +    }
++    return status;
 +  }
 +
 +  /**
     * Calculates the overall status of an upgrade. If there are no tasks, then 
a
     * status of {@link HostRoleStatus#COMPLETED} is returned.
     *
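
Distilled, statusFromRequest(...) has three outcomes: PENDING when a logical (Blueprint) request exists but its hosts have not registered yet, COMPLETED when the logical request's host requests were already removed, and the normal stage-summary calculation otherwise. The same decision, condensed for readability:

    // Condensed restatement of the decision in statusFromRequest above.
    if (summary.isEmpty() && logicalRequest != null) {
      status = logicalRequest.getHostRequests().isEmpty()
          ? CalculatedStatus.COMPLETED   // host requests already removed
          : CalculatedStatus.PENDING;    // hosts not registered yet
    } else {
      status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
    }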

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --cc 
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index cc5b6c0,9f4a4a0..0ad967b
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@@ -442,7 -443,10 +443,8 @@@ public class ClientConfigResourceProvid
          jsonContent.put("clusterHostInfo", clusterHostInfo);
          jsonContent.put("hostLevelParams", hostLevelParams);
          jsonContent.put("hostname", hostName);
+         jsonContent.put("public_hostname", publicHostName);
          jsonContent.put("clusterName", cluster.getClusterName());
 -        jsonContent.put("serviceName", serviceName);
 -        jsonContent.put("role", componentName);
          jsonConfigurations = gson.toJson(jsonContent);
  
          File tmpDirectory = new File(TMP_PATH);

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --cc 
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index ae2fa25,ba5a4e7..5d75b99
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@@ -190,23 -192,23 +197,29 @@@ public class ClusterStackVersionResourc
    private static Configuration configuration;
  
    @Inject
-   private static Injector injector;
+   private static RepositoryVersionHelper repoVersionHelper;
+ 
+   @Inject
+   private static Gson gson;
+ 
+   @Inject
+   private static Provider<AmbariMetaInfo> metaInfo;
  
    @Inject
-   private static HostComponentStateDAO hostComponentStateDAO;
+   private static Provider<Clusters> clusters;
  
 +  @Inject
 +  private static Provider<MetadataHolder> m_metadataHolder;
 +
 +  @Inject
 +  private static Provider<AmbariManagementControllerImpl> 
m_ambariManagementController;
 +
    /**
-    * We have to include such a hack here, because if we
-    * make finalizeUpgradeAction field static and request injection
-    * for it, there will be a circle dependency error
+    * Used for updating the existing stack tools with those of the stack being
+    * distributed.
     */
-   private FinalizeUpgradeAction finalizeUpgradeAction = 
injector.getInstance(FinalizeUpgradeAction.class);
+   @Inject
+   private static Provider<ConfigHelper> configHelperProvider;
  
    /**
     * Constructor.
@@@ -374,76 -442,74 +452,73 @@@
            String.format("Version %s is backed by a version definition, but it 
could not be parsed", desiredRepoVersion), e);
      }
  
-     // get all of the host eligible for stack distribution
-     List<Host> hosts = getHostsForStackDistribution(cluster);
+     // if true, then we need to force all new host versions into the 
INSTALLED state
+     boolean forceInstalled = Boolean.parseBoolean((String)propertyMap.get(
+         CLUSTER_STACK_VERSION_FORCE));
+ 
+     try {
+       // either create the necessary host version entries, or set them to 
INSTALLING when attempting to re-distribute an existing version
+       return createOrUpdateHostVersions(cluster, repoVersionEntity, 
desiredVersionDefinition,
+           stackId, forceInstalled, propertyMap);
+     } catch (AmbariException e) {
+       throw new SystemException("Can not persist request", e);
+     }
+   }
+ 
 -  @Transactional(rollbackOn = {RuntimeException.class, SystemException.class, 
AmbariException.class})
++  @Transactional
+   RequestStatus createOrUpdateHostVersions(Cluster cluster,
+       RepositoryVersionEntity repoVersionEntity, VersionDefinitionXml 
versionDefinitionXml,
+       StackId stackId, boolean forceInstalled, Map<String, Object> 
propertyMap)
+       throws AmbariException, SystemException {
  
-     /*
-     If there is a repository that is already ATTEMPTED to be installed and 
the version
-     is GREATER than the one trying to install, we must fail (until we can 
support that via Patch Upgrades)
+     final String desiredRepoVersion = repoVersionEntity.getVersion();
  
-     For example:
+     // get all of the hosts eligible for stack distribution
+     List<Host> hosts = Lists.newArrayList(cluster.getHosts());
  
-     1. Install 2.3.0.0
-     2. Register and Install 2.5.0.0 (with or without package-version; it gets 
computed correctly)
-     3. Register 2.4 (without package-version)
  
-     Installation of 2.4 will fail because the way agents invoke installation 
is to
-     install by name.  if the package-version is not known, then the 'newest' 
is ALWAYS installed.
-     In this case, 2.5.0.0.  2.4 is never picked up.
-     */
-     for (ClusterVersionEntity clusterVersion : 
clusterVersionDAO.findByCluster(clName)) {
-       RepositoryVersionEntity clusterRepoVersion = 
clusterVersion.getRepositoryVersion();
+     for (Host host : hosts) {
+       for (HostVersionEntity hostVersion : host.getAllHostVersions()) {
+         RepositoryVersionEntity hostRepoVersion = 
hostVersion.getRepositoryVersion();
  
-       int compare = compareVersions(clusterRepoVersion.getVersion(), 
desiredRepoVersion);
+         // !!! ignore stack differences
+         if 
(!hostRepoVersion.getStackName().equals(repoVersionEntity.getStackName())) {
+           continue;
+         }
  
-       // ignore earlier versions
-       if (compare <= 0) {
-         continue;
-       }
+         int compare = compareVersions(hostRepoVersion.getVersion(), 
desiredRepoVersion);
  
-       // !!! the version is greater to the one to install
+         // ignore earlier versions
+         if (compare <= 0) {
+           continue;
+         }
  
-       // if the stacks are different, then don't fail (further check 
same-stack version strings)
-       if (!StringUtils.equals(clusterRepoVersion.getStackName(), 
repoVersionEnt.getStackName())) {
-         continue;
-       }
+         // !!! the version is greater to the one to install
  
-       // if there is no backing VDF for the desired version, allow the 
operation (legacy behavior)
-       if (null == desiredVersionDefinition) {
-         continue;
-       }
+         // if there is no backing VDF for the desired version, allow the 
operation (legacy behavior)
+         if (null == versionDefinitionXml) {
+           continue;
+         }
  
-       // backing VDF does not define the package version for any of the 
hosts, cannot install (allows a VDF with package-version)
-       for (Host host : hosts) {
-         if 
(StringUtils.isBlank(desiredVersionDefinition.getPackageVersion(host.getOsFamily())))
 {
+         if 
(StringUtils.isBlank(versionDefinitionXml.getPackageVersion(host.getOsFamily())))
 {
            String msg = String.format("Ambari cannot install version %s.  
Version %s is already installed.",
-             desiredRepoVersion, clusterRepoVersion.getVersion());
+             desiredRepoVersion, hostRepoVersion.getVersion());
            throw new IllegalArgumentException(msg);
          }
        }
      }
  
-     // if true, then we need to force all new host versions into the 
INSTALLED state
-     boolean forceInstalled = Boolean.parseBoolean((String)propertyMap.get(
-         CLUSTER_STACK_VERSION_FORCE));
- 
-     final RequestStatusResponse response;
 -    checkPatchVDFAvailableServices(cluster, repoVersionEntity, 
versionDefinitionXml);
  
-     try {
-       if (forceInstalled) {
-         createHostVersions(cluster, hosts, stackId, desiredRepoVersion, 
RepositoryVersionState.INSTALLED);
-         response = null;
-       } else {
-         createHostVersions(cluster, hosts, stackId, desiredRepoVersion,
-             RepositoryVersionState.INSTALLING);
+     // the cluster will create/update all of the host versions to the correct 
state
+     List<Host> hostsNeedingInstallCommands = 
cluster.transitionHostsToInstalling(
+         repoVersionEntity, versionDefinitionXml, forceInstalled);
  
-         RequestStageContainer installRequest = createOrchestration(cluster, 
stackId, hosts,
-             repoVersionEnt, propertyMap);
+     RequestStatusResponse response = null;
+     if (!forceInstalled) {
+       RequestStageContainer installRequest = createOrchestration(cluster, 
stackId,
+           hostsNeedingInstallCommands, repoVersionEntity, 
versionDefinitionXml, propertyMap);
  
-         response = installRequest.getRequestStatusResponse();
-       }
-     } catch (AmbariException e) {
-       throw new SystemException("Can not persist request", e);
+       response = installRequest.getRequestStatusResponse();
      }
  
      return getRequestStatus(response);
@@@ -572,8 -597,12 +606,15 @@@
        // determine services for the repo
        Set<String> serviceNames = new HashSet<>();
  
-       // !!! TODO for patch upgrades, we need to limit the serviceNames to 
those
-       // that are detailed for the repository
++
++      checkPatchVDFAvailableServices(cluster, repoVersionEnt, 
desiredVersionDefinition);
++
+       // !!! limit the serviceNames to those that are detailed for the 
repository.
+       // TODO packages don't have component granularity
+       if (RepositoryType.STANDARD != repoVersionEnt.getType()) {
+         ClusterVersionSummary clusterSummary = 
desiredVersionDefinition.getClusterSummary(cluster);
+         serviceNames.addAll(clusterSummary.getAvailableServiceNames());
+       }
  
        // Populate with commands for host
        for (int i = 0; i < maxTasks && hostIterator.hasNext(); i++) {
@@@ -605,21 -635,60 +647,58 @@@
      return req;
    }
  
+   /**
+    * Reject PATCH VDFs with Services that are not included in the Cluster
+    * @param cluster cluster instance
+    * @param repoVersionEnt repo version entity
+    * @param desiredVersionDefinition VDF
+    * @throws IllegalArgumentException thrown if the VDF includes services that are not installed
+    * @throws AmbariException thrown if the stack for repoVersionEnt could not be loaded
+    */
+   protected void checkPatchVDFAvailableServices(Cluster cluster, 
RepositoryVersionEntity repoVersionEnt,
+                                               VersionDefinitionXml 
desiredVersionDefinition) throws SystemException, AmbariException {
+     if (repoVersionEnt.getType() == RepositoryType.PATCH) {
+ 
+       Collection<String> notPresentServices = new ArrayList<>();
+       Collection<String> presentServices = new ArrayList<>();
+ 
+       presentServices.addAll(cluster.getServices().keySet());
+       final StackInfo stack;
+       stack = metaInfo.get().getStack(repoVersionEnt.getStackName(), 
repoVersionEnt.getStackVersion());
+ 
+       for (AvailableService availableService : 
desiredVersionDefinition.getAvailableServices(stack)) {
+         String name = availableService.getName();
+         if (!presentServices.contains(name)) {
+           notPresentServices.add(name);
+         }
+       }
+       if (!notPresentServices.isEmpty()) {
+         throw new IllegalArgumentException(String.format("%s VDF includes 
services that are not installed: %s",
+             RepositoryType.PATCH, StringUtils.join(notPresentServices, ",")));
+       }
+     }
+   }
+ 
 -  @Transactional
 -  ActionExecutionContext getHostVersionInstallCommand(RepositoryVersionEntity 
repoVersion,
 +  private ActionExecutionContext 
getHostVersionInstallCommand(RepositoryVersionEntity repoVersion,
        Cluster cluster, AmbariManagementController managementController, 
AmbariMetaInfo ami,
-       final StackId stackId, Set<String> repoServices, Map<String, 
List<RepositoryEntity>> perOsRepos, Stage stage1, Host host)
+       final StackId stackId, Set<String> repoServices, Stage stage1, Host 
host)
            throws SystemException {
+ 
 -
      // Determine repositories for host
      String osFamily = host.getOsFamily();
-     final List<RepositoryEntity> repoInfo = perOsRepos.get(osFamily);
-     if (repoInfo == null) {
-       throw new SystemException(String.format("Repositories for os type %s 
are " +
-                       "not defined. Repo version=%s, stackId=%s",
-         osFamily, repoVersion.getVersion(), stackId));
+ 
+     OperatingSystemEntity osEntity = null;
+     for (OperatingSystemEntity os : repoVersion.getOperatingSystems()) {
+       if (os.getOsType().equals(osFamily)) {
+         osEntity = os;
+         break;
+       }
      }
  
-     if (repoInfo.isEmpty()){
-       LOG.error(String.format("Repository list is empty. Ambari may not be 
managing the repositories for %s", osFamily));
+     if (null == osEntity || 
CollectionUtils.isEmpty(osEntity.getRepositories())) {
+       throw new SystemException(String.format("Repositories for os type %s 
are " +
 -          "not defined for version %s of Stack %s.",
++          "not defined. Repo version=%s, stackId=%s",
+             osFamily, repoVersion.getVersion(), stackId));
      }
  
      // determine packages for all services that are installed on host
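
checkPatchVDFAvailableServices(...) only acts on PATCH repositories: it collects the VDF's available services, compares them against the services installed in the cluster, and rejects the request if anything is left over. A hypothetical caller (the provider/cluster/repoVersionEnt/vdf handles are placeholders, and the enclosing method is assumed to declare the checked exceptions) would observe the failure as an IllegalArgumentException:

    // Hypothetical invocation; all handles are placeholders.
    try {
      provider.checkPatchVDFAvailableServices(cluster, repoVersionEnt, vdf);
    } catch (IllegalArgumentException e) {
      // e.g. "PATCH VDF includes services that are not installed: STORM"
    }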

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
----------------------------------------------------------------------
diff --cc 
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
index 5a56919,2e86b2d..447d7e8
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
@@@ -65,7 -63,7 +64,8 @@@ import org.apache.ambari.server.topolog
  import org.apache.commons.lang.StringUtils;
  import org.apache.commons.lang.Validate;
  
 +import com.google.inject.Inject;
+ import com.google.common.collect.Sets;
  import com.google.inject.assistedinject.Assisted;
  import com.google.inject.assistedinject.AssistedInject;
  import com.google.inject.persist.Transactional;
@@@ -97,11 -100,49 +100,48 @@@ public class ComponentResourceProvider 
    //Parameters from the predicate
    private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID = 
"params/run_smoke_test";
  
-   private static Set<String> pkPropertyIds =
-       new HashSet<>(Arrays.asList(new String[]{
+   private static Set<String> pkPropertyIds = Sets.newHashSet(
            COMPONENT_CLUSTER_NAME_PROPERTY_ID,
            COMPONENT_SERVICE_NAME_PROPERTY_ID,
-           COMPONENT_COMPONENT_NAME_PROPERTY_ID}));
+           COMPONENT_COMPONENT_NAME_PROPERTY_ID);
+ 
+   /**
+    * The property ids for a service resource.
+    */
+   private static final Set<String> PROPERTY_IDS = new HashSet<>();
+ 
+   /**
+    * The key property ids for a service resource.
+    */
+   private static final Map<Resource.Type, String> KEY_PROPERTY_IDS = new 
HashMap<>();
+ 
+   static {
+     // properties
+     PROPERTY_IDS.add(COMPONENT_CLUSTER_NAME_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_SERVICE_NAME_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_COMPONENT_NAME_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_DISPLAY_NAME_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_STATE_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_CATEGORY_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_TOTAL_COUNT_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_STARTED_COUNT_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_INSTALLED_COUNT_PROPERTY_ID);
 -    
PROPERTY_IDS.add(COMPONENT_INSTALLED_AND_MAINTENANCE_OFF_COUNT_PROPERTY_ID);
+ 
+     PROPERTY_IDS.add(COMPONENT_INIT_COUNT_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_UNKNOWN_COUNT_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_INSTALL_FAILED_COUNT_PROPERTY_ID);
+     PROPERTY_IDS.add(COMPONENT_RECOVERY_ENABLED_ID);
+     PROPERTY_IDS.add(COMPONENT_DESIRED_STACK);
+     PROPERTY_IDS.add(COMPONENT_DESIRED_VERSION);
+     PROPERTY_IDS.add(COMPONENT_REPOSITORY_STATE);
+ 
+     PROPERTY_IDS.add(QUERY_PARAMETERS_RUN_SMOKE_TEST_ID);
+ 
+     // keys
+     KEY_PROPERTY_IDS.put(Resource.Type.Component, 
COMPONENT_COMPONENT_NAME_PROPERTY_ID);
+     KEY_PROPERTY_IDS.put(Resource.Type.Service, 
COMPONENT_SERVICE_NAME_PROPERTY_ID);
+     KEY_PROPERTY_IDS.put(Resource.Type.Cluster, 
COMPONENT_CLUSTER_NAME_PROPERTY_ID);
+   }
  
    private MaintenanceStateHelper maintenanceStateHelper;
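
The static-initializer block above is one way to populate the fixed property-id collections; given the class already imports Guava's collection helpers, immutable collections would be a compact alternative. A sketch of that variant, not the patch's choice (only a few ids shown):

    // Alternative construction with Guava (illustrative; ids abbreviated).
    private static final Set<String> PROPERTY_IDS = ImmutableSet.of(
        COMPONENT_CLUSTER_NAME_PROPERTY_ID,
        COMPONENT_SERVICE_NAME_PROPERTY_ID,
        COMPONENT_COMPONENT_NAME_PROPERTY_ID);

    private static final Map<Resource.Type, String> KEY_PROPERTY_IDS = ImmutableMap.of(
        Resource.Type.Component, COMPONENT_COMPONENT_NAME_PROPERTY_ID,
        Resource.Type.Service, COMPONENT_SERVICE_NAME_PROPERTY_ID,
        Resource.Type.Cluster, COMPONENT_CLUSTER_NAME_PROPERTY_ID);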
  

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java
----------------------------------------------------------------------
