http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java
index 3c5314b..40fa2d3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java
@@ -137,7 +137,7 @@ public class BlueprintFactory {
      throw new IllegalArgumentException("At least one host group must be specified in a blueprint");
     }
 
-    Collection<HostGroup> hostGroups = new ArrayList<HostGroup>();
+    Collection<HostGroup> hostGroups = new ArrayList<>();
     for (HashMap<String, Object> hostGroupProperties : hostGroupProps) {
      String hostGroupName = (String) hostGroupProperties.get(HOST_GROUP_NAME_PROPERTY_ID);
       if (hostGroupName == null || hostGroupName.isEmpty()) {
@@ -167,7 +167,7 @@ public class BlueprintFactory {
     }
 
     Collection<String> stackComponentNames = getAllStackComponents(stack);
-    Collection<Component> components = new ArrayList<Component>();
+    Collection<Component> components = new ArrayList<>();
 
     for (HashMap<String, String> componentProperties : componentProps) {
      String componentName = componentProperties.get(COMPONENT_NAME_PROPERTY_ID);
@@ -200,7 +200,7 @@ public class BlueprintFactory {
    * @throws IllegalArgumentException if the specified stack doesn't exist
    */
   private Collection<String> getAllStackComponents(Stack stack) {
-    Collection<String> allComponents = new HashSet<String>();
+    Collection<String> allComponents = new HashSet<>();
     for (Collection<String> components: stack.getComponents().values()) {
       allComponents.addAll(components);
     }

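Every hunk in this commit makes the same mechanical cleanup: explicit generic type
arguments on constructor invocations are replaced by the Java 7 diamond operator
(<>), letting the compiler infer the arguments from the context. A minimal
before/after sketch (hypothetical variable names, not taken from the patch):

    // pre-Java 7: the type arguments are spelled out on both sides
    Map<String, List<String>> before = new HashMap<String, List<String>>();
    // Java 7 and later: the diamond infers <String, List<String>> from the target
    Map<String, List<String>> after = new HashMap<>();

The change is purely syntactic; after type erasure both forms produce identical
bytecode.
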
http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
index faf0c77..c90e35c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
@@ -49,7 +49,7 @@ import com.google.gson.Gson;
 public class BlueprintImpl implements Blueprint {
 
   private String name;
-  private Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+  private Map<String, HostGroup> hostGroups = new HashMap<>();
   private Stack stack;
   private Configuration configuration;
   private BlueprintValidator validator;
@@ -142,7 +142,7 @@ public class BlueprintImpl implements Blueprint {
    */
   @Override
   public Collection<String> getServices() {
-    Collection<String> services = new HashSet<String>();
+    Collection<String> services = new HashSet<>();
     for (HostGroup group : getHostGroups().values()) {
       services.addAll(group.getServices());
     }
@@ -151,7 +151,7 @@ public class BlueprintImpl implements Blueprint {
 
   @Override
   public Collection<String> getComponents(String service) {
-    Collection<String> components = new HashSet<String>();
+    Collection<String> components = new HashSet<>();
     for (HostGroup group : getHostGroupsForService(service)) {
       components.addAll(group.getComponents(service));
     }
@@ -286,7 +286,7 @@ public class BlueprintImpl implements Blueprint {
    */
   @Override
   public Collection<HostGroup> getHostGroupsForComponent(String component) {
-    Collection<HostGroup> resultGroups = new HashSet<HostGroup>();
+    Collection<HostGroup> resultGroups = new HashSet<>();
     for (HostGroup group : hostGroups.values() ) {
       if (group.getComponentNames().contains(component)) {
         resultGroups.add(group);
@@ -304,7 +304,7 @@ public class BlueprintImpl implements Blueprint {
    */
   @Override
   public Collection<HostGroup> getHostGroupsForService(String service) {
-    Collection<HostGroup> resultGroups = new HashSet<HostGroup>();
+    Collection<HostGroup> resultGroups = new HashSet<>();
     for (HostGroup group : hostGroups.values() ) {
       if (group.getServices().contains(service)) {
         resultGroups.add(group);
@@ -404,7 +404,7 @@ public class BlueprintImpl implements Blueprint {
    */
  private Map<String, Map<String, String>> parseConfigurations(Collection<BlueprintConfigEntity> configs) {
 
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> properties = new HashMap<>();
     Gson gson = new Gson();
     for (BlueprintConfiguration config : configs) {
       String type = config.getType();
@@ -422,7 +422,7 @@ public class BlueprintImpl implements Blueprint {
    */
  private Map<String, Set<HashMap<String, String>>> parseSetting(Collection<BlueprintSettingEntity> blueprintSetting) {
 
-    Map<String, Set<HashMap<String, String>>> properties = new HashMap<String, Set<HashMap<String, String>>>();
+    Map<String, Set<HashMap<String, String>>> properties = new HashMap<>();
     Gson gson = new Gson();
     for (BlueprintSettingEntity setting : blueprintSetting) {
       String settingName = setting.getSettingName();
@@ -441,7 +441,7 @@ public class BlueprintImpl implements Blueprint {
   //todo: do inline with config processing
  private Map<String, Map<String, Map<String, String>>> parseAttributes(Collection<BlueprintConfigEntity> configs) {
     Map<String, Map<String, Map<String, String>>> mapAttributes =
-        new HashMap<String, Map<String, Map<String, String>>>();
+      new HashMap<>();
 
     if (configs != null) {
       Gson gson = new Gson();
@@ -461,7 +461,7 @@ public class BlueprintImpl implements Blueprint {
    */
   @SuppressWarnings("unchecked")
   private void createHostGroupEntities(BlueprintEntity blueprintEntity) {
-    Collection<HostGroupEntity> entities = new ArrayList<HostGroupEntity>();
+    Collection<HostGroupEntity> entities = new ArrayList<>();
     for (HostGroup group : getHostGroups().values()) {
       HostGroupEntity hostGroupEntity = new HostGroupEntity();
       entities.add(hostGroupEntity);
@@ -483,7 +483,7 @@ public class BlueprintImpl implements Blueprint {
    */
  private void createHostGroupConfigEntities(HostGroupEntity hostGroup, Configuration groupConfiguration) {
     Gson jsonSerializer = new Gson();
-    Map<String, HostGroupConfigEntity> configEntityMap = new HashMap<String, HostGroupConfigEntity>();
+    Map<String, HostGroupConfigEntity> configEntityMap = new HashMap<>();
    for (Map.Entry<String, Map<String, String>> propEntry : groupConfiguration.getProperties().entrySet()) {
       String type = propEntry.getKey();
       Map<String, String> properties = propEntry.getValue();
@@ -520,7 +520,7 @@ public class BlueprintImpl implements Blueprint {
     */
   @SuppressWarnings("unchecked")
  private void createComponentEntities(HostGroupEntity group, Collection<Component> components) {
-    Collection<HostGroupComponentEntity> componentEntities = new HashSet<HostGroupComponentEntity>();
+    Collection<HostGroupComponentEntity> componentEntities = new HashSet<>();
     group.setComponents(componentEntities);
 
     for (Component component : components) {
@@ -548,7 +548,7 @@ public class BlueprintImpl implements Blueprint {
   private void createBlueprintConfigEntities(BlueprintEntity blueprintEntity) {
     Gson jsonSerializer = new Gson();
     Configuration config = getConfiguration();
-    Map<String, BlueprintConfigEntity> configEntityMap = new HashMap<String, BlueprintConfigEntity>();
+    Map<String, BlueprintConfigEntity> configEntityMap = new HashMap<>();
    for (Map.Entry<String, Map<String, String>> propEntry : config.getProperties().entrySet()) {
       String type = propEntry.getKey();
       Map<String, String> properties = propEntry.getValue();

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
index c5647c3..f994457 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
@@ -53,7 +53,7 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
   public void validateTopology() throws InvalidTopologyException {
    LOGGER.info("Validating topology for blueprint: [{}]", blueprint.getName());
     Collection<HostGroup> hostGroups = blueprint.getHostGroups().values();
-    Map<String, Map<String, Collection<DependencyInfo>>> missingDependencies = new HashMap<String, Map<String, Collection<DependencyInfo>>>();
+    Map<String, Map<String, Collection<DependencyInfo>>> missingDependencies = new HashMap<>();
 
     for (HostGroup group : hostGroups) {
      Map<String, Collection<DependencyInfo>> missingGroupDependencies = validateHostGroup(group);
@@ -62,7 +62,7 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
       }
     }
 
-    Collection<String> cardinalityFailures = new HashSet<String>();
+    Collection<String> cardinalityFailures = new HashSet<>();
     Collection<String> services = blueprint.getServices();
 
     for (String service : services) {
@@ -87,7 +87,7 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
   public void validateRequiredProperties() throws InvalidTopologyException {
     //todo: combine with RequiredPasswordValidator
     Map<String, Map<String, Collection<String>>> missingProperties =
-        new HashMap<String, Map<String, Collection<String>>>();
+      new HashMap<>();
 
    // we don't want to include default stack properties so we can't just use hostGroup full properties
    Map<String, Map<String, String>> clusterConfigurations = blueprint.getConfiguration().getProperties();
@@ -119,9 +119,9 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
 
 
     for (HostGroup hostGroup : blueprint.getHostGroups().values()) {
-      Collection<String> processedServices = new HashSet<String>();
-      Map<String, Collection<String>> allRequiredProperties = new HashMap<String, Collection<String>>();
-      Map<String, Map<String, String>> operationalConfiguration = new HashMap<String, Map<String, String>>(clusterConfigurations);
+      Collection<String> processedServices = new HashSet<>();
+      Map<String, Collection<String>> allRequiredProperties = new HashMap<>();
+      Map<String, Map<String, String>> operationalConfiguration = new HashMap<>(clusterConfigurations);
 
      operationalConfiguration.putAll(hostGroup.getConfiguration().getProperties());
       for (String component : hostGroup.getComponentNames()) {
@@ -137,8 +137,8 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
        if (ClusterTopologyImpl.isNameNodeHAEnabled(clusterConfigurations) && component.equals("NAMENODE")) {
            Map<String, String> hadoopEnvConfig = clusterConfigurations.get("hadoop-env");
            if(hadoopEnvConfig != null && !hadoopEnvConfig.isEmpty() && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_active") && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_standby")) {
-              ArrayList<HostGroup> hostGroupsForComponent = new ArrayList<HostGroup>( blueprint.getHostGroupsForComponent(component));
-              Set<String> givenHostGroups = new HashSet<String>();
+              ArrayList<HostGroup> hostGroupsForComponent = new ArrayList<>(blueprint.getHostGroupsForComponent(component));
+              Set<String> givenHostGroups = new HashSet<>();
              givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_active"));
              givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby"));
               if(givenHostGroups.size() != hostGroupsForComponent.size()) {
@@ -195,7 +195,7 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
              if (! stack.isPasswordProperty(serviceName, configCategory, propertyName)) {
                Collection<String> typeRequirements = allRequiredProperties.get(configCategory);
                 if (typeRequirements == null) {
-                  typeRequirements = new HashSet<String>();
+                  typeRequirements = new HashSet<>();
                   allRequiredProperties.put(configCategory, typeRequirements);
                 }
                 typeRequirements.add(propertyName);
@@ -216,7 +216,7 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
           String hostGroupName = hostGroup.getName();
          Map<String, Collection<String>> hostGroupMissingProps = missingProperties.get(hostGroupName);
           if (hostGroupMissingProps == null) {
-            hostGroupMissingProps = new HashMap<String, Collection<String>>();
+            hostGroupMissingProps = new HashMap<>();
             missingProperties.put(hostGroupName, hostGroupMissingProps);
           }
           hostGroupMissingProps.put(requiredCategory, requiredProperties);
@@ -241,7 +241,7 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
    */
  private Collection<String> verifyComponentInAllHostGroups(String component, AutoDeployInfo autoDeploy) {
 
-    Collection<String> cardinalityFailures = new HashSet<String>();
+    Collection<String> cardinalityFailures = new HashSet<>();
     int actualCount = blueprint.getHostGroupsForComponent(component).size();
     Map<String, HostGroup> hostGroups = blueprint.getHostGroups();
     if (actualCount != hostGroups.size()) {
@@ -258,9 +258,9 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
 
  private Map<String, Collection<DependencyInfo>> validateHostGroup(HostGroup group) {
     LOGGER.info("Validating hostgroup: {}", group.getName());
-    Map<String, Collection<DependencyInfo>> missingDependencies = new HashMap<String, Collection<DependencyInfo>>();
+    Map<String, Collection<DependencyInfo>> missingDependencies = new HashMap<>();
 
-    for (String component : new HashSet<String>(group.getComponentNames())) {
+    for (String component : new HashSet<>(group.getComponentNames())) {
       LOGGER.debug("Processing component: {}", component);
 
      for (DependencyInfo dependency : stack.getDependenciesForComponent(component)) {
@@ -314,7 +314,7 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
         if (! resolved) {
          Collection<DependencyInfo> missingCompDependencies = missingDependencies.get(component);
           if (missingCompDependencies == null) {
-            missingCompDependencies = new HashSet<DependencyInfo>();
+            missingCompDependencies = new HashSet<>();
             missingDependencies.put(component, missingCompDependencies);
           }
           missingCompDependencies.add(dependency);
@@ -339,7 +339,7 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
                                                            AutoDeployInfo autoDeploy) {
 
    Map<String, Map<String, String>> configProperties = blueprint.getConfiguration().getProperties();
-    Collection<String> cardinalityFailures = new HashSet<String>();
+    Collection<String> cardinalityFailures = new HashSet<>();
     //todo: don't hard code this HA logic here
     if (ClusterTopologyImpl.isNameNodeHAEnabled(configProperties) &&
         (component.equals("SECONDARY_NAMENODE"))) {

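The diamond also cooperates with copy constructors, as in the
new HashSet<>(group.getComponentNames()) loop in validateHostGroup above: with no
assignment target, the type argument is inferred from the constructor argument
instead. A small self-contained sketch (the component names are invented):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.HashSet;

    class DiamondCopyExample {
      public static void main(String[] args) {
        // names is a Collection<String>, so new HashSet<>(names) is a HashSet<String>
        Collection<String> names = Arrays.asList("NAMENODE", "DATANODE", "NAMENODE");
        for (String component : new HashSet<>(names)) { // the copy drops duplicates
          System.out.println(component);
        }
      }
    }
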
http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
index e29417b..5913f4b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
@@ -125,7 +125,7 @@ public class ClusterConfigurationRequest {
 
   // get names of required host groups
   public Collection<String> getRequiredHostGroups() {
-    Collection<String> requiredHostGroups = new HashSet<String>();
+    Collection<String> requiredHostGroups = new HashSet<>();
     requiredHostGroups.addAll(configurationProcessor.getRequiredHostGroups());
    if (configureSecurity) {
      requiredHostGroups.addAll(getRequiredHostgroupsForKerberosConfiguration());
@@ -191,7 +191,7 @@ public class ClusterConfigurationRequest {
       // generate principals & keytabs for headless identities
       AmbariContext.getController().getKerberosHelper()
         .ensureHeadlessIdentities(cluster, existingConfigurations,
-          new HashSet<String>(blueprint.getServices()));
+          new HashSet<>(blueprint.getServices()));
 
       // apply Kerberos specific configurations
      Map<String, Map<String, String>> updatedConfigs = AmbariContext.getController().getKerberosHelper()
@@ -204,7 +204,7 @@ public class ClusterConfigurationRequest {
       Map<String, String> clusterEnv = updatedConfigs.get("cluster-env");
 
       if(clusterEnv == null) {
-        clusterEnv = new HashMap<String,String>();
+        clusterEnv = new HashMap<>();
         updatedConfigs.put("cluster-env", clusterEnv);
       }
 
@@ -247,7 +247,7 @@ public class ClusterConfigurationRequest {
    * @return a map of service names to component names
    */
  private Map<String, Set<String>> createServiceComponentMap(Blueprint blueprint) {
-    Map<String, Set<String>> serviceComponents = new HashMap<String, Set<String>>();
+    Map<String, Set<String>> serviceComponents = new HashMap<>();
     Collection<String> services = blueprint.getServices();
 
     if(services != null) {
@@ -256,7 +256,7 @@ public class ClusterConfigurationRequest {
         serviceComponents.put(service,
             (components == null)
                 ? Collections.<String>emptySet()
-                : new HashSet<String>(blueprint.getComponents(service)));
+                : new HashSet<>(blueprint.getComponents(service)));
       }
     }
 
@@ -294,7 +294,7 @@ public class ClusterConfigurationRequest {
   }
 
   private Map<String, String> createComponentHostMap(Blueprint blueprint) {
-    Map<String, String> componentHostsMap = new HashMap<String, String>();
+    Map<String, String> componentHostsMap = new HashMap<>();
     for (String service : blueprint.getServices()) {
       Collection<String> components = blueprint.getComponents(service);
       for (String component : components) {
@@ -311,7 +311,7 @@ public class ClusterConfigurationRequest {
   }
 
   private Collection<String> getRequiredHostgroupsForKerberosConfiguration() {
-    Collection<String> requiredHostGroups = new HashSet<String>();
+    Collection<String> requiredHostGroups = new HashSet<>();
 
     try {
       Cluster cluster = getCluster();
@@ -367,7 +367,7 @@ public class ClusterConfigurationRequest {
    */
  public void setConfigurationsOnCluster(ClusterTopology clusterTopology, String tag, Set<String> updatedConfigTypes)  {
    //todo: also handle setting of host group scoped configuration which is updated by config processor
-    List<BlueprintServiceConfigRequest> configurationRequests = new LinkedList<BlueprintServiceConfigRequest>();
+    List<BlueprintServiceConfigRequest> configurationRequests = new LinkedList<>();
 
     Blueprint blueprint = clusterTopology.getBlueprint();
     Configuration clusterConfiguration = clusterTopology.getConfiguration();
@@ -429,9 +429,9 @@ public class ClusterConfigurationRequest {
    for (BlueprintServiceConfigRequest blueprintConfigRequest : configurationRequests) {
       ClusterRequest clusterRequest = null;
       // iterate over the config types associated with this service
-      List<ConfigurationRequest> requestsPerService = new LinkedList<ConfigurationRequest>();
+      List<ConfigurationRequest> requestsPerService = new LinkedList<>();
      for (BlueprintServiceConfigElement blueprintElement : blueprintConfigRequest.getConfigElements()) {
-        Map<String, Object> clusterProperties = new HashMap<String, Object>();
+        Map<String, Object> clusterProperties = new HashMap<>();
        clusterProperties.put(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID, clusterName);
        clusterProperties.put(ClusterResourceProvider.CLUSTER_DESIRED_CONFIGS_PROPERTY_ID + "/type", blueprintElement.getTypeName());
        clusterProperties.put(ClusterResourceProvider.CLUSTER_DESIRED_CONFIGS_PROPERTY_ID + "/tag", tag);
@@ -512,7 +512,7 @@ public class ClusterConfigurationRequest {
     private final String serviceName;
 
     private List<BlueprintServiceConfigElement> configElements =
-        new LinkedList<BlueprintServiceConfigElement>();
+      new LinkedList<>();
 
     BlueprintServiceConfigRequest(String serviceName) {
       this.serviceName = serviceName;

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
index d0a4df9..37fb7d4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
@@ -51,8 +51,8 @@ public class ClusterTopologyImpl implements ClusterTopology {
   private Configuration configuration;
   private ConfigRecommendationStrategy configRecommendationStrategy;
   private ProvisionAction provisionAction = ProvisionAction.INSTALL_AND_START;
-  private Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<String, AdvisedConfiguration>();
-  private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<String, HostGroupInfo>();
+  private Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<>();
+  private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
   private final AmbariContext ambariContext;
 
  private final static Logger LOG = LoggerFactory.getLogger(ClusterTopologyImpl.class);
@@ -121,7 +121,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
   //todo: do we want to return groups with no requested hosts?
   @Override
   public Collection<String> getHostGroupsForComponent(String component) {
-    Collection<String> resultGroups = new ArrayList<String>();
+    Collection<String> resultGroups = new ArrayList<>();
     for (HostGroup group : getBlueprint().getHostGroups().values() ) {
       if (group.getComponentNames().contains(component)) {
         resultGroups.add(group.getName());
@@ -173,7 +173,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
   @Override
   public Collection<String> getHostAssignmentsForComponent(String component) {
     //todo: ordering requirements?
-    Collection<String> hosts = new ArrayList<String>();
+    Collection<String> hosts = new ArrayList<>();
     Collection<String> hostGroups = getHostGroupsForComponent(component);
     for (String group : hostGroups) {
       HostGroupInfo hostGroupInfo = getHostGroupInfo().get(group);
@@ -382,12 +382,12 @@ public class ClusterTopologyImpl implements ClusterTopology {
 
 
  private void checkForDuplicateHosts(Map<String, HostGroupInfo> groupInfoMap) throws InvalidTopologyException {
-    Set<String> hosts = new HashSet<String>();
-    Set<String> duplicates = new HashSet<String>();
+    Set<String> hosts = new HashSet<>();
+    Set<String> duplicates = new HashSet<>();
     for (HostGroupInfo group : groupInfoMap.values()) {
       // check for duplicates within the new groups
       Collection<String> groupHosts = group.getHostNames();
-      Collection<String> groupHostsCopy = new HashSet<String>(group.getHostNames());
+      Collection<String> groupHostsCopy = new HashSet<>(group.getHostNames());
       groupHostsCopy.retainAll(hosts);
       duplicates.addAll(groupHostsCopy);
       hosts.addAll(groupHosts);

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
index 79281b4..6d1ea86 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
@@ -115,20 +115,20 @@ public class Configuration {
    */
   public Map<String, Map<String, String>> getFullProperties(int depthLimit) {
     if (depthLimit == 0) {
-      HashMap<String, Map<String, String>> propertiesCopy = new HashMap<String, Map<String, String>>();
+      HashMap<String, Map<String, String>> propertiesCopy = new HashMap<>();
      for (Map.Entry<String, Map<String, String>> typeProperties : properties.entrySet()) {
-        propertiesCopy.put(typeProperties.getKey(), new HashMap<String, String>(typeProperties.getValue()));
+        propertiesCopy.put(typeProperties.getKey(), new HashMap<>(typeProperties.getValue()));
       }
       return propertiesCopy;
     }
 
    Map<String, Map<String, String>> mergedProperties = parentConfiguration == null ?
        new HashMap<String, Map<String, String>>() :
-        new HashMap<String, Map<String, String>>(parentConfiguration.getFullProperties(--depthLimit));
+      new HashMap<>(parentConfiguration.getFullProperties(--depthLimit));
 
    for (Map.Entry<String, Map<String, String>> entry : properties.entrySet()) {
       String configType = entry.getKey();
-      Map<String, String> typeProps = new HashMap<String, String>(entry.getValue());
+      Map<String, String> typeProps = new HashMap<>(entry.getValue());
 
       if (mergedProperties.containsKey(configType)) {
         mergedProperties.get(configType).putAll(typeProps);
@@ -159,13 +159,13 @@ public class Configuration {
   public Map<String, Map<String, Map<String, String>>> getFullAttributes() {
    Map<String, Map<String, Map<String, String>>> mergedAttributeMap = parentConfiguration == null ?
        new HashMap<String, Map<String, Map<String, String>>>() :
-        new HashMap<String, Map<String, Map<String, String>>>(parentConfiguration.getFullAttributes());
+      new HashMap<>(parentConfiguration.getFullAttributes());
 
    for (Map.Entry<String, Map<String, Map<String, String>>> typeEntry : attributes.entrySet()) {
       String type = typeEntry.getKey();
-      Map<String, Map<String, String>> typeAttributes = new HashMap<String, Map<String, String>>();
+      Map<String, Map<String, String>> typeAttributes = new HashMap<>();
      for (Map.Entry<String, Map<String, String>> attributeEntry : typeEntry.getValue().entrySet()) {
-        typeAttributes.put(attributeEntry.getKey(), new HashMap<String, String>(attributeEntry.getValue()));
+        typeAttributes.put(attributeEntry.getKey(), new HashMap<>(attributeEntry.getValue()));
       }
 
       if (! mergedAttributeMap.containsKey(type)) {
@@ -245,7 +245,7 @@ public class Configuration {
     String previousValue = getPropertyValue(configType, propertyName);
     Map<String, String> typeProperties = properties.get(configType);
     if (typeProperties == null) {
-      typeProperties = new HashMap<String, String>();
+      typeProperties = new HashMap<>();
       properties.put(configType, typeProperties);
     }
     typeProperties.put(propertyName, value);
@@ -295,13 +295,13 @@ public class Configuration {
 
    Map<String, Map<String, String>> typeAttributes = attributes.get(configType);
     if (typeAttributes == null) {
-      typeAttributes = new HashMap<String, Map<String, String>>();
+      typeAttributes = new HashMap<>();
       attributes.put(configType, typeAttributes);
     }
 
     Map<String, String> attributes = typeAttributes.get(attributeName);
     if (attributes == null) {
-      attributes = new HashMap<String, String>();
+      attributes = new HashMap<>();
       typeAttributes.put(attributeName, attributes);
     }
 
@@ -315,7 +315,7 @@ public class Configuration {
    * @return collection of all represented configuration types
    */
   public Collection<String> getAllConfigTypes() {
-    Collection<String> allTypes = new HashSet<String>();
+    Collection<String> allTypes = new HashSet<>();
     for (String type : getFullProperties().keySet()) {
       allTypes.add(type);
     }

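Note that in getFullProperties and getFullAttributes above, the null branch of
each ternary keeps its explicit type arguments (e.g. new HashMap<String,
Map<String, String>>()). That is likely deliberate: until Java 8 made conditional
expressions poly expressions, a diamond operand inside a ternary had no target
type to infer from, so only the branch whose constructor argument supplies the
types can use <>. A sketch of the distinction (parent and getAll are illustrative,
not from the patch):

    Map<String, String> merged = (parent == null)
        ? new HashMap<String, String>()   // no argument: a diamond here fails to infer under Java 7
        : new HashMap<>(parent.getAll()); // the argument supplies <String, String>
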
http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigurationFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigurationFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigurationFactory.java
index f6990af..90c7872 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigurationFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ConfigurationFactory.java
@@ -34,8 +34,8 @@ public class ConfigurationFactory {
       "Provided configuration format is not supported";
 
  public Configuration getConfiguration(Collection<Map<String, String>> configProperties) {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<String, Map<String, Map<String, String>>>();
+    Map<String, Map<String, String>> properties = new HashMap<>();
+    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<>();
     Configuration configuration = new Configuration(properties, attributes);
 
     if (configProperties != null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java
index 715479e..c276532 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java
@@ -52,12 +52,12 @@ public class HostGroupImpl implements HostGroup {
   /**
    * components contained in the host group
    */
-  private Map<String, Component> components = new HashMap<String, Component>();
+  private Map<String, Component> components = new HashMap<>();
 
   /**
    * map of service to components for the host group
    */
-  private Map<String, Set<String>> componentsForService = new HashMap<String, Set<String>>();
+  private Map<String, Set<String>> componentsForService = new HashMap<>();
 
   /**
    * configuration
@@ -125,7 +125,7 @@ public class HostGroupImpl implements HostGroup {
 
   @Override
  public Collection<String> getComponentNames(ProvisionAction provisionAction) {
-    Set<String> setOfComponentNames = new HashSet<String>();
+    Set<String> setOfComponentNames = new HashSet<>();
     for (String componentName : components.keySet()) {
       Component component = components.get(componentName);
      if ( (component.getProvisionAction() != null) && (component.getProvisionAction() == provisionAction) ) {
@@ -186,7 +186,7 @@ public class HostGroupImpl implements HostGroup {
        // an example of a component without a service in the stack is AMBARI_SERVER
         Set<String> serviceComponents = componentsForService.get(service);
         if (serviceComponents == null) {
-          serviceComponents = new HashSet<String>();
+          serviceComponents = new HashSet<>();
           componentsForService.put(service, serviceComponents);
         }
         serviceComponents.add(component);
@@ -205,7 +205,7 @@ public class HostGroupImpl implements HostGroup {
   @Override
   public Collection<String> getComponents(String service) {
     return componentsForService.containsKey(service) ?
-        new HashSet<String>(componentsForService.get(service)) :
+      new HashSet<>(componentsForService.get(service)) :
         Collections.<String>emptySet();
   }
 
@@ -265,13 +265,13 @@ public class HostGroupImpl implements HostGroup {
    */
   //todo: use ConfigurationFactory
   private void parseConfigurations(HostGroupEntity entity) {
-    Map<String, Map<String, String>> config = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> config = new HashMap<>();
     Gson jsonSerializer = new Gson();
     for (HostGroupConfigEntity configEntity : entity.getConfigurations()) {
       String type = configEntity.getType();
       Map<String, String> typeProperties = config.get(type);
       if ( typeProperties == null) {
-        typeProperties = new HashMap<String, String>();
+        typeProperties = new HashMap<>();
         config.put(type, typeProperties);
       }
      Map<String, String> propertyMap =  jsonSerializer.<Map<String, String>>fromJson(
@@ -282,7 +282,7 @@ public class HostGroupImpl implements HostGroup {
       }
     }
     //todo: parse attributes
-    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<String, Map<String, Map<String, String>>>();
+    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<>();
     configuration = new Configuration(config, attributes);
   }
   public String toString(){

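One generic construct in HostGroupImpl is deliberately left alone: the explicit
type witness on a method call, jsonSerializer.<Map<String, String>>fromJson(...).
The diamond applies only to constructor invocations (new), never to generic
method invocations, so witnesses like Gson's must stay. The difference in a
sketch (the JSON literal is invented):

    Gson gson = new Gson();
    // constructor invocation: the diamond infers the type arguments
    Map<String, String> defaults = new HashMap<>();
    // generic method invocation: no diamond; the explicit witness fixes T
    // (the call resolves to Gson's fromJson(String, Type) overload)
    Map<String, String> parsed = gson.<Map<String, String>>fromJson("{\"a\":\"b\"}", Map.class);
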
http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java
index c6704a0..5a9058b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java
@@ -48,7 +48,7 @@ public class HostGroupInfo {
   /**
    * hosts contained associated with the host group
    */
-  private final Collection<String> hostNames = new HashSet<String>();
+  private final Collection<String> hostNames = new HashSet<>();
 
   /**
    * maps host names to rack information
@@ -106,7 +106,7 @@ public class HostGroupInfo {
     // needs to be an exclusive lock, not a read lock because collection
     // shouldn't change while copying elements into the new set instance
     synchronized (hostNames) {
-      return new HashSet<String>(hostNames);
+      return new HashSet<>(hostNames);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
index 9152fd2..168d13b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
@@ -70,14 +70,14 @@ public class HostRequest implements Comparable<HostRequest> {
   private boolean isOutstanding = true;
   private final boolean skipFailure;
 
-  private Map<TopologyTask, Map<String, Long>> logicalTaskMap = new HashMap<TopologyTask, Map<String, Long>>();
+  private Map<TopologyTask, Map<String, Long>> logicalTaskMap = new HashMap<>();
 
-  Map<Long, HostRoleCommand> logicalTasks = new HashMap<Long, HostRoleCommand>();
+  Map<Long, HostRoleCommand> logicalTasks = new HashMap<>();
 
   // logical task id -> physical tasks
-  private Map<Long, Long> physicalTasks = new HashMap<Long, Long>();
+  private Map<Long, Long> physicalTasks = new HashMap<>();
 
-  private List<TopologyTask> topologyTasks = new ArrayList<TopologyTask>();
+  private List<TopologyTask> topologyTasks = new ArrayList<>();
 
   private ClusterTopology topology;
 
@@ -346,7 +346,7 @@ public class HostRequest implements Comparable<HostRequest> {
   }
 
  public Map<String, Long> getLogicalTasksForTopologyTask(TopologyTask topologyTask) {
-    return new HashMap<String, Long>(logicalTaskMap.get(topologyTask));
+    return new HashMap<>(logicalTaskMap.get(topologyTask));
   }
 
   public HostRoleCommand getLogicalTask(long logicalTaskId) {
@@ -354,7 +354,7 @@ public class HostRequest implements Comparable<HostRequest> {
   }
 
   public Collection<HostRoleCommandEntity> getTaskEntities() {
-    Collection<HostRoleCommandEntity> taskEntities = new ArrayList<HostRoleCommandEntity>();
+    Collection<HostRoleCommandEntity> taskEntities = new ArrayList<>();
     for (HostRoleCommand task : logicalTasks.values()) {
       HostRoleCommandEntity entity = task.constructNewPersistenceEntity();
       // the above method doesn't set all of the fields for some unknown reason

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
index de4211f..a271c0b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
@@ -57,10 +57,10 @@ import com.google.common.collect.Iterables;
  */
 public class LogicalRequest extends Request {
 
-  private final Collection<HostRequest> allHostRequests = new ArrayList<HostRequest>();
+  private final Collection<HostRequest> allHostRequests = new ArrayList<>();
   // sorted set with master host requests given priority
-  private final Collection<HostRequest> outstandingHostRequests = new TreeSet<HostRequest>();
-  private final Map<String, HostRequest> requestsWithReservedHosts = new HashMap<String, HostRequest>();
+  private final Collection<HostRequest> outstandingHostRequests = new TreeSet<>();
+  private final Map<String, HostRequest> requestsWithReservedHosts = new HashMap<>();
 
   private final ClusterTopology topology;
 
@@ -153,9 +153,9 @@ public class LogicalRequest extends Request {
 
   @Override
   public List<HostRoleCommand> getCommands() {
-    List<HostRoleCommand> commands = new ArrayList<HostRoleCommand>();
+    List<HostRoleCommand> commands = new ArrayList<>();
     for (HostRequest hostRequest : allHostRequests) {
-      commands.addAll(new ArrayList<HostRoleCommand>(hostRequest.getLogicalTasks()));
+      commands.addAll(new ArrayList<>(hostRequest.getLogicalTasks()));
     }
     return commands;
   }
@@ -169,7 +169,7 @@ public class LogicalRequest extends Request {
   }
 
   public Collection<HostRequest> getCompletedHostRequests() {
-    Collection<HostRequest> completedHostRequests = new ArrayList<HostRequest>(allHostRequests);
+    Collection<HostRequest> completedHostRequests = new ArrayList<>(allHostRequests);
     completedHostRequests.removeAll(outstandingHostRequests);
     completedHostRequests.removeAll(requestsWithReservedHosts.values());
 
@@ -178,11 +178,11 @@ public class LogicalRequest extends Request {
 
   //todo: this is only here for toEntity() functionality
   public Collection<HostRequest> getHostRequests() {
-    return new ArrayList<HostRequest>(allHostRequests);
+    return new ArrayList<>(allHostRequests);
   }
 
   public Map<String, Collection<String>> getProjectedTopology() {
-    Map<String, Collection<String>> hostComponentMap = new HashMap<String, Collection<String>>();
+    Map<String, Collection<String>> hostComponentMap = new HashMap<>();
 
     //todo: synchronization
     for (HostRequest hostRequest : allHostRequests) {
@@ -190,7 +190,7 @@ public class LogicalRequest extends Request {
      for (String host : topology.getHostGroupInfo().get(hostGroup.getName()).getHostNames()) {
         Collection<String> hostComponents = hostComponentMap.get(host);
         if (hostComponents == null) {
-          hostComponents = new HashSet<String>();
+          hostComponents = new HashSet<>();
           hostComponentMap.put(host, hostComponents);
         }
         hostComponents.addAll(hostGroup.getComponentNames());
@@ -202,7 +202,7 @@ public class LogicalRequest extends Request {
   // currently we are just returning all stages for all requests
  //TODO technically StageEntity is simply a container for HostRequest info with additional redundant transformations
   public Collection<StageEntity> getStageEntities() {
-    Collection<StageEntity> stages = new ArrayList<StageEntity>();
+    Collection<StageEntity> stages = new ArrayList<>();
     for (HostRequest hostRequest : allHostRequests) {
       StageEntity stage = new StageEntity();
       stage.setStageId(hostRequest.getStageId());
@@ -227,7 +227,7 @@ public class LogicalRequest extends Request {
     requestStatus.setRequestContext(getRequestContext());
 
     // convert HostRoleCommands to ShortTaskStatus
-    List<ShortTaskStatus> shortTasks = new ArrayList<ShortTaskStatus>();
+    List<ShortTaskStatus> shortTasks = new ArrayList<>();
     for (HostRoleCommand task : getCommands()) {
       shortTasks.add(new ShortTaskStatus(task));
     }
@@ -237,7 +237,7 @@ public class LogicalRequest extends Request {
   }
 
   public Map<Long, HostRoleCommandStatusSummaryDTO> getStageSummaries() {
-    Map<Long, HostRoleCommandStatusSummaryDTO> summaryMap = new HashMap<Long, HostRoleCommandStatusSummaryDTO>();
+    Map<Long, HostRoleCommandStatusSummaryDTO> summaryMap = new HashMap<>();
 
     Map<Long, Collection<HostRoleCommand>> stageTasksMap = new HashMap<>();
 
@@ -371,7 +371,7 @@ public class LogicalRequest extends Request {
     for (HostGroupInfo hostGroupInfo : hostGroupInfoMap.values()) {
       String groupName = hostGroupInfo.getHostGroupName();
       int hostCardinality = hostGroupInfo.getRequestedHostCount();
-      List<String> hostnames = new ArrayList<String>(hostGroupInfo.getHostNames());
+      List<String> hostnames = new ArrayList<>(hostGroupInfo.getHostNames());
 
       for (int i = 0; i < hostCardinality; ++i) {
         if (! hostnames.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
index 912b2ff..2ac9950 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
@@ -172,10 +172,10 @@ public class PersistedStateImpl implements PersistedState {
   public Map<ClusterTopology, List<LogicalRequest>> getAllRequests() {
    //todo: we only currently support a single request per ambari instance so there should only
     //todo: be a single cluster topology
-    Map<ClusterTopology, List<LogicalRequest>> allRequests = new HashMap<ClusterTopology, List<LogicalRequest>>();
+    Map<ClusterTopology, List<LogicalRequest>> allRequests = new HashMap<>();
     Collection<TopologyRequestEntity> entities = topologyRequestDAO.findAll();
 
-    Map<Long, ClusterTopology> topologyRequests = new HashMap<Long, ClusterTopology>();
+    Map<Long, ClusterTopology> topologyRequests = new HashMap<>();
     for (TopologyRequestEntity entity : entities) {
      TopologyRequest replayedRequest = new ReplayedTopologyRequest(entity, blueprintFactory);
      ClusterTopology clusterTopology = topologyRequests.get(replayedRequest.getClusterId());
@@ -239,7 +239,7 @@ public class PersistedStateImpl implements PersistedState {
     }
 
     // host groups
-    Collection<TopologyHostGroupEntity> hostGroupEntities = new ArrayList<TopologyHostGroupEntity>();
+    Collection<TopologyHostGroupEntity> hostGroupEntities = new ArrayList<>();
     for (HostGroupInfo groupInfo : request.getHostGroupInfo().values())  {
       hostGroupEntities.add(toEntity(groupInfo, entity));
     }
@@ -257,7 +257,7 @@ public class PersistedStateImpl implements PersistedState {
     entity.setTopologyRequestId(topologyRequestEntity.getId());
 
     // host requests
-    Collection<TopologyHostRequestEntity> hostRequests = new ArrayList<TopologyHostRequestEntity>();
+    Collection<TopologyHostRequestEntity> hostRequests = new ArrayList<>();
     entity.setTopologyHostRequestEntities(hostRequests);
     for (HostRequest hostRequest : request.getHostRequests()) {
       hostRequests.add(toEntity(hostRequest, entity));
@@ -276,7 +276,7 @@ public class PersistedStateImpl implements PersistedState {
        logicalRequestEntity.getTopologyRequestId(), request.getHostgroupName()));
 
     // logical tasks
-    Collection<TopologyHostTaskEntity> hostRequestTaskEntities = new ArrayList<TopologyHostTaskEntity>();
+    Collection<TopologyHostTaskEntity> hostRequestTaskEntities = new ArrayList<>();
     entity.setTopologyHostTaskEntities(hostRequestTaskEntities);
     // for now only worry about install and start tasks
     for (TopologyTask task : request.getTopologyTasks()) {
@@ -285,7 +285,7 @@ public class PersistedStateImpl implements PersistedState {
         hostRequestTaskEntities.add(topologyTaskEntity);
         topologyTaskEntity.setType(task.getType().name());
         topologyTaskEntity.setTopologyHostRequestEntity(entity);
-        Collection<TopologyLogicalTaskEntity> logicalTaskEntities = new ArrayList<TopologyLogicalTaskEntity>();
+        Collection<TopologyLogicalTaskEntity> logicalTaskEntities = new ArrayList<>();
         topologyTaskEntity.setTopologyLogicalTaskEntities(logicalTaskEntities);
        for (Long logicalTaskId : request.getLogicalTasksForTopologyTask(task).values()) {
          TopologyLogicalTaskEntity logicalTaskEntity = new TopologyLogicalTaskEntity();
@@ -313,7 +313,7 @@ public class PersistedStateImpl implements PersistedState {
     entity.setTopologyRequestEntity(topologyRequestEntity);
 
     // host info
-    Collection<TopologyHostInfoEntity> hostInfoEntities = new ArrayList<TopologyHostInfoEntity>();
+    Collection<TopologyHostInfoEntity> hostInfoEntities = new ArrayList<>();
     entity.setTopologyHostInfoEntities(hostInfoEntities);
 
     Collection<String> hosts = groupInfo.getHostNames();
@@ -356,7 +356,7 @@ public class PersistedStateImpl implements PersistedState {
     private final String description;
     private final Blueprint blueprint;
     private final Configuration configuration;
-    private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<String, HostGroupInfo>();
+    private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
 
    public ReplayedTopologyRequest(TopologyRequestEntity entity, BlueprintFactory blueprintFactory) {
       clusterId = entity.getClusterId();

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index 8e991d6..392a53e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -103,14 +103,14 @@ public class TopologyManager {
   private final ExecutorService executor = Executors.newSingleThreadExecutor();
   private final Executor taskExecutor; // executes TopologyTasks
   private final boolean parallelTaskCreationEnabled;
-  private Collection<String> hostsToIgnore = new HashSet<String>();
-  private final List<HostImpl> availableHosts = new LinkedList<HostImpl>();
-  private final Map<String, LogicalRequest> reservedHosts = new HashMap<String, LogicalRequest>();
-  private final Map<Long, LogicalRequest> allRequests = new HashMap<Long, LogicalRequest>();
+  private Collection<String> hostsToIgnore = new HashSet<>();
+  private final List<HostImpl> availableHosts = new LinkedList<>();
+  private final Map<String, LogicalRequest> reservedHosts = new HashMap<>();
+  private final Map<Long, LogicalRequest> allRequests = new HashMap<>();
   // priority is given to oldest outstanding requests
-  private final Collection<LogicalRequest> outstandingRequests = new ArrayList<LogicalRequest>();
+  private final Collection<LogicalRequest> outstandingRequests = new ArrayList<>();
   //todo: currently only support a single cluster
-  private Map<Long, ClusterTopology> clusterTopologyMap = new HashMap<Long, ClusterTopology>();
+  private Map<Long, ClusterTopology> clusterTopologyMap = new HashMap<>();
 
   @Inject
   private StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor;
@@ -639,7 +639,7 @@ public class TopologyManager {
     if (requestIds.isEmpty()) {
       return allRequests.values();
     } else {
-      Collection<LogicalRequest> matchingRequests = new ArrayList<LogicalRequest>();
+      Collection<LogicalRequest> matchingRequests = new ArrayList<>();
       for (long id : requestIds) {
         LogicalRequest request = allRequests.get(id);
         if (request != null) {
@@ -656,7 +656,7 @@ public class TopologyManager {
    */
   public Collection<StageEntity> getStages() {
     ensureInitialized();
-    Collection<StageEntity> stages = new ArrayList<StageEntity>();
+    Collection<StageEntity> stages = new ArrayList<>();
     for (LogicalRequest logicalRequest : allRequests.values()) {
       stages.addAll(logicalRequest.getStageEntities());
     }
@@ -671,7 +671,7 @@ public class TopologyManager {
 
   public Collection<HostRoleCommand> getTasks(Collection<Long> requestIds) {
     ensureInitialized();
-    Collection<HostRoleCommand> tasks = new ArrayList<HostRoleCommand>();
+    Collection<HostRoleCommand> tasks = new ArrayList<>();
     for (long id : requestIds) {
       tasks.addAll(getTasks(id));
     }
@@ -694,7 +694,7 @@ public class TopologyManager {
 
  public Collection<RequestStatusResponse> getRequestStatus(Collection<Long> ids) {
     ensureInitialized();
-    List<RequestStatusResponse> requestStatusResponses = new ArrayList<RequestStatusResponse>();
+    List<RequestStatusResponse> requestStatusResponses = new ArrayList<>();
     for (long id : ids) {
       RequestStatusResponse response = getRequestStatus(id);
       if (response != null) {
@@ -720,7 +720,7 @@ public class TopologyManager {
    */
   public Map<String, Collection<String>> getPendingHostComponents() {
     ensureInitialized();
-    Map<String, Collection<String>> hostComponentMap = new HashMap<String, Collection<String>>();
+    Map<String, Collection<String>> hostComponentMap = new HashMap<>();
 
     for (LogicalRequest logicalRequest : allRequests.values()) {
      Map<Long, HostRoleCommandStatusSummaryDTO> summary = logicalRequest.getStageSummaries();
@@ -740,7 +740,7 @@ public class TopologyManager {
           String host = entry.getKey();
           Collection<String> hostComponents = hostComponentMap.get(host);
           if (hostComponents == null) {
-            hostComponents = new HashSet<String>();
+            hostComponents = new HashSet<>();
             hostComponentMap.put(host, hostComponents);
           }
           hostComponents.addAll(entry.getValue());

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java
index 0730fe8..e4b10c2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java
@@ -49,9 +49,9 @@ public class PersistHostResourcesTask extends TopologyHostTask  {
   public void runTask() {
    LOG.info("HostRequest: Executing RESOURCE_CREATION task for host: {}", hostRequest.getHostName());
     HostGroup group = hostRequest.getHostGroup();
-    Map<String, Collection<String>> serviceComponents = new HashMap<String, Collection<String>>();
+    Map<String, Collection<String>> serviceComponents = new HashMap<>();
     for (String service : group.getServices()) {
-      serviceComponents.put(service, new HashSet<String>(group.getComponents(service)));
+      serviceComponents.put(service, new HashSet<>(group.getComponents(service)));
    }
    clusterTopology.getAmbariContext().createAmbariHostResources(hostRequest.getClusterId(),
      hostRequest.getHostName(), serviceComponents);

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java
index 634ab08..dce38b4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java
@@ -49,7 +49,7 @@ public class ClusterConfigTypeValidator implements TopologyValidator {
     }
 
     // identifying invalid config types
-    Set<String> configTypeIntersection = new HashSet<String>(topologyClusterConfigTypes);
+    Set<String> configTypeIntersection = new HashSet<>(topologyClusterConfigTypes);
 
     if (configTypeIntersection.retainAll(stackServiceConfigTypes)) {
      // there are config types not present in the stack for the services listed in the blueprint

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
index 3cc9b16..591a124 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
@@ -70,14 +70,14 @@ public class RequiredPasswordValidator implements TopologyValidator {
  private Map<String, Map<String, Collection<String>>> validateRequiredPasswords(ClusterTopology topology) {
 
     Map<String, Map<String, Collection<String>>> missingProperties =
-        new HashMap<String, Map<String, Collection<String>>>();
+      new HashMap<>();
 
     for (Map.Entry<String, HostGroupInfo> groupEntry: 
topology.getHostGroupInfo().entrySet()) {
       String hostGroupName = groupEntry.getKey();
       Map<String, Map<String, String>> groupProperties =
           groupEntry.getValue().getConfiguration().getFullProperties(3);
 
-      Collection<String> processedServices = new HashSet<String>();
+      Collection<String> processedServices = new HashSet<>();
       Blueprint blueprint = topology.getBlueprint();
       Stack stack = blueprint.getStack();
 
@@ -100,12 +100,12 @@ public class RequiredPasswordValidator implements 
TopologyValidator {
             if (! propertyExists(topology, groupProperties, category, name)) {
               Map<String, Collection<String>> missingHostGroupPropsMap = 
missingProperties.get(hostGroupName);
               if (missingHostGroupPropsMap == null) {
-                missingHostGroupPropsMap = new HashMap<String, 
Collection<String>>();
+                missingHostGroupPropsMap = new HashMap<>();
                 missingProperties.put(hostGroupName, missingHostGroupPropsMap);
               }
               Collection<String> missingHostGroupTypeProps = 
missingHostGroupPropsMap.get(category);
               if (missingHostGroupTypeProps == null) {
-                missingHostGroupTypeProps = new HashSet<String>();
+                missingHostGroupTypeProps = new HashSet<>();
                 missingHostGroupPropsMap.put(category, 
missingHostGroupTypeProps);
               }
               missingHostGroupTypeProps.add(name);
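The get/null-check/put sequences above build a nested map of host group -> config type -> missing property names. On Java 8+ the same structure could be expressed with Map.computeIfAbsent; the explicit form in the diff reflects the Java 7 target. A runnable sketch, with made-up key names:

import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

public class NestedMapDemo {
  public static void main(String[] args) {
    Map<String, Map<String, Collection<String>>> missing = new HashMap<>();

    // Equivalent of the validator's get/null-check/put sequence, collapsed
    // with computeIfAbsent: each level is created on first access.
    missing.computeIfAbsent("host_group_1", k -> new HashMap<>())
           .computeIfAbsent("hive-site", k -> new HashSet<>())
           .add("javax.jdo.option.ConnectionPassword");

    System.out.println(missing);
  }
}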

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 712e309..3e1d3b8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -139,7 +139,7 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
   private static final Logger LOG = LoggerFactory.getLogger
     (AbstractUpgradeCatalog.class);
   private static final Map<String, UpgradeCatalog> upgradeCatalogMap =
-    new HashMap<String, UpgradeCatalog>();
+    new HashMap<>();
 
   protected String ambariUpgradeConfigUpdatesFileName;
 
@@ -377,9 +377,9 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
 
     if (clusterMap != null && !clusterMap.isEmpty()) {
       for (Cluster cluster : clusterMap.values()) {
-        Map<String, Set<String>> toAddProperties = new HashMap<String, Set<String>>();
-        Map<String, Set<String>> toUpdateProperties = new HashMap<String, Set<String>>();
-        Map<String, Set<String>> toRemoveProperties = new HashMap<String, Set<String>>();
+        Map<String, Set<String>> toAddProperties = new HashMap<>();
+        Map<String, Set<String>> toUpdateProperties = new HashMap<>();
+        Map<String, Set<String>> toRemoveProperties = new HashMap<>();
 
 
         Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
@@ -497,7 +497,7 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
 
     if (clusterMap != null && !clusterMap.isEmpty()) {
       for (Cluster cluster : clusterMap.values()) {
-        Map<String, String> properties = new HashMap<String, String>();
+        Map<String, String> properties = new HashMap<>();
 
         for(String propertyName:propertyNames) {
           String propertyValue = configHelper.getPropertyValueFromStackDefinitions(cluster, configType, propertyName);
@@ -551,7 +551,7 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
               "Skipping configuration properties update");
           return;
         } else if (oldConfig == null) {
-          oldConfigProperties = new HashMap<String, String>();
+          oldConfigProperties = new HashMap<>();
        } else {
          oldConfigProperties = oldConfig.getProperties();
        }
@@ -659,7 +659,7 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
                               Map<String, String> newProperties,
                               boolean updateIfExists, Multimap<AbstractUpgradeCatalog.ConfigUpdateType, Entry<String, String>> propertiesToLog) {
 
-    Map<String, String> properties = new HashMap<String, String>(originalProperties);
+    Map<String, String> properties = new HashMap<>(originalProperties);
     for (Map.Entry<String, String> entry : newProperties.entrySet()) {
       if (!properties.containsKey(entry.getKey())) {
         properties.put(entry.getKey(), entry.getValue());
@@ -675,12 +675,12 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
 
   private Map<String, String> removeProperties(Map<String, String> originalProperties,
                                                Set<String> removeList, Multimap<AbstractUpgradeCatalog.ConfigUpdateType, Entry<String, String>> propertiesToLog){
-    Map<String, String> properties = new HashMap<String, String>();
+    Map<String, String> properties = new HashMap<>();
     properties.putAll(originalProperties);
     for (String removeProperty: removeList){
       if (originalProperties.containsKey(removeProperty)){
         properties.remove(removeProperty);
-        propertiesToLog.put(ConfigUpdateType.REMOVED, new AbstractMap.SimpleEntry<String, String>(removeProperty, ""));
+        propertiesToLog.put(ConfigUpdateType.REMOVED, new AbstractMap.SimpleEntry<>(removeProperty, ""));
       }
     }
     return properties;
@@ -784,7 +784,7 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
     ArtifactDAO artifactDAO = injector.getInstance(ArtifactDAO.class);
     KerberosDescriptor artifactDescriptor = null;
     ArtifactEntity artifactEntity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor",
-        new TreeMap<String, String>(Collections.singletonMap("cluster", String.valueOf(cluster.getClusterId()))));
+      new TreeMap<>(Collections.singletonMap("cluster", String.valueOf(cluster.getClusterId()))));
     if (artifactEntity != null) {
       Map<String, Object> data = artifactEntity.getArtifactData();
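For reference, the merge rule visible in the @@ -659 hunk above: start from a copy of the original properties and add each new entry whose key is absent; judging by the updateIfExists parameter, existing keys are presumably overwritten only when that flag is set (the overwrite branch falls outside the hunk's context window). A stripped-down, runnable sketch under that assumption (the real method additionally records each change in a Multimap for the upgrade audit log):

import java.util.HashMap;
import java.util.Map;

public class MergePropertiesDemo {

  // Simplified merge: copy the original map, add absent keys, and overwrite
  // existing keys only when updateIfExists is true.
  static Map<String, String> merge(Map<String, String> original,
                                   Map<String, String> updates,
                                   boolean updateIfExists) {
    Map<String, String> merged = new HashMap<>(original);
    for (Map.Entry<String, String> entry : updates.entrySet()) {
      if (!merged.containsKey(entry.getKey()) || updateIfExists) {
        merged.put(entry.getKey(), entry.getValue());
      }
    }
    return merged;
  }

  public static void main(String[] args) {
    Map<String, String> original = new HashMap<>();
    original.put("hive.heapsize", "1024");
    Map<String, String> updates = new HashMap<>();
    updates.put("hive.heapsize", "2048");
    updates.put("hive.server2.port", "10000");

    System.out.println(merge(original, updates, false)); // keeps 1024, adds port
    System.out.println(merge(original, updates, true));  // overwrites to 2048
  }
}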
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
index 8e21aea..b258aa8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
@@ -52,7 +52,7 @@ public class StackUpgradeUtil {
   public void updateStackDetails(String stackName, String stackVersion) {
     ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
     StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    List<Long> clusterIds = new ArrayList<Long>();
+    List<Long> clusterIds = new ArrayList<>();
 
     StackEntity stackEntity = stackDAO.find(stackName, stackVersion);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
index 1f5b466..edf107a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
@@ -167,7 +167,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
         Short.class, 1, 0, false));
 
     // create alert_target_states table
-    ArrayList<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    ArrayList<DBColumnInfo> columns = new ArrayList<>();
     columns.add(new DBColumnInfo("target_id", Long.class, null, null, false));
     columns.add(new DBColumnInfo("alert_state", String.class, 255, null, false));
     dbAccessor.createTable(ALERT_TARGET_STATES_TABLE, columns);
@@ -185,7 +185,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
    * @throws SQLException
    */
   private void prepareRollingUpgradesDDL() throws SQLException {
-    List<DBAccessor.DBColumnInfo> columns = new ArrayList<DBAccessor.DBColumnInfo>();
+    List<DBAccessor.DBColumnInfo> columns = new ArrayList<>();
 
     columns.add(new DBColumnInfo("repo_version_id", Long.class,    null,  null, false));
     columns.add(new DBColumnInfo("stack",           String.class,  255,   null, false));
@@ -214,7 +214,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
         Integer.class, 1, 0, false));
 
     // New tables
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBAccessor.DBColumnInfo("id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("repo_version_id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("cluster_id", Long.class, null, null, false));
@@ -224,7 +224,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
     columns.add(new DBAccessor.DBColumnInfo("user_name", String.class, 32, null, true));
     dbAccessor.createTable("cluster_version", columns, "id");
 
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBAccessor.DBColumnInfo("id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("repo_version_id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("host_name", String.class, 255, null, false));
@@ -244,7 +244,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
     addSequence("host_version_id_seq", 0L, false);
 
     // upgrade tables
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBAccessor.DBColumnInfo("upgrade_id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("cluster_id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("request_id", Long.class, null, null, false));
@@ -256,7 +256,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
     dbAccessor.addFKConstraint("upgrade", "fk_upgrade_request_id", "request_id", "request", "request_id", false);
     addSequence("upgrade_id_seq", 0L, false);
 
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBAccessor.DBColumnInfo("upgrade_group_id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("upgrade_id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("group_name", String.class, 255, "", false));
@@ -266,7 +266,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
     addSequence("upgrade_group_id_seq", 0L, false);
 
 
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBAccessor.DBColumnInfo("upgrade_item_id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("upgrade_group_id", Long.class, null, null, false));
     columns.add(new DBAccessor.DBColumnInfo("stage_id", Long.class, null, null, false));
@@ -280,7 +280,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
   }
 
   private void createArtifactTable() throws SQLException {
-    ArrayList<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    ArrayList<DBColumnInfo> columns = new ArrayList<>();
     columns.add(new DBColumnInfo("artifact_name", String.class, 255, null, false));
     columns.add(new DBColumnInfo("foreign_keys", String.class, 255, null, false));
     columns.add(new DBColumnInfo("artifact_data", char[].class, null, null, false));
@@ -290,13 +290,13 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
   private void createKerberosPrincipalTables() throws SQLException {
     ArrayList<DBColumnInfo> columns;
 
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBColumnInfo("principal_name", String.class, 255, null, false));
     columns.add(new DBColumnInfo("is_service", Short.class, 1, 1, false));
     columns.add(new DBColumnInfo("cached_keytab_path", String.class, 255, null, true));
     dbAccessor.createTable(KERBEROS_PRINCIPAL_TABLE, columns, "principal_name");
 
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBColumnInfo("principal_name", String.class, 255, null, false));
     columns.add(new DBColumnInfo("host_name", String.class, 255, null, false));
     dbAccessor.createTable(KERBEROS_PRINCIPAL_HOST_TABLE, columns, "principal_name", "host_name");
@@ -426,7 +426,7 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
 
     if (clusters != null) {
       Map<String, Cluster> clusterMap = clusters.getClusters();
-      Map<String, String> prop = new HashMap<String, String>();
+      Map<String, String> prop = new HashMap<>();
       String hive_database_type = null;
 
       if (clusterMap != null && !clusterMap.isEmpty()) {
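The DDL hunks above all follow one pattern: build a list of column descriptors, then hand it to dbAccessor.createTable together with the primary key column(s). Judging by the calls in the diff, the DBColumnInfo constructor takes (name, Java type, length, default value, nullable). A self-contained toy analogue that renders generic SQL, so the argument order is visible; this is not Ambari's API, which maps types per database dialect:

import java.util.ArrayList;
import java.util.List;
import java.util.StringJoiner;

public class DdlSketch {
  static class ColumnInfo {
    final String name; final Class<?> type; final Integer length;
    final Object defaultValue; final boolean nullable;
    ColumnInfo(String name, Class<?> type, Integer length, Object defaultValue, boolean nullable) {
      this.name = name; this.type = type; this.length = length;
      this.defaultValue = defaultValue; this.nullable = nullable;
    }
    String toSql() {
      String sqlType = type == Long.class ? "BIGINT"
          : type == String.class ? "VARCHAR(" + length + ")" : "VARCHAR(255)";
      return name + " " + sqlType + (nullable ? "" : " NOT NULL")
          + (defaultValue != null ? " DEFAULT '" + defaultValue + "'" : "");
    }
  }

  // Toy stand-in for dbAccessor.createTable(table, columns, primaryKey).
  static void createTable(String table, List<ColumnInfo> columns, String primaryKey) {
    StringJoiner cols = new StringJoiner(", ");
    for (ColumnInfo c : columns) cols.add(c.toSql());
    System.out.println("CREATE TABLE " + table + " (" + cols
        + ", PRIMARY KEY (" + primaryKey + "))");
  }

  public static void main(String[] args) {
    List<ColumnInfo> columns = new ArrayList<>();
    columns.add(new ColumnInfo("upgrade_id", Long.class, null, null, false));
    columns.add(new ColumnInfo("group_name", String.class, 255, "", false));
    createTable("upgrade_group", columns, "upgrade_id");
  }
}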

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
index 5c2d2a6..7b7681c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
@@ -193,7 +193,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
   }
 
   private void executeTopologyDDLUpdates() throws AmbariException, SQLException {
-    List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    List<DBColumnInfo> columns = new ArrayList<>();
 
     columns.add(new DBColumnInfo("id", Long.class, null, null, false));
     columns.add(new DBColumnInfo("action", String.class, 255, null, false));
@@ -576,7 +576,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
   }
 
   private void executeWidgetDDLUpdates() throws AmbariException, SQLException {
-    List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    List<DBColumnInfo> columns = new ArrayList<>();
 
     columns.add(new DBColumnInfo("id", Long.class,    null,  null, false));
     columns.add(new DBColumnInfo("widget_name", String.class,  255,   null, false));
@@ -592,7 +592,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
     columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
     dbAccessor.createTable(WIDGET_TABLE, columns, "id");
 
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBColumnInfo("id", Long.class, null, null, false));
     columns.add(new DBColumnInfo("layout_name", String.class, 255, null, false));
     columns.add(new DBColumnInfo("section_name", String.class, 255, null, false));
@@ -603,7 +603,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
 
     dbAccessor.createTable(WIDGET_LAYOUT_TABLE, columns, "id");
 
-    columns = new ArrayList<DBColumnInfo>();
+    columns = new ArrayList<>();
     columns.add(new DBColumnInfo("widget_layout_id", Long.class, null, null, false));
     columns.add(new DBColumnInfo("widget_id", Long.class, null, null, false));
     columns.add(new DBColumnInfo("widget_order", Short.class, null, null, true));
@@ -623,7 +623,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
    */
   private void executeStackDDLUpdates() throws AmbariException, SQLException {
     // stack table creation
-    ArrayList<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    ArrayList<DBColumnInfo> columns = new ArrayList<>();
     columns.add(new DBColumnInfo("stack_id", Long.class, null, null, false));
     columns.add(new DBColumnInfo("stack_name", String.class, 255, null, false));
     columns.add(new DBColumnInfo("stack_version", String.class, 255, null,
@@ -677,7 +677,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
 
     StackDAO stackDAO = injector.getInstance(StackDAO.class);
     List<StackEntity> stacks = stackDAO.findAll();
-    Map<Long,String> entityToJsonMap = new HashMap<Long, String>();
+    Map<Long,String> entityToJsonMap = new HashMap<>();
 
     // build a mapping of stack entity to old-school JSON
     for( StackEntity stack : stacks ){
@@ -1399,9 +1399,9 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
           if (RangerHiveConfig != null
                   && RangerHiveConfig.getProperties().containsKey("ranger-hive-plugin-enabled")
                   && cluster.getDesiredConfigByType("hive-env") != null) {
-            Map<String, String> newHiveEnvProperties = new HashMap<String, String>();
-            Map<String, String> newHiveServerProperties = new HashMap<String, String>();
-            Set<String> removeRangerHiveProperties = new HashSet<String>();
+            Map<String, String> newHiveEnvProperties = new HashMap<>();
+            Map<String, String> newHiveServerProperties = new HashMap<>();
+            Set<String> removeRangerHiveProperties = new HashSet<>();
             removeRangerHiveProperties.add("ranger-hive-plugin-enabled");
 
             if (RangerHiveConfig.getProperties().get("ranger-hive-plugin-enabled") != null
@@ -1432,7 +1432,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
           if (RangerHBaseConfig != null
                 && RangerHBaseConfig.getProperties().containsKey("ranger-hbase-plugin-enabled")
                 && cluster.getDesiredConfigByType("hbase-site") != null) {
-            Map<String, String> newHBaseSiteProperties = new HashMap<String, String>();
+            Map<String, String> newHBaseSiteProperties = new HashMap<>();
 
             if (RangerHBaseConfig.getProperties().get("ranger-hbase-plugin-enabled") != null
                   && RangerHBaseConfig.getProperties().get("ranger-hbase-plugin-enabled").equalsIgnoreCase("yes")) {
@@ -1454,7 +1454,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
 
     if (clusters != null) {
       Map<String, Cluster> clusterMap = clusters.getClusters();
-      Map<String, String> prop = new HashMap<String, String>();
+      Map<String, String> prop = new HashMap<>();
       String content = null;
 
       if (clusterMap != null && !clusterMap.isEmpty()) {
@@ -1495,7 +1495,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
                 } else {
                   // NN HA disabled
                   nameNodeRpc = new URI(cluster.getDesiredConfigByType("core-site").getProperties().get("fs.defaultFS"));
-                  Map<String, String> hdfsProp = new HashMap<String, String>();
+                  Map<String, String> hdfsProp = new HashMap<>();
                   hdfsProp.put("dfs.namenode.rpc-address", hostName + ":" + nameNodeRpc.getPort());
                   updateConfigurationPropertiesForCluster(cluster, HDFS_SITE_CONFIG,
                           hdfsProp, false, false);
@@ -1529,8 +1529,8 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
           }
 
           if(cluster.getDesiredConfigByType("hive-env") != null) {
-            Map<String, String> hiveEnvProps = new HashMap<String, String>();
-            Set<String> hiveServerSiteRemoveProps = new HashSet<String>();
+            Map<String, String> hiveEnvProps = new HashMap<>();
+            Set<String> hiveServerSiteRemoveProps = new HashSet<>();
             // Update logic for setting HIVE_AUX_JARS_PATH in hive-env.sh
             content = cluster.getDesiredConfigByType("hive-env").getProperties().get("content");
             if(content != null) {
@@ -1570,8 +1570,8 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
           }
 
           if(cluster.getDesiredConfigByType("hive-site") != null) {
-            Set<String> hiveSiteRemoveProps = new HashSet<String>();
-            Map<String, String> hiveSiteAddProps = new HashMap<String, String>();
+            Set<String> hiveSiteRemoveProps = new HashSet<>();
+            Map<String, String> hiveSiteAddProps = new HashMap<>();
 
             if (!"pam".equalsIgnoreCase(hive_server2_auth)) {
               hiveSiteRemoveProps.add("hive.server2.authentication.pam.services");
@@ -1615,10 +1615,10 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {
           if (cluster.getDesiredConfigByType("hbase-site") != null && cluster.getDesiredConfigByType("hbase-env") != null) {
-            Map<String, String> hbaseEnvProps = new HashMap<String, String>();
-            Map<String, String> hbaseSiteProps = new HashMap<String, String>();
-            Set<String> hbaseEnvRemoveProps = new HashSet<String>();
-            Set<String> hbaseSiteRemoveProps = new HashSet<String>();
+            Map<String, String> hbaseEnvProps = new HashMap<>();
+            Map<String, String> hbaseSiteProps = new HashMap<>();
+            Set<String> hbaseEnvRemoveProps = new HashSet<>();
+            Set<String> hbaseSiteRemoveProps = new HashSet<>();
 
             if (cluster.getDesiredConfigByType("hbase-site").getProperties().containsKey("hbase.region.server.rpc.scheduler.factory.class") &&
                 "org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory".equals(cluster.getDesiredConfigByType("hbase-site").getProperties().get(
@@ -1697,7 +1697,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
           if(cluster.getDesiredConfigByType("cluster-env") != null
                   && cluster.getDesiredConfigByType("cluster-env").getProperties().get("security_enabled").equals("true")
                   && cluster.getDesiredConfigByType("storm-site") != null ) {
-            Map<String, String> newStormProps = new HashMap<String, String>();
+            Map<String, String> newStormProps = new HashMap<>();
             if (!cluster.getDesiredConfigByType("storm-site").getProperties().containsKey("java.security.auth.login.config")) {
               newStormProps.put("java.security.auth.login.config", "{{conf_dir}}/storm_jaas.conf");
             }

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
index 810451d..20f0d72 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
@@ -160,7 +160,7 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
   }
 
   protected void addClusterIdToTopology() throws AmbariException, SQLException {
-    Map<String, Long> clusterNameIdMap = new HashMap<String, Long>();
+    Map<String, Long> clusterNameIdMap = new HashMap<>();
     try (Statement statement = dbAccessor.getConnection().createStatement();
          ResultSet rs = statement.executeQuery("SELECT DISTINCT cluster_name, cluster_id FROM clusters");
     ) {
@@ -218,7 +218,7 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
       if ((clusterMap != null) && !clusterMap.isEmpty()) {
         // Iterate through the clusters and perform any configuration updates
         for (final Cluster cluster : clusterMap.values()) {
-          Set<String> removes = new HashSet<String>();
+          Set<String> removes = new HashSet<>();
           removes.add("topology.metrics.consumer.register");
           updateConfigurationPropertiesForCluster(cluster, "storm-site",
             new HashMap<String, String>(), removes, false, false);
@@ -258,8 +258,8 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
             // Remove override_hbase_uid from hbase-env and add override_uid to cluster-env
             String value = hbaseEnvProps.getProperties().get("override_hbase_uid");
             if (value != null) {
-              Map<String, String> updates = new HashMap<String, String>();
-              Set<String> removes = new HashSet<String>();
+              Map<String, String> updates = new HashMap<>();
+              Set<String> removes = new HashSet<>();
              updates.put("override_uid", value);
              removes.add("override_hbase_uid");
              updateConfigurationPropertiesForCluster(cluster, HBASE_ENV, new HashMap<String, String>(), removes, false, true);
@@ -276,7 +276,7 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
            if (value != null) {
              if (value.endsWith("m")) {
                value = value.substring(0, value.length() - 1);
-                Map<String, String> updates = new HashMap<String, String>();
+                Map<String, String> updates = new HashMap<>();
                updates.put("hbase.bucketcache.size", value);
                updateConfigurationPropertiesForCluster(cluster, HBASE_SITE, updates, true, false);
              }
@@ -293,7 +293,7 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
   * Ambari version 2.1.0 where HBase does not have override_hbase_uid.
   * */
  private void updateOverrideUIDClusterConfig(String toOverride, Cluster cluster) throws AmbariException{
-    Map<String, String> updates = new HashMap<String, String>();
+    Map<String, String> updates = new HashMap<>();
    updates.put("override_uid", toOverride);
    updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, updates, true, false);
  }
@@ -315,7 +315,7 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
                  VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0);
 
          if (cluster.getDesiredConfigByType(HIVE_ENV) != null && isStackNotLess22) {
-            Map<String, String> hiveEnvProps = new HashMap<String, String>();
+            Map<String, String> hiveEnvProps = new HashMap<>();
            content = cluster.getDesiredConfigByType(HIVE_ENV).getProperties().get("content");
            if(content != null) {
              content = updateHiveEnvContent(content);
@@ -325,7 +325,7 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
          }
 
          if (isHiveSitePresent && isStackNotLess22) {
-            Set<String> hiveSiteRemoveProps = new HashSet<String>();
+            Set<String> hiveSiteRemoveProps = new HashSet<>();
            hiveSiteRemoveProps.add("hive.heapsize");
            hiveSiteRemoveProps.add("hive.optimize.mapjoin.mapreduce");
            hiveSiteRemoveProps.add("hive.server2.enable.impersonation");

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
index df84782..0487cd7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
@@ -120,7 +120,7 @@ public class UpgradeCatalog2121 extends AbstractUpgradeCatalog {
    */
   protected void updatePHDConfigs() throws AmbariException {
 
-    Map<String, String> replacements = new LinkedHashMap<String, String>();
+    Map<String, String> replacements = new LinkedHashMap<>();
     replacements.put("-Dstack.name=\\{\\{\\s*stack_name\\s*\\}\\}\\s*", "");
     replacements.put("-Dstack.name=\\$\\{stack.name\\}\\s*", "");
     replacements.put("-Dstack.version=\\{\\{\\s*stack_version_buildnum\\s*\\}\\}", "-Dhdp.version=\\$HDP_VERSION");
@@ -150,7 +150,7 @@ public class UpgradeCatalog2121 extends AbstractUpgradeCatalog {
 
                 Map<String, String> properties = config.getProperties();
                 if(properties != null && !properties.isEmpty()) {
-                  Map<String, String> updates = new HashMap<String, String>();
+                  Map<String, String> updates = new HashMap<>();
                   for (Map.Entry<String, String> property : properties.entrySet()) {
                     String propertyKey = property.getKey();
                     String propertyValue = property.getValue();
@@ -182,7 +182,7 @@ public class UpgradeCatalog2121 extends AbstractUpgradeCatalog {
         // Remove oozie.authentication.kerberos.name.rules if empty
         String oozieAuthKerbRules = oozieSiteProps.getProperties().get(OOZIE_AUTHENTICATION_KERBEROS_NAME_RULES);
         if (StringUtils.isBlank(oozieAuthKerbRules)) {
-          Set<String> removeProperties = new HashSet<String>();
+          Set<String> removeProperties = new HashSet<>();
           removeProperties.add(OOZIE_AUTHENTICATION_KERBEROS_NAME_RULES);
           updateConfigurationPropertiesForCluster(cluster, OOZIE_SITE_CONFIG, new HashMap<String, String>(), removeProperties, true, false);
         }
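One detail worth noting in updatePHDConfigs(): replacements is a LinkedHashMap rather than a HashMap, so the regex substitutions run in the order they were registered. A runnable sketch of that ordering with simplified stand-in patterns (the real rules in the diff are more elaborate):

import java.util.LinkedHashMap;
import java.util.Map;

public class OrderedReplacementsDemo {
  public static void main(String[] args) {
    // LinkedHashMap preserves insertion order; iteration applies the rules
    // in the order they were put(). The patterns below are simplified
    // stand-ins for the stack.name/stack.version rules above.
    Map<String, String> replacements = new LinkedHashMap<>();
    replacements.put("-Dstack\\.name=\\S+\\s*", "");
    replacements.put("-Dstack\\.version=\\S+", "-Dhdp.version=\\$HDP_VERSION");

    String value = "-Dstack.name=PHD -Dstack.version=3.0.0.0-249";
    for (Map.Entry<String, String> r : replacements.entrySet()) {
      value = value.replaceAll(r.getKey(), r.getValue());
    }
    System.out.println(value); // -Dhdp.version=$HDP_VERSION
  }
}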

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbb5492/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
index d806dde..11e90ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
@@ -244,7 +244,7 @@ public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
   }
 
   private void addKerberosDescriptorTable() throws SQLException {
-    List<DBAccessor.DBColumnInfo> columns = new ArrayList<DBAccessor.DBColumnInfo>();
+    List<DBAccessor.DBColumnInfo> columns = new ArrayList<>();
     columns.add(new DBAccessor.DBColumnInfo(KERBEROS_DESCRIPTOR_NAME_COLUMN, String.class, 255, null, false));
     columns.add(new DBAccessor.DBColumnInfo(KERBEROS_DESCRIPTOR_COLUMN, char[].class, null, null, false));
 
@@ -1026,7 +1026,7 @@ public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
 
       Config hiveEnvConfig = cluster.getDesiredConfigByType(HIVE_ENV_CONFIG);
       if (hiveEnvConfig != null) {
-        Map<String, String> hiveEnvProps = new HashMap<String, String>();
+        Map<String, String> hiveEnvProps = new HashMap<>();
         String content = hiveEnvConfig.getProperties().get(CONTENT_PROPERTY);
         // For HDP-2.3 we need to add hive heap size management to content,
         // for others we need to update content
@@ -1396,7 +1396,7 @@ public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
         if (clientProps != null) {
           Map<String, String> properties = clientProps.getProperties();
           if (properties == null) {
-            properties = new HashMap<String, String>();
+            properties = new HashMap<>();
           }
           // <2.2.0 did not account for a custom service principal.
           // Need to ensure that the client knows the server's principal (the primary) to properly authenticate.
