http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java new file mode 100644 index 0000000..be36335 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java @@ -0,0 +1,530 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.utils.JsonSerDeser; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_COMPONENT_NAME; +import static org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME; +import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*; +import static org.easymock.EasyMock.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +/** + * Test for ServiceApiUtil helper methods. 
+ */ +public class TestServiceApiUtil { + private static final Logger LOG = LoggerFactory + .getLogger(TestServiceApiUtil.class); + private static final String EXCEPTION_PREFIX = "Should have thrown " + + "exception: "; + private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " + + "exception: "; + + private static final String LEN_64_STR = + "abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01"; + + private static final YarnConfiguration CONF_DEFAULT_DNS = new + YarnConfiguration(); + private static final YarnConfiguration CONF_DNS_ENABLED = new + YarnConfiguration(); + + @BeforeClass + public static void init() { + CONF_DNS_ENABLED.setBoolean(RegistryConstants.KEY_DNS_ENABLED, true); + } + + @Test(timeout = 90000) + public void testResourceValidation() throws Exception { + assertEquals(RegistryConstants.MAX_FQDN_LABEL_LENGTH + 1, LEN_64_STR + .length()); + + SliderFileSystem sfs = initMock(null); + + Service app = new Service(); + + // no name + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no name"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage()); + } + + // bad format name + String[] badNames = {"4finance", "Finance", "finance@home", LEN_64_STR}; + for (String badName : badNames) { + app.setName(badName); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with bad name " + badName); + } catch (IllegalArgumentException e) { + + } + } + + // launch command not specified + app.setName(LEN_64_STR); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DEFAULT_DNS); + Assert.fail(EXCEPTION_PREFIX + "service with no launch command"); + } catch (IllegalArgumentException e) { + assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, + e.getMessage()); + } + + // launch command not specified + app.setName(LEN_64_STR.substring(0, RegistryConstants + .MAX_FQDN_LABEL_LENGTH)); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no launch command"); + } catch (IllegalArgumentException e) { + assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, + e.getMessage()); + } + + // resource not specified + app.setLaunchCommand("sleep 3600"); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no resource"); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID, + DEFAULT_COMPONENT_NAME), e.getMessage()); + } + + // memory not specified + Resource res = new Resource(); + app.setResource(res); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no memory"); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, + DEFAULT_COMPONENT_NAME), e.getMessage()); + } + + // invalid no of cpus + res.setMemory("100mb"); + res.setCpus(-2); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail( + EXCEPTION_PREFIX + "service with invalid no of cpus"); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, + DEFAULT_COMPONENT_NAME), e.getMessage()); + } + + // number of 
containers not specified + res.setCpus(2); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no container count"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(e.getMessage() + .contains(ERROR_CONTAINERS_COUNT_INVALID)); + } + + // specifying profile along with cpus/memory raises exception + res.setProfile("hbase_finance_large"); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + + "service with resource profile along with cpus/memory"); + } catch (IllegalArgumentException e) { + assertEquals(String.format(RestApiErrorMessages + .ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED, + DEFAULT_COMPONENT_NAME), + e.getMessage()); + } + + // currently resource profile alone is not supported. + // TODO: remove the next test once resource profile alone is supported. + res.setCpus(null); + res.setMemory(null); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with resource profile only"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET, + e.getMessage()); + } + + // unset profile here and add cpus/memory back + res.setProfile(null); + res.setCpus(2); + res.setMemory("2gb"); + + // null number of containers + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "null number of containers"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(e.getMessage() + .startsWith(ERROR_CONTAINERS_COUNT_INVALID)); + } + + // negative number of containers + app.setNumberOfContainers(-1L); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "negative number of containers"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(e.getMessage() + .startsWith(ERROR_CONTAINERS_COUNT_INVALID)); + } + + // everything valid here + app.setNumberOfContainers(5L); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + LOG.error("service attributes specified should be valid here", e); + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + } + + @Test + public void testArtifacts() throws IOException { + SliderFileSystem sfs = initMock(null); + + Service app = new Service(); + app.setName("name"); + Resource res = new Resource(); + app.setResource(res); + res.setMemory("512M"); + app.setNumberOfContainers(3L); + + // no artifact id fails with default type + Artifact artifact = new Artifact(); + app.setArtifact(artifact); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no artifact id"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // no artifact id fails with SERVICE type + artifact.setType(Artifact.TypeEnum.SERVICE); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no artifact id"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // no artifact id fails with TARBALL type + artifact.setType(Artifact.TypeEnum.TARBALL); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no artifact id"); + } 
catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // everything valid here + artifact.setType(Artifact.TypeEnum.DOCKER); + artifact.setId("docker.io/centos:centos7"); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + LOG.error("service attributes specified should be valid here", e); + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + // defaults assigned + assertEquals(app.getComponents().get(0).getName(), + DEFAULT_COMPONENT_NAME); + assertEquals(app.getLifetime(), DEFAULT_UNLIMITED_LIFETIME); + } + + private static Resource createValidResource() { + Resource res = new Resource(); + res.setMemory("512M"); + return res; + } + + private static Component createValidComponent(String compName) { + Component comp = new Component(); + comp.setName(compName); + comp.setResource(createValidResource()); + comp.setNumberOfContainers(1L); + return comp; + } + + private static Service createValidApplication(String compName) { + Service app = new Service(); + app.setLaunchCommand("sleep 3600"); + app.setName("name"); + app.setResource(createValidResource()); + app.setNumberOfContainers(1L); + if (compName != null) { + app.addComponent(createValidComponent(compName)); + } + return app; + } + + private static SliderFileSystem initMock(Service ext) throws IOException { + SliderFileSystem sfs = createNiceMock(SliderFileSystem.class); + FileSystem mockFs = createNiceMock(FileSystem.class); + JsonSerDeser<Service> jsonSerDeser = createNiceMock(JsonSerDeser + .class); + expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes(); + expect(sfs.buildClusterDirPath(anyObject())).andReturn( + new Path("cluster_dir_path")).anyTimes(); + if (ext != null) { + expect(jsonSerDeser.load(anyObject(), anyObject())).andReturn(ext) + .anyTimes(); + } + replay(sfs, mockFs, jsonSerDeser); + ServiceApiUtil.setJsonSerDeser(jsonSerDeser); + return sfs; + } + + @Test + public void testExternalApplication() throws IOException { + Service ext = createValidApplication("comp1"); + SliderFileSystem sfs = initMock(ext); + + Service app = createValidApplication(null); + + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.SERVICE); + artifact.setId("id"); + app.setArtifact(artifact); + + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + assertNotNull(app.getComponent("comp1")); + } + + @Test + public void testDuplicateComponents() throws IOException { + SliderFileSystem sfs = initMock(null); + + String compName = "comp1"; + Service app = createValidApplication(compName); + app.addComponent(createValidComponent(compName)); + + // duplicate component name fails + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with component collision"); + } catch (IllegalArgumentException e) { + assertEquals("Component name collision: " + compName, e.getMessage()); + } + } + + @Test + public void testExternalDuplicateComponent() throws IOException { + Service ext = createValidApplication("comp1"); + SliderFileSystem sfs = initMock(ext); + + Service app = createValidApplication("comp1"); + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.SERVICE); + artifact.setId("id"); + app.getComponent("comp1").setArtifact(artifact); + + // 
duplicate component name okay in the case of SERVICE component + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + } + + @Test + public void testExternalComponent() throws IOException { + Service ext = createValidApplication("comp1"); + SliderFileSystem sfs = initMock(ext); + + Service app = createValidApplication("comp2"); + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.SERVICE); + artifact.setId("id"); + app.setArtifact(artifact); + + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + // artifact ID not inherited from global + assertNotNull(app.getComponent("comp2")); + + // set SERVICE artifact id on component + app.getComponent("comp2").setArtifact(artifact); + + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + // original component replaced by external component + assertNotNull(app.getComponent("comp1")); + } + + public static void verifyDependencySorting(List<Component> components, + Component... expectedSorting) { + Collection<Component> actualSorting = ServiceApiUtil.sortByDependencies( + components); + assertEquals(expectedSorting.length, actualSorting.size()); + int i = 0; + for (Component component : actualSorting) { + assertEquals(expectedSorting[i++], component); + } + } + + @Test + public void testDependencySorting() throws IOException { + Component a = new Component().name("a"); + Component b = new Component().name("b"); + Component c = new Component().name("c"); + Component d = new Component().name("d").dependencies(Arrays.asList("c")); + Component e = new Component().name("e").dependencies(Arrays.asList("b", + "d")); + + verifyDependencySorting(Arrays.asList(a, b, c), a, b, c); + verifyDependencySorting(Arrays.asList(c, a, b), c, a, b); + verifyDependencySorting(Arrays.asList(a, b, c, d, e), a, b, c, d, e); + verifyDependencySorting(Arrays.asList(e, d, c, b, a), c, b, a, d, e); + + c.setDependencies(Arrays.asList("e")); + try { + verifyDependencySorting(Arrays.asList(a, b, c, d, e)); + Assert.fail(EXCEPTION_PREFIX + "components with dependency cycle"); + } catch (IllegalArgumentException ex) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_DEPENDENCY_CYCLE, Arrays.asList(c, d, + e)), ex.getMessage()); + } + + SliderFileSystem sfs = initMock(null); + Service service = createValidApplication(null); + service.setComponents(Arrays.asList(c, d, e)); + try { + ServiceApiUtil.validateAndResolveService(service, sfs, + CONF_DEFAULT_DNS); + Assert.fail(EXCEPTION_PREFIX + "components with bad dependencies"); + } catch (IllegalArgumentException ex) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, "b", "e"), ex + .getMessage()); + } + } + + @Test + public void testInvalidComponent() throws IOException { + SliderFileSystem sfs = initMock(null); + testComponent(sfs); + } + + @Test + public void testValidateCompName() { + String[] invalidNames = { + "EXAMPLE", // UPPER case not allowed + "example_app" // underscore not allowed. 
+ }; + for (String name : invalidNames) { + try { + ServiceApiUtil.validateNameFormat(name, new Configuration()); + Assert.fail(); + } catch (IllegalArgumentException ex) { + ex.printStackTrace(); + } + } + } + + private static void testComponent(SliderFileSystem sfs) + throws IOException { + int maxLen = RegistryConstants.MAX_FQDN_LABEL_LENGTH; + assertEquals(19, Long.toString(Long.MAX_VALUE).length()); + maxLen = maxLen - Long.toString(Long.MAX_VALUE).length(); + + String compName = LEN_64_STR.substring(0, maxLen + 1); + Service app = createValidApplication(null); + app.addComponent(createValidComponent(compName)); + + // invalid component name fails if dns is enabled + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with invalid component name"); + } catch (IllegalArgumentException e) { + assertEquals(String.format(RestApiErrorMessages + .ERROR_COMPONENT_NAME_INVALID, maxLen, compName), e.getMessage()); + } + + // does not fail if dns is disabled + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DEFAULT_DNS); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + compName = LEN_64_STR.substring(0, maxLen); + app = createValidApplication(null); + app.addComponent(createValidComponent(compName)); + + // does not fail + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + } +}
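The validation rules that TestServiceApiUtil exercises above reduce to a short checklist for a minimal service spec: a lower-case, DNS-safe name of at most 63 characters, a launch command (unless an artifact such as a Docker image supplies one), a resource with memory set, and a non-negative container count. A rough sketch of a spec that passes those checks, reusing the api.records classes from this patch and assuming a SliderFileSystem stub like the one built in initMock(), looks like this:

    Service service = new Service();
    service.setName("example");                 // lower case, at most 63 chars when DNS is enabled
    service.setLaunchCommand("sleep 3600");     // needed here because no artifact is set
    Resource resource = new Resource();
    resource.setMemory("512M");                 // memory is required; cpus may be left at the default
    service.setResource(resource);
    service.setNumberOfContainers(2L);          // must be a non-negative count
    ServiceApiUtil.validateAndResolveService(service, sfs, new YarnConfiguration());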
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java new file mode 100644 index 0000000..63aa9c6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java @@ -0,0 +1,472 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.curator.test.TestingCluster; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.MiniYARNCluster; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Container; +import org.apache.hadoop.yarn.service.api.records.ContainerState; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin; +import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import 
java.io.IOException; +import java.io.OutputStream; +import java.net.URL; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeSet; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM; +import static org.apache.hadoop.yarn.api.records.YarnApplicationState.FINISHED; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.*; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.AM_RESOURCE_MEM; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH; + +/** + * End-to-end tests that deploy services with a MiniYARNCluster and an in-JVM + * ZK testing cluster. + */ +public class TestYarnNativeServices extends ServiceTestUtils { + + private static final Log LOG = + LogFactory.getLog(TestYarnNativeServices.class); + + private MiniYARNCluster yarnCluster = null; + private MiniDFSCluster hdfsCluster = null; + private FileSystem fs = null; + protected Configuration conf = null; + private static final int NUM_NMS = 1; + private File basedir; + + @Rule + public TemporaryFolder tmpFolder = new TemporaryFolder(); + + @Before + public void setup() throws Exception { + setupInternal(NUM_NMS); + } + + private void setupInternal(int numNodeManager) + throws Exception { + LOG.info("Starting up YARN cluster"); +// Logger rootLogger = LogManager.getRootLogger(); +// rootLogger.setLevel(Level.DEBUG); + conf = new YarnConfiguration(); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128); + // reduce the teardown waiting time + conf.setLong(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 1000); + conf.set("yarn.log.dir", "target"); + // mark if we need to launch the v1 timeline server + // disable aux-service based timeline aggregators + conf.set(YarnConfiguration.NM_AUX_SERVICES, ""); + conf.set(YarnConfiguration.NM_VMEM_PMEM_RATIO, "8"); + // Enable ContainersMonitorImpl + conf.set(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, + LinuxResourceCalculatorPlugin.class.getName()); + conf.set(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, + ProcfsBasedProcessTree.class.getName()); + conf.setBoolean( + YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING, true); + conf.setBoolean(TIMELINE_SERVICE_ENABLED, false); + conf.setInt(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 100); + conf.setLong(DEBUG_NM_DELETE_DELAY_SEC, 60000); + conf.setLong(AM_RESOURCE_MEM, 526); + conf.setLong(YarnServiceConf.READINESS_CHECK_INTERVAL, 5); + // Disable vmem and pmem checks so the NM does not kill the containers + conf.setBoolean(NM_VMEM_CHECK_ENABLED, false); + conf.setBoolean(NM_PMEM_CHECK_ENABLED, false); + // setup zk cluster + TestingCluster zkCluster; + zkCluster = new TestingCluster(1); + zkCluster.start(); + conf.set(YarnConfiguration.RM_ZK_ADDRESS, zkCluster.getConnectString()); + conf.set(KEY_REGISTRY_ZK_QUORUM, zkCluster.getConnectString()); + LOG.info("ZK cluster: " + zkCluster.getConnectString()); + + fs = FileSystem.get(conf); + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + + conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath()); + + if (yarnCluster == null) { + yarnCluster = + new MiniYARNCluster(TestYarnNativeServices.class.getSimpleName(), 1, + numNodeManager, 1, 1); + yarnCluster.init(conf); + yarnCluster.start(); + +
waitForNMsToRegister(); + + URL url = Thread.currentThread().getContextClassLoader() + .getResource("yarn-site.xml"); + if (url == null) { + throw new RuntimeException( + "Could not find 'yarn-site.xml' dummy file in classpath"); + } + Configuration yarnClusterConfig = yarnCluster.getConfig(); + yarnClusterConfig.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, + new File(url.getPath()).getParent()); + // write the config to a buffer first (writing directly to the file could + // let a partially written file be read, which would then fail). + ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); + yarnClusterConfig.writeXml(bytesOut); + bytesOut.close(); + // write the bytes to the file in the classpath + OutputStream os = new FileOutputStream(new File(url.getPath())); + os.write(bytesOut.toByteArray()); + os.close(); + LOG.info("Write yarn-site.xml configs to: " + url); + } + if (hdfsCluster == null) { + HdfsConfiguration hdfsConfig = new HdfsConfiguration(); + hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig) + .numDataNodes(1).build(); + } + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + LOG.info("setup thread sleep interrupted. message=" + e.getMessage()); + } + + + } + + private void waitForNMsToRegister() throws Exception { + int sec = 60; + while (sec >= 0) { + if (yarnCluster.getResourceManager().getRMContext().getRMNodes().size() + >= NUM_NMS) { + break; + } + Thread.sleep(1000); + sec--; + } + } + + @After + public void tearDown() throws IOException { + if (yarnCluster != null) { + try { + yarnCluster.stop(); + } finally { + yarnCluster = null; + } + } + if (hdfsCluster != null) { + try { + hdfsCluster.shutdown(); + } finally { + hdfsCluster = null; + } + } + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + SliderFileSystem sfs = new SliderFileSystem(conf); + Path appDir = sfs.getBaseApplicationPath(); + sfs.getFileSystem().delete(appDir, true); + } + + + + // End-to-end test to use ServiceClient to deploy a service. + // 1. Create a service with 2 components, each of which has 2 containers + // 2. Flex up each component to 3 containers and check the component instance names + // 3. Flex down each component to 1 container and check the component instance names + // 4. Flex up each component to 2 containers and check the component instance names + // 5. Stop the service + // 6. Destroy the service + @Test (timeout = 200000) + public void testCreateFlexStopDestroyService() throws Exception { + ServiceClient client = createClient(); + Service exampleApp = createExampleApplication(); + client.actionCreate(exampleApp); + SliderFileSystem fileSystem = new SliderFileSystem(conf); + Path appDir = fileSystem.buildClusterDirPath(exampleApp.getName()); + // check app.json is persisted. + Assert.assertTrue( + fs.exists(new Path(appDir, exampleApp.getName() + ".json"))); + waitForAllCompToBeReady(client, exampleApp); + + // Flex two components, each from 2 containers to 3 containers. + flexComponents(client, exampleApp, 3L); + // wait for the flex to complete, increasing from 2 to 3 containers. + waitForAllCompToBeReady(client, exampleApp); + // check that each component's instance names are in sequential order. + checkCompInstancesInOrder(client, exampleApp); + + // flex down to 1 + flexComponents(client, exampleApp, 1L); + waitForAllCompToBeReady(client, exampleApp); + checkCompInstancesInOrder(client, exampleApp); + + // check component dir and registry are cleaned up.
+ + // flex up again to 2 + flexComponents(client, exampleApp, 2L); + waitForAllCompToBeReady(client, exampleApp); + checkCompInstancesInOrder(client, exampleApp); + + // stop the service + LOG.info("Stop the service"); + client.actionStop(exampleApp.getName(), true); + ApplicationReport report = client.getYarnClient() + .getApplicationReport(ApplicationId.fromString(exampleApp.getId())); + // AM unregisters with RM successfully + Assert.assertEquals(FINISHED, report.getYarnApplicationState()); + Assert.assertEquals(FinalApplicationStatus.ENDED, + report.getFinalApplicationStatus()); + + LOG.info("Destroy the service"); + // destroy the service and check that the app dir is deleted from fs. + client.actionDestroy(exampleApp.getName()); + // check that the service dir on hdfs (in this case, local fs) is deleted. + Assert.assertFalse(fs.exists(appDir)); + } + + // Create compa with 2 containers + // Create compb with 2 containers which depends on compa + // Check containers for compa started before containers for compb + @Test (timeout = 200000) + public void testComponentStartOrder() throws Exception { + ServiceClient client = createClient(); + Service exampleApp = new Service(); + exampleApp.setName("teststartorder"); + exampleApp.addComponent(createComponent("compa", 2, "sleep 1000")); + Component compb = createComponent("compb", 2, "sleep 1000"); + + // Make compb depend on compa. + compb.setDependencies(Collections.singletonList("compa")); + exampleApp.addComponent(compb); + + client.actionCreate(exampleApp); + waitForAllCompToBeReady(client, exampleApp); + + // check that containers for compa are launched before containers for compb + checkContainerLaunchDependencies(client, exampleApp, "compa", "compb"); + + client.actionStop(exampleApp.getName(), true); + client.actionDestroy(exampleApp.getName()); + } + + // Check that containers are launched in dependency order + // Get all containers into a list and sort based on container launch time e.g. + // compa-c1, compa-c2, compb-c1, compb-c2; + // check that the containers' launch times align with the dependencies. + private void checkContainerLaunchDependencies(ServiceClient client, + Service exampleApp, String... compOrder) + throws IOException, YarnException { + Service retrievedApp = client.getStatus(exampleApp.getName()); + List<Container> containerList = new ArrayList<>(); + for (Component component : retrievedApp.getComponents()) { + containerList.addAll(component.getContainers()); + } + // sort based on launchTime + containerList + .sort((o1, o2) -> o1.getLaunchTime().compareTo(o2.getLaunchTime())); + LOG.info("containerList: " + containerList); + // check the containers are in the dependency order. + int index = 0; + for (String comp : compOrder) { + long num = retrievedApp.getComponent(comp).getNumberOfContainers(); + for (int i = 0; i < num; i++) { + String compInstanceName = containerList.get(index).getComponentName(); + String compName = + compInstanceName.substring(0, compInstanceName.lastIndexOf('-')); + Assert.assertEquals(comp, compName); + index++; + } + } + } + + + private Map<String, Long> flexComponents(ServiceClient client, + Service exampleApp, long count) throws YarnException, IOException { + Map<String, Long> compCounts = new HashMap<>(); + compCounts.put("compa", count); + compCounts.put("compb", count); + // flex will update the persisted conf to reflect the latest number of containers.
+ exampleApp.getComponent("compa").setNumberOfContainers(count); + exampleApp.getComponent("compb").setNumberOfContainers(count); + client.flexByRestService(exampleApp.getName(), compCounts); + return compCounts; + } + + // Check each component's comp instances name are in sequential order. + // E.g. If there are two instances compA-1 and compA-2 + // When flex up to 4 instances, it should be compA-1 , compA-2, compA-3, compA-4 + // When flex down to 3 instances, it should be compA-1 , compA-2, compA-3. + private void checkCompInstancesInOrder(ServiceClient client, + Service exampleApp) throws IOException, YarnException { + Service service = client.getStatus(exampleApp.getName()); + for (Component comp : service.getComponents()) { + checkEachCompInstancesInOrder(comp); + } + } + + private void checkRegistryAndCompDirDeleted() { + + } + + private void checkEachCompInstancesInOrder(Component component) { + long expectedNumInstances = component.getNumberOfContainers(); + Assert.assertEquals(expectedNumInstances, component.getContainers().size()); + TreeSet<String> instances = new TreeSet<>(); + for (Container container : component.getContainers()) { + instances.add(container.getComponentName()); + } + + int i = 0; + for (String s : instances) { + Assert.assertEquals(component.getName() + "-" + i, s); + i++; + } + } + + private void waitForOneCompToBeReady(ServiceClient client, + Service exampleApp, String readyComp) + throws TimeoutException, InterruptedException { + long numExpectedContainers = + exampleApp.getComponent(readyComp).getNumberOfContainers(); + GenericTestUtils.waitFor(() -> { + try { + Service retrievedApp = client.getStatus(exampleApp.getName()); + Component retrievedComp = retrievedApp.getComponent(readyComp); + + if (retrievedComp.getContainers() != null + && retrievedComp.getContainers().size() == numExpectedContainers) { + LOG.info(readyComp + " found " + numExpectedContainers + + " containers running"); + return true; + } else { + LOG.info(" Waiting for " + readyComp + "'s containers to be running"); + return false; + } + } catch (Exception e) { + e.printStackTrace(); + return false; + } + }, 2000, 200000); + } + + // wait until all the containers for all components become ready state + private void waitForAllCompToBeReady(ServiceClient client, + Service exampleApp) throws TimeoutException, InterruptedException { + int expectedTotalContainers = countTotalContainers(exampleApp); + GenericTestUtils.waitFor(() -> { + try { + Service retrievedApp = client.getStatus(exampleApp.getName()); + int totalReadyContainers = 0; + LOG.info("Num Components " + retrievedApp.getComponents().size()); + for (Component component : retrievedApp.getComponents()) { + LOG.info("looking for " + component.getName()); + LOG.info(component); + if (component.getContainers() != null) { + if (component.getContainers().size() == exampleApp + .getComponent(component.getName()).getNumberOfContainers()) { + for (Container container : component.getContainers()) { + LOG.info( + "Container state " + container.getState() + ", component " + + component.getName()); + if (container.getState() == ContainerState.READY) { + totalReadyContainers++; + LOG.info("Found 1 ready container " + container.getId()); + } + } + } else { + LOG.info(component.getName() + " Expected number of containers " + + exampleApp.getComponent(component.getName()) + .getNumberOfContainers() + ", current = " + component + .getContainers()); + } + } + } + LOG.info("Exit loop, totalReadyContainers= " + totalReadyContainers + + " expected = " + 
expectedTotalContainers); + return totalReadyContainers == expectedTotalContainers; + } catch (Exception e) { + e.printStackTrace(); + return false; + } + }, 2000, 200000); + } + + private ServiceClient createClient() throws Exception { + ServiceClient client = new ServiceClient() { + @Override protected Path addJarResource(String appName, + Map<String, LocalResource> localResources) + throws IOException, SliderException { + // do nothing, the Unit test will use local jars + return null; + } + }; + client.init(conf); + client.start(); + return client; + } + + + private int countTotalContainers(Service service) { + int totalContainers = 0; + for (Component component : service.getComponents()) { + totalContainers += component.getNumberOfContainers(); + } + return totalContainers; + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java new file mode 100644 index 0000000..93a15cc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.client; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.conf.ExampleAppJson; +import org.apache.hadoop.yarn.service.client.params.ClientArgs; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.client.params.Arguments.ARG_APPDEF; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH; + +/** + * Test for building / resolving components of type SERVICE. + */ +public class TestBuildExternalComponents { + + protected Configuration conf = new YarnConfiguration(); + private File basedir; + + // Check component names match with expected + private static void checkComponentNames(List<Component> components, + Set<String> expectedComponents) { + Assert.assertEquals(expectedComponents.size(), components.size()); + for (Component comp : components) { + Assert.assertTrue(expectedComponents.contains(comp.getName())); + } + } + + // 1. Build the appDef and store on fs + // 2. check component names + private void buildAndCheckComponents(String appName, String appDef, + SliderFileSystem sfs, Set<String> names) throws Throwable { + String[] args = + { "build", appName, ARG_APPDEF, ExampleAppJson.resourceName(appDef) }; + ClientArgs clientArgs = new ClientArgs(args); + clientArgs.parse(); + ServiceCLI cli = new ServiceCLI() { + @Override protected void createServiceClient() { + client = new ServiceClient(); + client.init(conf); + client.start(); + } + }; + cli.exec(clientArgs); + + // verify generated conf + List<Component> components = + ServiceApiUtil.getComponents(sfs, appName); + checkComponentNames(components, names); + } + + @Before + public void setup() throws IOException { + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath()); + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + // Test applications defining external components(SERVICE type) + // can be resolved correctly + @Test + public void testExternalComponentBuild() throws Throwable { + SliderFileSystem sfs = new SliderFileSystem(conf); + + Set<String> nameSet = new HashSet<>(); + nameSet.add("simple"); + nameSet.add("master"); + nameSet.add("worker"); + + // app-1 has 3 components: simple, master, worker + buildAndCheckComponents("app-1", ExampleAppJson.APP_JSON, sfs, nameSet); + buildAndCheckComponents("external-0", ExampleAppJson.EXTERNAL_JSON_0, sfs, + nameSet); + + nameSet.add("other"); + + // external1 has 3 components: simple(SERVICE - app1), master and other + buildAndCheckComponents("external-1", ExampleAppJson.EXTERNAL_JSON_1, sfs, + nameSet); + + nameSet.add("another"); + + // external2 has 2 components: ext(SERVICE - external1), another + buildAndCheckComponents("external-2", ExampleAppJson.EXTERNAL_JSON_2, sfs, + nameSet); + } +} 
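Both TestBuildExternalComponents above and TestServiceCLI below go through ClientArgs rather than calling ServiceClient directly; the build action only persists the resolved app definition under the configured service base path. A rough sketch of that flow, with the same imports as the test above and without the test-only overrides, would be:

    SliderFileSystem sfs = new SliderFileSystem(conf);
    String[] args = {"build", "app-1", ARG_APPDEF,
        ExampleAppJson.resourceName(ExampleAppJson.APP_JSON)};
    ClientArgs clientArgs = new ClientArgs(args);
    clientArgs.parse();                             // parse the action and its options
    new ServiceCLI().exec(clientArgs);              // build and persist the app definition
    List<Component> components =
        ServiceApiUtil.getComponents(sfs, "app-1"); // read back what was persisted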
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java new file mode 100644 index 0000000..ecc529d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.client; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.ClientAMProtocol; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.client.params.ClientArgs; +import org.apache.hadoop.yarn.service.conf.ExampleAppJson; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.util.Records; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS; +import static org.apache.hadoop.yarn.service.client.params.Arguments.ARG_APPDEF; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.*; + +public class TestServiceCLI { + + protected Configuration conf = new YarnConfiguration(); + private File basedir; + private ServiceCLI cli; + private SliderFileSystem fs; + + private void buildApp(String appName, String appDef) throws Throwable { + String[] args = + { "build", appName, ARG_APPDEF, 
ExampleAppJson.resourceName(appDef) }; + ClientArgs clientArgs = new ClientArgs(args); + clientArgs.parse(); + cli.exec(clientArgs); + } + + @Before + public void setup() throws Throwable { + basedir = new File("target", "apps"); + conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath()); + conf.setLong(RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 0); + conf.setLong(RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1); + conf.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); + conf.setInt(CommonConfigurationKeysPublic. + IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 0); + fs = new SliderFileSystem(conf); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + + // create a CLI and skip connection to AM + cli = new ServiceCLI() { + @Override protected void createServiceClient() { + client = new ServiceClient() { + @Override + protected void serviceInit(Configuration configuration) + throws Exception { + super.serviceInit(conf); + yarnClient = spy(yarnClient); + ApplicationReport report = Records.newRecord(ApplicationReport.class); + report.setYarnApplicationState(YarnApplicationState.RUNNING); + report.setHost("localhost"); + doReturn(report).when(yarnClient).getApplicationReport(anyObject()); + } + @Override + protected ClientAMProtocol createAMProxy(String host, int port) + throws IOException { + return mock(ClientAMProtocol.class); + } + }; + client.init(conf); + client.start(); + } + }; + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + // Test flex components count are persisted. + @Test + public void testFlexComponents() throws Throwable { + buildApp("service-1", ExampleAppJson.APP_JSON); + + checkCompCount("master", 1L); + + // increase by 2 + String[] flexUpArgs = {"flex", "service-1", "--component", "master" , "+2"}; + ClientArgs clientArgs = new ClientArgs(flexUpArgs); + clientArgs.parse(); + cli.exec(clientArgs); + checkCompCount("master", 3L); + + // decrease by 1 + String[] flexDownArgs = {"flex", "service-1", "--component", "master", "-1"}; + clientArgs = new ClientArgs(flexDownArgs); + clientArgs.parse(); + cli.exec(clientArgs); + checkCompCount("master", 2L); + + String[] flexAbsoluteArgs = {"flex", "service-1", "--component", "master", "10"}; + clientArgs = new ClientArgs(flexAbsoluteArgs); + clientArgs.parse(); + cli.exec(clientArgs); + checkCompCount("master", 10L); + } + + private void checkCompCount(String compName, long count) throws IOException { + List<Component> components = + ServiceApiUtil.getComponents(fs, "service-1"); + for (Component component : components) { + if (component.getName().equals(compName)) { + Assert.assertEquals(count, component.getNumberOfContainers().longValue()); + return; + } + } + Assert.fail(); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java new file mode 100644 index 
0000000..5fdd2ab --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + + +import org.apache.hadoop.yarn.service.api.records.Service; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hadoop.yarn.service.ServiceTestUtils.JSON_SER_DESER; + +/** + * Names of the example configs. + */ +public final class ExampleAppJson { + + public static final String APP_JSON = "app.json"; + public static final String OVERRIDE_JSON = "app-override.json"; + public static final String DEFAULT_JSON = "default.json"; + public static final String EXTERNAL_JSON_0 = "external0.json"; + public static final String EXTERNAL_JSON_1 = "external1.json"; + public static final String EXTERNAL_JSON_2 = "external2.json"; + + public static final String PACKAGE = "/org/apache/hadoop/yarn/service/conf/examples/"; + + + private static final String[] ALL_EXAMPLES = {APP_JSON, OVERRIDE_JSON, + DEFAULT_JSON}; + + public static final List<String> ALL_EXAMPLE_RESOURCES = new ArrayList<>(); + static { + for (String example : ALL_EXAMPLES) { + ALL_EXAMPLE_RESOURCES.add(PACKAGE + example); + } + } + + private ExampleAppJson() { + } + + public static Service loadResource(String name) throws IOException { + return JSON_SER_DESER.fromResource(PACKAGE + name); + } + + public static String resourceName(String name) { + return "target/test-classes" + PACKAGE + name; + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java new file mode 100644 index 0000000..04ec526 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.api.records.Configuration; +import org.apache.hadoop.yarn.service.utils.JsonSerDeser; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.conf.ExampleAppJson.*; +import static org.easymock.EasyMock.*; + +/** + * Test global configuration resolution. + */ +public class TestAppJsonResolve extends Assert { + protected static final Logger LOG = + LoggerFactory.getLogger(TestAppJsonResolve.class); + + @Test + public void testOverride() throws Throwable { + Service orig = ExampleAppJson.loadResource(OVERRIDE_JSON); + + Configuration global = orig.getConfiguration(); + assertEquals("a", global.getProperty("g1")); + assertEquals("b", global.getProperty("g2")); + assertEquals(2, global.getFiles().size()); + + Configuration simple = orig.getComponent("simple").getConfiguration(); + assertEquals(0, simple.getProperties().size()); + assertEquals(1, simple.getFiles().size()); + + Configuration master = orig.getComponent("master").getConfiguration(); + assertEquals("m", master.getProperty("name")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals(0, master.getFiles().size()); + + Configuration worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(3, worker.getProperties().size()); + assertEquals(0, worker.getFiles().size()); + + assertEquals("worker", worker.getProperty("name")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertNull(worker.getProperty("g2")); + assertEquals("1000", worker.getProperty("timeout")); + + // here is the resolution + SliderFileSystem sfs = createNiceMock(SliderFileSystem.class); + FileSystem mockFs = createNiceMock(FileSystem.class); + expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes(); + expect(sfs.buildClusterDirPath(anyObject())).andReturn( + new Path("cluster_dir_path")).anyTimes(); + replay(sfs, mockFs); + ServiceApiUtil.validateAndResolveService(orig, sfs, new + YarnConfiguration()); + + global = orig.getConfiguration(); + LOG.info("global = {}", global); + assertEquals("a", global.getProperty("g1")); + assertEquals("b", global.getProperty("g2")); + 
assertEquals(2, global.getFiles().size()); + + simple = orig.getComponent("simple").getConfiguration(); + assertEquals(2, simple.getProperties().size()); + assertEquals("a", simple.getProperty("g1")); + assertEquals("b", simple.getProperty("g2")); + assertEquals(2, simple.getFiles().size()); + + Set<ConfigFile> files = new HashSet<>(); + Map<String, String> props = new HashMap<>(); + props.put("k1", "overridden"); + props.put("k2", "v2"); + files.add(new ConfigFile().destFile("file1").type(ConfigFile.TypeEnum + .PROPERTIES).props(props)); + files.add(new ConfigFile().destFile("file2").type(ConfigFile.TypeEnum + .XML).props(Collections.singletonMap("k3", "v3"))); + assertTrue(files.contains(simple.getFiles().get(0))); + assertTrue(files.contains(simple.getFiles().get(1))); + + master = orig.getComponent("master").getConfiguration(); + LOG.info("master = {}", master); + assertEquals(3, master.getProperties().size()); + assertEquals("m", master.getProperty("name")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals("b", master.getProperty("g2")); + assertEquals(2, master.getFiles().size()); + + props.put("k1", "v1"); + files.clear(); + files.add(new ConfigFile().destFile("file1").type(ConfigFile.TypeEnum + .PROPERTIES).props(props)); + files.add(new ConfigFile().destFile("file2").type(ConfigFile.TypeEnum + .XML).props(Collections.singletonMap("k3", "v3"))); + + assertTrue(files.contains(master.getFiles().get(0))); + assertTrue(files.contains(master.getFiles().get(1))); + + worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(4, worker.getProperties().size()); + + assertEquals("worker", worker.getProperty("name")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertEquals("b", worker.getProperty("g2")); + assertEquals("1000", worker.getProperty("timeout")); + assertEquals(2, worker.getFiles().size()); + + assertTrue(files.contains(worker.getFiles().get(0))); + assertTrue(files.contains(worker.getFiles().get(1))); + } + + @Test + public void testOverrideExternalConfiguration() throws IOException { + Service orig = ExampleAppJson.loadResource(EXTERNAL_JSON_1); + + Configuration global = orig.getConfiguration(); + assertEquals(0, global.getProperties().size()); + + assertEquals(3, orig.getComponents().size()); + + Configuration simple = orig.getComponent("simple").getConfiguration(); + assertEquals(0, simple.getProperties().size()); + + Configuration master = orig.getComponent("master").getConfiguration(); + assertEquals(1, master.getProperties().size()); + assertEquals("is-overridden", master.getProperty("g3")); + + Configuration other = orig.getComponent("other").getConfiguration(); + assertEquals(0, other.getProperties().size()); + + // load the external service + SliderFileSystem sfs = createNiceMock(SliderFileSystem.class); + FileSystem mockFs = createNiceMock(FileSystem.class); + expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes(); + expect(sfs.buildClusterDirPath(anyObject())).andReturn( + new Path("cluster_dir_path")).anyTimes(); + replay(sfs, mockFs); + Service ext = ExampleAppJson.loadResource(APP_JSON); + ServiceApiUtil.validateAndResolveService(ext, sfs, new + YarnConfiguration()); + reset(sfs, mockFs); + + // perform the resolution on original service + JsonSerDeser<Service> jsonSerDeser = createNiceMock(JsonSerDeser + .class); + expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes(); + expect(sfs.buildClusterDirPath(anyObject())).andReturn( + new 
Path("cluster_dir_path")).anyTimes(); + expect(jsonSerDeser.load(anyObject(), anyObject())).andReturn(ext) + .anyTimes(); + replay(sfs, mockFs, jsonSerDeser); + ServiceApiUtil.setJsonSerDeser(jsonSerDeser); + ServiceApiUtil.validateAndResolveService(orig, sfs, new + YarnConfiguration()); + + global = orig.getConfiguration(); + assertEquals(0, global.getProperties().size()); + + assertEquals(4, orig.getComponents().size()); + + simple = orig.getComponent("simple").getConfiguration(); + assertEquals(3, simple.getProperties().size()); + assertEquals("a", simple.getProperty("g1")); + assertEquals("b", simple.getProperty("g2")); + assertEquals("60", + simple.getProperty("yarn.service.failure-count-reset.window")); + + master = orig.getComponent("master").getConfiguration(); + assertEquals(5, master.getProperties().size()); + assertEquals("512M", master.getProperty("jvm.heapsize")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals("b", master.getProperty("g2")); + assertEquals("is-overridden", master.getProperty("g3")); + assertEquals("60", + master.getProperty("yarn.service.failure-count-reset.window")); + + Configuration worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(4, worker.getProperties().size()); + assertEquals("512M", worker.getProperty("jvm.heapsize")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertEquals("b", worker.getProperty("g2")); + assertEquals("60", + worker.getProperty("yarn.service.failure-count-reset.window")); + + other = orig.getComponent("other").getConfiguration(); + assertEquals(0, other.getProperties().size()); + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java new file mode 100644 index 0000000..83e9502 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.Arrays; +import java.util.Collection; + +import static org.apache.hadoop.yarn.service.ServiceTestUtils.JSON_SER_DESER; +import static org.easymock.EasyMock.*; + +/** + * Test loading example resources. + */ +@RunWith(value = Parameterized.class) +public class TestLoadExampleAppJson extends Assert { + private String resource; + + public TestLoadExampleAppJson(String resource) { + this.resource = resource; + } + + @Parameterized.Parameters + public static Collection<String[]> filenames() { + String[][] stringArray = new String[ExampleAppJson + .ALL_EXAMPLE_RESOURCES.size()][1]; + int i = 0; + for (String s : ExampleAppJson.ALL_EXAMPLE_RESOURCES) { + stringArray[i++][0] = s; + } + return Arrays.asList(stringArray); + } + + @Test + public void testLoadResource() throws Throwable { + try { + Service service = JSON_SER_DESER.fromResource(resource); + + SliderFileSystem sfs = createNiceMock(SliderFileSystem.class); + FileSystem mockFs = createNiceMock(FileSystem.class); + expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes(); + expect(sfs.buildClusterDirPath(anyObject())).andReturn( + new Path("cluster_dir_path")).anyTimes(); + replay(sfs, mockFs); + + ServiceApiUtil.validateAndResolveService(service, sfs, + new YarnConfiguration()); + } catch (Exception e) { + throw new Exception("exception loading " + resource + ":" + e.toString()); + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java new file mode 100644 index 0000000..6159215 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +/** + * Test cluster name validation. + */ +public class TestValidateServiceNames { + + void assertValidName(String name) { + ServiceApiUtil.validateNameFormat(name, new Configuration()); + } + + void assertInvalidName(String name) { + try { + ServiceApiUtil.validateNameFormat(name, new Configuration()); + Assert.fail(); + } catch (IllegalArgumentException e) { + // + } + } + + void assertInvalid(List<String> names) { + for (String name : names) { + assertInvalidName(name); + } + } + + void assertValid(List<String> names) { + for (String name : names) { + assertValidName(name); + } + } + + @Test + public void testEmptyName() throws Throwable { + assertInvalidName(""); + } + + @Test + public void testSpaceName() throws Throwable { + assertInvalidName(" "); + } + + + @Test + public void testLeadingHyphen() throws Throwable { + assertInvalidName("-hyphen"); + } + + @Test + public void testTitleLetters() throws Throwable { + assertInvalidName("Title"); + } + + @Test + public void testCapitalLetters() throws Throwable { + assertInvalidName("UPPER-CASE-CLUSTER"); + } + + @Test + public void testInnerBraced() throws Throwable { + assertInvalidName("a[a"); + } + + @Test + public void testLeadingBrace() throws Throwable { + assertInvalidName("["); + } + + @Test + public void testNonalphaLeadingChars() throws Throwable { + assertInvalid(Arrays.asList( + "[a", "#", "@", "=", "*", "." + )); + } + + @Test + public void testNonalphaInnerChars() throws Throwable { + assertInvalid(Arrays.asList( + "a[a", "b#", "c@", "d=", "e*", "f.", "g ", "h i" + )); + } + + @Test + public void testClusterValid() throws Throwable { + assertValidName("cluster"); + } + + @Test + public void testValidNames() throws Throwable { + assertValid(Arrays.asList( + "cluster", + "cluster1", + "very-very-very-long-cluster-name", + "c1234567890" + )); + + } + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java new file mode 100644 index 0000000..0e03a2c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.hadoop.yarn.service.monitor; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.MockServiceAM; +import org.apache.hadoop.yarn.service.ServiceTestUtils; + +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.Collections; + +public class TestServiceMonitor extends ServiceTestUtils { + + private File basedir; + YarnConfiguration conf = new YarnConfiguration(); + + @Before + public void setup() throws Exception { + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + conf.setLong(YarnServiceConf.READINESS_CHECK_INTERVAL, 2); + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + // Create compa with 1 container + // Create compb with 1 container + // Verify compb dependency satisfied + // Increase compa to 2 containers + // Verify compb dependency becomes unsatisfied. + @Test + public void testComponentDependency() throws Exception { + ApplicationId applicationId = ApplicationId.newInstance(123456, 1); + Service exampleApp = new Service(); + exampleApp.setId(applicationId.toString()); + exampleApp.setName("testComponentDependency"); + exampleApp.addComponent(createComponent("compa", 1, "sleep 1000")); + Component compb = createComponent("compb", 1, "sleep 1000"); + + // Let compb depend on compa. + compb.setDependencies(Collections.singletonList("compa")); + exampleApp.addComponent(compb); + + MockServiceAM am = new MockServiceAM(exampleApp); + am.init(conf); + am.start(); + + // compa ready + Assert.assertTrue(am.getComponent("compa").areDependenciesReady()); + // compb not ready + Assert.assertFalse(am.getComponent("compb").areDependenciesReady()); + + // feed 1 container to compa + am.feedContainerToComp(exampleApp, 1, "compa"); + // wait until compb's dependencies are satisfied + am.waitForDependenciesSatisfied("compb"); + + // feed 1 container to compb + am.feedContainerToComp(exampleApp, 2, "compb"); + am.flexComponent("compa", 2); + am.waitForNumDesiredContainers("compa", 2); + + // compb's dependencies are no longer satisfied. 
+ Assert.assertFalse(am.getComponent("compb").areDependenciesReady()); + am.stop(); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java new file mode 100644 index 0000000..5b24a1d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.providers; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.easymock.EasyMock.*; + +/** + * Test the AbstractClientProvider shared methods. 
+ */ +public class TestAbstractClientProvider { + private static final String EXCEPTION_PREFIX = "Should have thrown " + + "exception: "; + private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " + + "exception: "; + + private static class ClientProvider extends AbstractClientProvider { + @Override + public void validateArtifact(Artifact artifact, FileSystem fileSystem) + throws IOException { + } + + @Override + protected void validateConfigFile(ConfigFile configFile, + FileSystem fileSystem) throws IOException { + } + } + + @Test + public void testConfigFiles() throws IOException { + ClientProvider clientProvider = new ClientProvider(); + FileSystem mockFs = createNiceMock(FileSystem.class); + expect(mockFs.exists(anyObject(Path.class))).andReturn(true).anyTimes(); + replay(mockFs); + + ConfigFile configFile = new ConfigFile(); + List<ConfigFile> configFiles = new ArrayList<>(); + configFiles.add(configFile); + + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "null file type"); + } catch (IllegalArgumentException e) { + } + + configFile.setType(ConfigFile.TypeEnum.TEMPLATE); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "empty src_file for type template"); + } catch (IllegalArgumentException e) { + } + + configFile.setSrcFile("srcfile"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "empty dest file"); + } catch (IllegalArgumentException e) { + } + + configFile.setDestFile("destfile"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + configFile = new ConfigFile(); + configFile.setType(ConfigFile.TypeEnum.JSON); + configFile.setSrcFile(null); + configFile.setDestFile("path/destfile2"); + configFiles.add(configFile); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "dest file with multiple path elements"); + } catch (IllegalArgumentException e) { + } + + configFile.setDestFile("/path/destfile2"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + configFile.setDestFile("destfile"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "duplicate dest file"); + } catch (IllegalArgumentException e) { + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07469f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java new file mode 100644 index 0000000..56f4555 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.providers; + +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultClientProvider; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderFactory; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderService; +import org.apache.hadoop.yarn.service.provider.docker.DockerClientProvider; +import org.apache.hadoop.yarn.service.provider.docker.DockerProviderFactory; +import org.apache.hadoop.yarn.service.provider.docker.DockerProviderService; +import org.apache.hadoop.yarn.service.provider.tarball.TarballClientProvider; +import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderFactory; +import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderService; + +import org.junit.Test; + +import static org.junit.Assert.assertTrue; + +/** + * Test provider factories. 
+ */ +public class TestProviderFactory { + @Test + public void testDockerFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(new Artifact().type(TypeEnum.DOCKER)); + assertTrue(factory instanceof DockerProviderFactory); + assertTrue(factory.createClientProvider() instanceof DockerClientProvider); + assertTrue(factory.createServerProvider() instanceof DockerProviderService); + assertTrue(ProviderFactory.getProviderService(new Artifact() + .type(TypeEnum.DOCKER)) instanceof DockerProviderService); + } + + @Test + public void testTarballFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(new Artifact().type(TypeEnum.TARBALL)); + assertTrue(factory instanceof TarballProviderFactory); + assertTrue(factory.createClientProvider() instanceof TarballClientProvider); + assertTrue(factory.createServerProvider() instanceof + TarballProviderService); + assertTrue(ProviderFactory.getProviderService(new Artifact() + .type(TypeEnum.TARBALL)) instanceof TarballProviderService); + } + + @Test + public void testDefaultFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(null); + assertTrue(factory instanceof DefaultProviderFactory); + assertTrue(factory.createClientProvider() instanceof DefaultClientProvider); + assertTrue(factory.createServerProvider() instanceof DefaultProviderService); + assertTrue(ProviderFactory.getProviderService(null) instanceof + DefaultProviderService); + } + +}
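A note on the shared test wiring: the SliderFileSystem/FileSystem mock setup (createNiceMock, stubbed getFileSystem() and buildClusterDirPath(), then replay) is repeated inline in TestAppJsonResolve and TestLoadExampleAppJson above. The helper below is only an illustrative sketch and is not part of this patch; the class and method names are hypothetical, but the EasyMock calls mirror the setup those tests already use.

package org.apache.hadoop.yarn.service.conf;

import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;

/** Hypothetical helper mirroring the inline mock setup in the tests above. */
final class MockSliderFileSystems {

  private MockSliderFileSystems() {
  }

  /**
   * Returns a replayed SliderFileSystem mock whose getFileSystem() and
   * buildClusterDirPath() calls always succeed, matching what the tests
   * above expect before calling ServiceApiUtil.validateAndResolveService.
   */
  static SliderFileSystem createMockSliderFileSystem() throws IOException {
    SliderFileSystem sfs = createNiceMock(SliderFileSystem.class);
    FileSystem mockFs = createNiceMock(FileSystem.class);
    expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes();
    expect(sfs.buildClusterDirPath(anyObject())).andReturn(
        new Path("cluster_dir_path")).anyTimes();
    replay(sfs, mockFs);
    return sfs;
  }
}

With such a helper, the inline block in TestAppJsonResolve#testOverride, for example, would reduce to a single createMockSliderFileSystem() call; testOverrideExternalConfiguration would still need to reset and re-replay its mocks by hand because it also stubs a JsonSerDeser.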