[hadoop] branch trunk updated: SUBMARINE-40. Add TonY runtime to Submarine. Contributed by Keqiu Hu.
This is an automated email from the ASF dual-hosted git repository. ztang pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new e79a9c1 SUBMARINE-40. Add TonY runtime to Submarine. Contributed by Keqiu Hu. e79a9c1 is described below commit e79a9c12c14dcecc674048ed182e74d8690e663a Author: Zhankun Tang AuthorDate: Tue Apr 23 15:45:42 2019 +0800 SUBMARINE-40. Add TonY runtime to Submarine. Contributed by Keqiu Hu. --- .../yarn/submarine/client/cli/CliConstants.java| 2 + .../yarn/submarine/client/cli/RunJobCli.java | 3 + .../client/cli/param/RunJobParameters.java | 29 ++- .../src/site/markdown/QuickStart.md| 4 +- .../hadoop-submarine-tony-runtime/README.md| 25 +++ .../hadoop-submarine-tony-runtime/pom.xml | 66 +++ .../submarine/runtimes/tony/TonyJobMonitor.java| 52 ++ .../submarine/runtimes/tony/TonyJobSubmitter.java | 97 ++ .../runtimes/tony/TonyRuntimeFactory.java | 55 ++ .../yarn/submarine/runtimes/tony/TonyUtils.java| 164 + .../runtimes/tony/buider/JobStatusBuilder.java | 61 +++ .../runtimes/tony/buider/package-info.java | 14 ++ .../yarn/submarine/runtimes/tony/package-info.java | 14 ++ .../src/site/markdown/QuickStart.md| 198 + .../src/site/resources/css/site.css| 29 +++ .../src/site/site.xml | 28 +++ .../src/test/java/TestTonyUtils.java | 113 .../yarnservice/YarnServiceJobSubmitter.java | 6 +- hadoop-submarine/pom.xml | 1 + 19 files changed, 955 insertions(+), 6 deletions(-) diff --git a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java index f952aff..00190f0 100644 --- a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java +++ 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java @@ -38,6 +38,7 @@ public class CliConstants { public static final String TENSORBOARD_RESOURCES = "tensorboard_resources"; public static final String TENSORBOARD_DEFAULT_RESOURCES = "memory=4G,vcores=1"; + public static final String ARG_CONF = "conf"; public static final String WORKER_LAUNCH_CMD = "worker_launch_cmd"; public static final String SERVING_LAUNCH_CMD = "serving_launch_cmd"; @@ -57,4 +58,5 @@ public class CliConstants { public static final String PRINCIPAL = "principal"; public static final String DISTRIBUTE_KEYTAB = "distribute_keytab"; public static final String YAML_CONFIG = "f"; + public static final String INSECURE_CLUSTER = "insecure"; } diff --git a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java index f9583c6..b38bddf 100644 --- a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java +++ b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java @@ -166,6 +166,9 @@ public class RunJobCli extends AbstractCli { " parameter" + CliConstants.KEYTAB + " on cluster machines will be " + "used"); options.addOption("h", "help", false, "Print help"); +options.addOption("insecure", false, "Cluster is not Kerberos enabled."); +options.addOption("conf", true, +"User specified configuration, as key=val pairs."); return options; } diff --git a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java index 4792144..e7b1e2f 100644 --- 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java +++ b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java @@ -56,9 +56,11 @@ public class RunJobParameters extends RunParameters { private boolean waitJobFinish = false; private boolean distributed = false; + private boolean securityDisabled = false; private String keytab; private String principal; private boolean distributeKeytab = false; + private List confPairs = new ArrayList<>(); @Override public void updateParameters(ParametersHolder parametersHolder, @@ -97,6 +99,10 @@ p
[hadoop] branch trunk updated: YARN-9475. [YARN-9473] Create basic VE plugin. Contributed by Peter Bacsko.
This is an automated email from the ASF dual-hosted git repository. ztang pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 8a95ea6 YARN-9475. [YARN-9473] Create basic VE plugin. Contributed by Peter Bacsko. 8a95ea6 is described below commit 8a95ea61e12384389f2103df0fcba594469cc024 Author: Zhankun Tang AuthorDate: Tue Apr 23 17:33:58 2019 +0800 YARN-9475. [YARN-9473] Create basic VE plugin. Contributed by Peter Bacsko. --- .../resourceplugin/com/nec/NECVEPlugin.java| 306 + .../resourceplugin/com/nec/package-info.java | 19 ++ 2 files changed, 325 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/NECVEPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/NECVEPlugin.java new file mode 100644 index 000..d226237 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/NECVEPlugin.java @@ -0,0 +1,306 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.com.nec; + +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.Shell.CommandExecutor; +import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device; +import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePlugin; +import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePluginScheduler; +import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DeviceRegisterRequest; +import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DeviceRuntimeSpec; +import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.YarnRuntimeType; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * A device framework plugin which supports NEC Vector Engine. 
+ * + */ +public class NECVEPlugin implements DevicePlugin, DevicePluginScheduler { + private static final String HADOOP_COMMON_HOME = "HADOOP_COMMON_HOME"; + private static final String ENV_SCRIPT_PATH = "NEC_VE_GET_SCRIPT_PATH"; + private static final String ENV_SCRIPT_NAME = "NEC_VE_GET_SCRIPT_NAME"; + private static final String DEFAULT_SCRIPT_NAME = "nec-ve-get.py"; + private static final Logger LOG = LoggerFactory.getLogger(NECVEPlugin.class); + private static final String[] DEFAULT_BINARY_SEARCH_DIRS = new String[]{ + "/usr/bin", "/bin", "/opt/nec/ve/bin"}; + + private String binaryPath; + + private Function + commandExecutorProvider = this::createCommandExecutor; + + public NECVEPlugin() throws ResourceHandlerException { +this(System::getenv, DEFAULT_BINARY_SEARCH_DIRS); + } + + @VisibleForTesting + NECVEPlugin(Function envProvider, String[] scriptPaths) + throws ResourceHandlerException { +String binaryName = DEFAULT_SCRIPT_NAME; + +String envScriptName = envProvider.apply(ENV_SCRIPT_NAME); +if (envScriptName != null) { + binaryName = envScriptName; +} +LOG.info("Use {} as script name.", envScriptName); + +// Try to find the script based on an environment variable, if set +boolean found = false; +String envBinaryPath = envProvider.apply(ENV_SCRIPT_PATH); +if (envBinaryPath != null) { + this.binaryPath = getScriptFromEnvSetting(envBinaryPath); + fou
[hadoop] branch trunk updated: HDDS-1368. Cleanup old ReplicationManager code from SCM.
This is an automated email from the ASF dual-hosted git repository. nanda pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 7e1f8d3 HDDS-1368. Cleanup old ReplicationManager code from SCM. 7e1f8d3 is described below commit 7e1f8d3a1b7b48d3debcce1d7096ed4c46fdeb0f Author: Nanda kumar AuthorDate: Tue Apr 23 17:35:39 2019 +0530 HDDS-1368. Cleanup old ReplicationManager code from SCM. --- .../common/statemachine/StateContext.java | 32 +- .../DeleteContainerCommandHandler.java | 4 - .../ReplicateContainerCommandHandler.java | 28 +- .../scm/command/CommandStatusReportHandler.java| 43 +-- .../container/DeleteContainerCommandWatcher.java | 56 --- .../replication/ReplicationCommandWatcher.java | 56 --- .../container/replication/ReplicationManager.java | 384 - .../container/replication/ReplicationQueue.java| 73 .../container/replication/ReplicationRequest.java | 123 --- .../apache/hadoop/hdds/scm/events/SCMEvents.java | 65 +--- .../hadoop/hdds/scm/node/DeadNodeHandler.java | 186 ++ .../hadoop/hdds/scm/node/SCMNodeManager.java | 4 +- .../hdds/scm/server/StorageContainerManager.java | 2 +- .../hadoop/hdds/scm/block/TestBlockManager.java| 11 +- .../command/TestCommandStatusReportHandler.java| 5 - .../replication/TestReplicationManager.java| 290 .../replication/TestReplicationQueue.java | 134 --- .../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 63 +--- .../hadoop/hdds/scm/node/TestStatisticsUpdate.java | 4 +- .../ozone/container/common/TestEndPoint.java | 13 +- 20 files changed, 150 insertions(+), 1426 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 7e06473..56151f8 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -19,10 +19,11 @@ package org.apache.hadoop.ozone.container.common.statemachine; import com.google.common.base.Preconditions; import com.google.protobuf.GeneratedMessage; import java.util.Map; -import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.protocol.proto +.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineAction; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerAction; @@ -34,8 +35,6 @@ import org.apache.hadoop.ozone.container.common.states.datanode import org.apache.hadoop.ozone.container.common.states.datanode .RunningDatanodeState; import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus -.CommandStatusBuilder; import org.apache.hadoop.ozone.protocol.commands .DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; @@ -432,27 +431,14 @@ public class StateContext { * @param cmd - {@link SCMCommand}. 
*/ public void addCmdStatus(SCMCommand cmd) { -final Optional cmdStatusBuilder; -switch (cmd.getType()) { -case replicateContainerCommand: - cmdStatusBuilder = Optional.of(CommandStatusBuilder.newBuilder()); - break; -case deleteBlocksCommand: - cmdStatusBuilder = Optional.of( - DeleteBlockCommandStatusBuilder.newBuilder()); - break; -case deleteContainerCommand: - cmdStatusBuilder = Optional.of(CommandStatusBuilder.newBuilder()); - break; -default: - cmdStatusBuilder = Optional.empty(); +if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) { + addCmdStatus(cmd.getId(), + DeleteBlockCommandStatusBuilder.newBuilder() + .setCmdId(cmd.getId()) + .setStatus(Status.PENDING) + .setType(cmd.getType()) + .build()); } -cmdStatusBuilder.ifPresent(statusBuilder -> -addCmdStatus(cmd.getId(), statusBuilder -.setCmdId(cmd.getId()) -.setStatus(Status.PENDING) -.setType(cmd.getType()) -.build())); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/com
[hadoop] branch trunk updated: HDDS-1411. Add unit test to check if SCM correctly sends close commands for containers in closing state after a restart. (#755)
This is an automated email from the ASF dual-hosted git repository. nanda pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 59ded76 HDDS-1411. Add unit test to check if SCM correctly sends close commands for containers in closing state after a restart. (#755) 59ded76 is described below commit 59ded7641f5dfcaca6df96aba5243ead3610d005 Author: Siddharth AuthorDate: Tue Apr 23 08:34:14 2019 -0700 HDDS-1411. Add unit test to check if SCM correctly sends close commands for containers in closing state after a restart. (#755) --- .../hdds/scm/server/StorageContainerManager.java | 4 +- .../hadoop/ozone/TestStorageContainerManager.java | 127 +++-- 2 files changed, 122 insertions(+), 9 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 270d356..cbd1ac2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -400,14 +400,14 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl new SCMPipelineManager(conf, scmNodeManager, eventQueue); } -if(configurator.getContainerManager() != null) { +if (configurator.getContainerManager() != null) { containerManager = configurator.getContainerManager(); } else { containerManager = new SCMContainerManager( conf, scmNodeManager, pipelineManager, eventQueue); } -if(configurator.getScmBlockManager() != null) { +if (configurator.getScmBlockManager() != null) { scmBlockManager = configurator.getScmBlockManager(); } else { scmBlockManager = new BlockManagerImpl(conf, this); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index a0c58db..e882657 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -20,10 +20,17 @@ package org.apache.hadoop.ozone; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Collections; @@ -33,6 +40,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; + import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -46,33 +54,44 @@ import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.block.DeletedBlockLog; import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.container.ReplicationManager; import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.scm.server.StorageContainerManager.StartupOption; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.TypedEvent; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; +import org.apache.hadoop.ozone.protocol.com
[hadoop] branch trunk updated: YARN-9339. Apps pending metric incorrect after moving app to a new queue. Contributed by Abhishek Modi.
This is an automated email from the ASF dual-hosted git repository. inigoiri pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new c504eee YARN-9339. Apps pending metric incorrect after moving app to a new queue. Contributed by Abhishek Modi. c504eee is described below commit c504eee0c29276a385ff68ce456f08150aa25e80 Author: Inigo Goiri AuthorDate: Tue Apr 23 12:40:44 2019 -0700 YARN-9339. Apps pending metric incorrect after moving app to a new queue. Contributed by Abhishek Modi. --- .../scheduler/capacity/CSQueue.java| 9 +++ .../scheduler/capacity/CapacityScheduler.java | 2 +- .../scheduler/capacity/LeafQueue.java | 8 ++- .../scheduler/capacity/ParentQueue.java| 7 +++ .../scheduler/capacity/TestCapacityScheduler.java | 72 +- 5 files changed, 94 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java index 1af3250..d507e53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java @@ -177,6 +177,15 @@ public interface CSQueue extends SchedulerQueue { String userName); /** + * Submit an application attempt to the queue. 
+ * @param application application whose attempt is being submitted + * @param userName user who submitted the application attempt + * @param isMoveApp is application being moved across the queue + */ + public void submitApplicationAttempt(FiCaSchedulerApp application, + String userName, boolean isMoveApp); + + /** * An application submitted to this queue has finished. * @param applicationId * @param user user who submitted the application diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index b8fdd42..7cd2c1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -2431,7 +2431,7 @@ public class CapacityScheduler extends if (!app.isStopped()) { source.finishApplicationAttempt(app, sourceQueueName); // Submit to a new queue - dest.submitApplicationAttempt(app, user); + dest.submitApplicationAttempt(app, user, true); } // Finish app & update metrics app.move(dest); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 3403544..9d8e1e3 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -578,6 +578,12 @@ public class LeafQueue extends AbstractCSQueue { @Override public void submitApplicationAttempt(FiCaSchedulerApp application, String userName) { +submitApplicationAttempt(application, userName, false); + } + + @Override + public void submitApplicationAttempt(FiCaSchedulerApp application, + String userName, boolean isMoveApp) { // Careful! Locking order is important! writeLock.lock(); try { @@ -592,7 +598,7 @@ public class LeafQueue extends AbstractCSQueue { } // We don't want to update metrics for move app -if (application.isPending()) { +
[hadoop] branch trunk updated: YARN-9491. TestApplicationMasterServiceFair#ApplicationMasterServiceTestBase.testUpdateTrackingUrl fails intermittent. Contributed by Prabhu Joseph.
This is an automated email from the ASF dual-hosted git repository. gifuma pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 4a0ba24 YARN-9491. TestApplicationMasterServiceFair#ApplicationMasterServiceTestBase.testUpdateTrackingUrl fails intermittent. Contributed by Prabhu Joseph. 4a0ba24 is described below commit 4a0ba249595a7edd2a8ff755d2689171f62958dd Author: Giovanni Matteo Fumarola AuthorDate: Tue Apr 23 15:27:04 2019 -0700 YARN-9491. TestApplicationMasterServiceFair#ApplicationMasterServiceTestBase.testUpdateTrackingUrl fails intermittent. Contributed by Prabhu Joseph. --- .../yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java | 4 1 file changed, 4 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java index c775bd7..868b4e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java @@ -464,6 +464,10 @@ public abstract class ApplicationMasterServiceTestBase { allocateRequest.setTrackingUrl(newTrackingUrl); am1.allocate(allocateRequest); + +// wait until RMAppAttemptEventType.STATUS_UPDATE is handled +rm.drainEvents(); + Assert.assertEquals(newTrackingUrl, rm.getRMContext().getRMApps().get( app1.getApplicationId()).getOriginalTrackingUrl()); - To unsubscribe, e-mail: 
common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[hadoop] branch trunk updated: YARN-9501. TestCapacitySchedulerOvercommit#testReducePreemptAndCancel fails intermittent. Contributed by Prabhu Joseph.
This is an automated email from the ASF dual-hosted git repository. gifuma pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new fec9bf4 YARN-9501. TestCapacitySchedulerOvercommit#testReducePreemptAndCancel fails intermittent. Contributed by Prabhu Joseph. fec9bf4 is described below commit fec9bf4b0ba82f46f663ebb6310fd28c5315d0d7 Author: Giovanni Matteo Fumarola AuthorDate: Tue Apr 23 15:42:56 2019 -0700 YARN-9501. TestCapacitySchedulerOvercommit#testReducePreemptAndCancel fails intermittent. Contributed by Prabhu Joseph. --- .../resourcemanager/scheduler/TestSchedulerOvercommit.java | 14 +- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerOvercommit.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerOvercommit.java index cc665fb..758fd33 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerOvercommit.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerOvercommit.java @@ -286,6 +286,9 @@ public abstract class TestSchedulerOvercommit { updateNodeResource(rm, nmId, 2 * GB, 2, timeout); waitMemory(scheduler, nm, 4 * GB, -2 * GB, INTERVAL, timeout); +// wait until MARK_CONTAINER_FOR_PREEMPTION is handled +rm.drainEvents(); + // We should receive a notification to preempt the container PreemptionMessage preemptMsg = am.schedule().getPreemptionMessage(); assertPreemption(container.getId(), preemptMsg); @@ 
-315,13 +318,16 @@ public abstract class TestSchedulerOvercommit { Container container = createContainer(am, 2 * GB); assertMemory(scheduler, nmId, 4 * GB, 0); -// We give an overcommit time out of 2 seconds +// We give an overcommit time out of 1 seconds final int timeout = (int)TimeUnit.SECONDS.toMillis(1); // Reducing to 2GB should first preempt the container updateNodeResource(rm, nmId, 2 * GB, 2, timeout); waitMemory(scheduler, nm, 4 * GB, -2 * GB, INTERVAL, timeout); +// wait until MARK_CONTAINER_FOR_PREEMPTION is handled +rm.drainEvents(); + // We should receive a notification to preempt the container PreemptionMessage preemptMsg = am.schedule().getPreemptionMessage(); assertPreemption(container.getId(), preemptMsg); @@ -479,6 +485,9 @@ public abstract class TestSchedulerOvercommit { updateNodeResource(rm, nmId, 3 * GB, 2, 2 * 1000); waitMemory(scheduler, nmId, 5 * GB, -2 * GB, 200, 5 * 1000); +// wait until MARK_CONTAINER_FOR_PREEMPTION is handled +rm.drainEvents(); + PreemptionMessage preemptMsg = am.schedule().getPreemptionMessage(); assertPreemption(c2.getId(), preemptMsg); @@ -493,6 +502,9 @@ public abstract class TestSchedulerOvercommit { updateNodeResource(rm, nmId, 3 * GB, 2, 2 * 1000); waitMemory(scheduler, nmId, 5 * GB, -2 * GB, 200, 5 * 1000); +// wait until MARK_CONTAINER_FOR_PREEMPTION is handled +rm.drainEvents(); + preemptMsg = am.schedule().getPreemptionMessage(); assertPreemption(c2.getId(), preemptMsg); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[hadoop] branch trunk updated: YARN-9081. Update jackson from 1.9.13 to 2.x in hadoop-yarn-services-core.
This is an automated email from the ASF dual-hosted git repository. tasanuma pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 9d40062 YARN-9081. Update jackson from 1.9.13 to 2.x in hadoop-yarn-services-core. 9d40062 is described below commit 9d400627c28b0ba50b0eb0bb016d955d05cc28a4 Author: Akira Ajisaka AuthorDate: Fri Dec 7 16:39:53 2018 +0900 YARN-9081. Update jackson from 1.9.13 to 2.x in hadoop-yarn-services-core. Signed-off-by: Takanobu Asanuma --- .../hadoop-yarn-services-core/pom.xml | 10 ++- .../service/utils/ApplicationReportSerDeser.java | 12 ++-- .../hadoop/yarn/service/utils/JsonSerDeser.java| 34 ++ .../yarn/service/utils/PublishedConfiguration.java | 12 .../service/utils/SerializedApplicationReport.java | 8 ++--- .../hadoop/yarn/service/utils/ServiceApiUtil.java | 10 +++ .../hadoop/yarn/service/ServiceTestUtils.java | 4 +-- 7 files changed, 36 insertions(+), 54 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml index ff585f9..fea9b92 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml @@ -118,15 +118,9 @@ - org.codehaus.jackson - jackson-core-asl - - - - org.codehaus.jackson - jackson-mapper-asl + com.fasterxml.jackson.core + jackson-databind - com.fasterxml.jackson.core jackson-annotations diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java index 2607c08..ffaf27f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java @@ -18,11 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import org.codehaus.jackson.JsonGenerationException; -import org.codehaus.jackson.JsonParseException; -import org.codehaus.jackson.map.JsonMappingException; - -import java.io.IOException; +import com.fasterxml.jackson.core.JsonProcessingException; /** * Persistence of {@link SerializedApplicationReport} @@ -43,14 +39,12 @@ public class ApplicationReportSerDeser * object instance * @param instance object to convert * @return a JSON string description - * @throws JsonParseException parse problems - * @throws JsonMappingException O/J mapping problems + * @throws JsonProcessingException parse problems */ public static String toString(SerializedApplicationReport instance) - throws IOException, JsonGenerationException, JsonMappingException { + throws JsonProcessingException { synchronized (staticinstance) { return staticinstance.toJson(instance); } } - } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java index 2c27ea7..00b8e0c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java @@ -18,19 +18,19 @@ package org.apache.hadoop.yarn.service.utils; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.hadoop.fs.FSDataInp
[hadoop] branch HDFS-13891 updated (bd3161e -> bd215fc)
This is an automated email from the ASF dual-hosted git repository. inigoiri pushed a change to branch HDFS-13891 in repository https://gitbox.apache.org/repos/asf/hadoop.git. discard bd3161e HDFS-14422. RBF: Router shouldn't allow READ operations in safe mode. Contributed by Inigo Goiri. discard e508ab9 HDFS-14369. RBF: Fix trailing / for webhdfs. Contributed by Akira Ajisaka. discard 007b8ea HDFS-13853. RBF: RouterAdmin update cmd is overwriting the entry not updating the existing. Contributed by Ayush Saxena. discard 2577e3e HDFS-14316. RBF: Support unavailable subclusters for mount points with multiple destinations. Contributed by Inigo Goiri. discard d10765d HDFS-14388. RBF: Prevent loading metric system when disabled. Contributed by Inigo Goiri. discard 0b0c334 HDFS-14351. RBF: Optimize configuration item resolving for monitor namenode. Contributed by He Xiaoqiao and Inigo Goiri. discard ba429bc2 HDFS-14343. RBF: Fix renaming folders spread across multiple subclusters. Contributed by Ayush Saxena. discard 64ad3d6 HDFS-14334. RBF: Use human readable format for long numbers in the Router UI. Contributed by Inigo Goiri. discard 4a21db8 HDFS-14335. RBF: Fix heartbeat typos in the Router. Contributed by CR Hota. discard e6eacbd HDFS-14331. RBF: IOE While Removing Mount Entry. Contributed by Ayush Saxena. discard 5634f14 HDFS-14329. RBF: Add maintenance nodes to federation metrics. Contributed by Ayush Saxena. discard 3e01881 HDFS-14259. RBF: Fix safemode message for Router. Contributed by Ranith Sadar. discard 58c5457 HDFS-14322. RBF: Security manager should not load if security is disabled. Contributed by CR Hota. discard 9d809a2 HDFS-14052. RBF: Use Router keytab for WebHDFS. Contributed by CR Hota. discard d897cee HDFS-14307. RBF: Update tests to use internal Whitebox instead of Mockito. Contributed by CR Hota. discard f330e6f HDFS-14249. RBF: Tooling to identify the subcluster location of a file. Contributed by Inigo Goiri. discard 1761b90 HDFS-14268. 
RBF: Fix the location of the DNs in getDatanodeReport(). Contributed by Inigo Goiri. discard 8478112 HDFS-14226. RBF: Setting attributes should set on all subclusters' directories. Contributed by Ayush Saxena. discard 49d489c HDFS-13358. RBF: Support for Delegation Token (RPC). Contributed by CR Hota. discard 7bbe35e HDFS-14230. RBF: Throw RetriableException instead of IOException when no namenodes available. Contributed by Fei Hui. discard 4585b97 HDFS-14252. RBF : Exceptions are exposing the actual sub cluster path. Contributed by Ayush Saxena. discard 08a4e69 HDFS-14225. RBF : MiniRouterDFSCluster should configure the failover proxy provider for namespace. Contributed by Ranith Sardar. discard e43400c HDFS-13404. RBF: TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fails. discard 4feb3ae HDFS-14215. RBF: Remove dependency on availability of default namespace. Contributed by Ayush Saxena. discard 8dc059d HDFS-14224. RBF: NPE in getContentSummary() for getEcPolicy() in case of multiple destinations. Contributed by Ayush Saxena. discard 29fa6ae HDFS-14223. RBF: Add configuration documents for using multiple sub-clusters. Contributed by Takanobu Asanuma. discard 3864de4 HDFS-14209. RBF: setQuota() through router is working for only the mount Points under the Source column in MountTable. Contributed by Shubham Dewan. discard 4257376 HDFS-14156. RBF: rollEdit() command fails with Router. Contributed by Shubham Dewan. discard 9c22816 HDFS-14193. RBF: Inconsistency with the Default Namespace. Contributed by Ayush Saxena. discard 7c01d25 HDFS-14129. addendum to HDFS-14129. Contributed by Ranith Sardar. discard e86634a HDFS-14129. RBF: Create new policy provider for router. Contributed by Ranith Sardar. discard 692c581 HDFS-14206. RBF: Cleanup quota modules. Contributed by Inigo Goiri. discard 03d98f5 HDFS-13856. RBF: RouterAdmin should support dfsrouteradmin -refreshRouterArgs command. Contributed by yanghuafeng. discard 64689a2 HDFS-14191. 
RBF: Remove hard coded router status from FederationMetrics. Contributed by Ranith Sardar. discard 2a7998b HDFS-14150. RBF: Quotas of the sub-cluster should be removed when removing the mount point. Contributed by Takanobu Asanuma. discard 1372cc8 HDFS-14161. RBF: Throw StandbyException instead of IOException so that client can retry when can not get connection. Contributed by Fei Hui. discard 2b48aa0 HDFS-14167. RBF: Add stale nodes to federation metrics. Contributed by Inigo Goiri. discard e3ab7a4 HDFS-13443. RBF: Update mount table cache immediately after changing (add/update/remove) mount table entries. Contributed by Mohammad Arshad. discard aa6cff0 HDFS-14151. RBF: Make the read-only column of Mount Table clearly understandable. discard c6eb9ee HDFS-13869. RBF: Handle NPE for NamenodeBeanMetrics#getFederationMetrics. Contributed by Ranith Sardar. discard 1555e5c HDFS-14152. RBF: Fi
[hadoop] branch trunk updated: YARN-9424. Change getDeclaredMethods to getMethods in FederationClientInterceptor#invokeConcurrent. Contributed by Shen Yinjie.
This is an automated email from the ASF dual-hosted git repository. gifuma pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 3f2f418 YARN-9424. Change getDeclaredMethods to getMethods in FederationClientInterceptor#invokeConcurrent. Contributed by Shen Yinjie. 3f2f418 is described below commit 3f2f4186f6543272cf4bb7c815f1f46ba50e7640 Author: Giovanni Matteo Fumarola AuthorDate: Tue Apr 23 19:58:41 2019 -0700 YARN-9424. Change getDeclaredMethods to getMethods in FederationClientInterceptor#invokeConcurrent. Contributed by Shen Yinjie. --- .../hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java index 03dde04..3422fd1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java @@ -622,7 +622,7 @@ public class FederationClientInterceptor ApplicationClientProtocol protocol = getClientRMProxyForSubCluster(subClusterId); Method method = ApplicationClientProtocol.class - .getDeclaredMethod(request.getMethodName(), request.getTypes()); + .getMethod(request.getMethodName(), request.getTypes()); return method.invoke(protocol, request.getParams()); } }); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, 
e-mail: common-commits-help@hadoop.apache.org
[hadoop] branch trunk updated: HDDS-1450. Fix nightly run failures after HDDS-976. Contributed by Xiaoyu Yao. (#757)
This is an automated email from the ASF dual-hosted git repository. ajay pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 64f30da HDDS-1450. Fix nightly run failures after HDDS-976. Contributed by Xiaoyu Yao. (#757) 64f30da is described below commit 64f30da42813182e9cf69ec306c1f1c0c633ece0 Author: Xiaoyu Yao AuthorDate: Tue Apr 23 21:38:23 2019 -0700 HDDS-1450. Fix nightly run failures after HDDS-976. Contributed by Xiaoyu Yao. (#757) --- .../src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java| 2 -- .../java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java | 7 +++ hadoop-hdds/common/src/main/resources/ozone-default.xml| 6 -- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 2c267fb..b097321 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -368,8 +368,6 @@ public final class ScmConfigKeys { "hdds.scm.http.kerberos.keytab"; // Network topology - public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_TYPE = - "ozone.scm.network.topology.schema.file.type"; public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE = "ozone.scm.network.topology.schema.file"; public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java index 8e5d935..9a598c6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.net; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.commons.io.FilenameUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.net.NodeSchemaLoader.NodeSchemaLoadResult; import org.slf4j.Logger; @@ -59,16 +60,14 @@ public final class NodeSchemaManager { /** * Load schemas from network topology schema configuration file */ -String schemaFileType = conf.get( -ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_TYPE); - String schemaFile = conf.get( ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT); NodeSchemaLoadResult result; try { - if (schemaFileType.toLowerCase().compareTo("yaml") == 0) { + if (FilenameUtils.getExtension(schemaFile).toLowerCase() + .compareTo("yaml") == 0) { result = NodeSchemaLoader.getInstance().loadSchemaFromYaml(schemaFile); } else { result = NodeSchemaLoader.getInstance().loadSchemaFromXml(schemaFile); diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index f40040c..162c93f 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2288,10 +2288,12 @@ ozone.scm.network.topology.schema.file -network-topology-default.xm +network-topology-default.xml OZONE, MANAGEMENT - The schema file defines the ozone network topology + The schema file defines the ozone network topology. We currently support + xml(default) and yaml format. Refer to the samples in the topology + awareness document for xml and yaml topology definition samples. - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org