http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto
deleted file mode 100644
index 7306b81..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ /dev/null
@@ -1,367 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.ozone.protocol.proto";
-
-option java_outer_classname = "StorageContainerDatanodeProtocolProtos";
-
-option java_generic_services = true;
-
-option java_generate_equals_and_hash = true;
-
-package hadoop.hdfs;
-
-import "hdfs.proto";
-
-import "HdfsServer.proto";
-
-import "DatanodeProtocol.proto";
-
-import "Ozone.proto";
-
-
-/**
-* This message is sent by a data node to indicate that it is alive or it is
-* registering with the node manager.
-*/
-message SCMHeartbeatRequestProto {
-  required DatanodeIDProto datanodeID = 1;
-  optional SCMNodeReport nodeReport = 2;
-  optional ReportState containerReportState = 3;
-}
-
-enum DatanodeContainerState {
-  closed = 0;
-  open = 1;
-}
-
-/**
-NodeState contains messages from datanode to SCM saying that it has
-some information that SCM might be interested in.*/
-message ReportState {
-  enum states {
-    noContainerReports = 0;
-    completeContinerReport = 1;
-    deltaContainerReport = 2;
-  }
-  required states state = 1;
-  required int64 count = 2 [default = 0];
-}
-
-
-/**
-This message is used to persist the information about a container in the
-SCM database, This information allows SCM to startup faster and avoid having
-all container info in memory all the time.
-  */
-message ContainerPersistanceProto {
-  required DatanodeContainerState state = 1;
-  required hadoop.hdfs.ozone.Pipeline pipeline = 2;
-  required ContainerInfo info = 3;
-}
-
-/**
-This message is used to do a quick lookup of which containers are affected
-if a node goes down.
-*/
-message NodeContianerMapping {
-  repeated string contianerName = 1;
-}
-
-/**
-A container report contains the following information.
-*/
-message ContainerInfo {
-  required string containerName = 1;
-  optional string finalhash = 2;
-  optional int64 size = 3;
-  optional int64 used = 4;
-  optional int64 keyCount = 5;
-  // TODO: move the io count to separate message
-  optional int64 readCount = 6;
-  optional int64 writeCount = 7;
-  optional int64 readBytes = 8;
-  optional int64 writeBytes = 9;
-  required int64 containerID = 10;
-  optional hadoop.hdfs.ozone.LifeCycleState state = 11;
-}
-
-// The deleted blocks which are stored in deletedBlock.db of scm.
-message DeletedBlocksTransaction {
-  required int64 txID = 1;
-  required string containerName = 2;
-  repeated string blockID = 3;
-  // the retry time of sending deleting command to datanode.
-  required int32 count = 4;
-}
-
-/**
-A set of container reports, max count is generally set to
-8192 since that keeps the size of the reports under 1 MB.
-*/
-message ContainerReportsRequestProto {
-  enum reportType {
-    fullReport = 0;
-    deltaReport = 1;
-  }
-  required DatanodeIDProto datanodeID = 1;
-  repeated ContainerInfo reports = 2;
-  required reportType type = 3;
-}
-
-message ContainerReportsResponseProto {
-}
-
-/**
-* This message is sent along with the heartbeat to report datanode
-* storage utilization to SCM.
-*/
-message SCMNodeReport {
-  repeated SCMStorageReport storageReport = 1;
-}
-
-message SCMStorageReport {
-  required string storageUuid = 1;
-  optional uint64 capacity = 2 [default = 0];
-  optional uint64 scmUsed = 3 [default = 0];
-  optional uint64 remaining = 4 [default = 0];
-  optional StorageTypeProto storageType = 5 [default = DISK];
-}
-
-message SCMRegisterRequestProto {
-  required DatanodeIDProto datanodeID = 1;
-  optional SCMNodeAddressList addressList = 2;
-}
-
-/**
- * Request for version info of the software stack on the server.
- */
-message SCMVersionRequestProto {
-
-}
-
-/**
-* Generic response that is sent to a version request. This allows keys to be
-* added on the fly and protocol to remain stable.
-*/
-message SCMVersionResponseProto {
-  required uint32 softwareVersion = 1;
-  repeated hadoop.hdfs.ozone.KeyValue keys = 2;
-}
-
-message SCMNodeAddressList {
-  repeated string addressList = 1;
-}
-
-/**
- * Datanode ID returned by the SCM. This is similar to name node
- * registration of a datanode.
- */
-message SCMRegisteredCmdResponseProto {
-  enum ErrorCode {
-    success = 1;
-    errorNodeNotPermitted = 2;
-  }
-  required ErrorCode errorCode = 2;
-  optional string datanodeUUID = 3;
-  optional string clusterID = 4;
-  optional SCMNodeAddressList addressList = 5;
-}
-
-/**
- * SCM informs a datanode to register itself again.
- * Upon receiving this command, the datanode will transition to REGISTER state.
- */
-message SCMReregisterCmdResponseProto {}
-
-/**
- * Container ID maintains the container's Identity along with cluster ID
- * after the registration is done.
- */
-message ContainerNodeIDProto {
-  required DatanodeIDProto datanodeID = 1;
-  optional string clusterID = 2;
-}
-
-
-
-/**
-This command tells the data node to send in the container report when possible
-*/
-message SendContainerReportProto {
-}
-
-/**
-This command asks the datanode to close a specific container.
-*/
-message SCMCloseContainerCmdResponseProto {
-  required string containerName = 1;
-}
-
-/**
-Type of commands supported by SCM to datanode protocol.
-*/
-enum Type {
-  versionCommand = 2;
-  registeredCommand = 3;
-  sendContainerReport = 4;
-  reregisterCommand = 5;
-  deleteBlocksCommand = 6;
-  closeContainerCommand = 7;
-}
-
-/*
- * These are commands returned by SCM to the datanode to execute.
- */
-message SCMCommandResponseProto {
-  required Type cmdType = 2; // Type of the command
-  optional SCMRegisteredCmdResponseProto registeredProto = 3;
-  optional SCMVersionResponseProto versionProto = 4;
-  optional SendContainerReportProto sendReport = 5;
-  optional SCMReregisterCmdResponseProto reregisterProto = 6;
-  optional SCMDeleteBlocksCmdResponseProto deleteBlocksProto = 7;
-  required string datanodeUUID = 8;
-  optional SCMCloseContainerCmdResponseProto closeContainerProto = 9;
-}
-
-
-/*
- * A group of commands for the datanode to execute
- */
-message SCMHeartbeatResponseProto {
-  repeated SCMCommandResponseProto commands = 1;
-}
-
-// HB response from SCM, contains a list of block deletion transactions.
-message SCMDeleteBlocksCmdResponseProto {
-  repeated DeletedBlocksTransaction deletedBlocksTransactions = 1;
-}
-
-// SendACK response returned by datanode to SCM, currently empty.
-message ContainerBlocksDeletionACKResponseProto {
-}
-
-// ACK message datanode sent to SCM, contains the result of
-// block deletion transactions.
-message ContainerBlocksDeletionACKProto {
-  message DeleteBlockTransactionResult {
-    required int64 txID = 1;
-    required bool success = 2;
-  }
-  repeated DeleteBlockTransactionResult results = 1;
-}
-
-/**
- * Protocol used from a datanode to StorageContainerManager.
- *
- * Please see the request and response messages for details of the RPC calls.
- *
- * Here is a simple state diagram that shows how a datanode would boot up and
- * communicate with SCM.
- *
- *           -----------------------
- *          |         Start         |
- *           ---------- ------------
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *           ----------v-------------
- *          |   Searching for  SCM    ------------
- *           ---------- -------------             |
- *                     |                          |
- *                     |                          |
- *                     |                ----------v-------------
- *                     |               | Register if needed     |
- *                     |                ----------- ------------
- *                     |                           |
- *                     v                           |
- *            ----------- ----------------         |
- *  ---------   Heartbeat state           <--------
- * |          --------^-------------------
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- *  ------------------
- *
- *
- *
- * Here is how this protocol is used by the datanode. When a datanode boots up
- * it moves into a state called SEARCHING_SCM. In this state the datanode is
- * trying to establish communication with the SCM. The address of the SCMs are
- * retrieved from the configuration information.
- *
- * In the SEARCHING_SCM state, the only RPC call made by the datanode is a getVersion
- * call to SCM. Once any of the SCMs reply, datanode checks if it has a local
- * persisted datanode ID. If it has this means that this datanode is already
- * registered with some SCM. If this file is not found, datanode assumes that
- * it needs to do a registration.
- *
- * If registration is needed, the datanode moves into REGISTER state. It will
- * send a register call with the datanodeID data structure and persist that info.
- *
- * The response to the command contains clusterID. This information is
- * also persisted by the datanode and moves into heartbeat state.
- *
- * Once in the heartbeat state, datanode sends heartbeats and container reports
- * to SCM and process commands issued by SCM until it is shutdown.
- *
- */
-service StorageContainerDatanodeProtocolService {
-
-  /**
-  * Gets the version information from the SCM.
-  */
-  rpc getVersion (SCMVersionRequestProto) returns (SCMVersionResponseProto);
-
-  /**
-  * Registers a data node with SCM.
-  */
-  rpc register (SCMRegisterRequestProto) returns 
(SCMRegisteredCmdResponseProto);
-
-  /**
-   * Send heartbeat from datanode to SCM. HBs under SCM look more
-   * like a lifeline protocol than HBs under HDFS. In other words, it is
-   * extremely light weight and contains no data payload.
-   */
-  rpc sendHeartbeat (SCMHeartbeatRequestProto) returns 
(SCMHeartbeatResponseProto);
-
-  /**
-    send container reports sends the container report to SCM. This will
-    return a null command as response.
-  */
-  rpc sendContainerReport(ContainerReportsRequestProto) returns 
(ContainerReportsResponseProto);
-
-  /**
-   * Sends the block deletion ACK to SCM.
-   */
-  rpc sendContainerBlocksDeletionACK (ContainerBlocksDeletionACKProto) returns 
(ContainerBlocksDeletionACKResponseProto);
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
deleted file mode 100644
index 2e103fe..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainerProvider

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
deleted file mode 100644
index 97eaeae..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
+++ /dev/null
@@ -1,1308 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into ozone-site.xml and change them -->
-<!-- there.  If ozone-site.xml does not already exist, create it.      -->
-
-<!--Tags supported are OZONE, CBLOCK, MANAGEMENT, SECURITY, PERFORMANCE,   -->
-<!--DEBUG, CLIENT, SERVER, KSM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
-<!--REST, STORAGE, PIPELINE, STANDALONE                                    -->
-
-<configuration>
-  <!--CBlock Settings-->
-  <property>
-    <name>dfs.cblock.block.buffer.flush.interval</name>
-    <value>60s</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      Controls the frequency at this the local cache flushes the
-      blocks to the remote containers.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.cache.block.buffer.size</name>
-    <value>512</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      Size of the local cache for blocks. So cache size will be block
-      size multiplied by this number.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.cache.core.min.pool.size</name>
-    <value>16</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      A minimum number of threads in the pool that cBlock cache will
-      use for the background I/O to remote containers.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.cache.max.pool.size</name>
-    <value>256</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      Maximum number of threads in the pool that cBlock cache will
-      use for background I/O to remote containers.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.cache.keep.alive</name>
-    <value>60s</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      If the cblock cache has no I/O, then the threads in the cache
-      pool are kept idle for this amount of time before shutting down.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.cache.leveldb.cache.size.mb</name>
-    <value>256</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      The amount of physical memory allocated to the local cache. The
-      SCSI driver will allocate this much RAM cache instances.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.cache.max.retry</name>
-    <value>65536</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      If the local cache is enabled then, CBlock writes to the local
-      cache when I/O happens. Then the background I/O threads write this
-      block to the remote containers. This value controls how many times the
-      background thread should attempt to do I/O to the remote containers
-      before giving up.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.cache.queue.size.in.kb</name>
-    <value>256</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      Size of the in memory cache queue, that is flushed to local
-      disk.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.cache.thread.priority</name>
-    <value>5</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      Priority of cache flusher thread, affecting the relative performance of
-      write and read. Supported values are 1, 5, 10.
-      Use 10 for high priority and 1 for low priority.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.container.size.gb</name>
-    <value>5</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      The size of ozone container in the number of GBs. Note that
-      this is not setting container size for ozone. This setting is
-      instructing CBlock to manage containers at a standard size.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.disk.cache.path</name>
-    <value>${hadoop.tmp.dir}/cblockCacheDB</value>
-    <tag>CBLOCK, REQUIRED</tag>
-    <description>
-      The default path for the cblock local cache. If the cblock
-      local cache is enabled, then it must be set to a valid path. This cache
-      *should* be mapped to the fastest disk on a given machine, For example,
-      an SSD drive would be a good idea. Currently, all mounted disk on a
-      data node is mapped to a single path, so having a large number of IOPS
-      is essential.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.jscsi-address</name>
-    <value/>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      The address that cblock will bind to, should be a host:port
-      format, This setting is required for cblock server to start.
-      This address to be used by jscsi to mount volume.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.jscsi.cblock.server.address</name>
-    <value>127.0.0.1</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      The address local jscsi server will use to talk to cblock manager.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.jscsi.port</name>
-    <value>9811</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      The port on CBlockManager node for jSCSI to talk to.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.jscsi.rpc-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      The actual address the cblock jscsi rpc server will bind to. If
-      this optional address is set, it overrides only the hostname portion of
-      dfs.cblock.jscsi-address.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.jscsi.server.address</name>
-    <value>0.0.0.0</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      The address that jscsi server will be running, it is nice to have one
-      local jscsi server for each client(Linux JSCSI client) that tries to
-      mount cblock.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.manager.pool.size</name>
-    <value>16</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      Number of active threads that cblock manager will use for container
-      operations. The maximum number of the threads are limited to the
-      processor count * 2.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.rpc.timeout</name>
-    <value>300s</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      RPC timeout used for cblock CLI operations. When you
-      create very large disks, like 5TB, etc. The number of containers
-      allocated in the system is huge. It is will 5TB/5GB, which is 1000
-      containers. The client CLI might timeout even though the cblock manager
-      creates the specified disk. This value allows the user to wait for a
-      longer period.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.scm.ipaddress</name>
-    <value>127.0.0.1</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      IP address used by cblock to connect to SCM.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.scm.port</name>
-    <value>9860</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      Port used by cblock to connect to SCM.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.service.handler.count</name>
-    <value>10</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      Default number of handlers for CBlock service rpc.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.service.leveldb.path</name>
-    <value>${hadoop.tmp.dir}/cblock_server.dat</value>
-    <tag>CBLOCK, REQUIRED</tag>
-    <description>
-      Default path for the cblock meta data store.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.service.rpc-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>CBLOCK, MANAGEMENT</tag>
-    <description>
-      The actual address the cblock service RPC server will bind to.
-      If the optional address is set, it overrides only the hostname portion of
-      dfs.cblock.servicerpc-address.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.servicerpc-address</name>
-    <value/>
-    <tag>CBLOCK, MANAGEMENT, REQUIRED</tag>
-    <description>
-      The address that cblock will bind to, should be a host:port
-      format, this setting is required for cblock server to start.
-      This address is used for cblock management operations like create, 
delete,
-      info and list volumes
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.short.circuit.io</name>
-    <value>false</value>
-    <tag>CBLOCK, PERFORMANCE</tag>
-    <description>
-      Enables use of the local cache in cblock. Enabling this allows
-      I/O against the local cache and background threads do actual I/O against
-      the
-      containers.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cblock.trace.io</name>
-    <value>false</value>
-    <tag>CBLOCK, DEBUG</tag>
-    <description>Default flag for enabling trace io, Trace I/O logs all I/O 
with
-      hashes of
-      data. This is useful for detecting things like data corruption.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cblock.iscsi.advertised.ip</name>
-    <value>0.0.0.0</value>
-    <tag>CBLOCK</tag>
-    <description>
-      IP address returned during the iscsi discovery.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cblock.iscsi.advertised.port</name>
-    <value>3260</value>
-    <tag>CBLOCK</tag>
-    <description>
-      TCP port returned during the iscsi discovery.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cblock.kubernetes.dynamic-provisioner.enabled</name>
-    <value>false</value>
-    <tag>CBLOCK, KUBERNETES</tag>
-    <description>Flag to enable automatic creation of cblocks and
-      kubernetes PersistentVolumes in a kubernetes environment.</description>
-  </property>
-
-  <property>
-    <name>dfs.cblock.kubernetes.cblock-user</name>
-    <value>iqn.2001-04.org.apache.hadoop</value>
-    <tag>CBLOCK, KUBERNETES</tag>
-    <description>CBlock user to use for the dynamic provisioner.
-      This user will own all of the auto-created cblocks.</description>
-  </property>
-
-  <property>
-    <name>dfs.cblock.kubernetes.configfile</name>
-    <value></value>
-    <tag>CBLOCK, KUBERNETES</tag>
-    <description>Location of the kubernetes configuration file
-      to access the kubernetes cluster. Not required inside a pod
-      as the default service account will be if this value is
-      empty.</description>
-  </property>
-
-  <property>
-    <name>dfs.cblock.iscsi.advertised.ip</name>
-    <value></value>
-    <tag>CBLOCK, KUBERNETES</tag>
-    <description>IP where the cblock target server is available
-      from the kubernetes nodes. Usually it's a cluster ip address
-    which is defined by a deployed Service.</description>
-  </property>
-
-  <property>
-    <name>dfs.cblock.iscsi.advertised.port</name>
-    <value>3260</value>
-    <tag>CBLOCK, KUBERNETES</tag>
-    <description>Port where the cblock target server is available
-      from the kubernetes nodes. Could be different from the
-      listening port if jscsi is behind a Service.</description>
-  </property>
-
-  <!--Container Settings used by Datanode-->
-  <property>
-    <name>ozone.container.cache.size</name>
-    <value>1024</value>
-    <tag>PERFORMANCE, CONTAINER, STORAGE</tag>
-    <description>The open container is cached on the data node side. We 
maintain
-      an LRU
-      cache for caching the recently used containers. This setting controls the
-      size of that cache.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ipc</name>
-    <value>9859</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>The ipc port number of container.</description>
-  </property>
-  <property>
-    <name>dfs.container.ipc.random.port</name>
-    <value>false</value>
-    <tag>OZONE, DEBUG, CONTAINER</tag>
-    <description>Allocates a random free port for ozone container. This is used
-      only while
-      running unit tests.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.datanode.storage.dir</name>
-    <value/>
-    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS</tag>
-    <description>This directory is used for storing Ratis metadata like logs. 
If
-      this is
-      not set then default metadata dirs is used. A warning will be logged if
-      this not set. Ideally, this should be mapped to a fast disk like an SSD.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.enabled</name>
-    <value>false</value>
-    <tag>OZONE, MANAGEMENT, PIPELINE, RATIS</tag>
-    <description>Ozone supports different kinds of replication pipelines. Ratis
-      is one of
-      the replication pipeline supported by ozone.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.ipc</name>
-    <value>9858</value>
-    <tag>OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT</tag>
-    <description>The ipc port number of container.</description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.ipc.random.port</name>
-    <value>false</value>
-    <tag>OZONE,DEBUG</tag>
-    <description>Allocates a random free port for ozone ratis port for the
-      container. This
-      is used only while running unit tests.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.rpc.type</name>
-    <value>GRPC</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Ratis supports different kinds of transports like netty, GRPC,
-      Hadoop RPC
-      etc. This picks one of those for this cluster.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.num.write.chunk.threads</name>
-    <value>60</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>Maximum number of threads in the thread pool that Ratis
-      will use for writing chunks (60 by default).
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.segment.size</name>
-    <value>1073741824</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>The size of the raft segment used by Apache Ratis on 
datanodes.
-      (1 GB by default)
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.segment.preallocated.size</name>
-    <value>134217728</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>The size of the buffer which is preallocated for raft segment
-      used by Apache Ratis on datanodes.(128 MB by default)
-    </description>
-  </property>
-  <property>
-    <name>ozone.container.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send container report. Each
-      datanode periodically send container report upon receive
-      sendContainerReport from SCM. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <!--Ozone Settings-->
-  <property>
-    <name>ozone.administrators</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>Ozone administrator users delimited by the comma.
-      If not set, only the user who launches an ozone service will be the admin
-      user. This property must be set if ozone services are started by 
different
-      users. Otherwise, the RPC layer will reject calls from other servers 
which
-      are started by users not in the list.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.container.limit.per.interval</name>
-    <value>10</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>A maximum number of containers to be scanned by block deleting
-      service per
-      time interval. The block deleting service spawns a thread to handle block
-      deletions in a container. This property is used to throttle the number of
-      threads spawned for block deletions.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.limit.per.task</name>
-    <value>1000</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>A maximum number of blocks to be deleted by block deleting
-      service per
-      time interval. This property is used to throttle the actual number of
-      block deletions on a data node per container.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.service.interval</name>
-    <value>1m</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>Time interval of the block deleting service.
-      The block deleting service runs on each datanode periodically and
-      deletes blocks queued for deletion. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.service.timeout</name>
-    <value>300000ms</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>A timeout value of block deletion service. If this is set
-      greater than 0,
-      the service will stop waiting for the block deleting completion after 
this
-      time. If timeout happens to a large proportion of block deletion, this
-      needs to be increased with ozone.block.deleting.limit.per.task. This
-      setting supports multiple time unit suffixes as described in
-      dfs.heartbeat.interval. If no suffix is specified, then milliseconds is
-      assumed.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.connection.timeout</name>
-    <value>5000ms</value>
-    <tag>OZONE, PERFORMANCE, CLIENT</tag>
-    <description>Connection timeout for Ozone client in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.protocol</name>
-    <value>org.apache.hadoop.ozone.client.rpc.RpcClient</value>
-    <tag>OZONE, CLIENT, MANAGEMENT</tag>
-    <description>Protocol class to be used by the client to connect to ozone
-      cluster.
-      The build-in implementation includes:
-      org.apache.hadoop.ozone.client.rpc.RpcClient for RPC
-      org.apache.hadoop.ozone.client.rest.RestClient for REST
-      The default is the RpcClient. Please do not change this unless you have a
-      very good understanding of what you are doing.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.socket.timeout</name>
-    <value>5000ms</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Socket timeout for Ozone client. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>ozone.enabled</name>
-    <value>false</value>
-    <tag>OZONE, REQUIRED</tag>
-    <description>
-      Status of the Ozone Object Storage service is enabled.
-      Set to true to enable Ozone.
-      Set to false to disable Ozone.
-      Unless this value is set to true, Ozone services will not be started in
-      the cluster.
-
-      Please note: By default ozone is disabled on a hadoop cluster.
-    </description>
-  </property>
-  <property>
-    <name>ozone.handler.type</name>
-    <value>distributed</value>
-    <tag>OZONE, REST</tag>
-    <description>
-      Tells ozone which storage handler to use. The possible values are:
-      distributed - The Ozone distributed storage handler, which speaks to
-      KSM/SCM on the backend and provides REST services to clients.
-      local - Local Storage handler strictly for testing - To be removed.
-    </description>
-  </property>
-  <property>
-    <name>ozone.key.deleting.limit.per.task</name>
-    <value>1000</value>
-    <tag>KSM, PERFORMANCE</tag>
-    <description>
-      A maximum number of keys to be scanned by key deleting service
-      per time interval in KSM. Those keys are sent to delete metadata and
-      generate transactions in SCM for next async deletion between SCM
-      and DataNode.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.address</name>
-    <value/>
-    <tag>KSM, REQUIRED</tag>
-    <description>
-      The address of the Ozone KSM service. This allows clients to discover
-      the KSM's address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.group.rights</name>
-    <value>READ_WRITE</value>
-    <tag>KSM, SECURITY</tag>
-    <description>
-      Default group permissions in Ozone KSM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.handler.count.key</name>
-    <value>20</value>
-    <tag>KSM, PERFORMANCE</tag>
-    <description>
-      The number of RPC handler threads for KSM service endpoints.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.http-address</name>
-    <value>0.0.0.0:9874</value>
-    <tag>KSM, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the KSM web UI will listen on.
-
-      If the port is 0, then the server will start on a free port. However, it
-      is best to specify a well-known port, so it is easy to connect and see
-      the KSM management UI.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>KSM, MANAGEMENT</tag>
-    <description>
-      The actual address the KSM web server will bind to. If this optional
-      the address is set, it overrides only the hostname portion of
-      ozone.ksm.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.http.enabled</name>
-    <value>true</value>
-    <tag>KSM, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable KSM web user interface.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.https-address</name>
-    <value>0.0.0.0:9875</value>
-    <tag>KSM, MANAGEMENT, SECURITY</tag>
-    <description>
-      The address and the base port where the KSM web UI will listen
-      on using HTTPS.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>KSM, MANAGEMENT, SECURITY</tag>
-    <description>
-      The actual address the KSM web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion 
of
-      ozone.ksm.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.keytab.file</name>
-    <value/>
-    <tag>KSM, SECURITY</tag>
-    <description>
-      The keytab file for Kerberos authentication in KSM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.db.cache.size.mb</name>
-    <value>128</value>
-    <tag>KSM, PERFORMANCE</tag>
-    <description>
-      The size of KSM DB cache in MB that is used for caching files.
-      This value is set to an abnormally low value in the default 
configuration.
-      That is to make unit testing easy. Generally, this value should be set to
-      something like 16GB or more, if you intend to use Ozone at scale.
-
-      A large value for this key allows a proportionally larger amount of KSM
-      metadata to be cached in memory. This makes KSM operations faster.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.user.max.volume</name>
-    <value>1024</value>
-    <tag>KSM, MANAGEMENT</tag>
-    <description>
-      The maximum number of volumes a user can have on a cluster. Increasing or
-      decreasing this number has no real impact on ozone cluster. This is
-      defined only for operational purposes. Only an administrator can create a
-      volume, once a volume is created there are no restrictions on the number
-      of buckets or keys inside each bucket a user can create.
-    </description>
-  </property>
-  <property>
-    <name>ozone.ksm.user.rights</name>
-    <value>READ_WRITE</value>
-    <tag>KSM, SECURITY</tag>
-    <description>
-      Default user permissions used in KSM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.localstorage.root</name>
-    <value>${hadoop.tmp.dir}/ozone</value>
-    <tag>OZONE, DEBUG</tag>
-    <description>
-      This is used only for testing purposes. This value is used by the local
-      storage handler to simulate a REST backend. This is useful only when
-      debugging the REST front end independent of KSM and SCM. To be removed.
-    </description>
-  </property>
-  <property>
-    <name>ozone.metadata.dirs</name>
-    <value/>
-    <tag>OZONE, KSM, SCM, CONTAINER, REQUIRED, STORAGE</tag>
-    <description>
-      Ozone metadata is shared among KSM, which acts as the namespace
-      manager for ozone, SCM which acts as the block manager and data nodes
-      which maintain the name of the key(Key Name and BlockIDs). This
-      replicated and distributed metadata store is maintained under the
-      directory pointed by this key. Since metadata can be I/O intensive, at
-      least on KSM and SCM we recommend having SSDs. If you have the luxury
-      of mapping this path to SSDs on all machines in the cluster, that will
-      be excellent.
-
-      If Ratis metadata directories are not specified, Ratis server will emit a
-      warning and use this path for storing its metadata too.
-    </description>
-  </property>
-  <property>
-    <name>ozone.metastore.impl</name>
-    <value>RocksDB</value>
-    <tag>OZONE, KSM, SCM, CONTAINER, STORAGE</tag>
-    <description>
-      Ozone metadata store implementation. Ozone metadata are well
-      distributed to multiple services such as ksm, scm. They are stored in
-      some local key-value databases. This property determines which database
-      library to use. Supported value is either LevelDB or RocksDB.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.metastore.rocksdb.statistics</name>
-    <value>ALL</value>
-    <tag>OZONE, KSM, SCM, STORAGE, PERFORMANCE</tag>
-    <description>
-      The statistics level of the rocksdb store. If you use any value from
-      org.rocksdb.StatsLevel (eg. ALL or EXCEPT_DETAILED_TIMERS), the rocksdb
-      statistics will be exposed over JMX bean with the chosen setting. Set
-      it to OFF to not initialize rocksdb statistics at all. Please note that
-      collection of statistics could have 5-10% performance penalty.
-      Check the rocksdb documentation for more details.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.scm.block.client.address</name>
-    <value/>
-    <tag>OZONE, SCM</tag>
-    <description>The address of the Ozone SCM block client service. If not
-      defined value of ozone.scm.client.address is used.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.client.bind.host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The hostname or IP address used by the SCM block client
-      endpoint to bind.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.client.port</name>
-    <value>9863</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The port number of the Ozone SCM block client service.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.deletion.max.retry</name>
-    <value>4096</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      SCM wraps up many blocks in a deletion transaction and sends that to data
-      node for physical deletion periodically. This property determines how 
many
-      times SCM is going to retry sending a deletion operation to the data 
node.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.size.in.mb</name>
-    <value>256</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The default size of a scm block in bytes. This maps to the default
-      Ozone block size.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.chunk.size</name>
-    <value>16777216</value>
-    <tag>OZONE, SCM, CONTAINER, PERFORMANCE</tag>
-    <description>
-      The chunk size for reading/writing chunk operations in bytes.
-
-      The chunk size defaults to 8MB. If the value configured is more than the
-      maximum size (16MB), it will be reset to the maximum size. This maps to
-      the network packet sizes and file write operations in the client to
-      datanode protocol.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.client.address</name>
-    <value/>
-    <tag>OZONE, SCM, REQUIRED</tag>
-    <description>
-      The address of the Ozone SCM client service. This is a required setting.
-
-      It is a string in the host:port format. The port number is optional
-      and defaults to 9860.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.client.bind.host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>The hostname or IP address used by the SCM client endpoint to
-      bind.
-      This setting is used by the SCM only and never used by clients.
-
-      The setting can be useful in multi-homed setups to restrict the
-      availability of the SCM client service to a specific interface.
-
-      The default is appropriate for most clusters.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.client.port</name>
-    <value>9860</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>The port number of the Ozone SCM client service.</description>
-  </property>
-  <property>
-    <name>ozone.scm.container.deletion-choosing.policy</name>
-    <value>
-      
org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy
-    </value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The policy used for choosing desired containers for block deletion.
-      Datanode selects some containers to process block deletion
-      in a certain interval defined by ozone.block.deleting.service.interval.
-      The number of containers to process in each interval is defined
-      by ozone.block.deleting.container.limit.per.interval. This property is
-      used to configure the policy applied while selecting containers.
-      There are two policies supported now:
-      RandomContainerDeletionChoosingPolicy and
-      TopNOrderedContainerDeletionChoosingPolicy.
-      
org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy
-      implements a simple random policy that returns a random list of
-      containers.
-      
org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy
-      implements a policy that chooses the top N containers, ordered by the
-      number of pending deletion blocks in each container, in descending
-      order.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.placement.impl</name>
-    <value>
-      
org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementRandom
-    </value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>Placement policy class for containers.
-      Defaults to SCMContainerPlacementRandom.class
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.provision_batch_size</name>
-    <value>20</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>Pre-provision specified number of containers for block
-      allocation.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.report.processing.interval</name>
-    <value>60s</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>Time interval for scm to process container reports
-      for a node pool. Scm handles node pool reports in a cyclic clock
-      manner, it fetches pools periodically with this time interval.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.reports.wait.timeout</name>
-    <value>300s</value>
-    <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
-    <description>Maximum time to wait in seconds for processing all container
-      reports from
-      a node pool. It determines the timeout for a
-      node pool report.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.size.gb</name>
-    <value>5</value>
-    <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
-    <description>
-      Default container size used by Ozone. This value is specified
-      in GB.
-      There are two considerations while picking this number. The speed at 
which
-      a container can be replicated, determined by the network speed and the
-      metadata that each container generates. So selecting a large number
-      creates less SCM metadata, but recovery time will be more. 5GB is a 
number
-      that maps to quick replication times in gigabit networks, but still
-      balances the amount of metadata.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.address</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address of the Ozone SCM service used for internal
-      communication between the DataNodes and the SCM.
-
-      It is a string in the host:port format. The port number is optional
-      and defaults to 9861.
-
-      This setting is optional. If unspecified then the hostname portion
-      is picked from the ozone.scm.client.address setting and the
-      default service port of 9861 is chosen.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.bind.host</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The hostname or IP address used by the SCM service endpoint to
-      bind.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.id</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>The path that datanodes will use to store the datanode ID.
-      If this value is not set, then datanode ID is created under the
-      metadata directory.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.port</name>
-    <value>9861</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The port number of the Ozone SCM service.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.db.cache.size.mb</name>
-    <value>128</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>SCM keeps track of the Containers in the cluster. This DB 
holds
-      the container metadata. This value is set to a small value to make the
-      unit
-      testing runs smooth. In production, we recommend a value of 16GB or
-      higher. This allows SCM to avoid disk I/O's while looking up the 
container
-      location.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.dead.node.interval</name>
-    <value>10m</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The interval between heartbeats before a node is tagged as dead.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.handler.count.key</name>
-    <value>10</value>
-    <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
-    <description>
-      The number of RPC handler threads for each SCM service
-      endpoint.
-
-      The default is appropriate for small clusters (tens of nodes).
-
-      Set a value that is appropriate for the cluster size. Generally, HDFS
-      recommends RPC handler count is set to 20 * log2(Cluster Size) with an
-      upper limit of 200. However, SCM will not have the same amount of
-      traffic as Namenode, so a value much smaller than that will work well 
too.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.interval</name>
-    <value>30s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The heartbeat interval from a data node to SCM. Yes,
-      it is not three but 30, since most data nodes will be heartbeating via 
Ratis
-      heartbeats. If a client is not able to talk to a data node, it will 
notify
-      KSM/SCM eventually. So a 30 second HB seems to work. This assumes that
-      replication strategy used is Ratis if not, this value should be set to
-      something smaller like 3 seconds.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.log.warn.interval.count</name>
-    <value>10</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Defines how frequently we will log the missing of a heartbeat to SCM.
-      For example in the default case, we will write a warning message for each
-      ten consecutive heartbeats that we miss to SCM. This helps in reducing
-      clutter in a data node log, but trade off is that logs will have less of
-      this statement.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.rpc-timeout</name>
-    <value>1000</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Timeout value for the RPC from Datanode to SCM in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.thread.interval</name>
-    <value>3s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      When a heartbeat from the data node arrives on SCM, It is queued for
-      processing with the time stamp of when the heartbeat arrived. There is a
-      heartbeat processing thread inside SCM that runs at a specified interval.
-      This value controls how frequently this thread is run.
-
-      There are some assumptions build into SCM such as this value should allow
-      the heartbeat processing thread to run at least three times more
-      frequently than heartbeats and at least five times more than stale node
-      detection time. If you specify a wrong value, SCM will gracefully refuse
-      to run. For more info look at the node manager tests in SCM.
-
-      In short, you don't need to change this.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.http-address</name>
-    <value>0.0.0.0:9876</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the SCM web ui will listen on.
-
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The actual address the SCM web server will bind to. If this
-      optional address is set, it overrides only the hostname portion of
-      ozone.scm.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.http.enabled</name>
-    <value>true</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable SCM web ui.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.https-address</name>
-    <value>0.0.0.0:9877</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the SCM web UI will listen
-      on using HTTPS.
-
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The actual address the SCM web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion 
of
-      ozone.scm.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.keytab.file</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>
-      The keytab file for Kerberos authentication in SCM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.max.container.report.threads</name>
-    <value>100</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Maximum number of threads to process container reports in scm.
-      Each container report from a data node is processed by scm in a worker
-      thread, fetched from a thread pool. This property is used to control the
-      maximum size of the thread pool.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.max.hb.count.to.process</name>
-    <value>5000</value>
-    <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
-    <description>
-      The maximum number of heartbeat to process per loop of the
-      heartbeat process thread. Please see
-      ozone.scm.heartbeat.thread.interval
-      for more info.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.max.nodepool.processing.threads</name>
-    <value>1</value>
-    <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
-    <description>
-      Number of node pools to process in parallel.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.names</name>
-    <value/>
-    <tag>OZONE</tag>
-    <description>
-      The value of this property is a set of DNS | DNS:PORT | IP
-      Address | IP:PORT. Written as a comma separated string. e.g. scm1,
-      scm2:8020, 7.7.7.7:7777.
-      This property allows datanodes to discover where SCM is, so that
-      datanodes can send heartbeat to SCM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.stale.node.interval</name>
-    <value>90s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The interval for stale node flagging. Please
-      see ozone.scm.heartbeat.thread.interval before changing this value.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.max.nodepool.processing.threads</name>
-    <value>1</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      Controls the number of node pools that can be processed in parallel by
-      Container Supervisor.
-    </description>
-  </property>
-  <property>
-    <name>ozone.trace.enabled</name>
-    <value>false</value>
-    <tag>OZONE, DEBUG</tag>
-    <description>
-      Setting this flag to true dumps the HTTP request/ response in
-      the logs. Very useful when debugging REST protocol.
-    </description>
-  </property>
-  <property>
-    <name>ozone.web.authentication.kerberos.principal</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>
-      The server principal used by the SCM and KSM for web UI SPNEGO
-      authentication when Kerberos security is enabled. This is typically set 
to
-      HTTP/_h...@realm.tld The SPNEGO server principal begins with the prefix
-      HTTP/ by convention.
-
-      If the value is '*', the web server will attempt to login with
-      every principal specified in the keytab file.
-    </description>
-  </property>
-
-  <!--Client Settings-->
-  <property>
-    <name>scm.container.client.idle.threshold</name>
-    <value>10s</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      In the standalone pipelines, the SCM clients use netty to
-      communicate with the container. It also uses connection pooling to
-      reduce client side overheads. This allows a connection to stay idle for
-      a while before the connection is closed.
-    </description>
-  </property>
-  <property>
-    <name>scm.container.client.max.size</name>
-    <value>256</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Controls the maximum number of connections that we cached via
-      clientconnection pooling. If the number of connection
-      exceed this count then the oldest idle connection is evicted.
-    </description>
-  </property>
-
-  <property>
-    <name>scm.container.client.max.outstanding.requests</name>
-    <value>100</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Controls the maximum number of outstanding async requests that can be
-      handled by the Standalone as well as Ratis client.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.scm.container.creation.lease.timeout</name>
-    <value>60s</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      Container creation timeout in milliseconds to be used by SCM. When
-      BEGIN_CREATE event happens the container is moved from ALLOCATED to
-      CREATING state, SCM will now wait for the configured amount of time
-      to get COMPLETE_CREATE event if it doesn't receive it will move the
-      container to DELETING.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.key.preallocation.maxsize</name>
-    <value>134217728</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
-    <description>
-      When a new key write request is sent to KSM, if a size is requested, at 
most
-      128MB of size is allocated at request time. If client needs more space 
for the
-      write, separate block allocation requests will be made.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.client.list.cache</name>
-    <value>1000</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Configuration property to configure the cache size of client list calls.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.replication</name>
-    <value>3</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      Default replication value. The actual number of replications can be
-      specified when writing the key. The default is used if replication
-      is not specified. Supported values: 1 and 3.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.replication.type</name>
-    <value>RATIS</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      Default replication type to be used while writing key into ozone. The
-      value can be specified when writing the key, default is used when
-      nothing is specified. Supported values: RATIS, STAND_ALONE and CHAINED.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.close.threshold</name>
-    <value>0.9f</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      This determines the threshold to be used for closing a container.
-      When the container used percentage reaches this threshold,
-      the container will be closed. Value should be a positive, non-zero
-      percentage in float notation (X.Yf), with 1.0f meaning 100%.
-    </description>
-  </property>
-  <property>
-    <name>ozone.rest.client.http.connection.max</name>
-    <value>100</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      This defines the overall connection limit for the connection pool used in
-      RestClient.
-    </description>
-  </property>
-  <property>
-    <name>ozone.rest.client.http.connection.per-route.max</name>
-    <value>20</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      This defines the connection limit per one HTTP route/host. Total max
-      connection is limited by ozone.rest.client.http.connection.max property.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.open.key.cleanup.service.interval.seconds</name>
-    <value>86400</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
-    <description>
-      A background job periodically checks open key entries and delete the 
expired ones. This entry controls the
-      interval of this cleanup check.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.open.key.expire.threshold</name>
-    <value>86400</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
-    <description>
-      Controls how long an open key operation is considered active. 
Specifically, if a key
-      has been open longer than the value of this config entry, that open key 
is considered as
-      expired (e.g. due to client crash). Default to 24 hours.
-    </description>
-  </property>
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js
index 3b67167..9e2732c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js
@@ -18,11 +18,11 @@
 (function () {
   "use strict";
 
-  var data = {ozone: {enabled: false}};
+  var data = {};
 
   dust.loadSource(dust.compile($('#tmpl-dn').html(), 'dn'));
 
-  function loadDatanodeInfo() {
+  function load() {
     $.get('/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo', function(resp) 
{
       data.dn = workaround(resp.beans[0]);
       data.dn.HostName = resp.beans[0]['DatanodeHostname'];
@@ -30,26 +30,6 @@
     }).fail(show_err_msg);
   }
 
-  function loadOzoneScmInfo() {
-        
$.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=SCMConnectionManager', 
function (resp) {
-            if (resp.beans.length > 0) {
-                data.ozone.SCMServers = resp.beans[0].SCMServers;
-                data.ozone.enabled = true;
-                render();
-            }
-        }).fail(show_err_msg);
-  }
-
-  function loadOzoneStorageInfo() {
-        
$.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=ContainerLocationManager', 
function (resp) {
-            if (resp.beans.length > 0) {
-                data.ozone.LocationReport = resp.beans[0].LocationReport;
-                data.ozone.enabled = true;
-                render();
-            }
-        }).fail(show_err_msg);
-    }
-
   function workaround(dn) {
     function node_map_to_array(nodes) {
       var res = [];
@@ -85,8 +65,6 @@
     $('#alert-panel').show();
   }
 
-    loadDatanodeInfo();
-    loadOzoneScmInfo();
-    loadOzoneStorageInfo();
+  load();
 
 })();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html
deleted file mode 100644
index bee42bf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html
+++ /dev/null
@@ -1,70 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd";>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head 
content must come *after* these tags -->
-    <meta name="description" content="HDFS Key Space Manager">
-
-    <title>HDFS Key Space Manager</title>
-
-    <link href="static/bootstrap-3.0.2/css/bootstrap.min.css" rel="stylesheet">
-    <link href="static/hadoop.css" rel="stylesheet">
-    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
-
-    <link href="static/ozone.css" rel="stylesheet">
-
-</head>
-
-<body ng-app="ksm">
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed" 
data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">HDFS KSM</a>
-        </div>
-        <navmenu
-                metrics="{ 'Ksm metrics' : '#!/metrics/ksm', 'Rpc metrics' : 
'#!/metrics/rpc'}"></navmenu>
-    </div>
-</header>
-
-<div class="container-fluid">
-    <ng-view></ng-view>
-</div><!-- /.container -->
-
-<script src="static/jquery-1.10.2.min.js"></script>
-<script src="static/angular-1.6.4.min.js"></script>
-<script src="static/angular-route-1.6.4.min.js"></script>
-<script src="static/d3-3.5.17.min.js"></script>
-<script src="static/nvd3-1.8.5.min.js"></script>
-<script src="static/angular-nvd3-1.0.9.min.js"></script>
-<script src="static/ozone.js"></script>
-<script src="ksm.js"></script>
-<script src="static/bootstrap-3.0.2/js/bootstrap.min.js"></script>
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm-metrics.html
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm-metrics.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm-metrics.html
deleted file mode 100644
index e63fb00..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm-metrics.html
+++ /dev/null
@@ -1,44 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h1>KSM Metrics</h1>
-
-<div ng-repeat="(type,numbers) in $ctrl.metrics.nums">
-    <h2>{{type}}</h2>
-    <div class="container">
-        <div class="col-md-6">
-            <h3>Requests ({{numbers.ops}} ops)</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.all"></nvd3>
-        </div>
-        <div class="col-md-6">
-            <h3>Failures</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.failures"></nvd3>
-        </div>
-    </div>
-</div>
-
-<div ng-show="$ctrl.metrics.others.length > 0">
-    <h2>Other JMX properties</h2>
-
-    <table class="table">
-        <tr ng-repeat="metric in $ctrl.metrics.others">
-            <td>{{metric.key}}</td>
-            <td>{{metric.value}}</td>
-        </tr>
-    </table>
-</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js
deleted file mode 100644
index 7fb52b1..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-
-    var isIgnoredJmxKeys = function (key) {
-        return key == 'name' || key == 'modelerType' || key.match(/tag.*/);
-    };
-
-    angular.module('ksm', ['ozone', 'nvd3']);
-    angular.module('ksm').config(function ($routeProvider) {
-        $routeProvider
-            .when("/metrics/ksm", {
-                template: "<ksm-metrics></ksm-metrics>"
-            });
-    });
-    angular.module('ksm').component('ksmMetrics', {
-        templateUrl: 'ksm-metrics.html',
-        controller: function ($http) {
-            var ctrl = this;
-
-            ctrl.graphOptions = {
-                chart: {
-                    type: 'pieChart',
-                    height: 500,
-                    x: function (d) {
-                        return d.key;
-                    },
-                    y: function (d) {
-                        return d.value;
-                    },
-                    showLabels: true,
-                    labelType: 'value',
-                    duration: 500,
-                    labelThreshold: 0.01,
-                    labelSunbeamLayout: true,
-                    legend: {
-                        margin: {
-                            top: 5,
-                            right: 35,
-                            bottom: 5,
-                            left: 0
-                        }
-                    }
-                }
-            };
-
-
-            $http.get("jmx?qry=Hadoop:service=KeySpaceManager,name=KSMMetrics")
-                .then(function (result) {
-
-                    var groupedMetrics = {others: [], nums: {}};
-                    var metrics = result.data.beans[0]
-                    for (var key in metrics) {
-                        var numericalStatistic = 
key.match(/Num([A-Z][a-z]+)(.+?)(Fails)?$/);
-                        if (numericalStatistic) {
-                            var type = numericalStatistic[1];
-                            var name = numericalStatistic[2];
-                            var failed = numericalStatistic[3];
-                            groupedMetrics.nums[type] = 
groupedMetrics.nums[type] || {
-                                    failures: [],
-                                    all: []
-                                };
-                            if (failed) {
-                                groupedMetrics.nums[type].failures.push({
-                                    key: name,
-                                    value: metrics[key]
-                                })
-                            } else {
-                                if (name == "Ops") {
-                                    groupedMetrics.nums[type].ops = 
metrics[key]
-                                } else {
-                                    groupedMetrics.nums[type].all.push({
-                                        key: name,
-                                        value: metrics[key]
-                                    })
-                                }
-                            }
-                        } else if (isIgnoredJmxKeys(key)) {
-                            //ignore
-                        } else {
-                            groupedMetrics.others.push({
-                                'key': key,
-                                'value': metrics[key]
-                            });
-                        }
-                    }
-                    ctrl.metrics = groupedMetrics;
-                })
-        }
-    });
-
-})();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.css
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.css
deleted file mode 100644
index e442adc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.css
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- *   Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
-*/
-body {
-  padding-top: 50px;
-}
-.starter-template {
-  padding: 40px 15px;
-  text-align: center;
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.html
deleted file mode 100644
index 0821899..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.html
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<overview>
-</overview>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html
deleted file mode 100644
index 3407f51..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html
+++ /dev/null
@@ -1,76 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd";>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head 
content must come *after* these tags -->
-    <meta name="description" content="HDFS Storage Container Manager">
-
-    <title>HDFS Storage Container Manager</title>
-
-    <link href="static/bootstrap-3.0.2/css/bootstrap.min.css" rel="stylesheet">
-    <link href="static/hadoop.css" rel="stylesheet">
-    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
-
-    <link href="static/ozone.css" rel="stylesheet">
-
-</head>
-
-<body ng-app="scm">
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed" 
data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">HDFS SCM</a>
-        </div>
-
-
-        <navmenu
-                metrics="{ 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
-
-
-    </div>
-</header>
-
-<div class="container-fluid" style="margin: 12pt">
-
-    <ng-view></ng-view>
-
-</div><!-- /.container -->
-
-<script src="static/jquery-1.10.2.min.js"></script>
-<script src="static/angular-1.6.4.min.js"></script>
-<script src="static/angular-route-1.6.4.min.js"></script>
-<script src="static/d3-3.5.17.min.js"></script>
-<script src="static/nvd3-1.8.5.min.js"></script>
-<script src="static/angular-nvd3-1.0.9.min.js"></script>
-<script src="static/ozone.js"></script>
-<script src="scm.js"></script>
-<script src="static/bootstrap-3.0.2/js/bootstrap.min.js"></script>
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/main.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/main.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/main.html
deleted file mode 100644
index 2666f81..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/main.html
+++ /dev/null
@@ -1,20 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<overview>
-    <scm-overview>
-    </scm-overview>
-</overview>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/scm-overview.html
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/scm-overview.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/scm-overview.html
deleted file mode 100644
index fca23ba..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/scm-overview.html
+++ /dev/null
@@ -1,60 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h2>Node counts</h2>
-
-<table class="table table-bordered table-striped" class="col-md-6">
-    <tbody>
-    <tr ng-repeat="typestat in $ctrl.nodemanagermetrics.NodeCount | 
orderBy:'key':false:$ctrl.nodeOrder">
-        <td>{{typestat.key}}</td>
-        <td>{{typestat.value}}</td>
-    </tr>
-    </tbody>
-</table>
-
-<h2>Status</h2>
-<table class="table table-bordered table-striped" class="col-md-6">
-    <tbody>
-    <tr>
-        <td>Client Rpc port</td>
-        <td>{{$ctrl.overview.jmx.ClientRpcPort}}</td>
-    </tr>
-    <tr>
-        <td>Datanode Rpc port</td>
-        <td>{{$ctrl.overview.jmx.DatanodeRpcPort}}</td>
-    </tr>
-    <tr>
-        <td>Block Manager: Open containers</td>
-        <td>{{$ctrl.blockmanagermetrics.OpenContainersNo}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Minimum chill mode nodes</td>
-        <td>{{$ctrl.nodemanagermetrics.MinimumChillModeNodes}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Out-of-node chill mode</td>
-        <td>{{$ctrl.nodemanagermetrics.OutOfNodeChillMode}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Chill mode status</td>
-        <td>{{$ctrl.nodemanagermetrics.ChillModeStatus}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Manual chill mode</td>
-        <td>{{$ctrl.nodemanagermetrics.InManualChillMode}}</td>
-    </tr>
-    </tbody>
-</table>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/scm.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/scm.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/scm.js
deleted file mode 100644
index bcfa8b7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/scm.js
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-    angular.module('scm', ['ozone', 'nvd3']);
-
-    angular.module('scm').component('scmOverview', {
-        templateUrl: 'scm-overview.html',
-        require: {
-            overview: "^overview"
-        },
-        controller: function ($http) {
-            var ctrl = this;
-            $http.get("jmx?qry=Hadoop:service=BlockManager,name=*")
-                .then(function (result) {
-                    ctrl.blockmanagermetrics = result.data.beans[0];
-                });
-            
$http.get("jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo")
-                .then(function (result) {
-                    ctrl.nodemanagermetrics = result.data.beans[0];
-                });
-
-            var statusSortOrder = {
-                "HEALTHY": "a",
-                "STALE": "b",
-                "DEAD": "c",
-                "UNKNOWN": "z",
-                "DECOMMISSIONING": "x",
-                "DECOMMISSIONED": "y"
-            };
-            ctrl.nodeOrder = function (v1, v2) {
-                //status with non defined sort order will be "undefined"
-                return ("" + statusSortOrder[v1.value]).localeCompare("" + 
statusSortOrder[v2.value])
-            }
-
-        }
-    });
-
-})();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to