http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
deleted file mode 100644
index 271ac74..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- *   Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
-*/
-body {
-    padding: 40px;
-    padding-top: 60px;
-}
-.starter-template {
-    padding: 40px 15px;
-    text-align: center;
-}
-
-
-.btn {
-    border: 0 none;
-    font-weight: 700;
-    letter-spacing: 1px;
-    text-transform: uppercase;
-}
-
-.btn:focus, .btn:active:focus, .btn.active:focus {
-    outline: 0 none;
-}
-
-.table-striped > tbody > tr:nth-child(2n+1).selectedtag > td:hover {
-    background-color: #3276b1;
-}
-.table-striped > tbody > tr:nth-child(2n+1).selectedtag > td {
-    background-color: #3276b1;
-}
-.tagPanel tr.selectedtag td {
-    background-color: #3276b1;
-}
-.top-buffer { margin-top:4px; }
-
-
-.sortorder:after {
-    content: '\25b2';   // BLACK UP-POINTING TRIANGLE
-}
-.sortorder.reverse:after {
-    content: '\25bc';   // BLACK DOWN-POINTING TRIANGLE
-}
-
-.wrap-table{
-    word-wrap: break-word;
-    table-layout: fixed;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
deleted file mode 100644
index c2ed2ad..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
+++ /dev/null
@@ -1,387 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function() {
-  "use strict";
-
-  var isIgnoredJmxKeys = function(key) {
-    return key == 'name' || key == 'modelerType' || key == "$$hashKey" ||
-      key.match(/tag.*/);
-  };
-  angular.module('ozone', ['nvd3', 'ngRoute']);
-  angular.module('ozone').config(function($routeProvider) {
-    $routeProvider
-      .when("/", {
-        templateUrl: "main.html"
-      })
-      .when("/metrics/rpc", {
-        template: "<rpc-metrics></rpc-metrics>"
-      })
-      .when("/config", {
-        template: "<config></config>"
-      })
-  });
-  angular.module('ozone').component('overview', {
-    templateUrl: 'static/templates/overview.html',
-    transclude: true,
-    controller: function($http) {
-      var ctrl = this;
-      $http.get("jmx?qry=Hadoop:service=*,name=*,component=ServerRuntime")
-        .then(function(result) {
-          ctrl.jmx = result.data.beans[0]
-        })
-    }
-  });
-  angular.module('ozone').component('jvmParameters', {
-    templateUrl: 'static/templates/jvm.html',
-    controller: function($http) {
-      var ctrl = this;
-      $http.get("jmx?qry=java.lang:type=Runtime")
-        .then(function(result) {
-          ctrl.jmx = result.data.beans[0];
-
-          //convert array to a map
-          var systemProperties = {};
-          for (var idx in ctrl.jmx.SystemProperties) {
-            var item = ctrl.jmx.SystemProperties[idx];
-            systemProperties[item.key.replace(/\./g, "_")] = item.value;
-          }
-          ctrl.jmx.SystemProperties = systemProperties;
-        })
-    }
-  });
-
-  angular.module('ozone').component('rpcMetrics', {
-    template: '<h1>Rpc metrics</h1><tabs>' +
-      '<pane ng-repeat="metric in $ctrl.metrics" ' +
-      'title="{{metric[\'tag.serverName\']}} ({{metric[\'tag.port\']}})">' +
-      '<rpc-metric jmxdata="metric"></rpc-metric></pane>' +
-      '</tabs>',
-    controller: function($http) {
-      var ctrl = this;
-      $http.get("jmx?qry=Hadoop:service=*,name=RpcActivityForPort*")
-        .then(function(result) {
-          ctrl.metrics = result.data.beans;
-        })
-    }
-  });
-  angular.module('ozone').component('rpcMetric', {
-    bindings: {
-      jmxdata: '<'
-    },
-    templateUrl: 'static/templates/rpc-metrics.html',
-    controller: function() {
-      var ctrl = this;
-
-
-      ctrl.percentileGraphOptions = {
-        chart: {
-          type: 'discreteBarChart',
-          height: 450,
-          margin: {
-            top: 20,
-            right: 20,
-            bottom: 50,
-            left: 55
-          },
-          x: function(d) {
-            return d.label;
-          },
-          y: function(d) {
-            return d.value;
-          },
-          showValues: true,
-          valueFormat: function(d) {
-            return d3.format(',.1f')(d);
-          },
-          duration: 500,
-          xAxis: {
-            axisLabel: 'Percentage'
-          },
-          yAxis: {
-            axisLabel: 'Latency (ms)',
-            axisLabelDistance: -10
-          }
-        }
-      };
-
-      ctrl.$onChanges = function(data) {
-        var groupedMetrics = {}
-
-        var createPercentageMetrics = function(metricName, window) {
-          groupedMetrics.percentiles = groupedMetrics['percentiles'] || {}
-          groupedMetrics.percentiles[window] = groupedMetrics.percentiles[window] || {};
-          groupedMetrics.percentiles[window][metricName] = groupedMetrics.percentiles[window][metricName] || {
-            graphdata: [{
-              key: window,
-              values: []
-            }],
-            numOps: 0
-          };
-
-        };
-        var metrics = ctrl.jmxdata;
-        for (var key in metrics) {
-          var percentile = key.match(/(.*Time)(\d+s)(\d+th)PercentileLatency/);
-          var percentileNumOps = key.match(/(.*Time)(\d+s)NumOps/);
-          var successFailures = key.match(/(.*)(Success|Failures)/);
-          var numAverages = key.match(/(.*Time)(NumOps|AvgTime)/);
-          if (percentile) {
-            var metricName = percentile[1];
-            var window = percentile[2];
-            var percentage = percentile[3]
-            createPercentageMetrics(metricName, window);
-
-
-            groupedMetrics.percentiles[window][metricName].graphdata[0]
-              .values.push({
-                label: percentage,
-                value: metrics[key]
-              })
-          } else if (successFailures) {
-            var metricName = successFailures[1];
-            groupedMetrics.successfailures = groupedMetrics['successfailures'] || {}
-            groupedMetrics.successfailures[metricName] = groupedMetrics.successfailures[metricName] || {
-              success: 0,
-              failures: 0
-            };
-            if (successFailures[2] == 'Success') {
-              groupedMetrics.successfailures[metricName].success = metrics[key];
-            } else {
-              groupedMetrics.successfailures[metricName].failures = metrics[key];
-            }
-
-          } else if (numAverages) {
-            var metricName = numAverages[1];
-            groupedMetrics.numavgs = groupedMetrics['numavgs'] || {}
-            groupedMetrics.numavgs[metricName] = groupedMetrics.numavgs[metricName] || {
-              numOps: 0,
-              avgTime: 0
-            };
-            if (numAverages[2] == 'NumOps') {
-              groupedMetrics.numavgs[metricName].numOps = metrics[key];
-            } else {
-              groupedMetrics.numavgs[metricName].avgTime = metrics[key];
-            }
-
-          } else if (percentileNumOps) {
-            var metricName = percentileNumOps[1];
-            var window = percentileNumOps[2];
-            createPercentageMetrics(metricName, window);
-            groupedMetrics.percentiles[window][metricName].numOps = metrics[key];
-          } else if (isIgnoredJmxKeys(key)) {
-            //ignore
-          } else {
-            groupedMetrics.others = groupedMetrics.others || [];
-            groupedMetrics.others.push({
-              'key': key,
-              'value': metrics[key]
-            });
-          }
-
-        }
-        ctrl.metrics = groupedMetrics;
-      };
-
-    }
-  });
-  angular.module('ozone')
-    .component('tabs', {
-      transclude: true,
-      controller: function($scope) {
-        var ctrl = this;
-        var panes = this.panes = [];
-        this.select = function(pane) {
-          angular.forEach(panes, function(pane) {
-            pane.selected = false;
-          });
-          pane.selected = true;
-        };
-        this.addPane = function(pane) {
-          if (panes.length === 0) {
-            this.select(pane);
-          }
-          panes.push(pane);
-        };
-        this.click = function(pane) {
-          ctrl.select(pane);
-        }
-      },
-      template: '<div class="nav navtabs"><div class="row"><ul' +
-        ' class="nav nav-pills">' +
-        '<li ng-repeat="pane in $ctrl.panes" ng-class="{active:pane.selected}">' +
-        '<a href="" ng-click="$ctrl.click(pane)">{{pane.title}}</a> ' +
-        '</li> </ul></div><br/><div class="tab-content" ng-transclude></div> </div>'
-    })
-    .component('pane', {
-      transclude: true,
-      require: {
-        tabsCtrl: '^tabs'
-      },
-      bindings: {
-        title: '@'
-      },
-      controller: function() {
-        this.$onInit = function() {
-          this.tabsCtrl.addPane(this);
-        };
-      },
-      template: '<div class="tab-pane" ng-if="$ctrl.selected" ng-transclude></div>'
-    });
-
-  angular.module('ozone').component('navmenu', {
-    bindings: {
-      metrics: '<'
-    },
-    templateUrl: 'static/templates/menu.html',
-    controller: function($http) {
-      var ctrl = this;
-      ctrl.docs = false;
-      $http.head("docs/index.html")
-        .then(function(result) {
-          ctrl.docs = true;
-        }, function() {
-          ctrl.docs = false;
-        });
-    }
-  });
-
-  angular.module('ozone').component('config', {
-    templateUrl: 'static/templates/config.html',
-    controller: function($scope, $http) {
-      var ctrl = this;
-      ctrl.selectedTags = [];
-      ctrl.configArray = [];
-
-      $http.get("conf?cmd=getOzoneTags")
-        .then(function(response) {
-          ctrl.tags = response.data;
-          var excludedTags = ['CBLOCK', 'OM', 'SCM'];
-          for (var i = 0; i < excludedTags.length; i++) {
-            var idx = ctrl.tags.indexOf(excludedTags[i]);
-            // Remove CBLOCK related properties
-            if (idx > -1) {
-              ctrl.tags.splice(idx, 1);
-            }
-          }
-          ctrl.loadAll();
-        });
-
-      ctrl.convertToArray = function(srcObj) {
-        ctrl.keyTagMap = {};
-        for (var idx in srcObj) {
-          //console.log("Adding keys for "+idx)
-          for (var key in srcObj[idx]) {
-
-            if (ctrl.keyTagMap.hasOwnProperty(key)) {
-              ctrl.keyTagMap[key]['tag'].push(idx);
-            } else {
-              var newProp = {};
-              newProp['name'] = key;
-              newProp['value'] = srcObj[idx][key];
-              newProp['tag'] = [];
-              newProp['tag'].push(idx);
-              ctrl.keyTagMap[key] = newProp;
-            }
-          }
-        }
-      }
-
-      ctrl.loadAll = function() {
-        $http.get("conf?cmd=getPropertyByTag&tags=OM,SCM," + ctrl.tags)
-          .then(function(response) {
-
-            ctrl.convertToArray(response.data);
-            ctrl.configs = Object.values(ctrl.keyTagMap);
-            ctrl.component = 'All';
-            console.log("ajay -> " + JSON.stringify(ctrl.configs));
-            ctrl.sortBy('name');
-          });
-      };
-
-      ctrl.filterTags = function() {
-        if (!ctrl.selectedTags) {
-          return true;
-        }
-
-        if (ctrl.selectedTags.length < 1 && ctrl.component == 'All') {
-          return true;
-        }
-
-        ctrl.configs = ctrl.configs.filter(function(item) {
-
-          if (ctrl.component != 'All' && (item['tag'].indexOf(ctrl
-              .component) < 0)) {
-            console.log(item['name'] + " false tag " + item['tag']);
-            return false;
-          }
-
-          if (ctrl.selectedTags.length < 1) {
-            return true;
-          }
-          for (var tag in item['tag']) {
-            tag = item['tag'][tag];
-            if (ctrl.selectedTags.indexOf(tag) > -1) {
-              return true;
-            }
-          }
-          return false;
-        });
-
-      };
-      ctrl.configFilter = function(config) {
-        return false;
-      };
-      ctrl.selected = function(tag) {
-        return ctrl.selectedTags.includes(tag);
-      };
-
-      ctrl.switchto = function(tag) {
-        ctrl.component = tag;
-        ctrl.reloadConfig();
-      };
-
-      ctrl.select = function(tag) {
-        var tagIdx = ctrl.selectedTags.indexOf(tag);
-        if (tagIdx > -1) {
-          ctrl.selectedTags.splice(tagIdx, 1);
-        } else {
-          ctrl.selectedTags.push(tag);
-        }
-        ctrl.reloadConfig();
-      };
-
-      ctrl.reloadConfig = function() {
-        ctrl.configs = [];
-        ctrl.configs = Object.values(ctrl.keyTagMap);
-        ctrl.filterTags();
-      };
-
-      ctrl.sortBy = function(field) {
-        ctrl.reverse = (ctrl.propertyName === field) ? !ctrl.reverse : false;
-        ctrl.propertyName = field;
-      };
-
-      ctrl.allSelected = function(comp) {
-        //console.log("Adding key for compo ->"+comp)
-        return ctrl.component == comp;
-      };
-
-    }
-  });
-
-})();
\ No newline at end of file
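
A note on the file above: the $onChanges grouping relies entirely on the
Hadoop JMX key naming convention, i.e. keys shaped like
<name>Time<window><percentile>PercentileLatency. A minimal Java sketch of
the same split (the sample key below is illustrative, not captured from a
running server):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class PercentileKeyDemo {
      // Same pattern the deleted ozone.js applied to each JMX key.
      private static final Pattern PERCENTILE =
          Pattern.compile("(.*Time)(\\d+s)(\\d+th)PercentileLatency");

      public static void main(String[] args) {
        Matcher m =
            PERCENTILE.matcher("RpcProcessingTime60s90thPercentileLatency");
        if (m.matches()) {
          System.out.println(m.group(1)); // metric name: RpcProcessingTime
          System.out.println(m.group(2)); // rolling window: 60s
          System.out.println(m.group(3)); // percentile: 90th
        }
      }
    }

Keys ending in NumOps/AvgTime or Success/Failures instead fall through to
the other regexes above and land in the numavgs and successfailures groups.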

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
deleted file mode 100644
index b52f653..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
+++ /dev/null
@@ -1,91 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<div class="row top-buffer">
-  <div class="col-md-2">
-    <input type="text" class="form-control" placeholder="Search Properties"
-           name="search" ng-model="search.$">
-  </div>
-  <div class="col-md-10">
-    <div class="btn-group btn-group-justified">
-      <a class="btn"
-         ng-class="$ctrl.allSelected('All') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('All')">All
-      </a>
-      <a class="btn"
-         ng-class="$ctrl.allSelected('OM') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('OM')">OM</a>
-      <a class="btn"
-         ng-class="$ctrl.allSelected('SCM') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('SCM')">SCM</a>
-    </div>
-  </div>
-</div>
-<div class="row">
-  <div class="col-md-2">
-
-    <table class="table table-striped table-condensed tagPanel">
-      <colgroup>
-        <col class="col-md-12">
-      </colgroup>
-      <thead>
-      <tr>
-        <th>Tag</th>
-      </tr>
-      </thead>
-      <tbody>
-      <tr ng-click="$ctrl.select(tag)"
-          ng-class="$ctrl.selected(tag) ? 'selectedtag':''"
-          ng-repeat="tag in $ctrl.tags">
-        <td>{{tag}}</td>
-      </tr>
-      </tbody>
-    </table>
-  </div>
-  <div class="col-md-10">
-    <table class="table table-striped table-condensed table-hover wrap-table">
-      <thead>
-      <tr>
-        <th class="col-md-3" >
-          <a href="javascript:void(0)" ng-click="$ctrl.sortBy('name')">Property</a>
-          <span class="sortorder" ng-show="$ctrl.propertyName === 'name'"
-                ng-class="{reverse: $ctrl.reverse}">
-              </span>
-        </th>
-        <th class="col-md-2" style="word-wrap: break-word;">
-          <a href="javascript:void(0)" ng-click="$ctrl.sortBy('value')">Value</a>
-          <span class="sortorder" ng-show="$ctrl.propertyName === 'value'"
-                ng-class="{reverse: $ctrl.reverse}"></span>
-        </th>
-        <th class="col-md-7">
-          <a href="javascript:void(0)" ng-click="$ctrl.sortBy('description')">Description</a>
-          <span class="sortorder" ng-show="$ctrl.propertyName === 
'description'"
-                ng-class="{reverse: reverse}"></span>
-        </th>
-      </tr>
-      </thead>
-      <tbody>
-      <tr
-          ng-repeat="config in $ctrl.configs | filter:search | orderBy:$ctrl.propertyName:$ctrl.reverse">
-        <td style="word-wrap: break-word;">{{config.name}}</td>
-        <td style="word-wrap: break-word;">{{config.value}}</td>
-        <td style="word-wrap: break-word;">{{config.description}}</td>
-      </tr>
-      </tbody>
-    </table>
-  </div>
-</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
deleted file mode 100644
index c1f7d16..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
+++ /dev/null
@@ -1,26 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<table class="table table-bordered table-striped">
-    <tr>
-        <th>JVM:</th>
-        <td>{{$ctrl.jmx.SystemProperties.java_vm_name}} {{$ctrl.jmx.SystemProperties.java_vm_version}}</td>
-    </tr>
-    <tr>
-        <th>Input arguments:</th>
-        <td>{{$ctrl.jmx.InputArguments}}</td>
-    </tr>
-</table>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
deleted file mode 100644
index 95f1b48..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
+++ /dev/null
@@ -1,60 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<div id="navbar" class="collapse navbar-collapse">
-    <ul class="nav navbar-nav" id="ui-tabs">
-        <li>
-            <a class="dropdown-toggle"
-               id="metricsMenu"
-               data-toggle="dropdown"
-               aria-haspopup="true"
-               aria-expanded="true">
-                Metrics
-                <span class="caret"></span>
-            </a>
-            <ul
-                class="dropdown-menu"
-                aria-labelledby="metricsMenu">
-                <li ng-repeat="(name, url) in $ctrl.metrics">
-                    <a ng-href="{{url}}">{{name}}<span
-                        aria-hidden="true"></span></a></li>
-            </ul>
-        </li>
-        <li><a href="#!/config">Configuration</a></li>
-        <li ng-show="$ctrl.docs"><a href="/docs">Documentation</a></li>
-        <li>
-            <a class="dropdown-toggle"
-               id="toolsMenu"
-               data-toggle="dropdown"
-               aria-haspopup="true"
-               aria-expanded="true"
-               >
-                Common tools
-                <span class="caret"></span>
-            </a>
-            <ul class="dropdown-menu" aria-labelledby="toolsMenu">
-                <li><a href="jmx">JMX <span
-                        aria-hidden="true"></span></a></li>
-                <li><a href="conf">Config <span
-                        aria-hidden="true"></a></li>
-                <li><a href="stacks">Stacks <span
-                        aria-hidden="true"></a></li>
-                <li><a href="logLevel">Log levels <span
-                        aria-hidden="true"></a></li>
-            </ul>
-        </li>
-    </ul>
-</div><!--/.nav-collapse -->

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
deleted file mode 100644
index 30e2d26..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
+++ /dev/null
@@ -1,39 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h1>Overview</h1>
-<table class="table table-bordered table-striped">
-    <tbody>
-    <tr>
-        <th>Started:</th>
-        <td>{{$ctrl.jmx.StartedTimeInMillis | date : 'medium'}}</td>
-    </tr>
-    <tr>
-        <th>Version:</th>
-        <td>{{$ctrl.jmx.Version}}</td>
-    </tr>
-    <tr>
-        <th>Compiled:</th>
-        <td>{{$ctrl.jmx.CompileInfo}}</td>
-    </tr>
-    </tbody>
-</table>
-
-<h2>JVM parameters</h2>
-
-<jvm-parameters></jvm-parameters>
-
-<div ng-transclude></div>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html
deleted file mode 100644
index facb152..0000000
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html
+++ /dev/null
@@ -1,87 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<div ng-hide="$ctrl.metrics.percentiles" class="alert alert-info">
-    Please set <b>rpc.metrics.quantile.enable</b> to <b>true</b> and define the
-    intervals in seconds with setting <b>rpc.metrics.percentiles.intervals</b>
-    (eg. set to <b>60,300</b>) in your hdfs-site.xml
-    to display Hadoop RPC related graphs.
-</div>
-<div ng-repeat="(window,windowed) in $ctrl.metrics.percentiles">
-    <h2>{{window}} window</h2>
-    <p>Quantiles based on a fixed {{window}} window. Calculated once at every
-        {{window}}</p>
-
-    <div class="row">
-        <div class="col-md-6 col-lg-4"
-             ng-repeat="(metric,percentiles) in windowed">
-            <h3>{{metric}}</h3>
-            <p>{{percentiles.numOps}} sample</p>
-            <nvd3 options="$ctrl.percentileGraphOptions"
-                  data="percentiles.graphdata"></nvd3>
-        </div>
-    </div>
-
-</div>
-<div class="row">
-    <div ng-show="$ctrl.metrics.numavgs" class="col-md-6">
-        <h2>Number of ops / Averages</h2>
-
-        <table class="table table-bordered table-striped">
-            <thead>
-            <tr>
-                <th>Metric name</th>
-                <th>Number of ops</th>
-                <th>Average time (ms)</th>
-            </tr>
-            </thead>
-            <tr ng-repeat="(key,metric) in $ctrl.metrics.numavgs">
-                <td>{{key}}</td>
-                <td>{{metric.numOps | number}}</td>
-                <td>{{metric.avgTime | number:2}}</td>
-            </tr>
-        </table>
-    </div>
-    <div ng-show="$ctrl.metrics.successfailures" class="col-md-6">
-        <h2>Success / Failures</h2>
-
-        <table class="table table-bordered table-striped">
-            <thead>
-            <tr>
-                <th>Metric name</th>
-                <th>Success</th>
-                <th>Failures</th>
-            </tr>
-            </thead>
-
-            <tr ng-repeat="(key,metric) in $ctrl.metrics.successfailures">
-                <td>{{key}}</td>
-                <td>{{metric.success}}</td>
-                <td>{{metric.failures}}</td>
-            </tr>
-        </table>
-    </div>
-</div>
-<div ng-show="$ctrl.metrics.others">
-    <h2>Other JMX Metrics</h2>
-
-    <table class="table">
-        <tr ng-repeat="metric in $ctrl.metrics.others">
-            <td>{{metric.key}}</td>
-            <td>{{metric.value}}</td>
-        </tr>
-    </table>
-</div>
\ No newline at end of file
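
As the alert at the top of this template notes, the percentile graphs only
render once RPC quantiles are turned on. The hdfs-site.xml fragment the
alert describes would look roughly like this (60,300 are the sample
intervals the alert text itself suggests):

    <property>
      <name>rpc.metrics.quantile.enable</name>
      <value>true</value>
    </property>
    <property>
      <name>rpc.metrics.percentiles.intervals</name>
      <value>60,300</value>
    </property>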

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java
deleted file mode 100644
index c6eae0e..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test Common ozone/hdds web methods.
- */
-public class TestBaseHttpServer {
-  @Test
-  public void getBindAddress() throws Exception {
-    Configuration conf = new Configuration();
-    conf.set("enabled", "false");
-
-    BaseHttpServer baseHttpServer = new BaseHttpServer(conf, "test") {
-      @Override
-      protected String getHttpAddressKey() {
-        return null;
-      }
-
-      @Override
-      protected String getHttpsAddressKey() {
-        return null;
-      }
-
-      @Override
-      protected String getHttpBindHostKey() {
-        return null;
-      }
-
-      @Override
-      protected String getHttpsBindHostKey() {
-        return null;
-      }
-
-      @Override
-      protected String getBindHostDefault() {
-        return null;
-      }
-
-      @Override
-      protected int getHttpBindPortDefault() {
-        return 0;
-      }
-
-      @Override
-      protected int getHttpsBindPortDefault() {
-        return 0;
-      }
-
-      @Override
-      protected String getKeytabFile() {
-        return null;
-      }
-
-      @Override
-      protected String getSpnegoPrincipal() {
-        return null;
-      }
-
-      @Override
-      protected String getEnabledKey() {
-        return "enabled";
-      }
-    };
-
-    conf.set("addresskey", "0.0.0.0:1234");
-
-    Assert.assertEquals("/0.0.0.0:1234", baseHttpServer
-        .getBindAddress("bindhostkey", "addresskey",
-            "default", 65).toString());
-
-    conf.set("bindhostkey", "1.2.3.4");
-
-    Assert.assertEquals("/1.2.3.4:1234", baseHttpServer
-        .getBindAddress("bindhostkey", "addresskey",
-            "default", 65).toString());
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
deleted file mode 100644
index 3f34a70..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Dummy class for testing to collect all the received events.
- */
-public class EventHandlerStub<PAYLOAD> implements EventHandler<PAYLOAD> {
-
-  private List<PAYLOAD> receivedEvents = new ArrayList<>();
-
-  @Override
-  public void onMessage(PAYLOAD payload, EventPublisher publisher) {
-    receivedEvents.add(payload);
-  }
-
-  public List<PAYLOAD> getReceivedEvents() {
-    return receivedEvents;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
deleted file mode 100644
index 0c1200f..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-
-/**
- * Testing the basic functionality of the event queue.
- */
-public class TestEventQueue {
-
-  private static final Event<Long> EVENT1 =
-      new TypedEvent<>(Long.class, "SCM_EVENT1");
-  private static final Event<Long> EVENT2 =
-      new TypedEvent<>(Long.class, "SCM_EVENT2");
-
-  private static final Event<Long> EVENT3 =
-      new TypedEvent<>(Long.class, "SCM_EVENT3");
-  private static final Event<Long> EVENT4 =
-      new TypedEvent<>(Long.class, "SCM_EVENT4");
-
-  private EventQueue queue;
-
-  @Before
-  public void startEventQueue() {
-    DefaultMetricsSystem.initialize(getClass().getSimpleName());
-    queue = new EventQueue();
-  }
-
-  @After
-  public void stopEventQueue() {
-    DefaultMetricsSystem.shutdown();
-    queue.close();
-  }
-
-  @Test
-  public void simpleEvent() {
-
-    final long[] result = new long[2];
-
-    queue.addHandler(EVENT1, (payload, publisher) -> result[0] = payload);
-
-    queue.fireEvent(EVENT1, 11L);
-    queue.processAll(1000);
-    Assert.assertEquals(11, result[0]);
-
-  }
-
-  @Test
-  public void multipleSubscriber() {
-    final long[] result = new long[2];
-    queue.addHandler(EVENT2, (payload, publisher) -> result[0] = payload);
-
-    queue.addHandler(EVENT2, (payload, publisher) -> result[1] = payload);
-
-    queue.fireEvent(EVENT2, 23L);
-    queue.processAll(1000);
-    Assert.assertEquals(23, result[0]);
-    Assert.assertEquals(23, result[1]);
-
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
deleted file mode 100644
index bb05ef4..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import org.junit.Test;
-
-/**
- * More realistic event test with sending event from one listener.
- */
-public class TestEventQueueChain {
-
-  private static final Event<FailedNode> DECOMMISSION =
-      new TypedEvent<>(FailedNode.class);
-
-  private static final Event<FailedNode> DECOMMISSION_START =
-      new TypedEvent<>(FailedNode.class);
-
-  @Test
-  public void simpleEvent() {
-    EventQueue queue = new EventQueue();
-
-    queue.addHandler(DECOMMISSION, new PipelineManager());
-    queue.addHandler(DECOMMISSION_START, new NodeWatcher());
-
-    queue.fireEvent(DECOMMISSION, new FailedNode("node1"));
-
-    queue.processAll(5000);
-  }
-
-
-  static class FailedNode {
-    private final String nodeId;
-
-    FailedNode(String nodeId) {
-      this.nodeId = nodeId;
-    }
-
-    String getNodeId() {
-      return nodeId;
-    }
-  }
-
-  private static class PipelineManager implements EventHandler<FailedNode> {
-
-    @Override
-    public void onMessage(FailedNode message, EventPublisher publisher) {
-
-      System.out.println(
-          "Closing pipelines for all pipelines including node: " + message
-              .getNodeId());
-
-      publisher.fireEvent(DECOMMISSION_START, message);
-    }
-
-  }
-
-  private static class NodeWatcher implements EventHandler<FailedNode> {
-
-    @Override
-    public void onMessage(FailedNode message, EventPublisher publisher) {
-      System.out.println("Clear timer");
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
deleted file mode 100644
index b72d2ae..0000000
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import java.util.List;
-import java.util.Objects;
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Test the basic functionality of event watcher.
- */
-public class TestEventWatcher {
-
-  private static final TypedEvent<UnderreplicatedEvent> WATCH_UNDER_REPLICATED =
-      new TypedEvent<>(UnderreplicatedEvent.class);
-
-  private static final TypedEvent<UnderreplicatedEvent> UNDER_REPLICATED =
-      new TypedEvent<>(UnderreplicatedEvent.class);
-
-  private static final TypedEvent<ReplicationCompletedEvent>
-      REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class);
-
-  LeaseManager<Long> leaseManager;
-
-  @Before
-  public void startLeaseManager() {
-    DefaultMetricsSystem.instance();
-    leaseManager = new LeaseManager<>("Test", 2000L);
-    leaseManager.start();
-  }
-
-  @After
-  public void stopLeaseManager() {
-    leaseManager.shutdown();
-    DefaultMetricsSystem.shutdown();
-  }
-
-
-  @Test
-  public void testEventHandling() throws InterruptedException {
-    EventQueue queue = new EventQueue();
-
-    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-        replicationWatcher = createEventWatcher();
-
-    EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
-        new EventHandlerStub<>();
-
-    queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
-
-    replicationWatcher.start(queue);
-
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(id1, "C1"));
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(id2, "C2"));
-
-    Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size());
-
-    Thread.sleep(1000);
-
-    queue.fireEvent(REPLICATION_COMPLETED,
-        new ReplicationCompletedEvent(id1, "C2", "D1"));
-
-    Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size());
-
-    Thread.sleep(1500);
-
-    queue.processAll(1000L);
-
-    Assert.assertEquals(1, underReplicatedEvents.getReceivedEvents().size());
-    Assert.assertEquals(id2,
-        underReplicatedEvents.getReceivedEvents().get(0).id);
-
-  }
-
-  @Test
-  public void testInprogressFilter() throws InterruptedException {
-
-    EventQueue queue = new EventQueue();
-
-    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-        replicationWatcher = createEventWatcher();
-
-    EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
-        new EventHandlerStub<>();
-
-    queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
-
-    replicationWatcher.start(queue);
-
-    UnderreplicatedEvent event1 =
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2"));
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"));
-
-    queue.processAll(1000L);
-    Thread.sleep(1000L);
-    List<UnderreplicatedEvent> c1todo = replicationWatcher
-        .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1"));
-
-    Assert.assertEquals(2, c1todo.size());
-    Assert.assertTrue(replicationWatcher.contains(event1));
-    Thread.sleep(1500L);
-
-    c1todo = replicationWatcher
-        .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1"));
-    Assert.assertEquals(0, c1todo.size());
-    Assert.assertFalse(replicationWatcher.contains(event1));
-
-  }
-
-  @Test
-  public void testMetrics() throws InterruptedException {
-
-    DefaultMetricsSystem.initialize("test");
-
-    EventQueue queue = new EventQueue();
-
-    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-        replicationWatcher = createEventWatcher();
-
-    EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
-        new EventHandlerStub<>();
-
-    queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
-
-    replicationWatcher.start(queue);
-
-    //send 3 event to track 3 in-progress activity
-    UnderreplicatedEvent event1 =
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
-
-    UnderreplicatedEvent event2 =
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2");
-
-    UnderreplicatedEvent event3 =
-        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED, event2);
-
-    queue.fireEvent(WATCH_UNDER_REPLICATED, event3);
-
-    //1st event is completed, don't need to track any more
-    ReplicationCompletedEvent event1Completed =
-        new ReplicationCompletedEvent(event1.id, "C1", "D1");
-
-    queue.fireEvent(REPLICATION_COMPLETED, event1Completed);
-
-    Thread.sleep(2200l);
-
-    //until now: 3 in-progress activities are tracked with three
-    // UnderreplicatedEvents. The first one is completed, the remaining two
-    // are timed out (as the timeout -- defined in the leasmanager -- is 2000ms.
-
-    EventWatcherMetrics metrics = replicationWatcher.getMetrics();
-
-    //3 events are received
-    Assert.assertEquals(3, metrics.getTrackedEvents().value());
-
-    //one is finished. doesn't need to be resent
-    Assert.assertEquals(1, metrics.getCompletedEvents().value());
-
-    //Other two are timed out and resent
-    Assert.assertEquals(2, metrics.getTimedOutEvents().value());
-
-    DefaultMetricsSystem.shutdown();
-  }
-
-  private EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-  createEventWatcher() {
-    return new CommandWatcherExample(WATCH_UNDER_REPLICATED,
-        REPLICATION_COMPLETED, leaseManager);
-  }
-
-  private class CommandWatcherExample
-      extends EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> {
-
-    public CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
-        Event<ReplicationCompletedEvent> completionEvent,
-        LeaseManager<Long> leaseManager) {
-      super("TestCommandWatcher", startEvent, completionEvent, leaseManager);
-    }
-
-    @Override
-    protected void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
-      publisher.fireEvent(UNDER_REPLICATED, payload);
-    }
-
-    @Override
-    protected void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
-      //Good job. We did it.
-    }
-
-    @Override
-    public EventWatcherMetrics getMetrics() {
-      return super.getMetrics();
-    }
-  }
-
-  private static class ReplicationCompletedEvent
-      implements IdentifiableEventPayload {
-
-    private final long id;
-
-    private final String containerId;
-
-    private final String datanodeId;
-
-    public ReplicationCompletedEvent(long id, String containerId,
-        String datanodeId) {
-      this.id = id;
-      this.containerId = containerId;
-      this.datanodeId = datanodeId;
-    }
-
-    public long getId() {
-      return id;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (o == null || getClass() != o.getClass()) {
-        return false;
-      }
-      ReplicationCompletedEvent that = (ReplicationCompletedEvent) o;
-      return Objects.equals(containerId, that.containerId) && Objects
-          .equals(datanodeId, that.datanodeId);
-    }
-
-    @Override
-    public int hashCode() {
-
-      return Objects.hash(containerId, datanodeId);
-    }
-  }
-
-  private static class UnderreplicatedEvent
-
-      implements IdentifiableEventPayload {
-
-    private final long id;
-
-    private final String containerId;
-
-    public UnderreplicatedEvent(long id, String containerId) {
-      this.containerId = containerId;
-      this.id = id;
-    }
-
-    public long getId() {
-      return id;
-    }
-  }
-
-}
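
Taken together, these tests document the watcher contract: fire the start
event to begin tracking a payload, fire the completion event with a matching
id to finish it, and let the lease expire to trigger onTimeout and a resend.
A compact sketch of that wiring, distilled from CommandWatcherExample above
(the events, payloads, and 2000 ms lease are the test's own; no new API is
assumed):

    EventQueue queue = new EventQueue();
    LeaseManager<Long> leases = new LeaseManager<>("Example", 2000L);
    leases.start();

    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> watcher =
        new CommandWatcherExample(WATCH_UNDER_REPLICATED,
            REPLICATION_COMPLETED, leases);
    watcher.start(queue);

    // Begin tracking; if no ReplicationCompletedEvent with the same id
    // arrives before the lease expires, onTimeout re-fires UNDER_REPLICATED.
    queue.fireEvent(WATCH_UNDER_REPLICATED,
        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"));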

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/framework/src/test/resources/ozone-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/resources/ozone-site.xml b/hadoop-hdds/framework/src/test/resources/ozone-site.xml
deleted file mode 100644
index 77dd7ef..0000000
--- a/hadoop-hdds/framework/src/test/resources/ozone-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
deleted file mode 100644
index 59dfa79..0000000
--- a/hadoop-hdds/pom.xml
+++ /dev/null
@@ -1,203 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project-dist</artifactId>
-    <version>3.2.0-SNAPSHOT</version>
-    <relativePath>../hadoop-project-dist</relativePath>
-  </parent>
-
-  <artifactId>hadoop-hdds</artifactId>
-  <version>0.3.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Project</description>
-  <name>Apache Hadoop HDDS</name>
-  <packaging>pom</packaging>
-
-  <modules>
-    <module>client</module>
-    <module>common</module>
-    <module>framework</module>
-    <module>container-service</module>
-    <module>server-scm</module>
-    <module>tools</module>
-
-  </modules>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>info.picocli</groupId>
-      <artifactId>picocli</artifactId>
-      <version>3.5.2</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>**/hs_err*.log</exclude>
-            <exclude>**/target/**</exclude>
-            <exclude>.gitattributes</exclude>
-            <exclude>.idea/**</exclude>
-            <exclude>src/main/resources/webapps/static/angular-1.6.4.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/angular-route-1.6.4.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/d3-3.5.17.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.css.map</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.css</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js.map</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js</exclude>
-            <exclude>src/test/resources/additionalfields.container</exclude>
-            <exclude>src/test/resources/incorrect.checksum.container</exclude>
-            <exclude>src/test/resources/incorrect.container</exclude>
-            <exclude>src/test/resources/test.db.ini</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile combine.self="override"></excludeFilterFile>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-classpath-descriptor</id>
-            <phase>package</phase>
-            <goals>
-              <goal>build-classpath</goal>
-            </goals>
-            <configuration>
-              <attach>true</attach>
-              <prefix>$HDDS_LIB_JARS_DIR</prefix>
-              <outputFilterFile>true</outputFilterFile>
-              <includeScope>runtime</includeScope>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>parallel-tests</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-maven-plugins</artifactId>
-            <executions>
-              <execution>
-                <id>parallel-tests-createdir</id>
-                <goals>
-                  <goal>parallel-tests-createdir</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-surefire-plugin</artifactId>
-            <configuration>
-              <forkCount>${testsThreadCount}</forkCount>
-              <reuseForks>false</reuseForks>
-              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
-              <systemPropertyVariables>
-                <testsThreadCount>${testsThreadCount}</testsThreadCount>
-                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
-                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
-                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
-
-                <!-- This is intentionally the same directory for all JUnit -->
-                <!-- forks, for use in the very rare situation that -->
-                <!-- concurrent tests need to coordinate, such as using lock -->
-                <!-- files. -->
-                <test.build.shared.data>${test.build.data}</test.build.shared.data>
-
-                <!-- Due to a Maven quirk, setting this to just -->
-                <!-- surefire.forkNumber won't do the parameter substitution. -->
-                <!-- Putting a prefix in front of it like "fork-" makes it -->
-                <!-- work. -->
-                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
-              </systemPropertyVariables>
-            </configuration>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-</project>
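
For context, the parallel-tests profile above forks one surefire JVM per
${testsThreadCount} and isolates each fork's data, build, and tmp directories
via ${surefire.forkNumber}. A hedged invocation sketch (the property name is
inferred from the ${testsThreadCount} references in the profile itself):

    mvn test -Pparallel-tests -DtestsThreadCount=4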

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
deleted file mode 100644
index f34e848..0000000
--- a/hadoop-hdds/server-scm/pom.xml
+++ /dev/null
@@ -1,129 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0";
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd";>
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.3.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-server-scm</artifactId>
-  <version>0.3.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Storage Container Manager Server</description>
-  <name>Apache Hadoop HDDS SCM Server</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-client</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
-    <dependency>
-      <groupId>org.hamcrest</groupId>
-      <artifactId>hamcrest-core</artifactId>
-      <version>1.3</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.assertj</groupId>
-      <artifactId>assertj-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-generator-annprocess</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.hamcrest</groupId>
-      <artifactId>hamcrest-all</artifactId>
-      <version>1.3</version>
-    </dependency>
-    <dependency>
-      <groupId>org.bouncycastle</groupId>
-      <artifactId>bcprov-jdk16</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>copy-common-html</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>unpack</goal>
-            </goals>
-            <configuration>
-              <artifactItems>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-hdds-server-framework</artifactId>
-                  <outputDirectory>${project.build.outputDirectory}
-                  </outputDirectory>
-                  <includes>webapps/static/**/*.*</includes>
-                </artifactItem>
-              </artifactItems>
-              <overWriteSnapshots>true</overWriteSnapshots>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
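
A note on the copy-common-html execution above: at prepare-package it unpacks
webapps/static/**/*.* from the hadoop-hdds-server-framework artifact into
${project.build.outputDirectory}, so the SCM server serves the shared static
web assets without duplicating them in this module. A hedged build sketch
(module path assumed from the repository layout):

    mvn -pl hadoop-hdds/server-scm -am package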

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
deleted file mode 100644
index 435f0a5..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.server.Precheck;
-
-/**
- * SCM utility class.
- */
-public final class ScmUtils {
-
-  private ScmUtils() {
-  }
-
-  /**
-   * Performs all prechecks for the given SCM operation.
-   *
-   * @param operation the SCM operation being performed
-   * @param preChecks prechecks to be performed
-   */
-  public static void preCheck(ScmOps operation, Precheck... preChecks)
-      throws SCMException {
-    for (Precheck preCheck : preChecks) {
-      preCheck.check(operation);
-    }
-  }
-}
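
For reference, a minimal usage sketch of ScmUtils.preCheck, mirroring how
BlockManagerImpl (later in this patch) guards allocateBlock; the wrapper class
and field here are illustrative assumptions, not part of the patch:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
    import org.apache.hadoop.hdds.scm.ScmUtils;
    import org.apache.hadoop.hdds.scm.exceptions.SCMException;
    import org.apache.hadoop.hdds.scm.server.ChillModePrecheck;

    public class PreCheckSketch {
      private final ChillModePrecheck chillModePrecheck = new ChillModePrecheck();

      public void guardedAllocate() throws SCMException {
        // Throws SCMException if SCM is still in chill mode; otherwise the
        // operation may proceed. The varargs form composes several prechecks.
        ScmUtils.preCheck(ScmOps.allocateBlock, chillModePrecheck);
      }
    }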

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
deleted file mode 100644
index f9aa0cd..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.block;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.client.BlockID;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Block APIs.
- * Containers are transparent to these APIs.
- */
-public interface BlockManager extends Closeable {
-  /**
-   * Allocates a new block of the given size.
-   * @param size - Block Size
-   * @param type - Replication Type
-   * @param factor - Replication Factor
-   * @param owner - owner of the block
-   * @return AllocatedBlock
-   * @throws IOException
-   */
-  AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, String owner) throws IOException;
-
-  /**
-   * Deletes a list of blocks in an atomic operation. Internally, SCM
-   * writes these blocks into a {@link DeletedBlockLog} and deletes them
-   * from the SCM DB. If this is successful, the given blocks enter the
-   * pending-deletion state and become invisible in the SCM namespace.
-   *
-   * @param blockIDs block IDs. This is often the list of blocks of
-   *                 a particular object key.
-   * @throws IOException if an exception occurs, none of the blocks is deleted.
-   */
-  void deleteBlocks(List<BlockID> blockIDs) throws IOException;
-
-  /**
-   * @return the block deletion transaction log maintained by SCM.
-   */
-  DeletedBlockLog getDeletedBlockLog();
-
-  /**
-   * Start block manager background services.
-   * @throws IOException
-   */
-  void start() throws IOException;
-
-  /**
-   * Shutdown block manager background services.
-   * @throws IOException
-   */
-  void stop() throws IOException;
-
-  /**
-   * @return the block deleting service executed in SCM.
-   */
-  SCMBlockDeletingService getSCMBlockDeletingService();
-}
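
A hedged caller-side sketch of the interface above; the BlockManager instance,
the 256 MB size, and the AllocatedBlock#getBlockID accessor (assumed to mirror
the builder's setBlockID seen later in this patch) are illustrative
assumptions:

    import java.io.IOException;
    import java.util.Collections;
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;

    public class BlockManagerCallerSketch {
      void roundTrip(BlockManager blockManager) throws IOException {
        // Ask SCM for a 256 MB block, Ratis-replicated three ways.
        AllocatedBlock block = blockManager.allocateBlock(
            256L * 1024 * 1024,
            HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE,
            "ozone");
        if (block == null) {
          return; // no container could be found or provisioned
        }
        // Later: queue the block for deletion via the DeletedBlockLog path.
        BlockID id = block.getBlockID();
        blockManager.deleteBlocks(Collections.singletonList(id));
      }
    }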

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
deleted file mode 100644
index d383c68..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ /dev/null
@@ -1,485 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmUtils;
-import org.apache.hadoop.hdds.scm.container.Mapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.server.ChillModePrecheck;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .CHILL_MODE_EXCEPTION;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .INVALID_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-
-/** Block Manager manages the block access for SCM. */
-public class BlockManagerImpl implements EventHandler<Boolean>,
-    BlockManager, BlockmanagerMXBean {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BlockManagerImpl.class);
-  // TODO : FIX ME : Hard coding the owner.
-  // Currently the only user of the block service is Ozone; CBlock manages
-  // blocks by itself and does not rely on the block service offered by SCM.
-
-  private final NodeManager nodeManager;
-  private final Mapping containerManager;
-
-  private final long containerSize;
-
-  private final DeletedBlockLog deletedBlockLog;
-  private final SCMBlockDeletingService blockDeletingService;
-
-  private final int containerProvisionBatchSize;
-  private final Random rand;
-  private ObjectName mxBean;
-  private ChillModePrecheck chillModePrecheck;
-
-  /**
-   * Constructor.
-   *
-   * @param conf - configuration.
-   * @param nodeManager - node manager.
-   * @param containerManager - container manager.
-   * @param eventPublisher - event publisher.
-   * @throws IOException
-   */
-  public BlockManagerImpl(final Configuration conf,
-      final NodeManager nodeManager, final Mapping containerManager,
-      EventPublisher eventPublisher)
-      throws IOException {
-    this.nodeManager = nodeManager;
-    this.containerManager = containerManager;
-
-    this.containerSize = (long)conf.getStorageSize(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-
-    this.containerProvisionBatchSize =
-        conf.getInt(
-            ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
-            ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT);
-    rand = new Random();
-
-    mxBean = MBeans.register("BlockManager", "BlockManagerImpl", this);
-
-    // SCM block deleting transaction log and deleting service.
-    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
-    long svcInterval =
-        conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-            OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    long serviceTimeout =
-        conf.getTimeDuration(
-            OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-            OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    blockDeletingService =
-        new SCMBlockDeletingService(deletedBlockLog, containerManager,
-            nodeManager, eventPublisher, svcInterval, serviceTimeout, conf);
-    chillModePrecheck = new ChillModePrecheck();
-  }
-
-  /**
-   * Start block manager services.
-   *
-   * @throws IOException
-   */
-  public void start() throws IOException {
-    this.blockDeletingService.start();
-  }
-
-  /**
-   * Shutdown block manager services.
-   *
-   * @throws IOException
-   */
-  public void stop() throws IOException {
-    this.blockDeletingService.shutdown();
-    this.close();
-  }
-
-  /**
-   * Pre-allocates the specified count of containers for block creation.
-   *
-   * @param count - Number of containers to allocate.
-   * @param type - Type of containers
-   * @param factor - how many copies needed for this container.
-   * @param owner - owner of the containers
-   * @throws IOException
-   */
-  private synchronized void preAllocateContainers(int count,
-      ReplicationType type, ReplicationFactor factor, String owner)
-      throws IOException {
-    for (int i = 0; i < count; i++) {
-      ContainerWithPipeline containerWithPipeline;
-      try {
-        // TODO: Fix this later when Ratis is made the Default.
-        containerWithPipeline = containerManager.allocateContainer(
-            type, factor, owner);
-
-        if (containerWithPipeline == null) {
-          LOG.warn("Unable to allocate container.");
-        }
-      } catch (IOException ex) {
-        LOG.warn("Unable to allocate container: {}", ex);
-      }
-    }
-  }
-
-  /**
-   * Allocates a block in a container and returns that info.
-   *
-   * @param size - Block Size
-   * @param type - Replication Type
-   * @param factor - Replication Factor
-   * @param owner - owner of the block
-   * @return Allocated block
-   * @throws IOException on failure.
-   */
-  @Override
-  public AllocatedBlock allocateBlock(final long size,
-      ReplicationType type, ReplicationFactor factor, String owner)
-      throws IOException {
-    LOG.trace("Size;{} , type : {}, factor : {} ", size, type, factor);
-    ScmUtils.preCheck(ScmOps.allocateBlock, chillModePrecheck);
-    if (size < 0 || size > containerSize) {
-      LOG.warn("Invalid block size requested : {}", size);
-      throw new SCMException("Unsupported block size: " + size,
-          INVALID_BLOCK_SIZE);
-    }
-
-    /*
-      Here is the high-level logic.
-
-      1. First we check if there are containers in the ALLOCATED state; that
-         is, SCM has allocated them in the SCM namespace but the
-         corresponding container has not been created on the Datanode yet.
-         If we have any in that state, we return one to the client, which
-         allows the client to finish creating those containers. This is a
-         sort of greedy algorithm; our primary purpose is to get as many
-         containers as possible.
-
-      2. If there are no ALLOCATED containers, then we find an OPEN
-         container that matches the criteria.
-
-      3. If both of these fail, then we pre-allocate a batch of containers
-         in SCM and try again.
-
-      TODO : Support random picking of two containers from the list, so we
-             can use different kinds of policies.
-    */
-
-    ContainerWithPipeline containerWithPipeline;
-
-    // This is to optimize performance; if the below condition evaluates
-    // to false, then we can be sure that there are no containers in the
-    // ALLOCATED state.
-    // This can result in a false positive, but it will never be a false
-    // negative. How can this result in a false positive? We check if there
-    // are any containers in the ALLOCATED state; this check doesn't care
-    // about the USER of the containers. So there might be cases where a
-    // different USER has a few containers in ALLOCATED state, which will
-    // result in a false positive.
-    if (!containerManager.getStateManager().getContainerStateMap()
-        .getContainerIDsByState(HddsProtos.LifeCycleState.ALLOCATED)
-        .isEmpty()) {
-      // Since the above check can result in false positive, we have to do
-      // the actual check and find out if there are containers in ALLOCATED
-      // state matching our criteria.
-      synchronized (this) {
-        // Using containers from the ALLOCATED state should be done within
-        // a synchronized block (or under a write lock). Since we already
-        // hold a read lock, we would end up in a deadlock if we took the
-        // write lock here.
-        containerWithPipeline = containerManager
-            .getMatchingContainerWithPipeline(size, owner, type, factor,
-                HddsProtos.LifeCycleState.ALLOCATED);
-        if (containerWithPipeline != null) {
-          containerManager.updateContainerState(
-              containerWithPipeline.getContainerInfo().getContainerID(),
-              HddsProtos.LifeCycleEvent.CREATE);
-          return newBlock(containerWithPipeline,
-              HddsProtos.LifeCycleState.ALLOCATED);
-        }
-      }
-    }
-
-    // Since we found no allocated containers that match our criteria, let us
-    // look for OPEN containers that match the criteria.
-    containerWithPipeline = containerManager
-        .getMatchingContainerWithPipeline(size, owner, type, factor,
-            HddsProtos.LifeCycleState.OPEN);
-    if (containerWithPipeline != null) {
-      return newBlock(containerWithPipeline, HddsProtos.LifeCycleState.OPEN);
-    }
-
-    // We found neither ALLOCATED nor OPEN containers. This generally means
-    // that most of our containers are full, or we have not yet allocated
-    // containers of this type and replication factor. So let us go and
-    // allocate some.
-
-    // Even though we have already checked the containers in the ALLOCATED
-    // state, we have to check again as we only hold a read lock.
-    // Some other thread might have pre-allocated containers in the meantime.
-    synchronized (this) {
-      if (!containerManager.getStateManager().getContainerStateMap()
-          .getContainerIDsByState(HddsProtos.LifeCycleState.ALLOCATED)
-          .isEmpty()) {
-        containerWithPipeline = containerManager
-            .getMatchingContainerWithPipeline(size, owner, type, factor,
-                HddsProtos.LifeCycleState.ALLOCATED);
-      }
-      if (containerWithPipeline == null) {
-        preAllocateContainers(containerProvisionBatchSize,
-            type, factor, owner);
-        containerWithPipeline = containerManager
-            .getMatchingContainerWithPipeline(size, owner, type, factor,
-                HddsProtos.LifeCycleState.ALLOCATED);
-      }
-
-      if (containerWithPipeline != null) {
-        containerManager.updateContainerState(
-            containerWithPipeline.getContainerInfo().getContainerID(),
-            HddsProtos.LifeCycleEvent.CREATE);
-        return newBlock(containerWithPipeline,
-            HddsProtos.LifeCycleState.ALLOCATED);
-      }
-    }
-    // We have tried all the strategies we know, but somehow we are still
-    // unable to get a container for this block. Log that and return null.
-    LOG.error(
-        "Unable to allocate a block for the size: {}, type: {}, factor: {}",
-        size, type, factor);
-    return null;
-  }
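
The method above reads as a three-step fallback; a condensed sketch follows
(findMatching is a hypothetical stand-in for the
getMatchingContainerWithPipeline calls, and the locking shown above is
omitted):

    private ContainerWithPipeline findContainerForBlock(long size, String owner,
        ReplicationType type, ReplicationFactor factor) throws IOException {
      // 1. Prefer an ALLOCATED container so the client finishes creating it.
      ContainerWithPipeline c = findMatching(size, owner, type, factor,
          HddsProtos.LifeCycleState.ALLOCATED);
      // 2. Otherwise fall back to an already OPEN container.
      if (c == null) {
        c = findMatching(size, owner, type, factor,
            HddsProtos.LifeCycleState.OPEN);
      }
      // 3. Otherwise provision a batch and retry the ALLOCATED lookup.
      if (c == null) {
        preAllocateContainers(containerProvisionBatchSize, type, factor, owner);
        c = findMatching(size, owner, type, factor,
            HddsProtos.LifeCycleState.ALLOCATED);
      }
      return c;
    }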
-
-  /**
-   * newBlock - returns a new block assigned to a container.
-   *
-   * @param containerWithPipeline - Container Info.
-   * @param state - Current state of the container.
-   * @return AllocatedBlock
-   */
-  private AllocatedBlock newBlock(ContainerWithPipeline containerWithPipeline,
-      HddsProtos.LifeCycleState state) throws IOException {
-    ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
-    if (containerWithPipeline.getPipeline().getDatanodes().isEmpty()) {
-      LOG.error("Pipeline Machine count is zero.");
-      return null;
-    }
-
-    // TODO : Revisit this local ID allocation when HA is added.
-    long localID = UniqueId.next();
-    long containerID = containerInfo.getContainerID();
-
-    boolean createContainer = (state == HddsProtos.LifeCycleState.ALLOCATED);
-
-    AllocatedBlock.Builder abb =
-        new AllocatedBlock.Builder()
-            .setBlockID(new BlockID(containerID, localID))
-            .setPipeline(containerWithPipeline.getPipeline())
-            .setShouldCreateContainer(createContainer);
-    LOG.trace("New block allocated : {} Container ID: {}", localID,
-        containerID);
-    return abb.build();
-  }
-
-  /**
-   * Deletes a list of blocks in an atomic operation. Internally, SCM writes
-   * these blocks into a {@link DeletedBlockLog} and deletes them from the
-   * SCM DB. If this is successful, the given blocks enter the
-   * pending-deletion state and become invisible in the SCM namespace.
-   *
-   * @param blockIDs block IDs. This is often the list of blocks of a
-   * particular object key.
-   * @throws IOException if an exception occurs, none of the blocks is deleted.
-   */
-  @Override
-  public void deleteBlocks(List<BlockID> blockIDs) throws IOException {
-    if (!nodeManager.isOutOfChillMode()) {
-      throw new SCMException("Unable to delete block while in chill mode",
-          CHILL_MODE_EXCEPTION);
-    }
-
-    LOG.info("Deleting blocks {}", StringUtils.join(",", blockIDs));
-    Map<Long, List<Long>> containerBlocks = new HashMap<>();
-    // TODO: track the block size info so that we can reclaim the container
-    // TODO: used space when the block is deleted.
-    for (BlockID block : blockIDs) {
-      // Merge blocks into a container-to-blocks mapping, in
-      // preparation for persisting this info to the deletedBlocksLog.
-      long containerID = block.getContainerID();
-      if (containerBlocks.containsKey(containerID)) {
-        containerBlocks.get(containerID).add(block.getLocalID());
-      } else {
-        List<Long> item = new ArrayList<>();
-        item.add(block.getLocalID());
-        containerBlocks.put(containerID, item);
-      }
-    }
-
-    try {
-      deletedBlockLog.addTransactions(containerBlocks);
-    } catch (IOException e) {
-      throw new IOException(
-          "Skipped writing the deleted blocks to"
-              + " the delLog because addTransactions failed. Batch skipped: "
-              + StringUtils.join(",", blockIDs), e);
-    }
-    // TODO: Container report handling of the deleted blocks:
-    // Remove tombstone and update open container usage.
-    // We will revisit this when the closed container replication is done.
-  }
-
-  @Override
-  public DeletedBlockLog getDeletedBlockLog() {
-    return this.deletedBlockLog;
-  }
-
-  /**
-   * Close the resources for BlockManager.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void close() throws IOException {
-    if (deletedBlockLog != null) {
-      deletedBlockLog.close();
-    }
-    blockDeletingService.shutdown();
-    if (mxBean != null) {
-      MBeans.unregister(mxBean);
-      mxBean = null;
-    }
-  }
-
-  @Override
-  public int getOpenContainersNo() {
-    return 0;
-    // TODO : FIX ME : The open container count being a single number does
-    // not make sense.
-    // We have to get open containers by Replication Type and Replication
-    // factor. Hence returning 0 for now.
-    // containers.get(HddsProtos.LifeCycleState.OPEN).size();
-  }
-
-  @Override
-  public SCMBlockDeletingService getSCMBlockDeletingService() {
-    return this.blockDeletingService;
-  }
-
-  @Override
-  public void onMessage(Boolean inChillMode, EventPublisher publisher) {
-    this.chillModePrecheck.setInChillMode(inChillMode);
-  }
-
-  /**
-   * Returns the status of SCM chill mode, as determined by the
-   * CHILL_MODE_STATUS event.
-   */
-  public boolean isScmInChillMode() {
-    return this.chillModePrecheck.isInChillMode();
-  }
-
-  /**
-   * Gets the class logger.
-   */
-  public static Logger getLogger() {
-    return LOG;
-  }
-
-  /**
-   * This class uses the current system time in milliseconds to generate a unique id.
-   */
-  public static final class UniqueId {
-    /*
-     * When we represent time in milliseconds using the 'long' data type,
-     * the LSB bits are used. Currently we are only using 44 bits (LSB);
-     * the 20 bits (MSB) are not used.
-     * We will exhaust these 44 bits only in the year 2525; until then we
-     * can safely use the 20 MSB bits as an offset to generate unique ids
-     * within a millisecond.
-     *
-     * Year        : Mon Dec 31 18:49:04 IST 2525
-     * TimeInMillis: 17545641544247
-     * Binary Representation:
-     *   MSB (20 bits): 0000 0000 0000 0000 0000
-     *   LSB (44 bits): 1111 1111 0101 0010 1001 1011 1011 0100 1010 0011 0111
-     *
-     * We have 20 bits to run a counter, but we should exclude the first
-     * bit (MSB) as we don't want to deal with negative values.
-     * To be on the safer side we use the 'short' data type, which is
-     * 16 bits long and gives us 65,536 values for the offset.
-     *
-     */
-
-    private static volatile short offset = 0;
-
-    /**
-     * Private constructor so that no one can instantiate this class.
-     */
-    private UniqueId() {}
-
-    /**
-     * Calculates and returns the next unique id based on the current UTC time.
-     *
-     * @return unique long value
-     */
-    public static synchronized long next() {
-      long utcTime = Time.getUtcTime();
-      if ((utcTime & 0xFFFF000000000000L) == 0) {
-        return utcTime << Short.SIZE | (offset++ & 0x0000FFFF);
-      }
-      throw new RuntimeException("Got invalid UTC time," +
-          " cannot generate unique Id. UTC Time: " + utcTime);
-    }
-  }
-}
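
To make the UniqueId layout above concrete, here is a small standalone sketch
(not part of the patch) that composes an id the same way and decomposes it
again; System.currentTimeMillis() stands in for Time.getUtcTime():

    public final class UniqueIdDemo {
      public static void main(String[] args) {
        long utcTime = System.currentTimeMillis(); // fits well within 44 bits
        short offset = 42;                         // per-millisecond counter

        // Compose: bits 16..59 carry the time, the low 16 bits the offset.
        long id = utcTime << Short.SIZE | (offset & 0x0000FFFF);

        // Decompose and verify the round trip.
        long decodedTime = id >>> Short.SIZE;
        int decodedOffset = (int) (id & 0xFFFF);
        System.out.println(decodedTime == utcTime && decodedOffset == offset);
      }
    }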

