http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/ozone.css
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/ozone.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/ozone.css
deleted file mode 100644
index 271ac74..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/ozone.css
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- *   Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
-*/
-body {
-    padding: 40px;
-    padding-top: 60px;
-}
-.starter-template {
-    padding: 40px 15px;
-    text-align: center;
-}
-
-
-.btn {
-    border: 0 none;
-    font-weight: 700;
-    letter-spacing: 1px;
-    text-transform: uppercase;
-}
-
-.btn:focus, .btn:active:focus, .btn.active:focus {
-    outline: 0 none;
-}
-
-.table-striped > tbody > tr:nth-child(2n+1).selectedtag > td:hover {
-    background-color: #3276b1;
-}
-.table-striped > tbody > tr:nth-child(2n+1).selectedtag > td {
-    background-color: #3276b1;
-}
-.tagPanel tr.selectedtag td {
-    background-color: #3276b1;
-}
-.top-buffer { margin-top:4px; }
-
-
-.sortorder:after {
-    content: '\25b2';   // BLACK UP-POINTING TRIANGLE
-}
-.sortorder.reverse:after {
-    content: '\25bc';   // BLACK DOWN-POINTING TRIANGLE
-}
-
-.wrap-table{
-    word-wrap: break-word;
-    table-layout: fixed;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/ozone.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/ozone.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/ozone.js
deleted file mode 100644
index 37cafef..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/ozone.js
+++ /dev/null
@@ -1,355 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-
-    var isIgnoredJmxKeys = function (key) {
-        return key == 'name' || key == 'modelerType' || key == "$$hashKey" ||
-            key.match(/tag.*/);
-    };
-    angular.module('ozone', ['nvd3', 'ngRoute']);
-    angular.module('ozone').config(function ($routeProvider) {
-        $routeProvider
-            .when("/", {
-                templateUrl: "main.html"
-            })
-            .when("/metrics/rpc", {
-                template: "<rpc-metrics></rpc-metrics>"
-            })
-            .when("/config", {
-                template: "<config></config>"
-            })
-    });
-    angular.module('ozone').component('overview', {
-        templateUrl: 'static/templates/overview.html',
-        transclude: true,
-        controller: function ($http) {
-            var ctrl = this;
-            
$http.get("jmx?qry=Hadoop:service=*,name=*,component=ServerRuntime")
-                .then(function (result) {
-                    ctrl.jmx = result.data.beans[0]
-                })
-        }
-    });
-    angular.module('ozone').component('jvmParameters', {
-        templateUrl: 'static/templates/jvm.html',
-        controller: function ($http) {
-            var ctrl = this;
-            $http.get("jmx?qry=java.lang:type=Runtime")
-                .then(function (result) {
-                    ctrl.jmx = result.data.beans[0];
-
-                    //convert array to a map
-                    var systemProperties = {};
-                    for (var idx in ctrl.jmx.SystemProperties) {
-                        var item = ctrl.jmx.SystemProperties[idx];
-                        systemProperties[item.key.replace(/\./g, "_")] = 
item.value;
-                    }
-                    ctrl.jmx.SystemProperties = systemProperties;
-                })
-        }
-    });
-
-    angular.module('ozone').component('rpcMetrics', {
-        template: '<h1>Rpc metrics</h1><tabs>' +
-        '<pane ng-repeat="metric in $ctrl.metrics" ' +
-        'title="{{metric[\'tag.serverName\']}} ({{metric[\'tag.port\']}})">' +
-        '<rpc-metric jmxdata="metric"></rpc-metric></pane>' +
-        '</tabs>',
-        controller: function ($http) {
-            var ctrl = this;
-            $http.get("jmx?qry=Hadoop:service=*,name=RpcActivityForPort*")
-                .then(function (result) {
-                    ctrl.metrics = result.data.beans;
-                })
-        }
-    });
-    angular.module('ozone').component('rpcMetric', {
-        bindings: {
-            jmxdata: '<'
-        },
-        templateUrl: 'static/templates/rpc-metrics.html',
-        controller: function () {
-            var ctrl = this;
-
-
-            ctrl.percentileGraphOptions = {
-                chart: {
-                    type: 'discreteBarChart',
-                    height: 450,
-                    margin: {
-                        top: 20,
-                        right: 20,
-                        bottom: 50,
-                        left: 55
-                    },
-                    x: function (d) {
-                        return d.label;
-                    },
-                    y: function (d) {
-                        return d.value;
-                    },
-                    showValues: true,
-                    valueFormat: function (d) {
-                        return d3.format(',.1f')(d);
-                    },
-                    duration: 500,
-                    xAxis: {
-                        axisLabel: 'Percentage'
-                    },
-                    yAxis: {
-                        axisLabel: 'Latency (ms)',
-                        axisLabelDistance: -10
-                    }
-                }
-            };
-
-            ctrl.$onChanges = function (data) {
-                var groupedMetrics = {}
-
-                var createPercentageMetrics = function (metricName, window) {
-                    groupedMetrics.percentiles = groupedMetrics['percentiles'] 
|| {}
-                    groupedMetrics.percentiles[window] = 
groupedMetrics.percentiles[window] || {};
-                    groupedMetrics.percentiles[window][metricName] = 
groupedMetrics.percentiles[window][metricName] || {
-                            graphdata: [{
-                                key: window,
-                                values: []
-                            }], numOps: 0
-                        };
-
-                };
-                var metrics = ctrl.jmxdata;
-                for (var key in metrics) {
-                    var percentile = 
key.match(/(.*Time)(\d+s)(\d+th)PercentileLatency/);
-                    var percentileNumOps = key.match(/(.*Time)(\d+s)NumOps/);
-                    var successFailures = key.match(/(.*)(Success|Failures)/);
-                    var numAverages = key.match(/(.*Time)(NumOps|AvgTime)/);
-                    if (percentile) {
-                        var metricName = percentile[1];
-                        var window = percentile[2];
-                        var percentage = percentile[3]
-                        createPercentageMetrics(metricName, window);
-
-
-                        
groupedMetrics.percentiles[window][metricName].graphdata[0]
-                            .values.push({
-                            label: percentage,
-                            value: metrics[key]
-                        })
-                    } else if (successFailures) {
-                        var metricName = successFailures[1];
-                        groupedMetrics.successfailures = 
groupedMetrics['successfailures'] || {}
-                        groupedMetrics.successfailures[metricName] = 
groupedMetrics.successfailures[metricName] || {
-                                success: 0,
-                                failures: 0
-                            };
-                        if (successFailures[2] == 'Success') {
-                            groupedMetrics.successfailures[metricName].success 
= metrics[key];
-                        } else {
-                            
groupedMetrics.successfailures[metricName].failures = metrics[key];
-                        }
-
-                    } else if (numAverages) {
-                        var metricName = numAverages[1];
-                        groupedMetrics.numavgs = groupedMetrics['numavgs'] || 
{}
-                        groupedMetrics.numavgs[metricName] = 
groupedMetrics.numavgs[metricName] || {
-                                numOps: 0,
-                                avgTime: 0
-                            };
-                        if (numAverages[2] == 'NumOps') {
-                            groupedMetrics.numavgs[metricName].numOps = 
metrics[key];
-                        } else {
-                            groupedMetrics.numavgs[metricName].avgTime = 
metrics[key];
-                        }
-
-                    } else if (percentileNumOps) {
-                        var metricName = percentileNumOps[1];
-                        var window = percentileNumOps[2];
-                        createPercentageMetrics(metricName, window);
-                        groupedMetrics.percentiles[window][metricName].numOps 
= metrics[key];
-                    } else if (isIgnoredJmxKeys(key)) {
-                        //ignore
-                    } else {
-                        groupedMetrics.others = groupedMetrics.others || [];
-                        groupedMetrics.others.push({
-                            'key': key,
-                            'value': metrics[key]
-                        });
-                    }
-
-                }
-                ctrl.metrics = groupedMetrics;
-            };
-
-        }
-    });
-    angular.module('ozone')
-        .component('tabs', {
-            transclude: true,
-            controller: function ($scope) {
-                var ctrl = this;
-                var panes = this.panes = [];
-                this.select = function (pane) {
-                    angular.forEach(panes, function (pane) {
-                        pane.selected = false;
-                    });
-                    pane.selected = true;
-                };
-                this.addPane = function (pane) {
-                    if (panes.length === 0) {
-                        this.select(pane);
-                    }
-                    panes.push(pane);
-                };
-                this.click = function(pane) {
-                    ctrl.select(pane);
-                }
-            },
-            template: '<div class="nav navtabs"><div class="row"><ul' +
-            ' class="nav nav-pills">' +
-            '<li ng-repeat="pane in $ctrl.panes" 
ng-class="{active:pane.selected}">' +
-            '<a href="" ng-click="$ctrl.click(pane)">{{pane.title}}</a> ' +
-            '</li> </ul></div><br/><div class="tab-content" 
ng-transclude></div> </div>'
-        })
-        .component('pane', {
-            transclude: true,
-            require: {
-                tabsCtrl: '^tabs'
-            },
-            bindings: {
-                title: '@'
-            },
-            controller: function () {
-                this.$onInit = function () {
-                    this.tabsCtrl.addPane(this);
-                };
-            },
-            template: '<div class="tab-pane" ng-if="$ctrl.selected" 
ng-transclude></div>'
-        });
-
-    angular.module('ozone').component('navmenu', {
-        bindings: {
-            metrics: '<'
-        },
-        templateUrl: 'static/templates/menu.html',
-        controller: function ($http) {
-            var ctrl = this;
-            ctrl.docs = false;
-            $http.head("docs/index.html")
-                .then(function (result) {
-                    ctrl.docs = true;
-                },function(){
-                    ctrl.docs = false;
-                });
-        }
-    });
-
-    angular.module('ozone').component('config', {
-        templateUrl: 'static/templates/config.html',
-        controller: function ($scope, $http) {
-            var ctrl = this;
-            ctrl.selectedTags = [];
-
-            $http.get("conf?cmd=getOzoneTags&group=ozone")
-                .then(function (response) {
-                    ctrl.tags = response.data;
-
-                    var excludedTags = ['CBLOCK', 'KSM', 'SCM'];
-                    for (var i = 0; i < excludedTags.length; i++) {
-                        var idx = ctrl.tags.indexOf(excludedTags[i]);
-                        // Remove CBLOCK related properties
-                        if (idx > -1) {
-                            ctrl.tags.splice(idx, 1);
-                        }
-                    }
-                    ctrl.loadAll();
-                });
-
-
-
-            ctrl.loadAll = function () {
-                console.log("Displaying all configs");
-                $http.get("conf?cmd=getPropertyByTag&tags=" + ctrl.tags + 
"&group=ozone").then(function (response) {
-                    ctrl.configs = response.data;
-                    console.log(ctrl.configs)
-                    for (var idx in ctrl.configs) {
-                        var tags = []
-                        var parsedTags = ctrl.configs[idx].tag.split(",");
-                        for (var t in parsedTags) {
-                            tags.push(parsedTags[t].trim())
-                        }
-                        ctrl.configs[idx].tag = tags;
-
-                    };
-                    ctrl.sortBy('name');
-                });
-            };
-
-            ctrl.tagFilter = function (value, index, array) {
-                if (!ctrl.selectedTags) {
-                    return true;
-                }
-                var selected = true;
-                for (var idx in ctrl.selectedTags) {
-                    selected = selected && 
(value.tag.indexOf(ctrl.selectedTags[idx]) > -1);
-                }
-                return selected;
-            };
-            ctrl.configFilter = function (config) {
-                return false;
-            };
-            ctrl.selected = function (tag) {
-                return ctrl.selectedTags.includes(tag);
-            };
-
-            ctrl.allSelected = function () {
-                return ctrl.selectedTags.indexOf('SCM') == -1
-                    && ctrl.selectedTags.indexOf('KSM') == -1
-            };
-
-            ctrl.switchto = function (tag) {
-                var tags = ctrl.selectedTags.filter(function (item) {
-                    return item != 'KSM' && item != 'SCM';
-                });
-                if (tag) {
-                    tags.push(tag);
-                }
-                ctrl.selectedTags = tags;
-            };
-
-            ctrl.select = function (tag) {
-                var tagIdx = ctrl.selectedTags.indexOf(tag);
-                if (tagIdx > -1) {
-                    ctrl.selectedTags = ctrl.selectedTags.filter(function 
(item) {
-                        return item != tag;
-                    });
-                } else {
-                    ctrl.selectedTags.push(tag);
-                }
-                console.log("Tags selected:" + ctrl.selectedTags);
-            };
-
-            ctrl.sortBy = function (propertyName) {
-                ctrl.reverse = (ctrl.propertyName === propertyName) ? 
!ctrl.reverse : false;
-                ctrl.propertyName = propertyName;
-            };
-
-        }
-    });
-
-})();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/config.html
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/config.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/config.html
deleted file mode 100644
index 0a273c3..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/config.html
+++ /dev/null
@@ -1,92 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<div class="row top-buffer">
-  <div class="col-md-2">
-    <input type="text" class="form-control" placeholder="Search Properties"
-           name="search" ng-model="search.$">
-  </div>
-  <div class="col-md-10">
-    <div class="btn-group btn-group-justified">
-      <a class="btn"
-         ng-class="$ctrl.allSelected() ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('')">All
-      </a>
-      <a class="btn"
-         ng-class="$ctrl.selected('KSM') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('KSM')">KSM</a>
-      <a class="btn"
-         ng-class="$ctrl.selected('SCM') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('SCM')">SCM</a>
-    </div>
-  </div>
-</div>
-<div class="row">
-  <div class="col-md-2">
-
-    <table class="table table-striped table-condensed tagPanel">
-      <colgroup>
-        <col class="col-md-12">
-      </colgroup>
-      <thead>
-      <tr>
-        <th>Tag</th>
-      </tr>
-      </thead>
-      <tbody>
-      <tr ng-click="$ctrl.select(tag)"
-          ng-class="$ctrl.selected(tag) ? 'selectedtag':''"
-          ng-repeat="tag in $ctrl.tags">
-        <td>{{tag}}</td>
-      </tr>
-      </tbody>
-    </table>
-  </div>
-  <div class="col-md-10">
-    <table class="table table-striped table-condensed table-hover wrap-table">
-      <thead>
-      <tr>
-        <th class="col-md-3" >
-          <a href="#" ng-click="$ctrl.sortBy('name')">Property</a>
-          <span class="sortorder" ng-show="propertyName === 'name'"
-                ng-class="{reverse: reverse}">
-
-              </span>
-        </th>
-        <th class="col-md-2" style="word-wrap: break-word;">
-          <a ng-click="$ctrl.sortBy('value')">Value</a>
-          <span class="sortorder" ng-show="propertyName === 'value'"
-                ng-class="{reverse: reverse}"></span>
-        </th>
-        <th class="col-md-7">
-          <a href="#" ng-click="$ctrl.sortBy('description')">Description</a>
-          <span class="sortorder" ng-show="propertyName === 'description'"
-                ng-class="{reverse: reverse}"></span>
-        </th>
-      </tr>
-      </thead>
-      <tbody>
-      <tr
-              ng-repeat="config in $ctrl.configs | filter:$ctrl.tagFilter | 
filter:search | orderBy:propertyName:reverse">
-        <td style="word-wrap: break-word;">{{config.name}}</td>
-        <td style="word-wrap: break-word;">{{config.value}}</td>
-        <td style="word-wrap: break-word;">{{config.description}}</td>
-      </tr>
-      </tbody>
-    </table>
-  </div>
-</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/jvm.html
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/jvm.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/jvm.html
deleted file mode 100644
index c1f7d16..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/jvm.html
+++ /dev/null
@@ -1,26 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<table class="table table-bordered table-striped">
-    <tr>
-        <th>JVM:</th>
-        <td>{{$ctrl.jmx.SystemProperties.java_vm_name}} 
{{$ctrl.jmx.SystemProperties.java_vm_version}}</td>
-    </tr>
-    <tr>
-        <th>Input arguments:</th>
-        <td>{{$ctrl.jmx.InputArguments}}</td>
-    </tr>
-</table>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/menu.html
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/menu.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/menu.html
deleted file mode 100644
index 95f1b48..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/menu.html
+++ /dev/null
@@ -1,60 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<div id="navbar" class="collapse navbar-collapse">
-    <ul class="nav navbar-nav" id="ui-tabs">
-        <li>
-            <a class="dropdown-toggle"
-               id="metricsMenu"
-               data-toggle="dropdown"
-               aria-haspopup="true"
-               aria-expanded="true">
-                Metrics
-                <span class="caret"></span>
-            </a>
-            <ul
-                class="dropdown-menu"
-                aria-labelledby="metricsMenu">
-                <li ng-repeat="(name, url) in $ctrl.metrics">
-                    <a ng-href="{{url}}">{{name}}<span
-                        aria-hidden="true"></span></a></li>
-            </ul>
-        </li>
-        <li><a href="#!/config">Configuration</a></li>
-        <li ng-show="$ctrl.docs"><a href="/docs">Documentation</a></li>
-        <li>
-            <a class="dropdown-toggle"
-               id="toolsMenu"
-               data-toggle="dropdown"
-               aria-haspopup="true"
-               aria-expanded="true"
-               >
-                Common tools
-                <span class="caret"></span>
-            </a>
-            <ul class="dropdown-menu" aria-labelledby="toolsMenu">
-                <li><a href="jmx">JMX <span
-                        aria-hidden="true"></span></a></li>
-                <li><a href="conf">Config <span
-                        aria-hidden="true"></a></li>
-                <li><a href="stacks">Stacks <span
-                        aria-hidden="true"></a></li>
-                <li><a href="logLevel">Log levels <span
-                        aria-hidden="true"></a></li>
-            </ul>
-        </li>
-    </ul>
-</div><!--/.nav-collapse -->

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/overview.html
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/overview.html
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/overview.html
deleted file mode 100644
index 30e2d26..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/overview.html
+++ /dev/null
@@ -1,39 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h1>Overview</h1>
-<table class="table table-bordered table-striped">
-    <tbody>
-    <tr>
-        <th>Started:</th>
-        <td>{{$ctrl.jmx.StartedTimeInMillis | date : 'medium'}}</td>
-    </tr>
-    <tr>
-        <th>Version:</th>
-        <td>{{$ctrl.jmx.Version}}</td>
-    </tr>
-    <tr>
-        <th>Compiled:</th>
-        <td>{{$ctrl.jmx.CompileInfo}}</td>
-    </tr>
-    </tbody>
-</table>
-
-<h2>JVM parameters</h2>
-
-<jvm-parameters></jvm-parameters>
-
-<div ng-transclude></div>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/rpc-metrics.html
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/rpc-metrics.html
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/rpc-metrics.html
deleted file mode 100644
index facb152..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/templates/rpc-metrics.html
+++ /dev/null
@@ -1,87 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<div ng-hide="$ctrl.metrics.percentiles" class="alert alert-info">
-    Please set <b>rpc.metrics.quantile.enable</b> to <b>true</b> and define the
-    intervals in seconds with setting <b>rpc.metrics.percentiles.intervals</b>
-    (eg. set to <b>60,300</b>) in your hdfs-site.xml
-    to display Hadoop RPC related graphs.
-</div>
-<div ng-repeat="(window,windowed) in $ctrl.metrics.percentiles">
-    <h2>{{window}} window</h2>
-    <p>Quantiles based on a fixed {{window}} window. Calculated once at every
-        {{window}}</p>
-
-    <div class="row">
-        <div class="col-md-6 col-lg-4"
-             ng-repeat="(metric,percentiles) in windowed">
-            <h3>{{metric}}</h3>
-            <p>{{percentiles.numOps}} sample</p>
-            <nvd3 options="$ctrl.percentileGraphOptions"
-                  data="percentiles.graphdata"></nvd3>
-        </div>
-    </div>
-
-</div>
-<div class="row">
-    <div ng-show="$ctrl.metrics.numavgs" class="col-md-6">
-        <h2>Number of ops / Averages</h2>
-
-        <table class="table table-bordered table-striped">
-            <thead>
-            <tr>
-                <th>Metric name</th>
-                <th>Number of ops</th>
-                <th>Average time (ms)</th>
-            </tr>
-            </thead>
-            <tr ng-repeat="(key,metric) in $ctrl.metrics.numavgs">
-                <td>{{key}}</td>
-                <td>{{metric.numOps | number}}</td>
-                <td>{{metric.avgTime | number:2}}</td>
-            </tr>
-        </table>
-    </div>
-    <div ng-show="$ctrl.metrics.successfailures" class="col-md-6">
-        <h2>Success / Failures</h2>
-
-        <table class="table table-bordered table-striped">
-            <thead>
-            <tr>
-                <th>Metric name</th>
-                <th>Success</th>
-                <th>Failures</th>
-            </tr>
-            </thead>
-
-            <tr ng-repeat="(key,metric) in $ctrl.metrics.successfailures">
-                <td>{{key}}</td>
-                <td>{{metric.success}}</td>
-                <td>{{metric.failures}}</td>
-            </tr>
-        </table>
-    </div>
-</div>
-<div ng-show="$ctrl.metrics.others">
-    <h2>Other JMX Metrics</h2>
-
-    <table class="table">
-        <tr ng-repeat="metric in $ctrl.metrics.others">
-            <td>{{metric.key}}</td>
-            <td>{{metric.value}}</td>
-        </tr>
-    </table>
-</div>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneCommandShell.md
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneCommandShell.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneCommandShell.md
deleted file mode 100644
index 9df974f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneCommandShell.md
+++ /dev/null
@@ -1,150 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-Ozone Command Shell
-===================
-
-Ozone command shell gives a command shell interface to work against ozone.
-Please note that this document assumes that the cluster is deployed
-with simple authentication.
-
-The Ozone commands take the following format.
-
-* `hdfs oz --command_ http://hostname:port/volume/bucket/key -user
-<name> -root`
-
-The *port* specified in command should match the port mentioned in the config
-property `dfs.datanode.http.address`. This property can be set in 
`hdfs-site.xml`.
-The default value for the port is `9864` and is used in below commands.
-
-The *--root* option is a command line shortcut that allows *hdfs oz*
-commands to be run as the user that started the cluster. This is useful to
-indicate that you want the commands to be run as some admin user. The only
-reason for this option is that it makes the life of a lazy developer much
-easier.
-
-Ozone Volume Commands
---------------------
-
-The volume commands allow users to create, delete and list the volumes in the
-ozone cluster.
-
-### Create Volume
-
-Volumes can be created only by Admins. Here is an example of creating a volume.
-
-* `hdfs oz -createVolume http://localhost:9864/hive -user bilbo -quota
-100TB -root`
-
-The above command creates a volume called `hive` owned by user `bilbo`. The
-`--root` option allows the command to be executed as user `hdfs` which is an
-admin in the cluster.
-
-### Update Volume
-
-Updates information like ownership and quota on an existing volume.
-
-* `hdfs oz  -updateVolume  http://localhost:9864/hive -quota 500TB -root`
-
-The above command changes the volume quota of hive from 100TB to 500TB.
-
-### Delete Volume
-Deletes a Volume if it is empty.
-
-* `hdfs oz -deleteVolume http://localhost:9864/hive -root`
-
-
-### Info Volume
-Info volume command allows the owner or the administrator of the cluster to 
read meta-data about a specific volume.
-
-* `hdfs oz -infoVolume http://localhost:9864/hive -root`
-
-### List Volumes
-
-List volume command can be used by administrator to list volumes of any user. 
It can also be used by a user to list volumes owned by him.
-
-* `hdfs oz -listVolume http://localhost:9864/ -user bilbo -root`
-
-The above command lists all volumes owned by user bilbo.
-
-Ozone Bucket Commands
---------------------
-
-Bucket commands follow a similar pattern as volume commands. However bucket 
commands are designed to be run by the owner of the volume.
-Following examples assume that these commands are run by the owner of the 
volume or bucket.
-
-
-### Create Bucket
-
-Create bucket call allows the owner of a volume to create a bucket.
-
-* `hdfs oz -createBucket http://localhost:9864/hive/january`
-
-This call creates a bucket called `january` in the volume called `hive`. If
-the volume does not exist, then this call will fail.
-
-
-### Update Bucket
-Updates bucket meta-data, like ACLs.
-
-* `hdfs oz -updateBucket http://localhost:9864/hive/january  -addAcl
-user:spark:rw`
-
-### Delete Bucket
-Deletes a bucket if it is empty.
-
-* `hdfs oz -deleteBucket http://localhost:9864/hive/january`
-
-### Info Bucket
-Returns information about a given bucket.
-
-* `hdfs oz -infoBucket http://localhost:9864/hive/january`
-
-### List Buckets
-List buckets on a given volume.
-
-* `hdfs oz -listBucket http://localhost:9864/hive`
-
-Ozone Key Commands
-------------------
-
-Ozone key commands allows users to put, delete and get keys from ozone buckets.
-
-### Put Key
-Creates or overwrites a key in ozone store, -file points to the file you want
-to upload.
-
-* `hdfs oz -putKey  http://localhost:9864/hive/january/processed.orc  -file
-processed.orc`
-
-### Get Key
-Downloads a file from the ozone bucket.
-
-* `hdfs oz -getKey  http://localhost:9864/hive/january/processed.orc  -file
-  processed.orc.copy`
-
-### Delete Key
-Deletes a key  from the ozone store.
-
-* `hdfs oz -deleteKey http://localhost:9864/hive/january/processed.orc`
-
-### Info Key
-Reads  key metadata from the ozone store.
-
-* `hdfs oz -infoKey http://localhost:9864/hive/january/processed.orc`
-
-### List Keys
-List all keys in an ozone bucket.
-
-* `hdfs oz -listKey  http://localhost:9864/hive/january`

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneGettingStarted.md.vm
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneGettingStarted.md.vm 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneGettingStarted.md.vm
deleted file mode 100644
index 3395a8e..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneGettingStarted.md.vm
+++ /dev/null
@@ -1,320 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-Ozone - Object store for Hadoop
-==============================
-
-Introduction
-------------
-Ozone is an object store for Hadoop. It  is a redundant, distributed object
-store build by leveraging primitives present in HDFS. Ozone supports REST
-API for accessing the store.
-
-Getting Started
----------------
-Ozone is a work in progress and  currently lives in its own branch. To
-use it, you have to build a package by yourself and deploy a cluster.
-
-### Building Ozone
-
-To build Ozone, please checkout the hadoop sources from github. Then
-checkout the ozone branch, HDFS-7240 and build it.
-
-- `git checkout HDFS-7240`
-- `mvn clean package -DskipTests=true -Dmaven.javadoc.skip=true -Pdist -Dtar 
-DskipShade`
-
-skipShade is just to make compilation faster and not really required.
-
-This will give you a tarball in your distribution directory. This is the
-tarball that can be used for deploying your hadoop cluster. Here is an
-example of the tarball that will be generated.
-
-* `~/apache/hadoop/hadoop-dist/target/${project.version}.tar.gz`
-
-At this point we have an option to setup a physical cluster or run ozone via
-docker.
-
-Running Ozone via Docker
-------------------------
-
-This assumes that you have a running docker setup on the machine. Please run
-these following commands to see ozone in action.
-
- Go to the directory where the docker compose files exist.
-
-
- - `cd dev-support/compose/ozone`
-
-Tell docker to start ozone, this will start a KSM, SCM and a single datanode in
-the background.
-
-
- - `docker-compose up -d`
-
-Now let us run some work load against ozone, to do that we will run freon.
-
-This will log into the datanode and run bash.
-
- - `docker-compose exec datanode bash`
- - `cd hadoop/bin`
-
-Now you can run the oz command shell or freon, the ozone load generator.
-
-This is the command to run freon.
-
- - `./hdfs freon -mode offline -validateWrites -numOfVolumes 1 -numOfBuckets 
10 -numOfKeys 100`
-
-You can checkout the KSM UI to see the requests information.
-
- - `http://localhost:9874/`
-
-If you need more datanode you can scale up:
-
- - `docker-compose scale datanode=3`
-
-Running Ozone using a real cluster
-----------------------------------
-
-Please proceed to setup a hadoop cluster by creating the hdfs-site.xml and
-other configuration files that are needed for your cluster.
-
-
-### Ozone Configuration
-
-Ozone relies on its own configuration file called `ozone-site.xml`. It is
-just for convenience and ease of management --  you can add these settings
-to `hdfs-site.xml`, if you don't want to keep ozone settings separate.
-This document refers to `ozone-site.xml` so that ozone settings are in one
-place  and not mingled with HDFS settings.
-
- * _*ozone.enabled*_  This is the most important setting for ozone.
- Currently, Ozone is an opt-in subsystem of HDFS. By default, Ozone is
- disabled. Setting this flag to `true` enables ozone in the HDFS cluster.
- Here is an example,
-
-```
-    <property>
-       <name>ozone.enabled</name>
-       <value>True</value>
-    </property>
-```
- *  _*ozone.metadata.dirs*_ Ozone is designed with modern hardware
- in mind. It tries to use SSDs effectively. So users can specify where the
- metadata must reside. Usually you pick your fastest disk (SSD if
- you have them on your nodes). KSM, SCM and datanode will write the metadata
- to these disks. This is a required setting, if this is missing Ozone will
- fail to come up. Here is an example,
-
-```
-   <property>
-      <name>ozone.metadata.dirs</name>
-      <value>/data/disk1/meta</value>
-   </property>
-```
-
-* _*ozone.scm.names*_ Ozone is build on top of container framework (See Ozone
- Architecture TODO). Storage container manager(SCM) is a distributed block
- service which is used by ozone and other storage services.
- This property allows datanodes to discover where SCM is, so that
- datanodes can send heartbeat to SCM. SCM is designed to be highly available
- and datanodes assume there are multiple instances of SCM which form a highly
- available ring. The HA feature of SCM is a work in progress. So we
- configure ozone.scm.names to be a single machine. Here is an example,
-
-```
-    <property>
-      <name>ozone.scm.names</name>
-      <value>scm.hadoop.apache.org</value>
-    </property>
-```
-
-* _*ozone.scm.datanode.id*_ Each datanode that speaks to SCM generates an ID
-just like HDFS.  This is an optional setting. Please note:
-This path will be created by datanodes if it doesn't exist already. Here is an
- example,
-
-```
-   <property>
-      <name>ozone.scm.datanode.id</name>
-      <value>/data/disk1/scm/meta/node/datanode.id</value>
-   </property>
-```
-
-* _*ozone.scm.block.client.address*_ Storage Container Manager(SCM) offers a
- set of services that can be used to build a distributed storage system. One
- of the services offered is the block services. KSM and HDFS would use this
- service. This property describes where KSM can discover SCM's block service
- endpoint. There are corresponding ports etc., but assuming that we are using
- default ports, the server address is the only required field. Here is an
- example,
-
-```
-    <property>
-      <name>ozone.scm.block.client.address</name>
-      <value>scm.hadoop.apache.org</value>
-    </property>
-```
-
-* _*ozone.ksm.address*_ KSM server address. This is used by Ozonehandler and
-Ozone File System.
-
-```
-    <property>
-       <name>ozone.ksm.address</name>
-       <value>ksm.hadoop.apache.org</value>
-    </property>
-```
-
-Here is a quick summary of settings needed by Ozone.
-
-| Setting                        | Value                        | Comment |
-|--------------------------------|------------------------------|------------------------------------------------------------------|
-| ozone.enabled                  | True                         | This enables 
SCM and  containers in HDFS cluster.                |
-| ozone.metadata.dirs            | file path                    | The metadata 
will be stored here.                                |
-| ozone.scm.names                | SCM server name              | 
Hostname:port or IP:port address of SCM.                      |
-| ozone.scm.block.client.address | SCM server name and port     | Used by 
services like KSM                                        |
-| ozone.scm.client.address       | SCM server name and port     | Used by 
client side                                              |
-| ozone.scm.datanode.address     | SCM server name and port     | Used by 
datanode to talk to SCM                                  |
-| ozone.ksm.address              | KSM server name              | Used by 
Ozone handler and Ozone file system.                     |
-
- Here is a working example of `ozone-site.xml`.
-
-```
-    <?xml version="1.0" encoding="UTF-8"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <configuration>
-      <property>
-          <name>ozone.enabled</name>
-          <value>True</value>
-        </property>
-
-        <property>
-          <name>ozone.metadata.dirs</name>
-          <value>/data/disk1/ozone/meta</value>
-        </property>
-
-        <property>
-          <name>ozone.scm.names</name>
-          <value>127.0.0.1</value>
-        </property>
-
-        <property>
-           <name>ozone.scm.client.address</name>
-           <value>127.0.0.1:9860</value>
-        </property>
-
-         <property>
-           <name>ozone.scm.block.client.address</name>
-           <value>127.0.0.1:9863</value>
-         </property>
-
-         <property>
-           <name>ozone.scm.datanode.address</name>
-           <value>127.0.0.1:9861</value>
-         </property>
-
-         <property>
-           <name>ozone.ksm.address</name>
-           <value>127.0.0.1:9874</value>
-         </property>
-    </configuration>
-```
-
-### Starting Ozone
-
-Ozone is designed to run concurrently with HDFS. The simplest way to [start
-HDFS](../hadoop-common/ClusterSetup.html) is to run `start-dfs.sh` from the
-`$HADOOP/sbin/start-dfs.sh`. Once HDFS
-is running, please verify it is fully functional by running some commands like
-
-   - *./hdfs dfs -mkdir /usr*
-   - *./hdfs dfs -ls /*
-
- Once you are sure that HDFS is running, start Ozone. To start  ozone, you
- need to start SCM and KSM. Currently we assume that both KSM and SCM
-  is running on the same node, this will change in future.
-
- The first time you bring up Ozone, SCM must be initialized.
-
-   - `./hdfs scm -init`
-
- Start SCM.
-
-   - `./hdfs --daemon start scm`
-
- Once SCM gets started, KSM must be initialized.
-
-   - `./hdfs ksm -createObjectStore`
-
- Start KSM.
-
-   - `./hdfs --daemon start ksm`
-
-if you would like to start HDFS and Ozone together, you can do that by running
- a single command.
- - `$HADOOP/sbin/start-ozone.sh`
-
- This command will start HDFS and then start the ozone components.
-
- Once you have ozone running you can use these ozone 
[shell](./OzoneCommandShell.html)
- commands to  create a  volume, bucket and keys.
-
-### Diagnosing issues
-
-Ozone tries not to pollute the existing HDFS streams of configuration and
-logging. So ozone logs are by default configured to be written to a file
-called `ozone.log`. This is controlled by the settings in `log4j.properties`
-file in the hadoop configuration directory.
-
-Here is the log4j properties that are added by ozone.
-
-
-```
-   #
-   # Add a logger for ozone that is separate from the Datanode.
-   #
-   #log4j.debug=true
-   log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
-
-   # Do not log into datanode logs. Remove this line to have single log.
-   log4j.additivity.org.apache.hadoop.ozone=false
-
-   # For development purposes, log both to console and log file.
-   log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
-   log4j.appender.OZONE.Threshold=info
-   log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
-   log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-    %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
-
-   # Real ozone logger that writes to ozone.log
-   log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-   log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
-   log4j.appender.FILE.Threshold=debug
-   log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-   log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-     (%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
-      %m%n
-```
-
-If you would like to have a single datanode log instead of ozone stuff
-getting written to ozone.log, please remove this line or set this to true.
-
- ` log4j.additivity.org.apache.hadoop.ozone=false`
-
-On the SCM/KSM side, you will be able to see
-
-  - `hadoop-hdfs-ksm-hostname.log`
-  - `hadoop-hdfs-scm-hostname.log`
-
-Please file any issues you see under [Object store in HDFS 
(HDFS-7240)](https://issues.apache.org/jira/browse/HDFS-7240)
-as this is still a work in progress.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneMetrics.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneMetrics.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneMetrics.md
deleted file mode 100644
index f5eccf6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneMetrics.md
+++ /dev/null
@@ -1,166 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-
-
-HDFS Ozone Metrics
-===============
-
-<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
-
-Overview
---------
-
-The container metrics that is used in HDFS Ozone.
-
-### Storage Container Metrics
-
-The metrics for various storage container operations in HDFS Ozone.
-
-Storage container is an optional service that can be enabled by setting
-'ozone.enabled' to true.
-These metrics are only available when ozone is enabled.
-
-Storage Container Metrics maintains a set of generic metrics for all
-container RPC calls that can be made to a datanode/container.
-
-Along with the total number of RPC calls containers maintain a set of metrics
-for each RPC call. Following is the set of counters maintained for each RPC
-operation.
-
-*Total number of operation* - We maintain an array which counts how
-many times a specific operation has been performed.
-Eg.`NumCreateContainer` tells us how many times create container has been
-invoked on this datanode.
-
-*Total number of pending operation* - This is an array which counts how
-many times a specific operation is waiting to be processed from the client
-point of view.
-Eg.`NumPendingCreateContainer` tells us how many create container requests are
-waiting to be processed.
-
-*Average latency of each pending operation in nanoseconds* - The average 
latency
-of the operation from the client point of view.
-Eg. `CreateContainerLatencyAvgTime` - This tells us the average latency of
-Create Container from the client point of view.
-
-*Number of bytes involved in a specific command* - This is an array that is
-maintained for all operations, but makes sense only for read and write
-operations.
-
-While it is possible to read the bytes in update container, it really makes
-no sense, since no data stream involved. Users are advised to use this
-metric only when it makes sense. Eg. `BytesReadChunk` -- Tells us how
-many bytes have been read from this data using Read Chunk operation.
-
-*Average Latency of each operation* - The average latency of the operation.
-Eg. `LatencyCreateContainerAvgTime` - This tells us the average latency of
-Create Container.
-
-*Quantiles for each of these operations* - The 50/75/90/95/99th percentile
-of these operations. Eg. `CreateContainerNanos60s50thPercentileLatency` --
-gives latency of the create container operations at the 50th percentile latency
-(1 minute granularity). We report 50th, 75th, 90th, 95th and 99th percentile
-for all RPCs.
-
-So this leads to the containers reporting these counters for each of these
-RPC operations.
-
-| Name | Description |
-|:---- |:---- |
-| `NumOps` | Total number of container operations |
-| `CreateContainer` | Create container operation |
-| `ReadContainer` | Read container operation |
-| `UpdateContainer` | Update container operations |
-| `DeleteContainer` | Delete container operations |
-| `ListContainer` | List container operations |
-| `PutKey` | Put key operations |
-| `GetKey` | Get key operations |
-| `DeleteKey` | Delete key operations |
-| `ListKey` | List key operations |
-| `ReadChunk` | Read chunk operations |
-| `DeleteChunk` | Delete chunk operations |
-| `WriteChunk` | Write chunk operations|
-| `ListChunk` | List chunk operations |
-| `CompactChunk` | Compact chunk operations |
-| `PutSmallFile` | Put small file operations |
-| `GetSmallFile` | Get small file operations |
-| `CloseContainer` | Close container operations |
-
-### Storage Container Manager Metrics
-
-The metrics for containers that are managed by Storage Container Manager.
-
-Storage Container Manager (SCM) is a master service which keeps track of
-replicas of storage containers. It also manages all data nodes and their
-states, dealing with container reports and dispatching commands for execution.
-
-Following are the counters for containers:
-
-| Name | Description |
-|:---- |:---- |
-| `LastContainerReportSize` | Total size in bytes of all containers in latest 
container report that SCM received from datanode |
-| `LastContainerReportUsed` | Total number of bytes used by all containers in 
latest container report that SCM received from datanode |
-| `LastContainerReportKeyCount` | Total number of keys in all containers in 
latest container report that SCM received from datanode |
-| `LastContainerReportReadBytes` | Total number of bytes have been read from 
all containers in latest container report that SCM received from datanode |
-| `LastContainerReportWriteBytes` | Total number of bytes have been written 
into all containers in latest container report that SCM received from datanode |
-| `LastContainerReportReadCount` | Total number of times containers have been 
read from in latest container report that SCM received from datanode |
-| `LastContainerReportWriteCount` | Total number of times containers have been 
written to in latest container report that SCM received from datanode |
-| `ContainerReportSize` | Total size in bytes of all containers over whole 
cluster |
-| `ContainerReportUsed` | Total number of bytes used by all containers over 
whole cluster |
-| `ContainerReportKeyCount` | Total number of keys in all containers over 
whole cluster |
-| `ContainerReportReadBytes` | Total number of bytes have been read from all 
containers over whole cluster |
-| `ContainerReportWriteBytes` | Total number of bytes have been written into 
all containers over whole cluster |
-| `ContainerReportReadCount` | Total number of times containers have been read 
from over whole cluster |
-| `ContainerReportWriteCount` | Total number of times containers have been 
written to over whole cluster |
-
-### Key Space Metrics
-
-The metrics for various key space manager operations in HDFS Ozone.
-
-Key space manager (KSM) is a service that is similar to the Namenode in HDFS.
-In the current design of KSM, it maintains metadata of all volumes, buckets 
and keys.
-These metrics are only available when ozone is enabled.
-
-Following is the set of counters maintained for each key space operation.
-
-*Total number of operation* - We maintain an array which counts how
-many times a specific operation has been performed.
-Eg.`NumVolumeCreate` tells us how many times create volume has been
-invoked in KSM.
-
-*Total number of failed operation* - This type of operation is the opposite of
-the above operation.
-Eg.`NumVolumeCreateFails` tells us how many times create volume has been 
invoked
-failed in KSM.
-
-Following are the counters for each of key space operations.
-
-| Name | Description |
-|:---- |:---- |
-| `VolumeCreate` | Create volume operation |
-| `VolumeUpdates` | Update volume property operation |
-| `VolumeInfos` | Get volume information operation |
-| `VolumeCheckAccesses` | Check volume access operation |
-| `VolumeDeletes` | Delete volume operation |
-| `VolumeLists` | List volume operation |
-| `BucketCreates` | Create bucket operation |
-| `BucketInfos` | Get bucket information operation |
-| `BucketUpdates` | Update bucket property operation |
-| `BucketDeletes` | Delete bucket operation |
-| `BucketLists` | List bucket operation |
-| `KeyAllocate` | Allocate key operation |
-| `KeyLookup` | Look up key operation |
-| `KeyDeletes` | Delete key operation |
-| `KeyLists` | List key operation |
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneOverview.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneOverview.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneOverview.md
deleted file mode 100644
index 41d7dbd..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneOverview.md
+++ /dev/null
@@ -1,88 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-Ozone Overview
-==============
-
-
Ozone is an Object store for Apache Hadoop. It aims to scale to billions of
-keys. 
The following is a high-level overview of the core components of 
Ozone.


-
-![Ozone Architecture Overview](images/ozoneoverview.png) 


-
-The main elements of Ozone are
:
-
-### Clients
-Ozone ships with a set of ready-made clients. They are 
Ozone CLI and 
Freon.

-
-    * [Ozone CLI](./OzoneCommandShell.html) is the command line interface like 
'hdfs' command.

-
-    * Freon is a  load generation tool for Ozone.

-
-### REST Handler
-Ozone provides both an RPC (Remote Procedure Call) as well as a  REST
-(Representational State Transfer) style interface. This allows clients to be
-written in many languages quickly. Ozone strives to maintain a similar
-interface between REST and RPC. The Rest handler offers the REST protocol
-services of Ozone.
-
-For most purposes, a client can make one line change to switch from REST to
-RPC or vice versa.  

-
-### Ozone File System
-Ozone file system (TODO: Add documentation) is a Hadoop compatible file system.
-This is the important user-visible component of ozone.
-This allows Hadoop services and applications like Hive/Spark to run against
-Ozone without any change.
-
-### Ozone Client
-This is like DFSClient in HDFS. This acts as the standard client to talk to
-Ozone. All other components that we have discussed so far rely on Ozone client
-(TODO: Add Ozone client documentation).

-
-### Key Space Manager

-Key Space Manager(KSM) takes care of the Ozone's namespace.
-All ozone entities like volumes, buckets and keys are managed by KSM
-(TODO: Add KSM documentation). In Short, KSM is the metadata manager for Ozone.
-KSM talks to blockManager(SCM) to get blocks and passes it on to the Ozone
-client.  Ozone client writes data to these blocks.
-KSM will eventually be replicated via Apache Ratis for High Availability.

-
-### Storage Container Manager
-Storage Container Manager (SCM) is the block and cluster manager for Ozone.
-SCM along with data nodes offer a service called 'containers'.
-A container is a group of unrelated blocks that are managed together
-as a single entity.
-
-SCM offers the following abstractions.


-
-![SCM Abstractions](images/scmservices.png)
-#### Blocks
-Blocks are like blocks in HDFS. They are replicated store of data.
-
-#### Containers
-A collection of blocks replicated and managed together.
-
-#### Pipelines
-SCM allows each container to choose its method of replication.
-For example, a container might decide that it needs only one copy of a  block
-and might choose a stand-alone pipeline. Another container might want to have
-a very high level of reliability and pick a RATIS based pipeline. In other
-words, SCM allows different kinds of replication strategies to co-exist.
-
-#### Pools
-A group of data nodes is called a pool. For scaling purposes,
-we define a pool as a set of machines. This makes management of datanodes
-easier.
-
-#### Nodes
-The data node where data is stored.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md
deleted file mode 100644
index 0294a53..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md
+++ /dev/null
@@ -1,549 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-Ozone REST API's.
-===================
-
-<!-- MACRO{toc|fromDepth=0|toDepth=1} -->
-
-Overview
---------
-
-The Ozone REST APIs allow users to access Ozone via the REST protocol.
-
-Authentication and Authorization
---------------------
-
-For time being, The default authentication mode of REST API is insecure access
-mode, which is *Simple* mode. Under this mode, ozone server trusts the user
-name specified by client and it does not perform any authentication.
-
-User name can be specified in HTTP header by
-
-* `x-ozone-user: {USER_NAME}`
-
-for example, if you add the following header *x-ozone-user: bilbo* in the HTTP request,
-then the operation will be executed as the *bilbo* user.
-In *Simple* mode, there is no real authorization either. Client can be
-authorized to obtain administrator privilege by using HTTP header
-
-* `Authorization: {AUTH_METHOD} {SIGNATURE}`
-
-for example set following header *Authorization: OZONE root* in the HTTP 
request,
-then ozone will authorize the client with administrator privilege.
-
-Common REST Headers
---------------------
-
-The following HTTP headers must be set for each REST call.
-
-| Property | Description |
-|:---- |:----
-| Authorization | The authorization field determines which authentication 
method is used by ozone. Currently only *simple* mode is supported, the 
corresponding value is *OZONE*. Optionally an user name can be set as *OZONE 
{USER_NAME}* to authorize as a particular user. |
-| Date | Standard HTTP header that represents dates. The format is - day of 
the week, month, day, year and time (military time format) in GMT. Any other 
time zone will be rejected by ozone server. Eg. *Date : Mon, Apr 4, 2016 
06:22:00 GMT*. This field is required. |
-| x-ozone-version | A required HTTP header to indicate which version of API 
this call will be communicating to. E.g *x-ozone-version: v1*. Currently ozone 
only publishes v1 version API. |
-
-Common Reply Headers
---------------------
-
-The common reply headers are part of all Ozone server replies.
-
-| Property | Description |
-|:---- |:----
-| Date | This is the HTTP date header and it is set to server’s local time 
expressed in GMT. |
-| x-ozone-request-id | This is a UUID string that represents an unique request 
ID. This ID is used to track the request through the ozone system and is useful 
for debugging purposes. |
-| x-ozone-server-name | Fully qualified domain name of the sever which handled 
the request. |
-
-Volume APIs
---------------------
-
-### Create a Volume
-
-This API allows admins to create a new storage volume.
-
-Schema:
-
-- `POST /{volume}?quota=<VOLUME_QUOTA>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| quota | long<BYTES \| MB \| GB \| TB> | Optional. Quota size in BYTEs, MBs, 
GBs or TBs |
-
-Sample HTTP POST request:
-
-    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H 
"Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" 
"http://localhost:9864/volume-to-create";
-
-this request creates a volume as user *bilbo*, the authorization field is set 
to *OZONE root* because this call requires administration privilege. The client 
receives a response with zero content length.
-
-    HTTP/1.1 201 Created
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 2173deb5-bbb7-4f0a-8236-f354784e3bae
-    Date: Tue, 27 Jun 2017 07:42:04 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Update Volume
-
-This API allows administrators to update volume info such as ownership and 
quota. This API requires administration privilege.
-
-Schema:
-
-- `PUT /{volume}?quota=<VOLUME_QUOTA>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| quota | long<BYTES \| MB \| GB \| TB>  \| remove | Optional. Quota size in 
BYTEs, MBs, GBs or TBs. Or use string value *remove* to remove an existing 
quota for a volume. |
-
-Sample HTTP PUT request:
-
-    curl -X PUT -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 
04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: john"  
http://localhost:9864/volume-to-update
-
-this request modifies the owner of */volume-to-update* to *john*.
-
-### Delete Volume
-
-This API allows user to delete a volume owned by themselves if the volume is 
not empty. Administrators can delete volumes owned by any user.
-
-Schema:
-
-- `DELETE /{volume}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 
04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: bilbo"  
http://localhost:9864/volume-to-delete
-
-this request deletes an empty volume */volume-to-delete*. The client receives 
a zero length content.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 6af14c64-e3a9-40fe-9634-df60b7cbbc6a
-    Date: Tue, 27 Jun 2017 08:49:52 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Info Volume
-
-This API allows user to read the info of a volume owned by themselves. 
Administrators can read volume info owned by any user.
-
-Schema:
-
-- `GET /{volume}?info=volume`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | "volume" | Required and enforced with this value. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9864/volume-of-bilbo?info=volume";
-
-this request gets the info of volume */volume-of-bilbo*, the client receives a 
response with a JSON object of volume info.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: a2224806-beaf-42dd-a68e-533cd7508f74
-    Date: Tue, 27 Jun 2017 07:55:35 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 171
-    Connection: keep-alive
-
-    {
-      "owner" : { "name" : "bilbo" },
-      "quota" : { "unit" : "TB", "size" : 1048576 },
-      "volumeName" : "volume-of-bilbo",
-      "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
-      "createdBy" : "root"
-    }
-
-### List Volumes
-
-This API allows user to list all volumes owned by themselves. Administrators 
can list all volumes owned by any user.
-
-Schema:
-
-- `GET 
/?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_VOLUME_KEY>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only volumes with this prefix are included in 
the result. |
-| max-keys | int | Optional. Maximum number of volumes included in the result. 
Default is 1024 if not specified. |
-| prev-key | string | Optional. Volume name from where listing should start, 
this key is excluded in the result. It must be a valid volume name. |
-| root-scan | bool | Optional. List all volumes in the cluster if this is set 
to true. Default false. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9864/?max-keys=100&prefix=Jan";
-
-this request gets all volumes owned by *bilbo* and each volume's name contains 
prefix *Jan*, the result at most contains *100* entries. The client receives a 
list of JSON objects, each of which describes the info of a volume.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 7fa0dce1-a8bd-4387-bc3c-1dac4b710bb1
-    Date: Tue, 27 Jun 2017 08:07:04 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 602
-    Connection: keep-alive
-
-    {
-      "volumes" : [
-        {
-          "owner" : { "name" : "bilbo"},
-          "quota" : { "unit" : "TB", "size" : 2 },
-          "volumeName" : "Jan-vol1",
-          "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
-          "createdBy" : "root"
-      },
-      ...
-      ]
-    }
-
-Bucket APIs
---------------------
-
-### Create Bucket
-
-This API allows a user to create a bucket in a volume.
-
-Schema:
-
-- `POST /{volume}/{bucket}`
-
-Additional HTTP Headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| x-ozone-acl | ozone ACLs | Optional. Ozone acls. |
-| x-ozone-storage-class | <DEFAULT \| ARCHIVE \| DISK \| RAM_DISK \| SSD > | 
Optional. Storage type for a volume. |
-| x-ozone-bucket-versioning | enabled/disabled | Optional. Do enable bucket 
versioning or not. |
-
-Sample HTTP POST request:
-
-    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H 
"Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
http://localhost:9864/volume-of-bilbo/bucket-0
-
-this request creates a bucket *bucket-0* under volume *volume-of-bilbo*.
-
-    HTTP/1.1 201 Created
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 49acfeec-4c85-470a-872b-2eaebd8d751e
-    Date: Tue, 27 Jun 2017 08:55:25 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Update Bucket
-
-Updates bucket meta-data, like ACLs.
-
-Schema:
-
-- `PUT /{volume}/{bucket}`
-
-Additional HTTP Headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| x-ozone-acl | ozone ACLs | Optional. Ozone acls. |
-| x-ozone-bucket-versioning | enabled/disabled | Optional. Do enable bucket 
versioning or not. |
-
-Sample HTTP PUT request:
-
-    curl -i -X PUT -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: 
Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" -H "x-ozone-acl: ADD 
user:peregrin:rw" http://localhost:9864/volume-of-bilbo/bucket-to-update
-
-this request adds an ACL policy specified by HTTP header *x-ozone-acl* to 
bucket */volume-of-bilbo/bucket-to-update*, the ACL field *ADD 
user:peregrin:rw* grants additional read/write permission to user *peregrin* 
to this bucket.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: b061a295-5faf-4b98-94b9-8b3e87c8eb5e
-    Date: Tue, 27 Jun 2017 09:02:37 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Delete Bucket
-
-Deletes a bucket if it is empty. A user can only delete buckets owned by 
themselves, and administrators can delete buckets owned by any user, as long as 
it is empty.
-
-Schema:
-
-- `DELETE /{volume}/{bucket}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 
04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" 
"http://localhost:9864/volume-of-bilbo/bucket-0";
-
-this request deletes bucket */volume-of-bilbo/bucket-0*. The client receives a 
zero length content response.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f57acd7a-2116-4c2f-aa2f-5a483db81c9c
-    Date: Tue, 27 Jun 2017 09:16:52 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-
-### Info Bucket
-
-This API returns information about a given bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}?info=bucket`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | "bucket" | Required and enforced with this value. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9864/volume-of-bilbo/bucket-0?info=bucket";
-
-this request gets the info of bucket */volume-of-bilbo/bucket-0*. The client 
receives a response of JSON object contains bucket info.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f125485b-8cae-4c7f-a2d6-5b1fefd6f193
-    Date: Tue, 27 Jun 2017 09:08:31 GMT
-    Content-Type: application/json
-    Content-Length: 138
-    Connection: keep-alive
-
-    {
-      "volumeName" : "volume-of-bilbo",
-      "bucketName" : "bucket-0",
-      "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
-      "acls" : [ ],
-      "versioning" : "DISABLED",
-      "storageType" : "DISK"
-    }
-
-### List Buckets
-
-List buckets in a given volume.
-
-Schema:
-
-- `GET 
/{volume}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_BUCKET_KEY>`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only buckets with this prefix are included in 
the result. |
-| max-keys | int | Optional. Maximum number of buckets included in the result. 
Default is 1024 if not specified. |
-| prev-key | string | Optional. Bucket name from where listing should start, 
this key is excluded in the result. It must be a valid bucket name. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9864/volume-of-bilbo?max-keys=10";
-
-this request lists all the buckets under volume *volume-of-bilbo*, and the 
result at most contains 10 entries. The client receives a response with an array of 
JSON objects, each of which represents a bucket's info.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: e048c3d5-169c-470f-9903-632d9f9e32d5
-    Date: Tue, 27 Jun 2017 09:12:18 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 207
-    Connection: keep-alive
-
-    {
-      "buckets" : [ {
-        "volumeName" : "volume-of-bilbo",
-        "bucketName" : "bucket-0",
-        "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
-        "acls" : [ ],
-        "versioning" : null,
-        "storageType" : "DISK",
-        "bytesUsed" : 0,
-        "keyCount" : 0
-        },
-        ...
-      ]
-    }
-
-Key APIs
-------------------
-
-### Put Key
-
-This API allows user to create or overwrite keys inside of a bucket.
-
-Schema:
-
-- `PUT /{volume}/{bucket}/{key}`
-
-Additional HTTP headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| Content-MD5 | MD5 digest | Standard HTTP header, file hash. |
-
-Sample PUT HTTP request:
-
-    curl -X PUT -T /path/to/localfile -H "Authorization:OZONE" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" 
"http://localhost:9864/volume-of-bilbo/bucket-0/file-0";
-
-this request uploads a local file */path/to/localfile* specified by option 
*-T* to ozone as user *bilbo*, mapped to ozone key 
*/volume-of-bilbo/bucket-0/file-0*. The client receives a zero length content 
response.
-
-### Get Key
-
-This API allows user to get or download a key from an ozone bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}/{key}`
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9864/volume-of-bilbo/bucket-0/file-0";
-
-this request reads the content of key */volume-of-bilbo/bucket-0/file-0*. If 
the content of the file is plain text, it can be directly dumped onto stdout.
-
-    HTTP/1.1 200 OK
-    Content-Type: application/octet-stream
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 1bcd7de7-d8e3-46bb-afee-bdc933d383b8
-    Date: Tue, 27 Jun 2017 09:35:29 GMT
-    Content-Length: 6
-    Connection: keep-alive
-
-    Hello Ozone!
-
-if the file is not plain text, specify *-O* option in curl command and the 
file *file-0* will be downloaded into current working directory, file name will 
be same as the key. A sample request like following:
-
-    curl -O -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: 
Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9864/volume-of-bilbo/bucket-0/file-1";
-
-response looks like following:
-
-    % Total    % Received % Xferd  Average Speed   Time    Time     Time  
Current
-                                 Dload  Upload   Total   Spent    Left  Speed
-    100 6148k  100 6148k    0     0  24.0M      0 --:--:-- --:--:-- --:--:-- 
24.1M
-
-### Delete Key
-
-This API allows user to delete a key from a bucket.
-
-Schema:
-
-- `DELETE /{volume}/{bucket}/{key}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 
04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" 
"http://localhost:9864/volume-of-bilbo/bucket-0/file-0";
-
-this request deletes key */volume-of-bilbo/bucket-0/file-0*. The client 
receives a zero length content result:
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f8c4a373-dd5f-4e3a-b6c4-ddf7e191fe91
-    Date: Tue, 27 Jun 2017 14:19:48 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Info Key
-
-This API returns information about a given key.
-
-Schema:
-
-- `GET /{volume}/{bucket}/{key}?info=key`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | String, "key" | Required and enforced with this value. |
-
-Sample HTTP DELETE request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9864/volume-of-bilbo/bucket-0?info=key";
-
-this request returns information of the key 
*/volume-of-bilbo/bucket-0/file-0*. The client receives a JSON object listed 
attributes of the key.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: c674343c-a0f2-49e4-bbd6-daa73e7dc131
-    Date: Mon, 03 Jul 2017 14:28:45 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 73
-    Connection: keep-alive
-
-    {
-      "version" : 0,
-      "md5hash" : null,
-      "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-      "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-      "size" : 0,
-      "keyName" : "file-0"
-    }
-
-### List Keys
-
-This API allows user to list keys in a bucket.
-
-Schema:
-
-- `GET 
/{volume}/{bucket}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_KEY>`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only keys with this prefix are included in the 
result. |
-| max-keys | int | Optional. Maximum number of keys included in the result. 
Default is 1024 if not specified. |
-| prev-key | string | Optional. Key name from where listing should start, this 
key is excluded in the result. It must be a valid key name. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9864/volume-of-bilbo/bucket-0/?max-keys=100&prefix=file"
-
-this request list keys under bucket */volume-of-bilbo/bucket-0*, the listing 
result is filtered by prefix *file*. The client receives an array of JSON 
objects, each of them represents the info of a matched key.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 7f9fc970-9904-4c56-b671-83a086c6f555
-    Date: Tue, 27 Jun 2017 09:48:59 GMT
-    Content-Type: application/json
-    Content-Length: 209
-    Connection: keep-alive
-
-    {
-      "name" : null,
-      "prefix" : "file",
-      "maxKeys" : 0,
-      "truncated" : false,
-      "keyList" : [ {
-          "version" : 0,
-          "md5hash" : null,
-          "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-          "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-          "size" : 0,
-          "keyName" : "file-0"
-          },
-          ...
-       ]
-    }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to