http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
deleted file mode 100644
index a80c3be..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ /dev/null
@@ -1,766 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import static org.apache.hadoop.util.Time.now;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.function.ToIntFunction;
-import java.util.function.ToLongFunction;
-import java.util.stream.Collectors;
-
-import javax.management.NotCompliantMBeanException;
-import javax.management.ObjectName;
-import javax.management.StandardMBean;
-
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
-import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
-import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
-import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
-import org.apache.hadoop.hdfs.server.federation.router.Router;
-import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
-import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
-import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
-import org.apache.hadoop.hdfs.server.federation.store.RouterStore;
-import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegistrationsRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegistrationsResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsResponse;
-import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
-import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
-import org.apache.hadoop.hdfs.server.federation.store.records.MembershipStats;
-import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
-import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
-import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.VersionInfo;
-import org.codehaus.jettison.json.JSONObject;
-import org.eclipse.jetty.util.ajax.JSON;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Implementation of the Router metrics collector.
- */
-public class FederationMetrics implements FederationMBean {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FederationMetrics.class);
-
-  /** Format for a date. */
-  private static final String DATE_FORMAT = "yyyy/MM/dd HH:mm:ss";
-
-  /** Prevent the page from taking too long to load. */
-  private static final long TIME_OUT = TimeUnit.SECONDS.toMillis(1);
-
-
-  /** Router interface. */
-  private final Router router;
-
-  /** FederationState JMX bean. */
-  private ObjectName beanName;
-
-  /** Resolve the namenode for each namespace. */
-  private final ActiveNamenodeResolver namenodeResolver;
-
-  /** State store. */
-  private final StateStoreService stateStore;
-  /** Membership state store. */
-  private MembershipStore membershipStore;
-  /** Mount table store. */
-  private MountTableStore mountTableStore;
-  /** Router state store. */
-  private RouterStore routerStore;
-
-
-  public FederationMetrics(Router router) throws IOException {
-    this.router = router;
-
-    try {
-      StandardMBean bean = new StandardMBean(this, FederationMBean.class);
-      this.beanName = MBeans.register("Router", "FederationState", bean);
-      LOG.info("Registered Router MBean: {}", this.beanName);
-    } catch (NotCompliantMBeanException e) {
-      throw new RuntimeException("Bad Router MBean setup", e);
-    }
-
-    // Resolve namenode for each nameservice
-    this.namenodeResolver = this.router.getNamenodeResolver();
-
-    // State store interfaces
-    this.stateStore = this.router.getStateStore();
-    if (this.stateStore == null) {
-      LOG.error("State store not available");
-    } else {
-      this.membershipStore = stateStore.getRegisteredRecordStore(
-          MembershipStore.class);
-      this.mountTableStore = stateStore.getRegisteredRecordStore(
-          MountTableStore.class);
-      this.routerStore = stateStore.getRegisteredRecordStore(
-          RouterStore.class);
-    }
-  }
-
-  /**
-   * Unregister the JMX beans.
-   */
-  public void close() {
-    if (this.beanName != null) {
-      MBeans.unregister(beanName);
-    }
-  }
-
-  @Override
-  public String getNamenodes() {
-    final Map<String, Map<String, Object>> info = new LinkedHashMap<>();
-    try {
-      // Get the values from the store
-      GetNamenodeRegistrationsRequest request =
-          GetNamenodeRegistrationsRequest.newInstance();
-      GetNamenodeRegistrationsResponse response =
-          membershipStore.getNamenodeRegistrations(request);
-
-      // Order the namenodes
-      final List<MembershipState> namenodes = response.getNamenodeMemberships();
-      if (namenodes == null || namenodes.size() == 0) {
-        return JSON.toString(info);
-      }
-      List<MembershipState> namenodesOrder = new ArrayList<>(namenodes);
-      Collections.sort(namenodesOrder, MembershipState.NAME_COMPARATOR);
-
-      // Dump namenodes information into JSON
-      for (MembershipState namenode : namenodesOrder) {
-        Map<String, Object> innerInfo = new HashMap<>();
-        Map<String, Object> map = getJson(namenode);
-        innerInfo.putAll(map);
-        long dateModified = namenode.getDateModified();
-        long lastHeartbeat = getSecondsSince(dateModified);
-        innerInfo.put("lastHeartbeat", lastHeartbeat);
-        MembershipStats stats = namenode.getStats();
-        long used = stats.getTotalSpace() - stats.getAvailableSpace();
-        innerInfo.put("used", used);
-        info.put(namenode.getNamenodeKey(),
-            Collections.unmodifiableMap(innerInfo));
-      }
-    } catch (IOException e) {
-      LOG.error("Enable to fetch json representation of namenodes {}",
-          e.getMessage());
-      return "{}";
-    }
-    return JSON.toString(info);
-  }
-
-  @Override
-  public String getNameservices() {
-    final Map<String, Map<String, Object>> info = new LinkedHashMap<>();
-    try {
-      final List<MembershipState> namenodes = getActiveNamenodeRegistrations();
-      List<MembershipState> namenodesOrder = new ArrayList<>(namenodes);
-      Collections.sort(namenodesOrder, MembershipState.NAME_COMPARATOR);
-
-      // Dump namenodes information into JSON
-      for (MembershipState namenode : namenodesOrder) {
-        Map<String, Object> innerInfo = new HashMap<>();
-        Map<String, Object> map = getJson(namenode);
-        innerInfo.putAll(map);
-        long dateModified = namenode.getDateModified();
-        long lastHeartbeat = getSecondsSince(dateModified);
-        innerInfo.put("lastHeartbeat", lastHeartbeat);
-        MembershipStats stats = namenode.getStats();
-        long used = stats.getTotalSpace() - stats.getAvailableSpace();
-        innerInfo.put("used", used);
-        info.put(namenode.getNamenodeKey(),
-            Collections.unmodifiableMap(innerInfo));
-      }
-    } catch (IOException e) {
-      LOG.error("Cannot retrieve nameservices for JMX: {}", e.getMessage());
-      return "{}";
-    }
-    return JSON.toString(info);
-  }
-
-  @Override
-  public String getMountTable() {
-    final List<Map<String, Object>> info = new LinkedList<>();
-
-    try {
-      // Get all the mount points in order
-      GetMountTableEntriesRequest request =
-          GetMountTableEntriesRequest.newInstance("/");
-      GetMountTableEntriesResponse response =
-          mountTableStore.getMountTableEntries(request);
-      final List<MountTable> mounts = response.getEntries();
-      List<MountTable> orderedMounts = new ArrayList<>(mounts);
-      Collections.sort(orderedMounts, MountTable.SOURCE_COMPARATOR);
-
-      // Dump mount table entries information into JSON
-      for (MountTable entry : orderedMounts) {
-        // Summarize destinations
-        Set<String> nameservices = new LinkedHashSet<>();
-        Set<String> paths = new LinkedHashSet<>();
-        for (RemoteLocation location : entry.getDestinations()) {
-          nameservices.add(location.getNameserviceId());
-          paths.add(location.getDest());
-        }
-
-        Map<String, Object> map = getJson(entry);
-        // We add some values with a cleaner format
-        map.put("dateCreated", getDateString(entry.getDateCreated()));
-        map.put("dateModified", getDateString(entry.getDateModified()));
-
-        Map<String, Object> innerInfo = new HashMap<>();
-        innerInfo.putAll(map);
-        innerInfo.put("nameserviceId", StringUtils.join(",", nameservices));
-        innerInfo.put("path", StringUtils.join(",", paths));
-        if (nameservices.size() > 1) {
-          innerInfo.put("order", entry.getDestOrder().toString());
-        } else {
-          innerInfo.put("order", "");
-        }
-        innerInfo.put("readonly", entry.isReadOnly());
-        info.add(Collections.unmodifiableMap(innerInfo));
-      }
-    } catch (IOException e) {
-      LOG.error(
-          "Cannot generate JSON of mount table from store: {}", 
e.getMessage());
-      return "[]";
-    }
-    return JSON.toString(info);
-  }
-
-  @Override
-  public String getRouters() {
-    final Map<String, Map<String, Object>> info = new LinkedHashMap<>();
-    try {
-      // Get all the routers in order
-      GetRouterRegistrationsRequest request =
-          GetRouterRegistrationsRequest.newInstance();
-      GetRouterRegistrationsResponse response =
-          routerStore.getRouterRegistrations(request);
-      final List<RouterState> routers = response.getRouters();
-      List<RouterState> routersOrder = new ArrayList<>(routers);
-      Collections.sort(routersOrder);
-
-      // Dump router information into JSON
-      for (RouterState record : routersOrder) {
-        Map<String, Object> innerInfo = new HashMap<>();
-        Map<String, Object> map = getJson(record);
-        innerInfo.putAll(map);
-        long dateModified = record.getDateModified();
-        long lastHeartbeat = getSecondsSince(dateModified);
-        innerInfo.put("lastHeartbeat", lastHeartbeat);
-
-        StateStoreVersion stateStoreVersion = record.getStateStoreVersion();
-        if (stateStoreVersion == null) {
-          LOG.error("Cannot get State Store versions");
-        } else {
-          setStateStoreVersions(innerInfo, stateStoreVersion);
-        }
-
-        info.put(record.getPrimaryKey(),
-            Collections.unmodifiableMap(innerInfo));
-      }
-    } catch (IOException e) {
-      LOG.error("Cannot get Routers JSON from the State Store", e);
-      return "{}";
-    }
-    return JSON.toString(info);
-  }
-
-  /**
-   * Populate the map with the State Store versions.
-   *
-   * @param innerInfo Map with the information.
-   * @param version State Store versions.
-   */
-  private static void setStateStoreVersions(
-      Map<String, Object> map, StateStoreVersion version) {
-
-    long membershipVersion = version.getMembershipVersion();
-    String lastMembershipUpdate = getDateString(membershipVersion);
-    map.put("lastMembershipUpdate", lastMembershipUpdate);
-
-    long mountTableVersion = version.getMountTableVersion();
-    String lastMountTableDate = getDateString(mountTableVersion);
-    map.put("lastMountTableUpdate", lastMountTableDate);
-  }
-
-  @Override
-  public long getTotalCapacity() {
-    return getNameserviceAggregatedLong(MembershipStats::getTotalSpace);
-  }
-
-  @Override
-  public long getRemainingCapacity() {
-    return getNameserviceAggregatedLong(MembershipStats::getAvailableSpace);
-  }
-
-  @Override
-  public long getProvidedSpace() {
-    return getNameserviceAggregatedLong(MembershipStats::getProvidedSpace);
-  }
-
-  @Override
-  public long getUsedCapacity() {
-    return getTotalCapacity() - getRemainingCapacity();
-  }
-
-  @Override
-  public int getNumNameservices() {
-    try {
-      Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
-      return nss.size();
-    } catch (IOException e) {
-      LOG.error(
-          "Cannot fetch number of expired registrations from the store: {}",
-          e.getMessage());
-      return 0;
-    }
-  }
-
-  @Override
-  public int getNumNamenodes() {
-    try {
-      GetNamenodeRegistrationsRequest request =
-          GetNamenodeRegistrationsRequest.newInstance();
-      GetNamenodeRegistrationsResponse response =
-          membershipStore.getNamenodeRegistrations(request);
-      List<MembershipState> memberships = response.getNamenodeMemberships();
-      return memberships.size();
-    } catch (IOException e) {
-      LOG.error("Cannot retrieve numNamenodes for JMX: {}", e.getMessage());
-      return 0;
-    }
-  }
-
-  @Override
-  public int getNumExpiredNamenodes() {
-    try {
-      GetNamenodeRegistrationsRequest request =
-          GetNamenodeRegistrationsRequest.newInstance();
-      GetNamenodeRegistrationsResponse response =
-          membershipStore.getExpiredNamenodeRegistrations(request);
-      List<MembershipState> expiredMemberships =
-          response.getNamenodeMemberships();
-      return expiredMemberships.size();
-    } catch (IOException e) {
-      LOG.error(
-          "Cannot retrieve numExpiredNamenodes for JMX: {}", e.getMessage());
-      return 0;
-    }
-  }
-
-  @Override
-  public int getNumLiveNodes() {
-    return getNameserviceAggregatedInt(
-        MembershipStats::getNumOfActiveDatanodes);
-  }
-
-  @Override
-  public int getNumDeadNodes() {
-    return getNameserviceAggregatedInt(MembershipStats::getNumOfDeadDatanodes);
-  }
-
-  @Override
-  public int getNumDecommissioningNodes() {
-    return getNameserviceAggregatedInt(
-        MembershipStats::getNumOfDecommissioningDatanodes);
-  }
-
-  @Override
-  public int getNumDecomLiveNodes() {
-    return getNameserviceAggregatedInt(
-        MembershipStats::getNumOfDecomActiveDatanodes);
-  }
-
-  @Override
-  public int getNumDecomDeadNodes() {
-    return getNameserviceAggregatedInt(
-        MembershipStats::getNumOfDecomDeadDatanodes);
-  }
-
-  @Override // NameNodeMXBean
-  public String getNodeUsage() {
-    float median = 0;
-    float max = 0;
-    float min = 0;
-    float dev = 0;
-
-    final Map<String, Map<String, Object>> info = new HashMap<>();
-    try {
-      RouterRpcServer rpcServer = this.router.getRpcServer();
-      DatanodeInfo[] live = rpcServer.getDatanodeReport(
-          DatanodeReportType.LIVE, TIME_OUT);
-
-      if (live.length > 0) {
-        float totalDfsUsed = 0;
-        float[] usages = new float[live.length];
-        int i = 0;
-        for (DatanodeInfo dn : live) {
-          usages[i++] = dn.getDfsUsedPercent();
-          totalDfsUsed += dn.getDfsUsedPercent();
-        }
-        totalDfsUsed /= live.length;
-        Arrays.sort(usages);
-        median = usages[usages.length / 2];
-        max = usages[usages.length - 1];
-        min = usages[0];
-
-        for (i = 0; i < usages.length; i++) {
-          dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed);
-        }
-        dev = (float) Math.sqrt(dev / usages.length);
-      }
-    } catch (IOException e) {
-      LOG.info("Cannot get the live nodes: {}", e.getMessage());
-    }
-
-    final Map<String, Object> innerInfo = new HashMap<>();
-    innerInfo.put("min", StringUtils.format("%.2f%%", min));
-    innerInfo.put("median", StringUtils.format("%.2f%%", median));
-    innerInfo.put("max", StringUtils.format("%.2f%%", max));
-    innerInfo.put("stdDev", StringUtils.format("%.2f%%", dev));
-    info.put("nodeUsage", innerInfo);
-
-    return JSON.toString(info);
-  }
-
-  @Override
-  public long getNumBlocks() {
-    return getNameserviceAggregatedLong(MembershipStats::getNumOfBlocks);
-  }
-
-  @Override
-  public long getNumOfMissingBlocks() {
-    return getNameserviceAggregatedLong(MembershipStats::getNumOfBlocksMissing);
-  }
-
-  @Override
-  public long getNumOfBlocksPendingReplication() {
-    return getNameserviceAggregatedLong(
-        MembershipStats::getNumOfBlocksPendingReplication);
-  }
-
-  @Override
-  public long getNumOfBlocksUnderReplicated() {
-    return getNameserviceAggregatedLong(
-        MembershipStats::getNumOfBlocksUnderReplicated);
-  }
-
-  @Override
-  public long getNumOfBlocksPendingDeletion() {
-    return getNameserviceAggregatedLong(
-        MembershipStats::getNumOfBlocksPendingDeletion);
-  }
-
-  @Override
-  public long getNumFiles() {
-    return getNameserviceAggregatedLong(MembershipStats::getNumOfFiles);
-  }
-
-  @Override
-  public String getRouterStarted() {
-    long startTime = this.router.getStartTime();
-    return new Date(startTime).toString();
-  }
-
-  @Override
-  public String getVersion() {
-    return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
-  }
-
-  @Override
-  public String getCompiledDate() {
-    return VersionInfo.getDate();
-  }
-
-  @Override
-  public String getCompileInfo() {
-    return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
-        + VersionInfo.getBranch();
-  }
-
-  @Override
-  public String getHostAndPort() {
-    InetSocketAddress address = this.router.getHttpServerAddress();
-    if (address != null) {
-      try {
-        String hostname = InetAddress.getLocalHost().getHostName();
-        int port = address.getPort();
-        return hostname + ":" + port;
-      } catch (UnknownHostException ignored) { }
-    }
-    return "Unknown";
-  }
-
-  @Override
-  public String getRouterId() {
-    return this.router.getRouterId();
-  }
-
-  @Override
-  public String getClusterId() {
-    try {
-      Collection<String> clusterIds =
-          getNamespaceInfo(FederationNamespaceInfo::getClusterId);
-      return clusterIds.toString();
-    } catch (IOException e) {
-      LOG.error("Cannot fetch cluster ID metrics: {}", e.getMessage());
-      return "";
-    }
-  }
-
-  @Override
-  public String getBlockPoolId() {
-    try {
-      Collection<String> blockpoolIds =
-          getNamespaceInfo(FederationNamespaceInfo::getBlockPoolId);
-      return blockpoolIds.toString();
-    } catch (IOException e) {
-      LOG.error("Cannot fetch block pool ID metrics: {}", e.getMessage());
-      return "";
-    }
-  }
-
-  @Override
-  public String getRouterStatus() {
-    return "RUNNING";
-  }
-
-  /**
-   * Build a set of unique values found in all namespaces.
-   *
-   * @param f Method reference of the appropriate FederationNamespaceInfo
-   *          getter function
-   * @return Set of unique string values found in all discovered namespaces.
-   * @throws IOException if the query could not be executed.
-   */
-  private Collection<String> getNamespaceInfo(
-      Function<FederationNamespaceInfo, String> f) throws IOException {
-    GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance();
-    GetNamespaceInfoResponse response =
-        membershipStore.getNamespaceInfo(request);
-    return response.getNamespaceInfo().stream()
-      .map(f)
-      .collect(Collectors.toSet());
-  }
-
-  /**
-   * Get the aggregated value for a method for all nameservices.
-   * @param f Method reference
-   * @return Aggregated integer.
-   */
-  private int getNameserviceAggregatedInt(ToIntFunction<MembershipStats> f) {
-    try {
-      return getActiveNamenodeRegistrations().stream()
-               .map(MembershipState::getStats)
-               .collect(Collectors.summingInt(f));
-    } catch (IOException e) {
-      LOG.error("Unable to extract metrics: {}", e.getMessage());
-      return 0;
-    }
-  }
-
-  /**
-   * Get the aggregated value for a method for all nameservices.
-   * @param f Method reference
-   * @return Aggregated long.
-   */
-  private long getNameserviceAggregatedLong(ToLongFunction<MembershipStats> f) {
-    try {
-      return getActiveNamenodeRegistrations().stream()
-               .map(MembershipState::getStats)
-               .collect(Collectors.summingLong(f));
-    } catch (IOException e) {
-      LOG.error("Unable to extract metrics: {}", e.getMessage());
-      return 0;
-    }
-  }
-
-  /**
-   * Fetches the most active namenode memberships for all known nameservices.
-   * The fetched membership may or may not be active. Excludes expired
-   * memberships.
-   * @throws IOException if the query could not be performed.
-   * @return List of the most active NNs from each known nameservice.
-   */
-  private List<MembershipState> getActiveNamenodeRegistrations()
-      throws IOException {
-
-    List<MembershipState> resultList = new ArrayList<>();
-    GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance();
-    GetNamespaceInfoResponse response =
-        membershipStore.getNamespaceInfo(request);
-    for (FederationNamespaceInfo nsInfo : response.getNamespaceInfo()) {
-      // Fetch the most recent namenode registration
-      String nsId = nsInfo.getNameserviceId();
-      List<? extends FederationNamenodeContext> nns =
-          namenodeResolver.getNamenodesForNameserviceId(nsId);
-      if (nns != null) {
-        FederationNamenodeContext nn = nns.get(0);
-        if (nn != null && nn instanceof MembershipState) {
-          resultList.add((MembershipState) nn);
-        }
-      }
-    }
-    return resultList;
-  }
-
-  /**
-   * Get time as a date string.
-   * @param time Milliseconds since 1970.
-   * @return String representing the date.
-   */
-  @VisibleForTesting
-  static String getDateString(long time) {
-    if (time <= 0) {
-      return "-";
-    }
-    Date date = new Date(time);
-
-    SimpleDateFormat sdf = new SimpleDateFormat(DATE_FORMAT);
-    return sdf.format(date);
-  }
-
-  /**
-   * Get the number of seconds passed since a date.
-   *
-   * @param timeMs to use as a reference.
-   * @return Seconds since the date.
-   */
-  private static long getSecondsSince(long timeMs) {
-    if (timeMs < 0) {
-      return -1;
-    }
-    return (now() - timeMs) / 1000;
-  }
-
-  /**
-   * Get JSON for this record.
-   *
-   * @return Map representing the data for the JSON representation.
-   */
-  private static Map<String, Object> getJson(BaseRecord record) {
-    Map<String, Object> json = new HashMap<>();
-    Map<String, Class<?>> fields = getFields(record);
-
-    for (String fieldName : fields.keySet()) {
-      if (!fieldName.equalsIgnoreCase("proto")) {
-        try {
-          Object value = getField(record, fieldName);
-          if (value instanceof BaseRecord) {
-            BaseRecord recordField = (BaseRecord) value;
-            json.putAll(getJson(recordField));
-          } else {
-            json.put(fieldName, value == null ? JSONObject.NULL : value);
-          }
-        } catch (Exception e) {
-          throw new IllegalArgumentException(
-              "Cannot serialize field " + fieldName + " into JSON");
-        }
-      }
-    }
-    return json;
-  }
-
-  /**
-   * Returns all serializable fields in the object.
-   *
-   * @return Map with the fields.
-   */
-  private static Map<String, Class<?>> getFields(BaseRecord record) {
-    Map<String, Class<?>> getters = new HashMap<>();
-    for (Method m : record.getClass().getDeclaredMethods()) {
-      if (m.getName().startsWith("get")) {
-        try {
-          Class<?> type = m.getReturnType();
-          char[] c = m.getName().substring(3).toCharArray();
-          c[0] = Character.toLowerCase(c[0]);
-          String key = new String(c);
-          getters.put(key, type);
-        } catch (Exception e) {
-          LOG.error("Cannot execute getter {} on {}", m.getName(), record);
-        }
-      }
-    }
-    return getters;
-  }
-
-  /**
-   * Fetches the value for a field name.
-   *
-   * @param fieldName the legacy name of the field.
-   * @return The field data or null if not found.
-   */
-  private static Object getField(BaseRecord record, String fieldName) {
-    Object result = null;
-    Method m = locateGetter(record, fieldName);
-    if (m != null) {
-      try {
-        result = m.invoke(record);
-      } catch (Exception e) {
-        LOG.error("Cannot get field {} on {}", fieldName, record);
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Finds the appropriate getter for a field name.
-   *
-   * @param fieldName The legacy name of the field.
-   * @return The matching getter or null if not found.
-   */
-  private static Method locateGetter(BaseRecord record, String fieldName) {
-    for (Method m : record.getClass().getMethods()) {
-      if (m.getName().equalsIgnoreCase("get" + fieldName)) {
-        return m;
-      }
-    }
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
deleted file mode 100644
index 3e031fe..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * JMX interface for the RPC server.
- * TODO use the default RPC MBean.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface FederationRPCMBean {
-
-  long getProxyOps();
-
-  double getProxyAvg();
-
-  long getProcessingOps();
-
-  double getProcessingAvg();
-
-  long getProxyOpFailureCommunicate();
-
-  long getProxyOpFailureStandby();
-
-  long getProxyOpNotImplemented();
-
-  long getProxyOpRetries();
-
-  long getRouterFailureStateStoreOps();
-
-  long getRouterFailureReadOnlyOps();
-
-  long getRouterFailureLockedOps();
-
-  long getRouterFailureSafemodeOps();
-
-  int getRpcServerCallQueue();
-
-  /**
-   * Get the number of RPC connections between the clients and the Router.
-   * @return Number of RPC connections between the clients and the Router.
-   */
-  int getRpcServerNumOpenConnections();
-
-  /**
-   * Get the number of RPC connections between the Router and the NNs.
-   * @return Number of RPC connections between the Router and the NNs.
-   */
-  int getRpcClientNumConnections();
-
-  /**
-   * Get the number of active RPC connections between the Router and the NNs.
-   * @return Number of active RPC connections between the Router and the NNs.
-   */
-  int getRpcClientNumActiveConnections();
-
-  /**
-   * Get the number of RPC connections to be created.
-   * @return Number of RPC connections to be created.
-   */
-  int getRpcClientNumCreatingConnections();
-
-  /**
-   * Get the number of connection pools between the Router and the NNs.
-   * @return Number of connection pools between the Router and the NNs.
-   */
-  int getRpcClientNumConnectionPools();
-
-  /**
-   * JSON representation of the RPC connections from the Router to the NNs.
-   * @return JSON string representation.
-   */
-  String getRpcClientConnections();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
deleted file mode 100644
index 94d3383..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
-import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-/**
- * Implementation of the RPC metrics collector.
- */
-@Metrics(name = "RouterRPCActivity", about = "Router RPC Activity",
-    context = "dfs")
-public class FederationRPCMetrics implements FederationRPCMBean {
-
-  private final MetricsRegistry registry = new MetricsRegistry("router");
-
-  private RouterRpcServer rpcServer;
-
-  @Metric("Time for the router to process an operation internally")
-  private MutableRate processing;
-  @Metric("Number of operations the Router processed internally")
-  private MutableCounterLong processingOp;
-  @Metric("Time for the Router to proxy an operation to the Namenodes")
-  private MutableRate proxy;
-  @Metric("Number of operations the Router proxied to a Namenode")
-  private MutableCounterLong proxyOp;
-
-  @Metric("Number of operations to fail to reach NN")
-  private MutableCounterLong proxyOpFailureStandby;
-  @Metric("Number of operations to hit a standby NN")
-  private MutableCounterLong proxyOpFailureCommunicate;
-  @Metric("Number of operations not implemented")
-  private MutableCounterLong proxyOpNotImplemented;
-  @Metric("Number of operation retries")
-  private MutableCounterLong proxyOpRetries;
-
-  @Metric("Failed requests due to State Store unavailable")
-  private MutableCounterLong routerFailureStateStore;
-  @Metric("Failed requests due to read only mount point")
-  private MutableCounterLong routerFailureReadOnly;
-  @Metric("Failed requests due to locked path")
-  private MutableCounterLong routerFailureLocked;
-  @Metric("Failed requests due to safe mode")
-  private MutableCounterLong routerFailureSafemode;
-
-  public FederationRPCMetrics(Configuration conf, RouterRpcServer rpcServer) {
-    this.rpcServer = rpcServer;
-
-    registry.tag(SessionId, "RouterRPCSession");
-    registry.tag(ProcessName, "Router");
-  }
-
-  public static FederationRPCMetrics create(Configuration conf,
-      RouterRpcServer rpcServer) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(FederationRPCMetrics.class.getName(),
-        "HDFS Federation RPC Metrics",
-        new FederationRPCMetrics(conf, rpcServer));
-  }
-
-  /**
-   * Convert nanoseconds to milliseconds.
-   * @param ns Time in nanoseconds.
-   * @return Time in milliseconds.
-   */
-  private static double toMs(double ns) {
-    return ns / 1000000;
-  }
-
-  /**
-   * Reset the metrics system.
-   */
-  public static void reset() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(FederationRPCMetrics.class.getName());
-  }
-
-  public void incrProxyOpFailureStandby() {
-    proxyOpFailureStandby.incr();
-  }
-
-  @Override
-  public long getProxyOpFailureStandby() {
-    return proxyOpFailureStandby.value();
-  }
-
-  public void incrProxyOpFailureCommunicate() {
-    proxyOpFailureCommunicate.incr();
-  }
-
-  @Override
-  public long getProxyOpFailureCommunicate() {
-    return proxyOpFailureCommunicate.value();
-  }
-
-
-  public void incrProxyOpNotImplemented() {
-    proxyOpNotImplemented.incr();
-  }
-
-  @Override
-  public long getProxyOpNotImplemented() {
-    return proxyOpNotImplemented.value();
-  }
-
-  public void incrProxyOpRetries() {
-    proxyOpRetries.incr();
-  }
-
-  @Override
-  public long getProxyOpRetries() {
-    return proxyOpRetries.value();
-  }
-
-  public void incrRouterFailureStateStore() {
-    routerFailureStateStore.incr();
-  }
-
-  @Override
-  public long getRouterFailureStateStoreOps() {
-    return routerFailureStateStore.value();
-  }
-
-  public void incrRouterFailureSafemode() {
-    routerFailureSafemode.incr();
-  }
-
-  @Override
-  public long getRouterFailureSafemodeOps() {
-    return routerFailureSafemode.value();
-  }
-
-  public void incrRouterFailureReadOnly() {
-    routerFailureReadOnly.incr();
-  }
-
-  @Override
-  public long getRouterFailureReadOnlyOps() {
-    return routerFailureReadOnly.value();
-  }
-
-  public void incrRouterFailureLocked() {
-    routerFailureLocked.incr();
-  }
-
-  @Override
-  public long getRouterFailureLockedOps() {
-    return routerFailureLocked.value();
-  }
-
-  @Override
-  public int getRpcServerCallQueue() {
-    return rpcServer.getServer().getCallQueueLen();
-  }
-
-  @Override
-  public int getRpcServerNumOpenConnections() {
-    return rpcServer.getServer().getNumOpenConnections();
-  }
-
-  @Override
-  public int getRpcClientNumConnections() {
-    return rpcServer.getRPCClient().getNumConnections();
-  }
-
-  @Override
-  public int getRpcClientNumActiveConnections() {
-    return rpcServer.getRPCClient().getNumActiveConnections();
-  }
-
-  @Override
-  public int getRpcClientNumCreatingConnections() {
-    return rpcServer.getRPCClient().getNumCreatingConnections();
-  }
-
-  @Override
-  public int getRpcClientNumConnectionPools() {
-    return rpcServer.getRPCClient().getNumConnectionPools();
-  }
-
-  @Override
-  public String getRpcClientConnections() {
-    return rpcServer.getRPCClient().getJSON();
-  }
-
-  /**
-   * Add the time to proxy an operation from the moment the Router sends it to
-   * the Namenode until the Namenode replies.
-   * @param time Proxy time of an operation in nanoseconds.
-   */
-  public void addProxyTime(long time) {
-    proxy.add(time);
-    proxyOp.incr();
-  }
-
-  @Override
-  public double getProxyAvg() {
-    return toMs(proxy.lastStat().mean());
-  }
-
-  @Override
-  public long getProxyOps() {
-    return proxyOp.value();
-  }
-
-  /**
-   * Add the time to process a request in the Router from the time we receive
-   * the call until we send it to the Namenode.
-   * @param time Process time of an operation in nanoseconds.
-   */
-  public void addProcessingTime(long time) {
-    processing.add(time);
-    processingOp.incr();
-  }
-
-  @Override
-  public double getProcessingAvg() {
-    return toMs(processing.lastStat().mean());
-  }
-
-  @Override
-  public long getProcessingOps() {
-    return processingOp.value();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
deleted file mode 100644
index 547ebb5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadFactory;
-
-import javax.management.NotCompliantMBeanException;
-import javax.management.ObjectName;
-import javax.management.StandardMBean;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor;
-import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
-import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * Customizable RPC performance monitor. Receives events from the RPC server
- * and aggregates them via JMX.
- */
-public class FederationRPCPerformanceMonitor implements RouterRpcMonitor {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FederationRPCPerformanceMonitor.class);
-
-
-  /** Time for an operation to be received in the Router. */
-  private static final ThreadLocal<Long> START_TIME = new ThreadLocal<>();
-  /** Time for an operation to be sent to the Namenode. */
-  private static final ThreadLocal<Long> PROXY_TIME = new ThreadLocal<>();
-
-  /** Configuration for the performance monitor. */
-  private Configuration conf;
-  /** RPC server for the Router. */
-  private RouterRpcServer server;
-  /** State Store. */
-  private StateStoreService store;
-
-  /** JMX interface to monitor the RPC metrics. */
-  private FederationRPCMetrics metrics;
-  private ObjectName registeredBean;
-
-  /** Thread pool for logging stats. */
-  private ExecutorService executor;
-
-
-  @Override
-  public void init(Configuration configuration, RouterRpcServer rpcServer,
-      StateStoreService stateStore) {
-
-    this.conf = configuration;
-    this.server = rpcServer;
-    this.store = stateStore;
-
-    // Create metrics
-    this.metrics = FederationRPCMetrics.create(conf, server);
-
-    // Create thread pool
-    ThreadFactory threadFactory = new ThreadFactoryBuilder()
-        .setNameFormat("Federation RPC Performance Monitor-%d").build();
-    this.executor = Executors.newFixedThreadPool(1, threadFactory);
-
-    // Adding JMX interface
-    try {
-      StandardMBean bean =
-          new StandardMBean(this.metrics, FederationRPCMBean.class);
-      registeredBean = MBeans.register("Router", "FederationRPC", bean);
-      LOG.info("Registered FederationRPCMBean: {}", registeredBean);
-    } catch (NotCompliantMBeanException e) {
-      throw new RuntimeException("Bad FederationRPCMBean setup", e);
-    }
-  }
-
-  @Override
-  public void close() {
-    if (registeredBean != null) {
-      MBeans.unregister(registeredBean);
-      registeredBean = null;
-    }
-    if (this.executor != null) {
-      this.executor.shutdown();
-    }
-  }
-
-  /**
-   * Resets all RPC service performance counters to their defaults.
-   */
-  public void resetPerfCounters() {
-    if (registeredBean != null) {
-      MBeans.unregister(registeredBean);
-      registeredBean = null;
-    }
-    if (metrics != null) {
-      FederationRPCMetrics.reset();
-      metrics = null;
-    }
-    init(conf, server, store);
-  }
-
-  @Override
-  public void startOp() {
-    START_TIME.set(this.getNow());
-  }
-
-  @Override
-  public long proxyOp() {
-    PROXY_TIME.set(this.getNow());
-    long processingTime = getProcessingTime();
-    if (processingTime >= 0) {
-      metrics.addProcessingTime(processingTime);
-    }
-    return Thread.currentThread().getId();
-  }
-
-  @Override
-  public void proxyOpComplete(boolean success) {
-    if (success) {
-      long proxyTime = getProxyTime();
-      if (proxyTime >= 0) {
-        metrics.addProxyTime(proxyTime);
-      }
-    }
-  }
-
-  @Override
-  public void proxyOpFailureStandby() {
-    metrics.incrProxyOpFailureStandby();
-  }
-
-  @Override
-  public void proxyOpFailureCommunicate() {
-    metrics.incrProxyOpFailureCommunicate();
-  }
-
-  @Override
-  public void proxyOpNotImplemented() {
-    metrics.incrProxyOpNotImplemented();
-  }
-
-  @Override
-  public void proxyOpRetries() {
-    metrics.incrProxyOpRetries();
-  }
-
-  @Override
-  public void routerFailureStateStore() {
-    metrics.incrRouterFailureStateStore();
-  }
-
-  @Override
-  public void routerFailureSafemode() {
-    metrics.incrRouterFailureSafemode();
-  }
-
-  @Override
-  public void routerFailureReadOnly() {
-    metrics.incrRouterFailureReadOnly();
-  }
-
-  @Override
-  public void routerFailureLocked() {
-    metrics.incrRouterFailureLocked();
-  }
-
-  /**
-   * Get current time.
-   * @return Current time in nanoseconds.
-   */
-  private long getNow() {
-    return System.nanoTime();
-  }
-
-  /**
-   * Get the time between receiving the operation and sending it to the Namenode.
-   * @return Processing time in nanoseconds.
-   */
-  private long getProcessingTime() {
-    if (START_TIME.get() != null && START_TIME.get() > 0 &&
-        PROXY_TIME.get() != null && PROXY_TIME.get() > 0) {
-      return PROXY_TIME.get() - START_TIME.get();
-    }
-    return -1;
-  }
-
-  /**
-   * Get time between now and when the operation was forwarded to the Namenode.
-   * @return Current proxy time in nanoseconds.
-   */
-  private long getProxyTime() {
-    if (PROXY_TIME.get() != null && PROXY_TIME.get() > 0) {
-      return getNow() - PROXY_TIME.get();
-    }
-    return -1;
-  }
-
-  @Override
-  public FederationRPCMetrics getRPCMetrics() {
-    return this.metrics;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
deleted file mode 100644
index c4e5b5b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ /dev/null
@@ -1,634 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import static org.apache.hadoop.util.Time.now;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-import javax.management.NotCompliantMBeanException;
-import javax.management.ObjectName;
-import javax.management.StandardMBean;
-
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
-import org.apache.hadoop.hdfs.server.federation.router.Router;
-import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
-import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
-import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoResponse;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeStatusMXBean;
-import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
-import org.apache.hadoop.ipc.StandbyException;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.util.VersionInfo;
-import org.eclipse.jetty.util.ajax.JSON;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Expose the Namenode metrics as if the Router were one.
- */
-public class NamenodeBeanMetrics
-    implements FSNamesystemMBean, NameNodeMXBean, NameNodeStatusMXBean {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(NamenodeBeanMetrics.class);
-
-  private final Router router;
-
-  /** FSNamesystem bean. */
-  private ObjectName fsBeanName;
-  /** FSNamesystemState bean. */
-  private ObjectName fsStateBeanName;
-  /** NameNodeInfo bean. */
-  private ObjectName nnInfoBeanName;
-  /** NameNodeStatus bean. */
-  private ObjectName nnStatusBeanName;
-
-
-  public NamenodeBeanMetrics(Router router) {
-    this.router = router;
-
-    try {
-      // TODO this needs to be done with the Metrics from FSNamesystem
-      StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
-      this.fsBeanName = MBeans.register("NameNode", "FSNamesystem", bean);
-      LOG.info("Registered FSNamesystem MBean: {}", this.fsBeanName);
-    } catch (NotCompliantMBeanException e) {
-      throw new RuntimeException("Bad FSNamesystem MBean setup", e);
-    }
-
-    try {
-      StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
-      this.fsStateBeanName =
-          MBeans.register("NameNode", "FSNamesystemState", bean);
-      LOG.info("Registered FSNamesystemState MBean: {}", this.fsStateBeanName);
-    } catch (NotCompliantMBeanException e) {
-      throw new RuntimeException("Bad FSNamesystemState MBean setup", e);
-    }
-
-    try {
-      StandardMBean bean = new StandardMBean(this, NameNodeMXBean.class);
-      this.nnInfoBeanName = MBeans.register("NameNode", "NameNodeInfo", bean);
-      LOG.info("Registered NameNodeInfo MBean: {}", this.nnInfoBeanName);
-    } catch (NotCompliantMBeanException e) {
-      throw new RuntimeException("Bad NameNodeInfo MBean setup", e);
-    }
-
-    try {
-      StandardMBean bean = new StandardMBean(this, NameNodeStatusMXBean.class);
-      this.nnStatusBeanName =
-          MBeans.register("NameNode", "NameNodeStatus", bean);
-      LOG.info("Registered NameNodeStatus MBean: {}", this.nnStatusBeanName);
-    } catch (NotCompliantMBeanException e) {
-      throw new RuntimeException("Bad NameNodeStatus MBean setup", e);
-    }
-  }
-
-  /**
-   * De-register the JMX interfaces.
-   */
-  public void close() {
-    if (fsStateBeanName != null) {
-      MBeans.unregister(fsStateBeanName);
-      fsStateBeanName = null;
-    }
-    if (nnInfoBeanName != null) {
-      MBeans.unregister(nnInfoBeanName);
-      nnInfoBeanName = null;
-    }
-    // Remove the NameNode status bean
-    if (nnStatusBeanName != null) {
-      MBeans.unregister(nnStatusBeanName);
-      nnStatusBeanName = null;
-    }
-  }
-
-  private FederationMetrics getFederationMetrics() {
-    return this.router.getMetrics();
-  }
-
-  /////////////////////////////////////////////////////////
-  // NameNodeMXBean
-  /////////////////////////////////////////////////////////
-
-  @Override
-  public String getVersion() {
-    return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
-  }
-
-  @Override
-  public String getSoftwareVersion() {
-    return VersionInfo.getVersion();
-  }
-
-  @Override
-  public long getUsed() {
-    return getFederationMetrics().getUsedCapacity();
-  }
-
-  @Override
-  public long getFree() {
-    return getFederationMetrics().getRemainingCapacity();
-  }
-
-  @Override
-  public long getTotal() {
-    return getFederationMetrics().getTotalCapacity();
-  }
-
-  @Override
-  public long getProvidedCapacity() {
-    return getFederationMetrics().getProvidedSpace();
-  }
-
-  @Override
-  public String getSafemode() {
-    // We assume that the global federated view is never in safe mode
-    return "";
-  }
-
-  @Override
-  public boolean isUpgradeFinalized() {
-    // We assume the upgrade is always finalized in a federated view
-    return true;
-  }
-
-  @Override
-  public RollingUpgradeInfo.Bean getRollingUpgradeStatus() {
-    return null;
-  }
-
-  @Override
-  public long getNonDfsUsedSpace() {
-    return 0;
-  }
-
-  @Override
-  public float getPercentUsed() {
-    return DFSUtilClient.getPercentUsed(getCapacityUsed(), getCapacityTotal());
-  }
-
-  @Override
-  public float getPercentRemaining() {
-    return DFSUtilClient.getPercentUsed(
-        getCapacityRemaining(), getCapacityTotal());
-  }
-
-  @Override
-  public long getCacheUsed() {
-    return 0;
-  }
-
-  @Override
-  public long getCacheCapacity() {
-    return 0;
-  }
-
-  @Override
-  public long getBlockPoolUsedSpace() {
-    return 0;
-  }
-
-  @Override
-  public float getPercentBlockPoolUsed() {
-    return 0;
-  }
-
-  @Override
-  public long getTotalBlocks() {
-    return getFederationMetrics().getNumBlocks();
-  }
-
-  @Override
-  public long getNumberOfMissingBlocks() {
-    return getFederationMetrics().getNumOfMissingBlocks();
-  }
-
-  @Override
-  @Deprecated
-  public long getPendingReplicationBlocks() {
-    return getFederationMetrics().getNumOfBlocksPendingReplication();
-  }
-
-  @Override
-  public long getPendingReconstructionBlocks() {
-    return getFederationMetrics().getNumOfBlocksPendingReplication();
-  }
-
-  @Override
-  @Deprecated
-  public long getUnderReplicatedBlocks() {
-    return getFederationMetrics().getNumOfBlocksUnderReplicated();
-  }
-
-  @Override
-  public long getLowRedundancyBlocks() {
-    return getFederationMetrics().getNumOfBlocksUnderReplicated();
-  }
-
-  @Override
-  public long getPendingDeletionBlocks() {
-    return getFederationMetrics().getNumOfBlocksPendingDeletion();
-  }
-
-  @Override
-  public long getScheduledReplicationBlocks() {
-    return -1;
-  }
-
-  @Override
-  public long getNumberOfMissingBlocksWithReplicationFactorOne() {
-    return 0;
-  }
-
-  @Override
-  public String getCorruptFiles() {
-    return "N/A";
-  }
-
-  @Override
-  public int getThreads() {
-    return ManagementFactory.getThreadMXBean().getThreadCount();
-  }
-
-  @Override
-  public String getLiveNodes() {
-    return this.getNodes(DatanodeReportType.LIVE);
-  }
-
-  @Override
-  public String getDeadNodes() {
-    return this.getNodes(DatanodeReportType.DEAD);
-  }
-
-  @Override
-  public String getDecomNodes() {
-    return this.getNodes(DatanodeReportType.DECOMMISSIONING);
-  }
-
-  /**
-   * Get all the nodes in the federation from a particular type.
-   * TODO this is expensive, we may want to cache it.
-   * @param type Type of the datanodes to check.
-   * @return JSON with the nodes.
-   */
-  private String getNodes(DatanodeReportType type) {
-    final Map<String, Map<String, Object>> info = new HashMap<>();
-    try {
-      RouterRpcServer rpcServer = this.router.getRpcServer();
-      DatanodeInfo[] datanodes = rpcServer.getDatanodeReport(type);
-      for (DatanodeInfo node : datanodes) {
-        Map<String, Object> innerinfo = new HashMap<>();
-        innerinfo.put("infoAddr", node.getInfoAddr());
-        innerinfo.put("infoSecureAddr", node.getInfoSecureAddr());
-        innerinfo.put("xferaddr", node.getXferAddr());
-        innerinfo.put("location", node.getNetworkLocation());
-        innerinfo.put("lastContact", getLastContact(node));
-        innerinfo.put("usedSpace", node.getDfsUsed());
-        innerinfo.put("adminState", node.getAdminState().toString());
-        innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed());
-        innerinfo.put("capacity", node.getCapacity());
-        innerinfo.put("numBlocks", -1); // node.numBlocks()
-        innerinfo.put("version", (node.getSoftwareVersion() == null ?
-                        "UNKNOWN" : node.getSoftwareVersion()));
-        innerinfo.put("used", node.getDfsUsed());
-        innerinfo.put("remaining", node.getRemaining());
-        innerinfo.put("blockScheduled", -1); // node.getBlocksScheduled()
-        innerinfo.put("blockPoolUsed", node.getBlockPoolUsed());
-        innerinfo.put("blockPoolUsedPercent", node.getBlockPoolUsedPercent());
-        innerinfo.put("volfails", -1); // node.getVolumeFailures()
-        info.put(node.getHostName() + ":" + node.getXferPort(),
-            Collections.unmodifiableMap(innerinfo));
-      }
-    } catch (StandbyException e) {
-      LOG.error("Cannot get {} nodes, Router in safe mode", type);
-    } catch (IOException e) {
-      LOG.error("Cannot get " + type + " nodes", e);
-    }
-    return JSON.toString(info);
-  }
-
-  @Override
-  public String getClusterId() {
-    try {
-      return
-          getNamespaceInfo(FederationNamespaceInfo::getClusterId).toString();
-    } catch (IOException e) {
-      LOG.error("Cannot fetch cluster ID metrics {}", e.getMessage());
-      return "";
-    }
-  }
-
-  @Override
-  public String getBlockPoolId() {
-    try {
-      return
-          getNamespaceInfo(FederationNamespaceInfo::getBlockPoolId).toString();
-    } catch (IOException e) {
-      LOG.error("Cannot fetch block pool ID metrics {}", e.getMessage());
-      return "";
-    }
-  }
-
-  /**
-   * Build a set of unique values found in all namespaces.
-   *
-   * @param f Method reference of the appropriate FederationNamespaceInfo
-   *          getter function
-   * @return Set of unique string values found in all discovered namespaces.
-   * @throws IOException if the query could not be executed.
-   */
-  private Collection<String> getNamespaceInfo(
-      Function<FederationNamespaceInfo, String> f) throws IOException {
-    StateStoreService stateStore = router.getStateStore();
-    MembershipStore membershipStore =
-        stateStore.getRegisteredRecordStore(MembershipStore.class);
-
-    GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance();
-    GetNamespaceInfoResponse response =
-        membershipStore.getNamespaceInfo(request);
-    return response.getNamespaceInfo().stream()
-      .map(f)
-      .collect(Collectors.toSet());
-  }
-
-  @Override
-  public String getNameDirStatuses() {
-    return "N/A";
-  }
-
-  @Override
-  public String getNodeUsage() {
-    return "N/A";
-  }
-
-  @Override
-  public String getNameJournalStatus() {
-    return "N/A";
-  }
-
-  @Override
-  public String getJournalTransactionInfo() {
-    return "N/A";
-  }
-
-  @Override
-  public long getNNStartedTimeInMillis() {
-    return this.router.getStartTime();
-  }
-
-  @Override
-  public String getCompileInfo() {
-    return VersionInfo.getDate() + " by " + VersionInfo.getUser() +
-        " from " + VersionInfo.getBranch();
-  }
-
-  @Override
-  public int getDistinctVersionCount() {
-    return 0;
-  }
-
-  @Override
-  public Map<String, Integer> getDistinctVersions() {
-    return null;
-  }
-
-  /////////////////////////////////////////////////////////
-  // FSNamesystemMBean
-  /////////////////////////////////////////////////////////
-
-  @Override
-  public String getFSState() {
-    // We assume it is not in safe mode
-    return "Operational";
-  }
-
-  @Override
-  public long getBlocksTotal() {
-    return this.getTotalBlocks();
-  }
-
-  @Override
-  public long getCapacityTotal() {
-    return this.getTotal();
-  }
-
-  @Override
-  public long getCapacityRemaining() {
-    return this.getFree();
-  }
-
-  @Override
-  public long getCapacityUsed() {
-    return this.getUsed();
-  }
-
-  @Override
-  public long getProvidedCapacityTotal() {
-    return getProvidedCapacity();
-  }
-
-  @Override
-  public long getFilesTotal() {
-    return getFederationMetrics().getNumFiles();
-  }
-
-  @Override
-  public int getTotalLoad() {
-    return -1;
-  }
-
-  @Override
-  public int getNumLiveDataNodes() {
-    return this.router.getMetrics().getNumLiveNodes();
-  }
-
-  @Override
-  public int getNumDeadDataNodes() {
-    return this.router.getMetrics().getNumDeadNodes();
-  }
-
-  @Override
-  public int getNumStaleDataNodes() {
-    return -1;
-  }
-
-  @Override
-  public int getNumDecomLiveDataNodes() {
-    return this.router.getMetrics().getNumDecomLiveNodes();
-  }
-
-  @Override
-  public int getNumDecomDeadDataNodes() {
-    return this.router.getMetrics().getNumDecomDeadNodes();
-  }
-
-  @Override
-  public int getNumDecommissioningDataNodes() {
-    return this.router.getMetrics().getNumDecommissioningNodes();
-  }
-
-  @Override
-  public int getNumInMaintenanceLiveDataNodes() {
-    return 0;
-  }
-
-  @Override
-  public int getNumInMaintenanceDeadDataNodes() {
-    return 0;
-  }
-
-  @Override
-  public int getNumEnteringMaintenanceDataNodes() {
-    return 0;
-  }
-
-  @Override
-  public int getVolumeFailuresTotal() {
-    return 0;
-  }
-
-  @Override
-  public long getEstimatedCapacityLostTotal() {
-    return 0;
-  }
-
-  @Override
-  public String getSnapshotStats() {
-    return null;
-  }
-
-  @Override
-  public long getMaxObjects() {
-    return 0;
-  }
-
-  @Override
-  public long getBlockDeletionStartTime() {
-    return -1;
-  }
-
-  @Override
-  public int getNumStaleStorages() {
-    return -1;
-  }
-
-  @Override
-  public String getTopUserOpCounts() {
-    return "N/A";
-  }
-
-  @Override
-  public int getFsLockQueueLength() {
-    return 0;
-  }
-
-  @Override
-  public long getTotalSyncCount() {
-    return 0;
-  }
-
-  @Override
-  public String getTotalSyncTimes() {
-    return "";
-  }
-
-  private long getLastContact(DatanodeInfo node) {
-    return (now() - node.getLastUpdate()) / 1000;
-  }
-
-  /////////////////////////////////////////////////////////
-  // NameNodeStatusMXBean
-  /////////////////////////////////////////////////////////
-
-  @Override
-  public String getNNRole() {
-    return NamenodeRole.NAMENODE.toString();
-  }
-
-  @Override
-  public String getState() {
-    return HAServiceState.ACTIVE.toString();
-  }
-
-  @Override
-  public String getHostAndPort() {
-    return NetUtils.getHostPortString(router.getRpcServerAddress());
-  }
-
-  @Override
-  public boolean isSecurityEnabled() {
-    return false;
-  }
-
-  @Override
-  public long getLastHATransitionTime() {
-    return 0;
-  }
-
-  @Override
-  public long getBytesWithFutureGenerationStamps() {
-    return 0;
-  }
-
-  @Override
-  public String getSlowPeersReport() {
-    return "N/A";
-  }
-
-  @Override
-  public String getSlowDisksReport() {
-    return "N/A";
-  }
-
-  @Override
-  public long getNumberOfSnapshottableDirs() {
-    return 0;
-  }
-
-  @Override
-  public String getEnteringMaintenanceNodes() {
-    return "N/A";
-  }
-
-  @Override
-  public String getNameDirSize() {
-    return "N/A";
-  }
-
-  @Override
-  public int getNumEncryptionZones() {
-    return 0;
-  }
-}

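The MXBean getters above are only ever consumed through JMX. A minimal in-process read of one of them might look like the sketch below; the Hadoop:service=NameNode,name=NameNodeInfo object name is an assumption based on the usual NameNode bean registration and may not match every deployment.

import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

public class RouterBeanReader {
  public static void main(String[] args) throws Exception {
    // Platform MBean server where Hadoop registers its beans in-process.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Assumed object name; check the actual registration in your build.
    ObjectName name = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    // "LiveNodes" corresponds to getLiveNodes() above: a JSON string keyed by
    // "host:xferPort" with the per-datanode fields built in getNodes().
    String liveNodes = (String) mbs.getAttribute(name, "LiveNodes");
    System.out.println(liveNodes);
  }
}
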
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMBean.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMBean.java
deleted file mode 100644
index 5e4ccab..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMBean.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * JMX interface for the State Store metrics.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface StateStoreMBean {
-
-  long getReadOps();
-
-  double getReadAvg();
-
-  long getWriteOps();
-
-  double getWriteAvg();
-
-  long getFailureOps();
-
-  double getFailureAvg();
-
-  long getRemoveOps();
-
-  double getRemoveAvg();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
deleted file mode 100644
index 09253a2..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
-import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Implementations of the JMX interface for the State Store metrics.
- */
-@Metrics(name = "StateStoreActivity", about = "Router metrics",
-    context = "dfs")
-public final class StateStoreMetrics implements StateStoreMBean {
-
-  private final MetricsRegistry registry = new MetricsRegistry("router");
-
-  @Metric("GET transactions")
-  private MutableRate reads;
-  @Metric("PUT transactions")
-  private MutableRate writes;
-  @Metric("REMOVE transactions")
-  private MutableRate removes;
-  @Metric("Failed transactions")
-  private MutableRate failures;
-
-  private Map<String, MutableGaugeInt> cacheSizes;
-
-  private StateStoreMetrics(Configuration conf) {
-    registry.tag(SessionId, "RouterSession");
-    registry.tag(ProcessName, "Router");
-    cacheSizes = new HashMap<>();
-  }
-
-  public static StateStoreMetrics create(Configuration conf) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(new StateStoreMetrics(conf));
-  }
-
-  public void shutdown() {
-    DefaultMetricsSystem.shutdown();
-    reset();
-  }
-
-  public void addRead(long latency) {
-    reads.add(latency);
-  }
-
-  public long getReadOps() {
-    return reads.lastStat().numSamples();
-  }
-
-  public double getReadAvg() {
-    return reads.lastStat().mean();
-  }
-
-  public void addWrite(long latency) {
-    writes.add(latency);
-  }
-
-  public long getWriteOps() {
-    return writes.lastStat().numSamples();
-  }
-
-  public double getWriteAvg() {
-    return writes.lastStat().mean();
-  }
-
-  public void addFailure(long latency) {
-    failures.add(latency);
-  }
-
-  public long getFailureOps() {
-    return failures.lastStat().numSamples();
-  }
-
-  public double getFailureAvg() {
-    return failures.lastStat().mean();
-  }
-
-  public void addRemove(long latency) {
-    removes.add(latency);
-  }
-
-  public long getRemoveOps() {
-    return removes.lastStat().numSamples();
-  }
-
-  public double getRemoveAvg() {
-    return removes.lastStat().mean();
-  }
-
-  /**
-   * Set the size of the cache for a State Store interface.
-   *
-   * @param name Name of the record to cache.
-   * @param size Number of records.
-   */
-  public void setCacheSize(String name, int size) {
-    String counterName = "Cache" + name + "Size";
-    MutableGaugeInt counter = cacheSizes.get(counterName);
-    if (counter == null) {
-      counter = registry.newGauge(counterName, name, size);
-      cacheSizes.put(counterName, counter);
-    }
-    counter.set(size);
-  }
-
-  @VisibleForTesting
-  public void reset() {
-    reads.resetMinMax();
-    writes.resetMinMax();
-    removes.resetMinMax();
-    failures.resetMinMax();
-
-    reads.lastStat().reset();
-    writes.lastStat().reset();
-    removes.lastStat().reset();
-    failures.lastStat().reset();
-  }
-}

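As a rough usage sketch of the metrics class above (the Configuration is the default one, and the latencies and cache size are made-up values):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMetrics;

public class StateStoreMetricsExample {
  public static void main(String[] args) {
    // Register against the default metrics system, as the Router does.
    StateStoreMetrics metrics = StateStoreMetrics.create(new Configuration());
    metrics.addRead(5);      // hypothetical 5 ms GET
    metrics.addRead(15);     // hypothetical 15 ms GET
    metrics.addWrite(20);    // hypothetical 20 ms PUT
    metrics.setCacheSize("MountTable", 42);  // hypothetical cache size
    System.out.println("reads=" + metrics.getReadOps()
        + ", readAvg=" + metrics.getReadAvg() + " ms");
    metrics.shutdown();
  }
}
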
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/package-info.java
deleted file mode 100644
index c56c823..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/package-info.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Report metrics for Router-based Federation.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
deleted file mode 100644
index 1773b34..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.resolver;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Locates the most active NN for a given nameservice ID or blockpool ID. This
- * interface is used by the {@link org.apache.hadoop.hdfs.server.federation.
- * router.RouterRpcServer RouterRpcServer} to:
- * <ul>
- * <li>Determine the target NN for a given subcluster.
- * <li>List all namespaces discovered/active in the federation.
- * <li>Update the currently active NN empirically.
- * </ul>
- * The interface is also used by the {@link org.apache.hadoop.hdfs.server.
- * federation.router.NamenodeHeartbeatService NamenodeHeartbeatService} to
- * register a discovered NN.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface ActiveNamenodeResolver {
-
-  /**
-   * Report a successful, active NN address for a nameservice or blockPool.
-   *
-   * @param ns Nameservice identifier.
-   * @param successfulAddress The address that successfully responded to the
-   *                          command.
-   * @throws IOException If the state store cannot be accessed.
-   */
-  void updateActiveNamenode(
-      String ns, InetSocketAddress successfulAddress) throws IOException;
-
-  /**
-   * Returns a prioritized list of the most recent cached registration entries
-   * for a single nameservice ID.
-   * Returns an empty list if none are found. Returns entries in order of preference:
-   * <ul>
-   * <li>The most recent ACTIVE NN
-   * <li>The most recent STANDBY NN
-   * <li>The most recent UNAVAILABLE NN
-   * </ul>
-   *
-   * @param nameserviceId Nameservice identifier.
-   * @return Prioritized list of namenode contexts.
-   * @throws IOException If the state store cannot be accessed.
-   */
-  List<? extends FederationNamenodeContext>
-      getNamenodesForNameserviceId(String nameserviceId) throws IOException;
-
-  /**
-   * Returns a prioritized list of the most recent cached registration entries
-   * for a single block pool ID.
-   * Returns an empty list if none are found. Returns entries in order of preference:
-   * <ul>
-   * <li>The most recent ACTIVE NN
-   * <li>The most recent STANDBY NN
-   * <li>The most recent UNAVAILABLE NN
-   * </ul>
-   *
-   * @param blockPoolId Block pool identifier for the nameservice.
-   * @return Prioritized list of namenode contexts.
-   * @throws IOException If the state store cannot be accessed.
-   */
-  List<? extends FederationNamenodeContext>
-      getNamenodesForBlockPoolId(String blockPoolId) throws IOException;
-
-  /**
-   * Register a namenode in the State Store.
-   *
-   * @param report Namenode status report.
-   * @return True if the node was registered and successfully committed to the
-   *         data store.
-   * @throws IOException Throws exception if the namenode could not be
-   *         registered.
-   */
-  boolean registerNamenode(NamenodeStatusReport report) throws IOException;
-
-  /**
-   * Get a list of all namespaces that are registered and active in the
-   * federation.
-   *
-   * @return List of namespaces in the federation.
-   * @throws IOException Throws exception if the namespace list is not
-   *         available.
-   */
-  Set<FederationNamespaceInfo> getNamespaces() throws IOException;
-
-  /**
-   * Assign a unique identifier for the parent router service.
-   * Required to report the status to the namenode resolver.
-   *
-   * @param routerId Unique string identifier for the router.
-   */
-  void setRouterId(String routerId);
-}

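A hedged sketch of the call pattern this interface is written for (the resolver instance and nameservice ID are assumed to come from elsewhere; this is not the actual RouterRpcServer logic):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;

import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
import org.apache.hadoop.net.NetUtils;

public final class ResolverUsageSketch {
  private ResolverUsageSketch() {}

  /** Try namenodes in resolver priority order and remember the one that works. */
  static void invokeOnFirstAvailable(ActiveNamenodeResolver resolver, String nsId)
      throws IOException {
    // Ordered list: most recent ACTIVE first, then STANDBY, then UNAVAILABLE.
    List<? extends FederationNamenodeContext> nns =
        resolver.getNamenodesForNameserviceId(nsId);
    for (FederationNamenodeContext nn : nns) {
      InetSocketAddress addr = NetUtils.createSocketAddr(nn.getRpcAddress());
      // ... issue the client RPC against addr here; on success, record the
      // winner so later calls try it first, then stop.
      resolver.updateActiveNamenode(nsId, addr);
      return;
    }
    throw new IOException("No namenodes registered for " + nsId);
  }
}
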
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeContext.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeContext.java
deleted file mode 100644
index 68ef02a..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeContext.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.resolver;
-
-/**
- * Interface for a discovered NN and its current server endpoints.
- */
-public interface FederationNamenodeContext {
-
-  /**
-   * Get the RPC server address of the namenode.
-   *
-   * @return RPC server address in the form of host:port.
-   */
-  String getRpcAddress();
-
-  /**
-   * Get the Service RPC server address of the namenode.
-   *
-   * @return Service RPC server address in the form of host:port.
-   */
-  String getServiceAddress();
-
-  /**
-   * Get the Lifeline RPC server address of the namenode.
-   *
-   * @return Lifeline RPC server address in the form of host:port.
-   */
-  String getLifelineAddress();
-
-  /**
-   * Get the HTTP server address of the namenode.
-   *
-   * @return HTTP address in the form of host:port.
-   */
-  String getWebAddress();
-
-  /**
-   * Get the unique key representing the namenode.
-   *
-   * @return Combination of the nameservice and the namenode IDs.
-   */
-  String getNamenodeKey();
-
-  /**
-   * Identifier for the nameservice/namespace.
-   *
-   * @return Namenode nameservice identifier.
-   */
-  String getNameserviceId();
-
-  /**
-   * Identifier for the namenode.
-   *
-   * @return String with the namenode identifier.
-   */
-  String getNamenodeId();
-
-  /**
-   * The current state of the namenode (active, standby, etc).
-   *
-   * @return FederationNamenodeServiceState State of the namenode.
-   */
-  FederationNamenodeServiceState getState();
-
-  /**
-   * The update date.
-   *
-   * @return Long with the update date.
-   */
-  long getDateModified();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
deleted file mode 100644
index c773f82..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.resolver;
-
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-
-/**
- * Namenode state in the federation. The order of this enum is used to evaluate
- * NN priority for RPC calls.
- */
-public enum FederationNamenodeServiceState {
-  ACTIVE, // HAServiceState.ACTIVE or operational.
-  STANDBY, // HAServiceState.STANDBY.
-  UNAVAILABLE, // When the namenode cannot be reached.
-  EXPIRED; // When the last update is too old.
-
-  public static FederationNamenodeServiceState getState(HAServiceState state) {
-    switch(state) {
-    case ACTIVE:
-      return FederationNamenodeServiceState.ACTIVE;
-    case STANDBY:
-      return FederationNamenodeServiceState.STANDBY;
-    case INITIALIZING:
-      return FederationNamenodeServiceState.UNAVAILABLE;
-    case STOPPING:
-      return FederationNamenodeServiceState.UNAVAILABLE;
-    default:
-      return FederationNamenodeServiceState.UNAVAILABLE;
-    }
-  }
-}
\ No newline at end of file

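A quick way to see the mapping above is to print it for every HAServiceState (sketch only; any state not handled explicitly falls into the default UNAVAILABLE branch):

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;

public class StateMappingDemo {
  public static void main(String[] args) {
    // Prints e.g. "ACTIVE -> ACTIVE" and "INITIALIZING -> UNAVAILABLE".
    for (HAServiceState s : HAServiceState.values()) {
      System.out.println(s + " -> " + FederationNamenodeServiceState.getState(s));
    }
  }
}
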
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
deleted file mode 100644
index edcd308..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.resolver;
-
-import org.apache.hadoop.hdfs.server.federation.router.RemoteLocationContext;
-
-/**
- * Represents information about a single nameservice/namespace in a federated
- * HDFS cluster.
- */
-public class FederationNamespaceInfo extends RemoteLocationContext {
-
-  /** Block pool identifier. */
-  private final String blockPoolId;
-  /** Cluster identifier. */
-  private final String clusterId;
-  /** Nameservice identifier. */
-  private final String nameserviceId;
-
-  public FederationNamespaceInfo(String bpId, String clId, String nsId) {
-    this.blockPoolId = bpId;
-    this.clusterId = clId;
-    this.nameserviceId = nsId;
-  }
-
-  @Override
-  public String getNameserviceId() {
-    return this.nameserviceId;
-  }
-
-  @Override
-  public String getDest() {
-    return this.nameserviceId;
-  }
-
-  /**
-   * The HDFS cluster id for this namespace.
-   *
-   * @return Cluster identifier.
-   */
-  public String getClusterId() {
-    return this.clusterId;
-  }
-
-  /**
-   * The HDFS block pool id for this namespace.
-   *
-   * @return Block pool identifier.
-   */
-  public String getBlockPoolId() {
-    return this.blockPoolId;
-  }
-
-  @Override
-  public String toString() {
-    return this.nameserviceId + "->" + this.blockPoolId + ":" + this.clusterId;
-  }
-}
\ No newline at end of file

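Finally, a tiny sketch of the descriptor above on its own (the block pool, cluster, and nameservice identifiers are made up):

import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;

public class NamespaceInfoDemo {
  public static void main(String[] args) {
    FederationNamespaceInfo ns = new FederationNamespaceInfo(
        "BP-1234-10.0.0.1-1510000000000", "CID-abc", "ns0");
    // toString() above renders this as "ns0->BP-1234-10.0.0.1-1510000000000:CID-abc".
    System.out.println(ns);
  }
}
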

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
