2005hithlj commented on code in PR #4810:
URL: https://github.com/apache/hbase/pull/4810#discussion_r1000049844


##########
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java:
##########
@@ -294,80 +327,107 @@ public String dumpPeersState(List<ReplicationPeerDescription> peers) throws Exce
     return sb.toString();
   }
 
-  public String dumpQueues(ZKWatcher zkw, Set<String> peerIds, boolean hdfs) throws Exception {
-    ReplicationQueueStorage queueStorage;
+  public String dumpQueues(ZKWatcher zkw, Connection connection, Set<String> peerIds, boolean hdfs)
+    throws Exception {
     StringBuilder sb = new StringBuilder();
+    ReplicationQueueStorage queueStorage =
+      ReplicationStorageFactory.getReplicationQueueStorage(connection, getConf());
+    Set<ServerName> liveRegionServers = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode)
+      .stream().map(ServerName::parseServerName).collect(Collectors.toSet());
+
+    List<ServerName> regionServers = queueStorage.listAllReplicators();
+    if (regionServers == null || regionServers.isEmpty()) {
+      return sb.toString();
+    }
+    for (ServerName regionServer : regionServers) {
+      List<ReplicationQueueId> queueIds = queueStorage.listAllQueueIds(regionServer);
 
-    // queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf());
-    // Set<ServerName> liveRegionServers = ZKUtil.listChildrenNoWatch(zkw,
-    // zkw.getZNodePaths().rsZNode)
-    // .stream().map(ServerName::parseServerName).collect(Collectors.toSet());
-    //
-    // Loops each peer on each RS and dumps the queues
-    // List<ServerName> regionservers = queueStorage.getListOfReplicators();
-    // if (regionservers == null || regionservers.isEmpty()) {
-    // return sb.toString();
-    // }
-    // for (ServerName regionserver : regionservers) {
-    // List<String> queueIds = queueStorage.getAllQueues(regionserver);
-    // if (!liveRegionServers.contains(regionserver)) {
-    // deadRegionServers.add(regionserver.getServerName());
-    // }
-    // for (String queueId : queueIds) {
-    // ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
-    // List<String> wals = queueStorage.getWALsInQueue(regionserver, queueId);
-    // Collections.sort(wals);
-    // if (!peerIds.contains(queueInfo.getPeerId())) {
-    // deletedQueues.add(regionserver + "/" + queueId);
-    // sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, true, hdfs));
-    // } else {
-    // sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, false, hdfs));
-    // }
-    // }
-    // }
+      if (!liveRegionServers.contains(regionServer)) {
+        deadRegionServers.add(regionServer.getServerName());
+      }
+      for (ReplicationQueueId queueId : queueIds) {
+        List<String> wals = null;
+        if (queueId.isRecovered()) {
+          wals = AbstractFSWALProvider

Review Comment:
   This method can really solve my problem.
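   For context, a minimal standalone sketch of the traversal pattern the new hunk uses: list every replicator, list each server's ReplicationQueueId entries, and branch on isRecovered(). It is based only on the calls visible in the diff above; the `conn` and `conf` variables and the println output are illustrative assumptions, not the PR's actual code.

       // Sketch only: assumes an open Connection `conn` and a Configuration `conf`.
       ReplicationQueueStorage queueStorage =
         ReplicationStorageFactory.getReplicationQueueStorage(conn, conf);
       // Walk every region server that has (or had) replication queues.
       for (ServerName regionServer : queueStorage.listAllReplicators()) {
         for (ReplicationQueueId queueId : queueStorage.listAllQueueIds(regionServer)) {
           if (queueId.isRecovered()) {
             // Recovered queue: its WALs were claimed from a dead region server,
             // which is why the hunk falls back to listing them via the WAL provider.
             System.out.println("recovered queue: " + queueId);
           } else {
             System.out.println("active queue: " + queueId);
           }
         }
       }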


