This is an automated email from the ASF dual-hosted git repository.

andor pushed a commit to branch HBASE-29081
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-29081 by this push:
     new afe2274bba5 HBASE-29580: Clean-up hardcoded meta table names from log 
entries (#7319)
afe2274bba5 is described below

commit afe2274bba52322e553664b20aa0420596fafcc4
Author: Kevin Geiszler <[email protected]>
AuthorDate: Tue Sep 30 07:12:12 2025 -0700

    HBASE-29580: Clean-up hardcoded meta table names from log entries (#7319)
    
    Change-Id: I2bca05b3f2ef4450bfcbb3b7608b829348c37bde
---
 .../backup/impl/IncrementalBackupManager.java      |  2 +-
 .../master/SnapshotOfRegionAssignmentFromMeta.java |  6 +-
 .../org/apache/hadoop/hbase/client/RegionInfo.java |  2 +-
 .../master/MetricsMasterFileSystemSource.java      |  3 +-
 .../org/apache/hadoop/hbase/MetaTableAccessor.java |  2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    | 10 +--
 .../hbase/master/RegionPlacementMaintainer.java    | 16 ++---
 .../hbase/master/assignment/AssignmentManager.java |  6 +-
 .../assignment/MergeTableRegionsProcedure.java     |  6 +-
 .../hbase/master/assignment/RegionStateStore.java  | 11 ++--
 .../assignment/SplitTableRegionProcedure.java      |  6 +-
 .../hbase/master/janitor/CatalogJanitor.java       |  4 +-
 .../hadoop/hbase/master/janitor/MetaFixer.java     | 12 ++--
 .../hbase/master/janitor/ReportMakingVisitor.java  |  6 +-
 .../master/procedure/HBCKServerCrashProcedure.java | 13 ++--
 .../hbase/master/procedure/InitMetaProcedure.java  |  2 +-
 .../master/procedure/ModifyTableProcedure.java     |  2 +-
 .../master/procedure/RefreshMetaProcedure.java     |  2 +-
 .../hadoop/hbase/tool/BulkLoadHFilesTool.java      | 12 ++--
 .../hadoop/hbase/util/FSTableDescriptors.java      |  6 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    | 75 ++++++++++++----------
 .../org/apache/hadoop/hbase/util/RegionMover.java  |  9 +--
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   | 19 +++---
 .../org/apache/hadoop/hbase/zookeeper/ZKDump.java  |  3 +-
 24 files changed, 131 insertions(+), 104 deletions(-)

diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index c92c0747e83..911e785c52c 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -177,7 +177,7 @@ public class IncrementalBackupManager extends BackupManager 
{
         LOG.debug("currentLogFile: " + log.getPath().toString());
         if (AbstractFSWALProvider.isMetaFile(log.getPath())) {
           if (LOG.isDebugEnabled()) {
-            LOG.debug("Skip hbase:meta log file: " + log.getPath().getName());
+            LOG.debug("Skip {} log file: {}", TableName.META_TABLE_NAME, 
log.getPath().getName());
           }
           continue;
         }
diff --git 
a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
 
b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index 02c18c73bfb..74d49ec14c0 100644
--- 
a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ 
b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -170,7 +170,8 @@ public class SnapshotOfRegionAssignmentFromMeta {
    * Initialize the region assignment snapshot by scanning the hbase:meta table
    */
   public void initialize() throws IOException {
-    LOG.info("Start to scan the hbase:meta for the current region assignment " 
+ "snappshot");
+    LOG.info("Start to scan {} for the current region assignment snapshot",
+      TableName.META_TABLE_NAME);
     // Scan hbase:meta to pick up user regions
     try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
       ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) 
{
@@ -187,7 +188,8 @@ public class SnapshotOfRegionAssignmentFromMeta {
         }
       }
     }
-    LOG.info("Finished to scan the hbase:meta for the current region 
assignment" + "snapshot");
+    LOG.info("Finished scanning {} for the current region assignment snapshot",
+      TableName.META_TABLE_NAME);
   }
 
   private void addRegion(RegionInfo regionInfo) {
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 10c554e26f7..82b8711b776 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -431,7 +431,7 @@ public interface RegionInfo extends Comparable<RegionInfo> {
    */
   static String prettyPrint(final String encodedRegionName) {
     if (encodedRegionName.equals("1028785192")) {
-      return encodedRegionName + "/hbase:meta";
+      return encodedRegionName + "/" + TableName.META_TABLE_NAME;
     }
     return encodedRegionName;
   }
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
index 53ed8a25ed0..579171e1c3d 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.metrics.BaseSource;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -49,7 +50,7 @@ public interface MetricsMasterFileSystemSource extends 
BaseSource {
   String SPLIT_SIZE_NAME = "hlogSplitSize";
 
   String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
-  String META_SPLIT_SIZE_DESC = "Size of hbase:meta WAL files being split";
+  String META_SPLIT_SIZE_DESC = "Size of " + TableName.META_TABLE_NAME + " WAL 
files being split";
   String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()";
   String SPLIT_SIZE_DESC = "Size of WAL files being split";
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 9d31511a896..4a3ae89fa64 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -848,7 +848,7 @@ public final class MetaTableAccessor {
   private static void updateTableState(Connection connection, TableState 
state) throws IOException {
     Put put = makePutFromTableState(state, 
EnvironmentEdgeManager.currentTime());
     putToMetaTable(connection, put);
-    LOG.info("Updated {} in hbase:meta", state);
+    LOG.info("Updated {} in {}", state, TableName.META_TABLE_NAME);
   }
 
   /**
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 757699ccccb..f849a19a251 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1187,8 +1187,9 @@ public class HMaster extends 
HBaseServerBase<MasterRpcServices> implements Maste
         int existingReplicasCount =
           
assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
         if (existingReplicasCount > metaDesc.getRegionReplication()) {
-          LOG.info("Update replica count of hbase:meta from {}(in 
TableDescriptor)"
-            + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), 
existingReplicasCount);
+          LOG.info(
+            "Update replica count of {} from {}(in TableDescriptor)" + " to 
{}(existing ZNodes)",
+            TableName.META_TABLE_NAME, metaDesc.getRegionReplication(), 
existingReplicasCount);
           metaDesc = TableDescriptorBuilder.newBuilder(metaDesc)
             .setRegionReplication(existingReplicasCount).build();
           tableDescriptors.update(metaDesc);
@@ -1197,8 +1198,9 @@ public class HMaster extends 
HBaseServerBase<MasterRpcServices> implements Maste
         if (metaDesc.getRegionReplication() != replicasNumInConf) {
           LOG.info(
             "The {} config is {} while the replica count in TableDescriptor is 
{}"
-              + " for hbase:meta, altering...",
-            HConstants.META_REPLICAS_NUM, replicasNumInConf, 
metaDesc.getRegionReplication());
+              + " for {}, altering...",
+            HConstants.META_REPLICAS_NUM, replicasNumInConf, 
metaDesc.getRegionReplication(),
+            TableName.META_TABLE_NAME);
           procedureExecutor.submitProcedure(new ModifyTableProcedure(
             procedureExecutor.getEnvironment(), 
TableDescriptorBuilder.newBuilder(metaDesc)
               .setRegionReplication(replicasNumInConf).build(),
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 854c21da2bc..69dfb040831 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -605,7 +605,7 @@ public class RegionPlacementMaintainer implements Closeable 
{
    */
   public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws 
IOException {
     try {
-      LOG.info("Start to update the hbase:meta with the new assignment plan");
+      LOG.info("Started updating {} with the new assignment plan", 
TableName.META_TABLE_NAME);
       Map<String, List<ServerName>> assignmentMap = plan.getAssignmentMap();
       Map<RegionInfo, List<ServerName>> planToUpdate = new 
HashMap<>(assignmentMap.size());
       Map<String, RegionInfo> regionToRegionInfoMap =
@@ -615,10 +615,10 @@ public class RegionPlacementMaintainer implements 
Closeable {
       }
 
       FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(planToUpdate, 
conf);
-      LOG.info("Updated the hbase:meta with the new assignment plan");
+      LOG.info("Updated {} with the new assignment plan", 
TableName.META_TABLE_NAME);
     } catch (Exception e) {
-      LOG.error(
-        "Failed to update hbase:meta with the new assignment" + "plan because 
" + e.getMessage());
+      LOG.error("Failed to update {} with the new assignment plan because {}",
+        TableName.META_TABLE_NAME, e.getMessage());
     }
   }
 
@@ -690,14 +690,14 @@ public class RegionPlacementMaintainer implements 
Closeable {
   }
 
   public void updateAssignmentPlan(FavoredNodesPlan plan) throws IOException {
-    LOG.info("Start to update the new assignment plan for the hbase:meta table 
and"
-      + " the region servers");
+    LOG.info("Started updating the new assignment plan for {} and the region 
servers",
+      TableName.META_TABLE_NAME);
     // Update the new assignment plan to META
     updateAssignmentPlanToMeta(plan);
     // Update the new assignment plan to Region Servers
     updateAssignmentPlanToRegionServers(plan);
-    LOG.info("Finish to update the new assignment plan for the hbase:meta 
table and"
-      + " the region servers");
+    LOG.info("Finished updating the new assignment plan for {} and the region 
servers",
+      TableName.META_TABLE_NAME);
   }
 
   /**
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 8c37385757a..1e553c4ea3f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -346,7 +346,7 @@ public class AssignmentManager {
             if (RegionReplicaUtil.isDefaultReplica(regionInfo.getReplicaId())) 
{
               setMetaAssigned(regionInfo, state == State.OPEN);
             }
-            LOG.debug("Loaded hbase:meta {}", regionNode);
+            LOG.debug("Loaded {} {}", TableName.META_TABLE_NAME, regionNode);
           }, result);
       }
     }
@@ -1921,8 +1921,8 @@ public class AssignmentManager {
     boolean meta = isMetaRegion(hri);
     boolean metaLoaded = isMetaLoaded();
     if (!meta && !metaLoaded) {
-      throw new PleaseHoldException(
-        "Master not fully online; hbase:meta=" + meta + ", metaLoaded=" + 
metaLoaded);
+      throw new PleaseHoldException("Master not fully online; " + 
TableName.META_TABLE_NAME + "="
+        + meta + ", metaLoaded=" + metaLoaded);
     }
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index c370fed9d9c..5fdd2893cca 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -718,8 +718,10 @@ public class MergeTableRegionsProcedure
           RegionInfo.parseRegionName(p.getRow());
         }
       } catch (IOException e) {
-        LOG.error("Row key of mutation from coprocessor is not parsable as 
region name. "
-          + "Mutations from coprocessor should only be for hbase:meta table.", 
e);
+        LOG.error(
+          "Row key of mutation from coprocessor is not parsable as region 
name. "
+            + "Mutations from coprocessor should only be for {} table.",
+          TableName.META_TABLE_NAME, e);
         throw e;
       }
     }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 5987fc7537b..6dac4c37c1b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -169,9 +169,10 @@ public class RegionStateStore {
       final long openSeqNum = hrl.getSeqNum();
 
       LOG.debug(
-        "Load hbase:meta entry region={}, regionState={}, lastHost={}, "
+        "Load {} entry region={}, regionState={}, lastHost={}, "
           + "regionLocation={}, openSeqNum={}",
-        regionInfo.getEncodedName(), state, lastHost, regionLocation, 
openSeqNum);
+        TableName.META_TABLE_NAME, regionInfo.getEncodedName(), state, 
lastHost, regionLocation,
+        openSeqNum);
       visitor.visitRegionState(result, regionInfo, state, regionLocation, 
lastHost, openSeqNum);
     }
   }
@@ -190,8 +191,8 @@ public class RegionStateStore {
     final Put put = new 
Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time);
     MetaTableAccessor.addRegionInfo(put, regionInfo);
     final StringBuilder info =
-      new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")
-        .append(regionInfo.getEncodedName()).append(", 
regionState=").append(state);
+      new StringBuilder("pid=").append(pid).append(" updating 
").append(TableName.META_TABLE_NAME)
+        .append(" row=").append(regionInfo.getEncodedName()).append(", 
regionState=").append(state);
     if (openSeqNum >= 0) {
       Preconditions.checkArgument(state == State.OPEN && regionLocation != 
null,
         "Open region should be on a server");
@@ -694,7 +695,7 @@ public class RegionStateStore {
       return State.valueOf(state);
     } catch (IllegalArgumentException e) {
       LOG.warn(
-        "BAD value {} in hbase:meta info:state column for region {} , "
+        "BAD value {} in " + TableName.META_TABLE_NAME + " info:state column 
for region {} , "
           + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE",
         state, regionInfo.getEncodedName());
       return null;
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 3d3d3d18de2..c9d158d3fb4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -903,8 +903,10 @@ public class SplitTableRegionProcedure
           RegionInfo.parseRegionName(p.getRow());
         }
       } catch (IOException e) {
-        LOG.error("pid=" + getProcId() + " row key of mutation from 
coprocessor not parsable as "
-          + "region name." + "Mutations from coprocessor should only for 
hbase:meta table.");
+        LOG.error(
+          "pid={} row key of mutation from coprocessor not parsable as region 
name. "
+            + "Mutations from coprocessor should only be for {} table.",
+          getProcId(), TableName.META_TABLE_NAME);
         throw e;
       }
     }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
index 0d3ddb43abd..14cf61ef970 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
@@ -105,7 +105,7 @@ public class CatalogJanitor extends ScheduledChore {
         scan();
       }
     } catch (IOException e) {
-      LOG.warn("Failed initial janitorial scan of hbase:meta table", e);
+      LOG.warn("Failed initial janitorial scan of {} table", 
TableName.META_TABLE_NAME, e);
       return false;
     }
     return true;
@@ -145,7 +145,7 @@ public class CatalogJanitor extends ScheduledChore {
           + this.services.getServerManager().isClusterShutdown());
       }
     } catch (IOException e) {
-      LOG.warn("Failed janitorial scan of hbase:meta table", e);
+      LOG.warn("Failed janitorial scan of {} table", 
TableName.META_TABLE_NAME, e);
     }
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
index d2cc2e2bfdb..f292c94c8ff 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
@@ -198,19 +198,19 @@ public class MetaFixer {
         .flatMap(List::stream).collect(Collectors.toList());
     final List<IOException> createMetaEntriesFailures = 
addMetaEntriesResults.stream()
       
.filter(Either::hasRight).map(Either::getRight).collect(Collectors.toList());
-    LOG.debug("Added {}/{} entries to hbase:meta", 
createMetaEntriesSuccesses.size(),
-      newRegionInfos.size());
+    LOG.debug("Added {}/{} entries to {}", createMetaEntriesSuccesses.size(), 
newRegionInfos.size(),
+      TableName.META_TABLE_NAME);
 
     if (!createMetaEntriesFailures.isEmpty()) {
       LOG.warn(
-        "Failed to create entries in hbase:meta for {}/{} RegionInfo 
descriptors. First"
+        "Failed to create entries in {} for {}/{} RegionInfo descriptors. 
First"
           + " failure message included; full list of failures with 
accompanying stack traces is"
           + " available at log level DEBUG. message={}",
-        createMetaEntriesFailures.size(), addMetaEntriesResults.size(),
+        TableName.META_TABLE_NAME, createMetaEntriesFailures.size(), 
addMetaEntriesResults.size(),
         createMetaEntriesFailures.get(0).getMessage());
       if (LOG.isDebugEnabled()) {
-        createMetaEntriesFailures
-          .forEach(ioe -> LOG.debug("Attempt to fix region hole in hbase:meta 
failed.", ioe));
+        createMetaEntriesFailures.forEach(ioe -> LOG
+          .debug("Attempt to fix region hole in {} failed.", 
TableName.META_TABLE_NAME, ioe));
       }
     }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
index c712f1cba67..b194bca221c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
@@ -137,8 +138,9 @@ class ReportMakingVisitor implements 
ClientMetaTableAccessor.CloseableVisitor {
     if (!Bytes.equals(metaTableRow.getRow(), ri.getRegionName())) {
       LOG.warn(
         "INCONSISTENCY: Row name is not equal to serialized info:regioninfo 
content; "
-          + "row={} {}; See if RegionInfo is referenced in another hbase:meta 
row? Delete?",
-        Bytes.toStringBinary(metaTableRow.getRow()), 
ri.getRegionNameAsString());
+          + "row={} {}; See if RegionInfo is referenced in another {} row? 
Delete?",
+        Bytes.toStringBinary(metaTableRow.getRow()), 
ri.getRegionNameAsString(),
+        TableName.META_TABLE_NAME);
       return null;
     }
     // Skip split parent region
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
index 43d69361c2d..e14579ae668 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
@@ -102,14 +103,14 @@ public class HBCKServerCrashProcedure extends 
ServerCrashProcedure {
       
MetaTableAccessor.scanMetaForTableRegions(env.getMasterServices().getConnection(),
 visitor,
         null);
     } catch (IOException ioe) {
-      LOG.warn("Failed scan of hbase:meta for 'Unknown Servers'", ioe);
+      LOG.warn("Failed scan of {} for 'Unknown Servers'", 
TableName.META_TABLE_NAME, ioe);
       return ris;
     }
     // create the server state node too
     env.getAssignmentManager().getRegionStates().createServer(getServerName());
-    LOG.info("Found {} mentions of {} in hbase:meta of OPEN/OPENING Regions: 
{}",
-      visitor.getReassigns().size(), getServerName(), 
visitor.getReassigns().stream()
-        .map(RegionInfo::getEncodedName).collect(Collectors.joining(",")));
+    LOG.info("Found {} mentions of {} in {} of OPEN/OPENING Regions: {}",
+      visitor.getReassigns().size(), getServerName(), 
TableName.META_TABLE_NAME, visitor
+        
.getReassigns().stream().map(RegionInfo::getEncodedName).collect(Collectors.joining(",")));
     return visitor.getReassigns();
   }
 
@@ -150,8 +151,8 @@ public class HBCKServerCrashProcedure extends 
ServerCrashProcedure {
         RegionState rs = new RegionState(hrl.getRegion(), state, 
hrl.getServerName());
         if (rs.isClosing()) {
           // Move region to CLOSED in hbase:meta.
-          LOG.info("Moving {} from CLOSING to CLOSED in hbase:meta",
-            hrl.getRegion().getRegionNameAsString());
+          LOG.info("Moving {} from CLOSING to CLOSED in {}",
+            hrl.getRegion().getRegionNameAsString(), 
TableName.META_TABLE_NAME);
           try {
             MetaTableAccessor.updateRegionState(this.connection, 
hrl.getRegion(),
               RegionState.State.CLOSED);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index 8b4901e90e8..c943ae1a7a1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -77,7 +77,7 @@ public class InitMetaProcedure extends 
AbstractStateMachineTableProcedure<InitMe
 
   private static TableDescriptor writeFsLayout(Path rootDir, Configuration 
conf)
     throws IOException {
-    LOG.info("BOOTSTRAP: creating hbase:meta region");
+    LOG.info("BOOTSTRAP: creating {} region", TableName.META_TABLE_NAME);
     FileSystem fs = rootDir.getFileSystem(conf);
     Path tableDir = CommonFSUtils.getTableDir(rootDir, 
TableName.META_TABLE_NAME);
     if (fs.exists(tableDir) && !deleteMetaTableDirectoryIfPartial(fs, 
tableDir)) {
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 0d8981891e5..43050b5a1a6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -108,7 +108,7 @@ public class ModifyTableProcedure extends 
AbstractStateMachineTableProcedure<Mod
       for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) {
         if (!cfs.contains(family)) {
           throw new HBaseIOException(
-            "Delete of hbase:meta column family " + Bytes.toString(family));
+            "Delete of " + TableName.META_TABLE_NAME + " column family " + 
Bytes.toString(family));
         }
       }
     }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshMetaProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshMetaProcedure.java
index b2e458cd495..4efd5769902 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshMetaProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshMetaProcedure.java
@@ -116,7 +116,7 @@ public class RefreshMetaProcedure extends 
AbstractStateMachineTableProcedure<Ref
   }
 
   private Flow executeInit(MasterProcedureEnv env) throws IOException {
-    LOG.trace("Getting current regions from hbase:meta table");
+    LOG.trace("Getting current regions from {} table", 
TableName.META_TABLE_NAME);
     try {
       currentRegions = 
getCurrentRegions(env.getMasterServices().getConnection());
       LOG.info("Found {} current regions in meta table", 
currentRegions.size());
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
index 4d6f57e22ed..1889ea61b0b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
@@ -660,21 +660,21 @@ public class BulkLoadHFilesTool extends Configured 
implements BulkLoadHFiles, To
   private void checkRegionIndexValid(int idx, List<Pair<byte[], byte[]>> 
startEndKeys,
     TableName tableName) throws IOException {
     if (idx < 0) {
-      throw new IOException("The first region info for table " + tableName
-        + " can't be found in hbase:meta.Please use hbck tool to fix it 
first.");
+      throw new IOException("The first region info for table " + tableName + " 
can't be found in "
+        + TableName.META_TABLE_NAME + ". Please use hbck tool to fix it" + " 
first.");
     } else if (
       (idx == startEndKeys.size() - 1)
         && !Bytes.equals(startEndKeys.get(idx).getSecond(), 
HConstants.EMPTY_BYTE_ARRAY)
     ) {
-      throw new IOException("The last region info for table " + tableName
-        + " can't be found in hbase:meta.Please use hbck tool to fix it 
first.");
+      throw new IOException("The last region info for table " + tableName + " 
can't be found in "
+        + TableName.META_TABLE_NAME + ". Please use hbck tool to fix it" + " 
first.");
     } else if (
       idx + 1 < startEndKeys.size() && 
!(Bytes.compareTo(startEndKeys.get(idx).getSecond(),
         startEndKeys.get(idx + 1).getFirst()) == 0)
     ) {
       throw new IOException("The endkey of one region for table " + tableName
-        + " is not equal to the startkey of the next region in hbase:meta."
-        + "Please use hbck tool to fix it first.");
+        + " is not equal to the startkey of the next region in " + 
TableName.META_TABLE_NAME + "."
+        + " Please use hbck tool to fix it first.");
     }
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index f34991279d2..30525d7129e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -153,14 +153,14 @@ public class FSTableDescriptors implements 
TableDescriptors {
     }
     TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf);
     TableDescriptor td = 
StoreFileTrackerFactory.updateWithTrackerConfigs(conf, builder.build());
-    LOG.info("Creating new hbase:meta table descriptor {}", td);
+    LOG.info("Creating new {} table descriptor {}", TableName.META_TABLE_NAME, 
td);
     TableName tableName = td.getTableName();
     Path tableDir = CommonFSUtils.getTableDir(rootdir, tableName);
     Path p = writeTableDescriptor(fs, td, tableDir, null);
     if (p == null) {
-      throw new IOException("Failed update hbase:meta table descriptor");
+      throw new IOException("Failed update " + TableName.META_TABLE_NAME + " 
table descriptor");
     }
-    LOG.info("Updated hbase:meta table descriptor to {}", p);
+    LOG.info("Updated {} table descriptor to {}", TableName.META_TABLE_NAME, 
p);
     return td;
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index c3eafa7c11d..bfdbcb3c788 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -660,17 +660,19 @@ public class HBaseFsck extends Configured implements 
Closeable {
     reportUnknownServers();
     // Check if hbase:meta is found only once and in the right place
     if (!checkMetaRegion()) {
-      String errorMsg = "hbase:meta table is not consistent. ";
+      String errorMsg = TableName.META_TABLE_NAME + " table is not consistent. 
";
       if (shouldFixAssignments()) {
-        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to 
consistent state.";
+        errorMsg += "HBCK will try fixing it. Rerun once " + 
TableName.META_TABLE_NAME + " is back "
+          + "to consistent state.";
       } else {
-        errorMsg += "Run HBCK with proper fix options to fix hbase:meta 
inconsistency.";
+        errorMsg += "Run HBCK with proper fix options to fix " + 
TableName.META_TABLE_NAME
+          + " inconsistency.";
       }
       errors.reportError(errorMsg + " Exiting...");
       return -2;
     }
     // Not going with further consistency check for tables when hbase:meta 
itself is not consistent.
-    LOG.info("Loading regionsinfo from the hbase:meta table");
+    LOG.info("Loading regionsinfo from the {} table", 
TableName.META_TABLE_NAME);
     boolean success = loadMetaEntries();
     if (!success) return -1;
 
@@ -1219,7 +1221,7 @@ public class HBaseFsck extends Configured implements 
Closeable {
    * TODO -- need to add tests for this.
    */
   private void reportEmptyMetaCells() {
-    errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: "
+    errors.print("Number of empty REGIONINFO_QUALIFIER rows in " + 
TableName.META_TABLE_NAME + ": "
       + emptyRegionInfoQualifiers.size());
     if (details) {
       for (Result r : emptyRegionInfoQualifiers) {
@@ -1371,7 +1373,7 @@ public class HBaseFsck extends Configured implements 
Closeable {
    */
   public void fixEmptyMetaCells() throws IOException {
     if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) {
-      LOG.info("Trying to fix empty REGIONINFO_QUALIFIER hbase:meta rows.");
+      LOG.info("Trying to fix empty REGIONINFO_QUALIFIER {} rows.", 
TableName.META_TABLE_NAME);
       for (Result region : emptyRegionInfoQualifiers) {
         deleteMetaRegion(region.getRow());
         errors.getErrorList().remove(ERROR_CODE.EMPTY_META_CELL);
@@ -2019,9 +2021,11 @@ public class HBaseFsck extends Configured implements 
Closeable {
       }
       RegionInfo hri = h.getRegion();
       if (hri == null) {
-        LOG.warn("Unable to close region " + hi.getRegionNameAsString()
-          + " because hbase:meta had invalid or missing " + 
HConstants.CATALOG_FAMILY_STR + ":"
-          + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier 
value.");
+        LOG.warn(
+          "Unable to close region " + hi.getRegionNameAsString()
+            + " because {} had invalid or missing " + 
HConstants.CATALOG_FAMILY_STR + ":"
+            + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier 
value.",
+          TableName.META_TABLE_NAME);
         continue;
       }
       // close the region -- close files and remove assignment
@@ -2140,8 +2144,9 @@ public class HBaseFsck extends Configured implements 
Closeable {
       assert false : "Entry for region with no data";
     } else if (!inMeta && !inHdfs && isDeployed) {
       errors.reportError(ERROR_CODE.NOT_IN_META_HDFS,
-        "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in 
hbase:meta but "
-          + "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn()));
+        "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in "
+          + TableName.META_TABLE_NAME + " but " + "deployed on "
+          + Joiner.on(", ").join(hbi.getDeployedOn()));
       if (shouldFixAssignments()) {
         undeployRegions(hbi);
       }
@@ -2155,8 +2160,9 @@ public class HBaseFsck extends Configured implements 
Closeable {
           + " got merge recently, its file(s) will be cleaned by 
CatalogJanitor later");
         return;
       }
-      errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " + 
descriptiveName
-        + " on HDFS, but not listed in hbase:meta " + "or deployed on any 
region server");
+      errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+        "Region " + descriptiveName + " on HDFS, but not listed in " + 
TableName.META_TABLE_NAME
+          + " or deployed on any region server");
       // restore region consistency of an adopted orphan
       if (shouldFixMeta()) {
         if (!hbi.isHdfsRegioninfoPresent()) {
@@ -2196,7 +2202,7 @@ public class HBaseFsck extends Configured implements 
Closeable {
             }
           }
         }
-        LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI());
+        LOG.info("Patching {} with .regioninfo: " + hbi.getHdfsHRI(), 
TableName.META_TABLE_NAME);
         int numReplicas = 
admin.getDescriptor(hbi.getTableName()).getRegionReplication();
         HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), 
hbi.getHdfsHRI(),
           
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(),
@@ -2224,7 +2230,8 @@ public class HBaseFsck extends Configured implements 
Closeable {
           return;
         }
 
-        LOG.info("Patching hbase:meta with with .regioninfo: " + 
hbi.getHdfsHRI());
+        LOG.info("Patching {} with .regioninfo: " + hbi.getHdfsHRI(),
+          TableName.META_TABLE_NAME);
         int numReplicas = 
admin.getDescriptor(hbi.getTableName()).getRegionReplication();
         HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), 
hbi.getHdfsHRI(),
           
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(),
@@ -2301,9 +2308,9 @@ public class HBaseFsck extends Configured implements 
Closeable {
       }
     } else if (inMeta && inHdfs && isMultiplyDeployed) {
       errors.reportError(ERROR_CODE.MULTI_DEPLOYED,
-        "Region " + descriptiveName + " is listed in hbase:meta on region 
server "
-          + hbi.getMetaEntry().regionServer + " but is multiply assigned to 
region servers "
-          + Joiner.on(", ").join(hbi.getDeployedOn()));
+        "Region " + descriptiveName + " is listed in " + 
TableName.META_TABLE_NAME
+          + " on region server " + hbi.getMetaEntry().regionServer + " but is 
multiply assigned"
+          + " to region servers " + Joiner.on(", ").join(hbi.getDeployedOn()));
       // If we are trying to fix the errors
       if (shouldFixAssignments()) {
         errors.print("Trying to fix assignment error...");
@@ -2313,8 +2320,8 @@ public class HBaseFsck extends Configured implements 
Closeable {
       }
     } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
       errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META,
-        "Region " + descriptiveName + " listed in hbase:meta on region server "
-          + hbi.getMetaEntry().regionServer + " but found on region server "
+        "Region " + descriptiveName + " listed in " + TableName.META_TABLE_NAME
+          + " on region server " + hbi.getMetaEntry().regionServer + " but 
found on region server "
           + hbi.getDeployedOn().get(0));
       // If we are trying to fix the errors
       if (shouldFixAssignments()) {
@@ -2614,11 +2621,12 @@ public class HBaseFsck extends Configured implements 
Closeable {
         if (servers.isEmpty()) {
           assignMetaReplica(i);
         } else if (servers.size() > 1) {
-          errors.reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, 
replicaId "
-            + metaHbckRegionInfo.getReplicaId() + " is found on more than one 
region.");
+          errors.reportError(ERROR_CODE.MULTI_META_REGION,
+            TableName.META_TABLE_NAME + ", replicaId " + 
metaHbckRegionInfo.getReplicaId()
+              + " is found on more than one region.");
           if (shouldFixAssignments()) {
-            errors.print("Trying to fix a problem with hbase:meta, replicaId "
-              + metaHbckRegionInfo.getReplicaId() + "..");
+            errors.print("Trying to fix a problem with " + 
TableName.META_TABLE_NAME
+              + ", replicaId " + metaHbckRegionInfo.getReplicaId() + "..");
             setShouldRerun();
             // try fix it (treat is a dupe assignment)
             HBaseFsckRepair.fixMultiAssignment(connection,
@@ -2631,11 +2639,11 @@ public class HBaseFsck extends Configured implements 
Closeable {
     for (Map.Entry<Integer, HbckRegionInfo> entry : metaRegions.entrySet()) {
       noProblem = false;
       errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-        "hbase:meta replicas are deployed in excess. Configured " + 
metaReplication + ", deployed "
-          + metaRegions.size());
+        TableName.META_TABLE_NAME + " replicas are deployed in excess. 
Configured "
+          + metaReplication + ", deployed " + metaRegions.size());
       if (shouldFixAssignments()) {
-        errors.print(
-          "Trying to undeploy excess replica, replicaId: " + entry.getKey() + 
" of hbase:meta..");
+        errors.print("Trying to undeploy excess replica, replicaId: " + 
entry.getKey() + " of "
+          + TableName.META_TABLE_NAME + "..");
         setShouldRerun();
         unassignMetaReplica(entry.getValue());
       }
@@ -2655,9 +2663,9 @@ public class HBaseFsck extends Configured implements 
Closeable {
   private void assignMetaReplica(int replicaId)
     throws IOException, KeeperException, InterruptedException {
     errors.reportError(ERROR_CODE.NO_META_REGION,
-      "hbase:meta, replicaId " + replicaId + " is not found on any region.");
+      TableName.META_TABLE_NAME + ", replicaId " + replicaId + " is not found 
on any region.");
     if (shouldFixAssignments()) {
-      errors.print("Trying to fix a problem with hbase:meta..");
+      errors.print("Trying to fix a problem with " + TableName.META_TABLE_NAME 
+ "..");
       setShouldRerun();
       // try to fix it (treat it as unassigned region)
       RegionInfo h = RegionReplicaUtil
@@ -2693,7 +2701,7 @@ public class HBaseFsck extends Configured implements 
Closeable {
           if (rl == null) {
             emptyRegionInfoQualifiers.add(result);
             errors.reportError(ERROR_CODE.EMPTY_META_CELL,
-              "Empty REGIONINFO_QUALIFIER found in hbase:meta");
+              "Empty REGIONINFO_QUALIFIER found in " + 
TableName.META_TABLE_NAME);
             return true;
           }
           ServerName sn = null;
@@ -2703,7 +2711,7 @@ public class HBaseFsck extends Configured implements 
Closeable {
           ) {
             emptyRegionInfoQualifiers.add(result);
             errors.reportError(ERROR_CODE.EMPTY_META_CELL,
-              "Empty REGIONINFO_QUALIFIER found in hbase:meta");
+              "Empty REGIONINFO_QUALIFIER found in " + 
TableName.META_TABLE_NAME);
             return true;
           }
           RegionInfo hri = 
rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion();
@@ -2731,7 +2739,8 @@ public class HBaseFsck extends Configured implements 
Closeable {
             } else if (previous.getMetaEntry() == null) {
               previous.setMetaEntry(m);
             } else {
-              throw new IOException("Two entries in hbase:meta are same " + 
previous);
+              throw new IOException(
+                "Two entries in " + TableName.META_TABLE_NAME + " are same " + 
previous);
             }
           }
           List<RegionInfo> mergeParents = 
CatalogFamilyFormat.getMergeRegions(result.rawCells());
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index c1f98edd75a..5f7e62bd5bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -586,13 +587,13 @@ public class RegionMover extends AbstractHBaseTool 
implements Closeable {
           // For isolating hbase:meta, it should move explicitly in Ack mode,
           // hence the forceMoveRegionByAck = true.
           if (!metaSeverName.equals(server)) {
-            LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() 
+ " is on server "
-              + metaSeverName + " moving to " + server);
+            LOG.info("Region of {} {} is on server {} moving to {}", 
TableName.META_TABLE_NAME,
+              metaRegionInfo.getEncodedName(), metaSeverName, server);
             submitRegionMovesWhileUnloading(metaSeverName, 
Collections.singletonList(server),
               movedRegions, Collections.singletonList(metaRegionInfo), true);
           } else {
-            LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() 
+ " already exists"
-              + " on server : " + server);
+            LOG.info("Region of {} {} already exists on server: {}", 
TableName.META_TABLE_NAME,
+              metaRegionInfo.getEncodedName(), server);
           }
           isolateRegionInfoList.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
         }
diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index d34600bc5d3..87feb20b770 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -21,6 +21,7 @@ import com.google.errorprone.annotations.RestrictedApi;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
@@ -165,11 +166,12 @@ public final class MetaTableLocator {
   public static void setMetaLocation(ZKWatcher zookeeper, ServerName 
serverName, int replicaId,
     RegionState.State state) throws KeeperException {
     if (serverName == null) {
-      LOG.warn("Tried to set null ServerName in hbase:meta; skipping -- 
ServerName required");
+      LOG.warn("Tried to set null ServerName in {}; skipping -- ServerName 
required",
+        TableName.META_TABLE_NAME);
       return;
     }
-    LOG.info("Setting hbase:meta replicaId={} location in ZooKeeper as {}, 
state={}", replicaId,
-      serverName, state);
+    LOG.info("Setting {} replicaId={} location in ZooKeeper as {}, state={}",
+      TableName.META_TABLE_NAME, replicaId, serverName, state);
     // Make the MetaRegionServer pb and then get its bytes and save this as
     // the znode content.
     MetaRegionServer pbrsr =
@@ -180,10 +182,10 @@ public final class MetaTableLocator {
       ZKUtil.setData(zookeeper, 
zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data);
     } catch (KeeperException.NoNodeException nne) {
       if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) {
-        LOG.debug("hbase:meta region location doesn't exist, create it");
+        LOG.debug("{} region location doesn't exist, create it", 
TableName.META_TABLE_NAME);
       } else {
-        LOG.debug(
-          "hbase:meta region location doesn't exist for replicaId=" + 
replicaId + ", create it");
+        LOG.debug("{} region location doesn't exist for replicaId={}, create 
it",
+          TableName.META_TABLE_NAME, replicaId);
       }
       ZKUtil.createAndWatch(zookeeper, 
zookeeper.getZNodePaths().getZNodeForReplica(replicaId),
         data);
@@ -233,9 +235,10 @@ public final class MetaTableLocator {
 
   public static void deleteMetaLocation(ZKWatcher zookeeper, int replicaId) 
throws KeeperException {
     if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) {
-      LOG.info("Deleting hbase:meta region location in ZooKeeper");
+      LOG.info("Deleting {} region location in ZooKeeper", 
TableName.META_TABLE_NAME);
     } else {
-      LOG.info("Deleting hbase:meta for {} region location in ZooKeeper", 
replicaId);
+      LOG.info("Deleting {} for {} region location in ZooKeeper", 
TableName.META_TABLE_NAME,
+        replicaId);
     }
     try {
       // Just delete the node. Don't need any watches.
diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java
index cd2b0b1a014..ed164d714a2 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java
@@ -32,6 +32,7 @@ import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
@@ -74,7 +75,7 @@ public final class ZKDump {
           sb.append("\n ").append(child);
         }
       }
-      sb.append("\nRegion server holding hbase:meta:");
+      sb.append("\nRegion server holding 
").append(TableName.META_TABLE_NAME).append(":");
       sb.append("\n 
").append(MetaTableLocator.getMetaRegionLocation(zkWatcher));
       int numMetaReplicas = zkWatcher.getMetaReplicaNodes().size();
       for (int i = 1; i < numMetaReplicas; i++) {

Reply via email to