(hbase-site) branch asf-site updated: INFRA-10751 Empty commit

2024-04-07 Thread git-site-role
This is an automated email from the ASF dual-hosted git repository.

git-site-role pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/hbase-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new d25a2cf3340 INFRA-10751 Empty commit
d25a2cf3340 is described below

commit d25a2cf3340aecc680c84e894ca3a2bc1203ad28
Author: jenkins 
AuthorDate: Sun Apr 7 14:44:13 2024 +

INFRA-10751 Empty commit



(hbase) branch branch-2.5 updated: HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

2024-04-07 Thread bbeaudreault
This is an automated email from the ASF dual-hosted git repository.

bbeaudreault pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new e95b3cceb6e HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)
e95b3cceb6e is described below

commit e95b3cceb6e7e7d058b36986f2b19003554c6157
Author: chandrasekhar-188k 
<154109917+chandrasekhar-1...@users.noreply.github.com>
AuthorDate: Sun Apr 7 18:44:06 2024 +0530

HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

Signed-off-by: Bryan Beaudreault 
Signed-off-by: Pankaj Kumar 
---
 .../org/apache/hadoop/hbase/master/HMaster.java| 24 +-
 .../apache/hadoop/hbase/quotas/TestQuotaAdmin.java | 11 ++
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5f5e527bdb3..d2bcbed94ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2602,16 +2602,20 @@ public class HMaster extends HRegionServer implements MasterServices {
       MasterQuotaManager quotaManager = getMasterQuotaManager();
       if (quotaManager != null) {
         if (quotaManager.isQuotaInitialized()) {
-          SpaceQuotaSnapshot currSnapshotOfTable =
-            QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
-          if (currSnapshotOfTable != null) {
-            SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
-            if (
-              quotaStatus.isInViolation()
-                && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
-            ) {
-              throw new AccessDeniedException("Enabling the table '" + tableName
-                + "' is disallowed due to a violated space quota.");
+          // skip checking quotas for system tables, see:
+          // https://issues.apache.org/jira/browse/HBASE-28183
+          if (!tableName.isSystemTable()) {
+            SpaceQuotaSnapshot currSnapshotOfTable =
+              QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
+            if (currSnapshotOfTable != null) {
+              SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
+              if (
+                quotaStatus.isInViolation()
+                  && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
+              ) {
+                throw new AccessDeniedException("Enabling the table '" + tableName
+                  + "' is disallowed due to a violated space quota.");
+              }
             }
           }
         } else if (LOG.isTraceEnabled()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
index 1f9a016eb98..a3b2929b9aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -994,4 +994,15 @@ public class TestQuotaAdmin {
     }
     return quotaSettingCount;
   }
+
+  @Test
+  public void testQuotaTableDisableAndEnable() throws Exception {
+    final Admin admin = TEST_UTIL.getAdmin();
+    admin.disableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    try {
+      admin.enableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    } catch (Exception ex) {
+      fail("Got an exception while enabling table: " + QuotaUtil.QUOTA_TABLE_NAME);
+    }
+  }
 }
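
For reference, the guard added above skips the space-quota check for system tables so that hbase:quota itself can always be re-enabled; user tables that violate a quota under the DISABLE policy remain blocked. Below is a minimal standalone sketch of that pre-enable check built from the same quota classes the patch touches. The helper class name and the caller-supplied Connection are illustrative assumptions, not part of the commit.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

final class EnableTableQuotaCheck {
  /** Returns true when enabling the table should be refused because of a DISABLE space quota. */
  static boolean enableBlockedByQuota(Connection conn, TableName tableName) throws IOException {
    if (tableName.isSystemTable()) {
      // HBASE-28183: never block system tables such as hbase:quota, otherwise the quota
      // table itself could not be re-enabled once it has been disabled.
      return false;
    }
    SpaceQuotaSnapshot snapshot =
      QuotaTableUtil.getCurrentSnapshotFromQuotaTable(conn, tableName);
    if (snapshot == null) {
      return false;
    }
    SpaceQuotaStatus status = snapshot.getQuotaStatus();
    return status.isInViolation()
      && SpaceViolationPolicy.DISABLE == status.getPolicy().orElse(null);
  }
}

Calling enableBlockedByQuota(conn, TableName.valueOf("hbase:quota")) is expected to return false, which is the behaviour the new testQuotaTableDisableAndEnable test asserts through the Admin API.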



(hbase) branch branch-2.6 updated: HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

2024-04-07 Thread bbeaudreault
This is an automated email from the ASF dual-hosted git repository.

bbeaudreault pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 21381882929 HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)
21381882929 is described below

commit 21381882929a00b10972b0d8c1c7c06e5690904b
Author: chandrasekhar-188k 
<154109917+chandrasekhar-1...@users.noreply.github.com>
AuthorDate: Sun Apr 7 18:44:06 2024 +0530

HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

Signed-off-by: Bryan Beaudreault 
Signed-off-by: Pankaj Kumar 
---
 .../org/apache/hadoop/hbase/master/HMaster.java| 24 +-
 .../apache/hadoop/hbase/quotas/TestQuotaAdmin.java | 11 ++
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 4195cc7f3f5..61e3048d9f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2663,16 +2663,20 @@ public class HMaster extends HRegionServer implements MasterServices {
       MasterQuotaManager quotaManager = getMasterQuotaManager();
       if (quotaManager != null) {
         if (quotaManager.isQuotaInitialized()) {
-          SpaceQuotaSnapshot currSnapshotOfTable =
-            QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
-          if (currSnapshotOfTable != null) {
-            SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
-            if (
-              quotaStatus.isInViolation()
-                && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
-            ) {
-              throw new AccessDeniedException("Enabling the table '" + tableName
-                + "' is disallowed due to a violated space quota.");
+          // skip checking quotas for system tables, see:
+          // https://issues.apache.org/jira/browse/HBASE-28183
+          if (!tableName.isSystemTable()) {
+            SpaceQuotaSnapshot currSnapshotOfTable =
+              QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
+            if (currSnapshotOfTable != null) {
+              SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
+              if (
+                quotaStatus.isInViolation()
+                  && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
+              ) {
+                throw new AccessDeniedException("Enabling the table '" + tableName
+                  + "' is disallowed due to a violated space quota.");
+              }
             }
           }
         } else if (LOG.isTraceEnabled()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
index 1f9a016eb98..a3b2929b9aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -994,4 +994,15 @@ public class TestQuotaAdmin {
     }
     return quotaSettingCount;
   }
+
+  @Test
+  public void testQuotaTableDisableAndEnable() throws Exception {
+    final Admin admin = TEST_UTIL.getAdmin();
+    admin.disableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    try {
+      admin.enableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    } catch (Exception ex) {
+      fail("Got an exception while enabling table: " + QuotaUtil.QUOTA_TABLE_NAME);
+    }
+  }
 }



(hbase) branch branch-2 updated: HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

2024-04-07 Thread bbeaudreault
This is an automated email from the ASF dual-hosted git repository.

bbeaudreault pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 4c3e9c9ae61 HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)
4c3e9c9ae61 is described below

commit 4c3e9c9ae6180eebd52c9e31baaffda5752e2b96
Author: chandrasekhar-188k 
<154109917+chandrasekhar-1...@users.noreply.github.com>
AuthorDate: Sun Apr 7 18:44:06 2024 +0530

HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

Signed-off-by: Bryan Beaudreault 
Signed-off-by: Pankaj Kumar 
---
 .../org/apache/hadoop/hbase/master/HMaster.java| 24 +-
 .../apache/hadoop/hbase/quotas/TestQuotaAdmin.java | 11 ++
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 4195cc7f3f5..61e3048d9f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2663,16 +2663,20 @@ public class HMaster extends HRegionServer implements MasterServices {
       MasterQuotaManager quotaManager = getMasterQuotaManager();
       if (quotaManager != null) {
         if (quotaManager.isQuotaInitialized()) {
-          SpaceQuotaSnapshot currSnapshotOfTable =
-            QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
-          if (currSnapshotOfTable != null) {
-            SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
-            if (
-              quotaStatus.isInViolation()
-                && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
-            ) {
-              throw new AccessDeniedException("Enabling the table '" + tableName
-                + "' is disallowed due to a violated space quota.");
+          // skip checking quotas for system tables, see:
+          // https://issues.apache.org/jira/browse/HBASE-28183
+          if (!tableName.isSystemTable()) {
+            SpaceQuotaSnapshot currSnapshotOfTable =
+              QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
+            if (currSnapshotOfTable != null) {
+              SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
+              if (
+                quotaStatus.isInViolation()
+                  && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
+              ) {
+                throw new AccessDeniedException("Enabling the table '" + tableName
+                  + "' is disallowed due to a violated space quota.");
+              }
             }
           }
         } else if (LOG.isTraceEnabled()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
index 1f9a016eb98..a3b2929b9aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -994,4 +994,15 @@ public class TestQuotaAdmin {
     }
     return quotaSettingCount;
   }
+
+  @Test
+  public void testQuotaTableDisableAndEnable() throws Exception {
+    final Admin admin = TEST_UTIL.getAdmin();
+    admin.disableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    try {
+      admin.enableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    } catch (Exception ex) {
+      fail("Got an exception while enabling table: " + QuotaUtil.QUOTA_TABLE_NAME);
+    }
+  }
 }



(hbase) branch branch-3 updated: HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

2024-04-07 Thread bbeaudreault
This is an automated email from the ASF dual-hosted git repository.

bbeaudreault pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new b75fe0dc5c8 HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)
b75fe0dc5c8 is described below

commit b75fe0dc5c89df7177cede0b554e22a1ec510af1
Author: chandrasekhar-188k 
<154109917+chandrasekhar-1...@users.noreply.github.com>
AuthorDate: Sun Apr 7 18:44:06 2024 +0530

HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

Signed-off-by: Bryan Beaudreault 
Signed-off-by: Pankaj Kumar 
---
 .../org/apache/hadoop/hbase/master/HMaster.java| 24 +-
 .../apache/hadoop/hbase/quotas/TestQuotaAdmin.java | 11 ++
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ddef3e27b40..0dc5b61cba8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2728,16 +2728,20 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements MasterServices {
       MasterQuotaManager quotaManager = getMasterQuotaManager();
       if (quotaManager != null) {
         if (quotaManager.isQuotaInitialized()) {
-          SpaceQuotaSnapshot currSnapshotOfTable =
-            QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
-          if (currSnapshotOfTable != null) {
-            SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
-            if (
-              quotaStatus.isInViolation()
-                && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
-            ) {
-              throw new AccessDeniedException("Enabling the table '" + tableName
-                + "' is disallowed due to a violated space quota.");
+          // skip checking quotas for system tables, see:
+          // https://issues.apache.org/jira/browse/HBASE-28183
+          if (!tableName.isSystemTable()) {
+            SpaceQuotaSnapshot currSnapshotOfTable =
+              QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
+            if (currSnapshotOfTable != null) {
+              SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
+              if (
+                quotaStatus.isInViolation()
+                  && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
+              ) {
+                throw new AccessDeniedException("Enabling the table '" + tableName
+                  + "' is disallowed due to a violated space quota.");
+              }
             }
           }
         } else if (LOG.isTraceEnabled()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
index c577e9aceac..817f135f0c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -994,4 +994,15 @@ public class TestQuotaAdmin {
     }
     return quotaSettingCount;
   }
+
+  @Test
+  public void testQuotaTableDisableAndEnable() throws Exception {
+    final Admin admin = TEST_UTIL.getAdmin();
+    admin.disableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    try {
+      admin.enableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    } catch (Exception ex) {
+      fail("Got an exception while enabling table: " + QuotaUtil.QUOTA_TABLE_NAME);
+    }
+  }
 }



(hbase) branch master updated: HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

2024-04-07 Thread bbeaudreault
This is an automated email from the ASF dual-hosted git repository.

bbeaudreault pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 3340d8dd07e HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)
3340d8dd07e is described below

commit 3340d8dd07eba18b71dcc44cd1fa7633ac630a5e
Author: chandrasekhar-188k 
<154109917+chandrasekhar-1...@users.noreply.github.com>
AuthorDate: Sun Apr 7 18:44:06 2024 +0530

HBASE-28183 It's impossible to re-enable the quota table if it gets disabled (#5691)

Signed-off-by: Bryan Beaudreault 
Signed-off-by: Pankaj Kumar 
---
 .../org/apache/hadoop/hbase/master/HMaster.java| 24 +-
 .../apache/hadoop/hbase/quotas/TestQuotaAdmin.java | 11 ++
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ddef3e27b40..0dc5b61cba8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2728,16 +2728,20 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements MasterServices {
       MasterQuotaManager quotaManager = getMasterQuotaManager();
       if (quotaManager != null) {
         if (quotaManager.isQuotaInitialized()) {
-          SpaceQuotaSnapshot currSnapshotOfTable =
-            QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
-          if (currSnapshotOfTable != null) {
-            SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
-            if (
-              quotaStatus.isInViolation()
-                && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
-            ) {
-              throw new AccessDeniedException("Enabling the table '" + tableName
-                + "' is disallowed due to a violated space quota.");
+          // skip checking quotas for system tables, see:
+          // https://issues.apache.org/jira/browse/HBASE-28183
+          if (!tableName.isSystemTable()) {
+            SpaceQuotaSnapshot currSnapshotOfTable =
+              QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
+            if (currSnapshotOfTable != null) {
+              SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
+              if (
+                quotaStatus.isInViolation()
+                  && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
+              ) {
+                throw new AccessDeniedException("Enabling the table '" + tableName
+                  + "' is disallowed due to a violated space quota.");
+              }
             }
           }
         } else if (LOG.isTraceEnabled()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
index c577e9aceac..817f135f0c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -994,4 +994,15 @@ public class TestQuotaAdmin {
     }
     return quotaSettingCount;
   }
+
+  @Test
+  public void testQuotaTableDisableAndEnable() throws Exception {
+    final Admin admin = TEST_UTIL.getAdmin();
+    admin.disableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    try {
+      admin.enableTable(QuotaUtil.QUOTA_TABLE_NAME);
+    } catch (Exception ex) {
+      fail("Got an exception while enabling table: " + QuotaUtil.QUOTA_TABLE_NAME);
+    }
+  }
 }



(hbase) branch branch-3 updated: HBASE-28478 Remove the hbase1 compatible code in FixedFileTrailer (#5788)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new eea740cf5cf HBASE-28478 Remove the hbase1 compatible code in FixedFileTrailer (#5788)
eea740cf5cf is described below

commit eea740cf5cf35b09badf6f9f3e03fe0416659816
Author: Duo Zhang 
AuthorDate: Sun Apr 7 18:40:13 2024 +0800

HBASE-28478 Remove the hbase1 compatible code in FixedFileTrailer (#5788)

Signed-off-by: Bryan Beaudreault 
(cherry picked from commit eeebbdfa723dd49aeaf4a6bc061382752002c5a6)
---
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java| 42 +---
 .../hbase/io/hfile/TestFixedFileTrailer.java   | 76 +++---
 2 files changed, 27 insertions(+), 91 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 2a405197a48..eaf79f31103 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -26,7 +26,6 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.InnerStoreCellComparator;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MetaCellComparator;
@@ -206,8 +205,7 @@ public class FixedFileTrailer {
       .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount)
       .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount)
       .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset)
-      .setLastDataBlockOffset(lastDataBlockOffset)
-      .setComparatorClassName(getHBase1CompatibleName(comparatorClassName))
+      .setLastDataBlockOffset(lastDataBlockOffset).setComparatorClassName(comparatorClassName)
       .setCompressionCodec(compressionCodec.ordinal());
     if (encryptionKey != null) {
       builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
@@ -216,8 +214,7 @@ public class FixedFileTrailer {
   }
 
   /**
-   * Write trailer data as protobuf. NOTE: we run a translation on the comparator name and will
-   * serialize the old hbase-1.x where it makes sense. See {@link #getHBase1CompatibleName(String)}.
+   * Write trailer data as protobuf.
    */
   void serializeAsPB(DataOutputStream output) throws IOException {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -553,41 +550,6 @@ public class FixedFileTrailer {
     }
   }
 
-  /**
-   * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather than
-   * the new name; writing the new name will make it so newly-written hfiles are not parseable by
-   * hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters
-   * reading hbase-2.x produce.
-   * <p>
-   * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare KeyValues. In
-   * hbase-2.x they were renamed making use of the more generic 'Cell' nomenclature to indicate that
-   * we intend to move away from KeyValues post hbase-2. A naming change is not reason enough to
-   * make it so hbase-1.x cannot read hbase-2.x files given the structure goes unchanged (hfile v3).
-   * So, lets write the old names for Comparators into the hfile tails in hbase-2. Here is where we
-   * do the translation. {@link #getComparatorClass(String)} does translation going the other way.
-   * <p>
-   * The translation is done on the serialized Protobuf only.
-   * <p>
-   * @param comparator String class name of the Comparator used in this hfile.
-   * @return What to store in the trailer as our comparator name.
-   * @see #getComparatorClass(String)
-   * @since hbase-2.0.0.
-   * @deprecated Since hbase-2.0.0. Will be removed in hbase-3.0.0.
-   */
-  @Deprecated
-  private String getHBase1CompatibleName(final String comparator) {
-    if (
-      comparator.equals(CellComparatorImpl.class.getName())
-        || comparator.equals(InnerStoreCellComparator.class.getName())
-    ) {
-      return KeyValue.COMPARATOR.getClass().getName();
-    }
-    if (comparator.equals(MetaCellComparator.class.getName())) {
-      return KeyValue.META_COMPARATOR.getClass().getName();
-    }
-    return comparator;
-  }
-
   @SuppressWarnings("unchecked")
   private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
     throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
index 442f62e

(hbase) branch master updated: HBASE-28478 Remove the hbase1 compatible code in FixedFileTrailer (#5788)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new eeebbdfa723 HBASE-28478 Remove the hbase1 compatible code in FixedFileTrailer (#5788)
eeebbdfa723 is described below

commit eeebbdfa723dd49aeaf4a6bc061382752002c5a6
Author: Duo Zhang 
AuthorDate: Sun Apr 7 18:40:13 2024 +0800

HBASE-28478 Remove the hbase1 compatible code in FixedFileTrailer (#5788)

Signed-off-by: Bryan Beaudreault 
---
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java| 42 +---
 .../hbase/io/hfile/TestFixedFileTrailer.java   | 76 +++---
 2 files changed, 27 insertions(+), 91 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 2a405197a48..eaf79f31103 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -26,7 +26,6 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.InnerStoreCellComparator;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MetaCellComparator;
@@ -206,8 +205,7 @@ public class FixedFileTrailer {
       .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount)
       .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount)
       .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset)
-      .setLastDataBlockOffset(lastDataBlockOffset)
-      .setComparatorClassName(getHBase1CompatibleName(comparatorClassName))
+      .setLastDataBlockOffset(lastDataBlockOffset).setComparatorClassName(comparatorClassName)
      .setCompressionCodec(compressionCodec.ordinal());
     if (encryptionKey != null) {
       builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
@@ -216,8 +214,7 @@ public class FixedFileTrailer {
   }
 
   /**
-   * Write trailer data as protobuf. NOTE: we run a translation on the comparator name and will
-   * serialize the old hbase-1.x where it makes sense. See {@link #getHBase1CompatibleName(String)}.
+   * Write trailer data as protobuf.
    */
   void serializeAsPB(DataOutputStream output) throws IOException {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -553,41 +550,6 @@ public class FixedFileTrailer {
     }
   }
 
-  /**
-   * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather than
-   * the new name; writing the new name will make it so newly-written hfiles are not parseable by
-   * hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters
-   * reading hbase-2.x produce.
-   * <p>
-   * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare KeyValues. In
-   * hbase-2.x they were renamed making use of the more generic 'Cell' nomenclature to indicate that
-   * we intend to move away from KeyValues post hbase-2. A naming change is not reason enough to
-   * make it so hbase-1.x cannot read hbase-2.x files given the structure goes unchanged (hfile v3).
-   * So, lets write the old names for Comparators into the hfile tails in hbase-2. Here is where we
-   * do the translation. {@link #getComparatorClass(String)} does translation going the other way.
-   * <p>
-   * The translation is done on the serialized Protobuf only.
-   * <p>
-   * @param comparator String class name of the Comparator used in this hfile.
-   * @return What to store in the trailer as our comparator name.
-   * @see #getComparatorClass(String)
-   * @since hbase-2.0.0.
-   * @deprecated Since hbase-2.0.0. Will be removed in hbase-3.0.0.
-   */
-  @Deprecated
-  private String getHBase1CompatibleName(final String comparator) {
-    if (
-      comparator.equals(CellComparatorImpl.class.getName())
-        || comparator.equals(InnerStoreCellComparator.class.getName())
-    ) {
-      return KeyValue.COMPARATOR.getClass().getName();
-    }
-    if (comparator.equals(MetaCellComparator.class.getName())) {
-      return KeyValue.META_COMPARATOR.getClass().getName();
-    }
-    return comparator;
-  }
-
   @SuppressWarnings("unchecked")
   private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
     throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
index 442f62e505d..3bad8d46a14 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/
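
The getHBase1CompatibleName() method removed above used to translate hbase-2 comparator class names back to their hbase-1 equivalents before serializing the trailer; with it gone, newly written HFiles record the hbase-2 comparator names directly. A hedged diagnostic sketch for checking what an existing file's trailer records follows. FixedFileTrailer is an internal HBase class and readFromStream() is used here purely for inspection, so treat this as an illustration under that assumption; the file path argument is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;

public final class PrintTrailerComparator {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path hfile = new Path(args[0]); // path to a single HFile
    FileSystem fs = hfile.getFileSystem(conf);
    long fileSize = fs.getFileStatus(hfile).getLen();
    try (FSDataInputStream in = fs.open(hfile)) {
      FixedFileTrailer trailer = FixedFileTrailer.readFromStream(in, fileSize);
      // Files written after this change carry the hbase-2 comparator class name directly,
      // rather than a name translated back to the hbase-1 form.
      System.out.println("comparator recorded in trailer: " + trailer.getComparatorClassName());
    }
  }
}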

(hbase) branch branch-2.6 updated: HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 3dd0b6d56be HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)
3dd0b6d56be is described below

commit 3dd0b6d56bebb41b61d0c6188e9e680f02d71a26
Author: guluo 
AuthorDate: Sun Apr 7 17:27:28 2024 +0800

HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

Signed-off-by: Duo Zhang 
Reviewed-by: Vineet Kumar Maheshwari 
(cherry picked from commit e5d59cadc5dc9ac7d6f1be1a8defc05862ad)
---
 .../hadoop/hbase/client/MutableRegionInfo.java |  2 +-
 .../master/procedure/CreateTableProcedure.java | 13 --
 .../master/procedure/TestCreateTableProcedure.java | 29 ++
 3 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index a9382f3a9be..4217201b85e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -96,7 +96,7 @@ class MutableRegionInfo implements RegionInfo {
 
   private static int checkReplicaId(int regionId) {
     if (regionId > MAX_REPLICA_ID) {
-      throw new IllegalArgumentException("ReplicaId cannot be greater than" + MAX_REPLICA_ID);
+      throw new IllegalArgumentException("ReplicaId cannot be greater than " + MAX_REPLICA_ID);
     }
     return regionId;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 47122962502..c9155301f24 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -56,6 +56,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.C
 public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState> {
   private static final Logger LOG = LoggerFactory.getLogger(CreateTableProcedure.class);
 
+  private static final int MAX_REGION_REPLICATION = 0x10000;
+
   private TableDescriptor tableDescriptor;
   private List<RegionInfo> newRegions;
 
@@ -84,10 +86,10 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState>
+    if (tableDescriptor.getRegionReplication() > MAX_REGION_REPLICATION) {
+      setFailure("master-create-table", new IllegalArgumentException(
+        "Region Replication cannot exceed " + MAX_REGION_REPLICATION + "."));
+      return false;
+    }
+
     // check for store file tracker configurations
     StoreFileTrackerValidationUtils.checkForCreateTable(env.getMasterConfiguration(),
       tableDescriptor);
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 618a4a45a04..bed41f4da86 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import static 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -284,4 +285,32 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
   new CreateTableProcedureOnHDFSFailure(procExec.getEnvironment(), htd, 
regions));
 ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
   }
+
+  @Test
+  public void testCreateTableWithManyRegionReplication() throws IOException {
+final int EXCEED_MAX_REGION_REPLICATION = 0x10001;
+TableName tableName = TableName.valueOf(name.getMethodName());
+ProcedureExecutor procExec = 
getMasterProcedureExecutor();
+
+TableDescriptor tableWithManyRegionReplication = 
TableDescriptorBuilder.newBuilder(tableName)
+  
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build())
+  .setRegionReplication(EXCEED_MAX_REGION_REPLICATION).build();
+RegionInfo[] regions01 =
+  ModifyRegionUtils.createRegionInfos(tableWithManyRegionReplication, 
null);
+long procId01 = ProcedureTestingUtility.submitAndWait(procExec, new 
CreateTableProcedure(
+  pro
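
The change above makes CreateTableProcedure reject an over-large region replication during its prepare step, so the failed create no longer leaves behind a table that "already exists" yet cannot be used. A minimal client-side sketch of the scenario the new test exercises is below; the connection setup, table name, and column family are illustrative assumptions, and the exact exception surfaced to the client may be wrapped.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class CreateTableWithTooManyReplicas {
  static void tryCreate(Connection conn) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1"))
      .setRegionReplication(0x10001) // above the limit checked by the patched prepare step
      .build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(desc); // with the fix this is rejected by the master
      System.out.println("unexpected: table was created");
    } catch (Exception e) {
      // and, per HBASE-28481, no half-created table is left behind afterwards
      System.out.println("create rejected: " + e.getMessage());
    }
  }
}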

(hbase) branch branch-2 updated: HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new af527053103 HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)
af527053103 is described below

commit af5270531030e8008530b1dc363b04a8dc355704
Author: guluo 
AuthorDate: Sun Apr 7 17:27:28 2024 +0800

HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

Signed-off-by: Duo Zhang 
Reviewed-by: Vineet Kumar Maheshwari 
(cherry picked from commit e5d59cadc5dc9ac7d6f1be1a8defc05862ad)
---
 .../hadoop/hbase/client/MutableRegionInfo.java |  2 +-
 .../master/procedure/CreateTableProcedure.java | 13 --
 .../master/procedure/TestCreateTableProcedure.java | 29 ++
 3 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index a9382f3a9be..4217201b85e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -96,7 +96,7 @@ class MutableRegionInfo implements RegionInfo {
 
   private static int checkReplicaId(int regionId) {
     if (regionId > MAX_REPLICA_ID) {
-      throw new IllegalArgumentException("ReplicaId cannot be greater than" + MAX_REPLICA_ID);
+      throw new IllegalArgumentException("ReplicaId cannot be greater than " + MAX_REPLICA_ID);
     }
     return regionId;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 47122962502..c9155301f24 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -56,6 +56,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.C
 public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState> {
   private static final Logger LOG = LoggerFactory.getLogger(CreateTableProcedure.class);
 
+  private static final int MAX_REGION_REPLICATION = 0x10000;
+
   private TableDescriptor tableDescriptor;
   private List<RegionInfo> newRegions;
 
@@ -84,10 +86,10 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState>
+    if (tableDescriptor.getRegionReplication() > MAX_REGION_REPLICATION) {
+      setFailure("master-create-table", new IllegalArgumentException(
+        "Region Replication cannot exceed " + MAX_REGION_REPLICATION + "."));
+      return false;
+    }
+
     // check for store file tracker configurations
     StoreFileTrackerValidationUtils.checkForCreateTable(env.getMasterConfiguration(),
       tableDescriptor);
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 618a4a45a04..bed41f4da86 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import static 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -284,4 +285,32 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
   new CreateTableProcedureOnHDFSFailure(procExec.getEnvironment(), htd, 
regions));
 ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
   }
+
+  @Test
+  public void testCreateTableWithManyRegionReplication() throws IOException {
+final int EXCEED_MAX_REGION_REPLICATION = 0x10001;
+TableName tableName = TableName.valueOf(name.getMethodName());
+ProcedureExecutor procExec = 
getMasterProcedureExecutor();
+
+TableDescriptor tableWithManyRegionReplication = 
TableDescriptorBuilder.newBuilder(tableName)
+  
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build())
+  .setRegionReplication(EXCEED_MAX_REGION_REPLICATION).build();
+RegionInfo[] regions01 =
+  ModifyRegionUtils.createRegionInfos(tableWithManyRegionReplication, 
null);
+long procId01 = ProcedureTestingUtility.submitAndWait(procExec, new 
CreateTableProcedure(
+  procExe

(hbase) 01/02: HBASE-28457 Introduce a version field in file based tracker record (#5784)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 479c756291f27b0b5c004ee45098948140a8fc6e
Author: Duo Zhang 
AuthorDate: Sun Apr 7 16:43:50 2024 +0800

HBASE-28457 Introduce a version field in file based tracker record (#5784)

Signed-off-by: Wellington Chevreuil 
(cherry picked from commit c1012a9ebec9bb9fcc09f2d6fdc78e74cc44d562)
---
 .../src/main/protobuf/StoreFileTracker.proto   |  1 +
 .../storefiletracker/StoreFileListFile.java| 57 --
 .../storefiletracker/TestStoreFileListFile.java| 16 ++
 3 files changed, 60 insertions(+), 14 deletions(-)

diff --git a/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto 
b/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
index 2a269ea4ac4..001cb3ea233 100644
--- a/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
@@ -33,4 +33,5 @@ message StoreFileEntry {
 message StoreFileList {
   required uint64 timestamp = 1;
   repeated StoreFileEntry store_file = 2;
+  optional uint64 version = 3 [default = 1];
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
index c8cf55aeba6..48a38038914 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.regionserver.storefiletracker;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -47,15 +48,24 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.
  * without error on partial bytes if you stop at some special points, but the return message will
  * have incorrect field value. We should try our best to prevent this happens because loading an
  * incorrect store file list file usually leads to data loss.
+ * <p>
+ * To prevent failing silently while downgrading, where we may miss some newly introduced fields in
+ * {@link StoreFileList} which are necessary, we introduce a 'version' field in
+ * {@link StoreFileList}. If we find out that we are reading a {@link StoreFileList} with higher
+ * version, we will fail immediately and tell users that you need extra steps while downgrading, to
+ * prevent potential data loss.
  */
 @InterfaceAudience.Private
 class StoreFileListFile {
 
   private static final Logger LOG = LoggerFactory.getLogger(StoreFileListFile.class);
 
+  // the current version for StoreFileList
+  static final long VERSION = 1;
+
   static final String TRACK_FILE_DIR = ".filelist";
 
-  private static final String TRACK_FILE = "f1";
+  static final String TRACK_FILE = "f1";
 
   private static final String TRACK_FILE_ROTATE = "f2";
 
@@ -101,7 +111,18 @@ class StoreFileListFile {
       throw new IOException(
         "Checksum mismatch, expected " + expectedChecksum + ", actual " + calculatedChecksum);
     }
-    return StoreFileList.parseFrom(data);
+    StoreFileList storeFileList = StoreFileList.parseFrom(data);
+    if (storeFileList.getVersion() > VERSION) {
+      LOG.error(
+        "The loaded store file list is in version {}, which is higher than expected"
+          + " version {}. Stop loading to prevent potential data loss. This usually because your"
+          + " cluster is downgraded from a newer version. You need extra steps before downgrading,"
+          + " like switching back to default store file tracker.",
+        storeFileList.getVersion(), VERSION);
+      throw new IOException("Higher store file list version detected, expected " + VERSION
+        + ", got " + storeFileList.getVersion());
+    }
+    return storeFileList;
   }
 
   private int select(StoreFileList[] lists) {
@@ -134,30 +155,38 @@ class StoreFileListFile {
     return lists[winnerIndex];
   }
 
+  @RestrictedApi(explanation = "Should only be called in tests", link = "",
+      allowedOnPath = ".*/StoreFileListFile.java|.*/src/test/.*")
+  static void write(FileSystem fs, Path file, StoreFileList storeFileList) throws IOException {
+    byte[] data = storeFileList.toByteArray();
+    CRC32 crc32 = new CRC32();
+    crc32.update(data);
+    int checksum = (int) crc32.getValue();
+    // 4 bytes length at the beginning, plus 4 bytes checksum
+    try (FSDataOutputStream out = fs.create(file, true)) {
+      out.writeInt(data.length);
+      out.write(data);
+      out.writeInt(checksum);
+    }
+  }
+
   /**
-   * We will set the timestamp in this method so just pass the builder in
+   * We
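
As the write() method above shows, a store file list is persisted as a 4-byte length prefix, the serialized StoreFileList bytes, and a 4-byte CRC32 checksum; readers additionally refuse lists whose version field is higher than the VERSION the running code understands. Below is a minimal, self-contained sketch of that framing using only the JDK. The class and method names are illustrative, and the version guard against the parsed protobuf is summarized in a comment rather than reimplemented here.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;

final class LengthCrcFraming {

  // Frame an already-serialized record: 4-byte length, payload bytes, 4-byte CRC32 of the payload.
  static byte[] frame(byte[] payload) throws IOException {
    CRC32 crc = new CRC32();
    crc.update(payload);
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bos)) {
      out.writeInt(payload.length);
      out.write(payload);
      out.writeInt((int) crc.getValue());
    }
    return bos.toByteArray();
  }

  // Reverse of frame(): verify the checksum and hand back the payload. In StoreFileListFile the
  // payload is then parsed as a StoreFileList protobuf, and its getVersion() is compared against
  // the supported VERSION, failing fast when the file was written by a newer release.
  static byte[] unframe(byte[] framed) throws IOException {
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(framed))) {
      byte[] payload = new byte[in.readInt()];
      in.readFully(payload);
      int expectedChecksum = in.readInt();
      CRC32 crc = new CRC32();
      crc.update(payload);
      if ((int) crc.getValue() != expectedChecksum) {
        throw new IOException("Checksum mismatch");
      }
      return payload;
    }
  }
}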

(hbase) branch branch-2.5 updated (afd4da05914 -> 33fe2047837)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a change to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


 from afd4da05914 HBASE-28366 Mis-order of SCP and regionServerReport results into region inconsistencies (#5774)
  new 479c756291f HBASE-28457 Introduce a version field in file based tracker record (#5784)
  new 33fe2047837 HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/hbase/client/MutableRegionInfo.java |  2 +-
 .../src/main/protobuf/StoreFileTracker.proto   |  1 +
 .../master/procedure/CreateTableProcedure.java | 13 -
 .../storefiletracker/StoreFileListFile.java| 57 --
 .../master/procedure/TestCreateTableProcedure.java | 29 +++
 .../storefiletracker/TestStoreFileListFile.java| 16 ++
 6 files changed, 101 insertions(+), 17 deletions(-)



(hbase) branch branch-3 updated: HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 39864e92841 HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)
39864e92841 is described below

commit 39864e92841d5471d156f5e2bac90d14a845d42c
Author: guluo 
AuthorDate: Sun Apr 7 17:27:28 2024 +0800

HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

Signed-off-by: Duo Zhang 
Reviewed-by: Vineet Kumar Maheshwari 
(cherry picked from commit e5d59cadc5dc9ac7d6f1be1a8defc05862ad)
---
 .../hadoop/hbase/client/MutableRegionInfo.java |  2 +-
 .../master/procedure/CreateTableProcedure.java | 14 +--
 .../master/procedure/TestCreateTableProcedure.java | 29 ++
 3 files changed, 42 insertions(+), 3 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index a9382f3a9be..4217201b85e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -96,7 +96,7 @@ class MutableRegionInfo implements RegionInfo {
 
   private static int checkReplicaId(int regionId) {
     if (regionId > MAX_REPLICA_ID) {
-      throw new IllegalArgumentException("ReplicaId cannot be greater than" + MAX_REPLICA_ID);
+      throw new IllegalArgumentException("ReplicaId cannot be greater than " + MAX_REPLICA_ID);
     }
     return regionId;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 533b6fffcc4..17998fec7bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -56,6 +56,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.C
 public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState> {
   private static final Logger LOG = LoggerFactory.getLogger(CreateTableProcedure.class);
 
+  private static final int MAX_REGION_REPLICATION = 0x10000;
+
   private TableDescriptor tableDescriptor;
   private List<RegionInfo> newRegions;
 
@@ -84,10 +86,10 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState>
+    if (tableDescriptor.getRegionReplication() > MAX_REGION_REPLICATION) {
+      setFailure("master-create-table", new IllegalArgumentException(
+        "Region Replication cannot exceed " + MAX_REGION_REPLICATION + "."));
+      return false;
+    }
+
     if (!tableName.isSystemTable()) {
       // do not check rs group for system tables as we may block the bootstrap.
       Supplier<String> forWhom = () -> "table " + tableName;
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 618a4a45a04..bed41f4da86 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import static 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -284,4 +285,32 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
   new CreateTableProcedureOnHDFSFailure(procExec.getEnvironment(), htd, 
regions));
 ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
   }
+
+  @Test
+  public void testCreateTableWithManyRegionReplication() throws IOException {
+final int EXCEED_MAX_REGION_REPLICATION = 0x10001;
+TableName tableName = TableName.valueOf(name.getMethodName());
+ProcedureExecutor procExec = 
getMasterProcedureExecutor();
+
+TableDescriptor tableWithManyRegionReplication = 
TableDescriptorBuilder.newBuilder(tableName)
+  
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build())
+  .setRegionReplication(EXCEED_MAX_REGION_REPLICATION).build();
+RegionInfo[] regions01 =
+  ModifyRegionUtils.createRegionInfos(tableWithManyRegionReplication, 
null);
+long procId01 = ProcedureTestingUtility.submitAndWait(procExec, new 
CreateTableProcedure(
+   

(hbase) 02/02: HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 33fe2047837f60f91ed509ec1e285cfe5633407e
Author: guluo 
AuthorDate: Sun Apr 7 17:27:28 2024 +0800

HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

Signed-off-by: Duo Zhang 
Reviewed-by: Vineet Kumar Maheshwari 
(cherry picked from commit e5d59cadc5dc9ac7d6f1be1a8defc05862ad)
---
 .../hadoop/hbase/client/MutableRegionInfo.java |  2 +-
 .../master/procedure/CreateTableProcedure.java | 13 --
 .../master/procedure/TestCreateTableProcedure.java | 29 ++
 3 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index a9382f3a9be..4217201b85e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -96,7 +96,7 @@ class MutableRegionInfo implements RegionInfo {
 
   private static int checkReplicaId(int regionId) {
     if (regionId > MAX_REPLICA_ID) {
-      throw new IllegalArgumentException("ReplicaId cannot be greater than" + MAX_REPLICA_ID);
+      throw new IllegalArgumentException("ReplicaId cannot be greater than " + MAX_REPLICA_ID);
     }
     return regionId;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 37aea5e2f7d..08be7238821 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -55,6 +55,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.C
 public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState> {
   private static final Logger LOG = LoggerFactory.getLogger(CreateTableProcedure.class);
 
+  private static final int MAX_REGION_REPLICATION = 0x10000;
+
   private TableDescriptor tableDescriptor;
   private List<RegionInfo> newRegions;
 
@@ -83,10 +85,10 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState>
+    if (tableDescriptor.getRegionReplication() > MAX_REGION_REPLICATION) {
+      setFailure("master-create-table", new IllegalArgumentException(
+        "Region Replication cannot exceed " + MAX_REGION_REPLICATION + "."));
+      return false;
+    }
+
     // check for store file tracker configurations
     StoreFileTrackerValidationUtils.checkForCreateTable(env.getMasterConfiguration(),
       tableDescriptor);
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 618a4a45a04..bed41f4da86 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import static 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -284,4 +285,32 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
   new CreateTableProcedureOnHDFSFailure(procExec.getEnvironment(), htd, 
regions));
 ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
   }
+
+  @Test
+  public void testCreateTableWithManyRegionReplication() throws IOException {
+final int EXCEED_MAX_REGION_REPLICATION = 0x10001;
+TableName tableName = TableName.valueOf(name.getMethodName());
+ProcedureExecutor procExec = 
getMasterProcedureExecutor();
+
+TableDescriptor tableWithManyRegionReplication = 
TableDescriptorBuilder.newBuilder(tableName)
+  
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build())
+  .setRegionReplication(EXCEED_MAX_REGION_REPLICATION).build();
+RegionInfo[] regions01 =
+  ModifyRegionUtils.createRegionInfos(tableWithManyRegionReplication, 
null);
+long procId01 = ProcedureTestingUtility.submitAndWait(procExec, new 
CreateTableProcedure(
+  procExec.getEnvironment(), tableWithManyRegionReplication, regions01));
+Procedure result01 = procExec.getResult(procId01);
+assertTrue(result01.getException().getCause() instanceof 
IllegalArgumentException);
+assertFalse(UTIL.get

(hbase) branch branch-2.4 updated: HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 7e24dc7034f HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)
7e24dc7034f is described below

commit 7e24dc7034f176e40fea32bbf4a20863b24b5d66
Author: guluo 
AuthorDate: Sun Apr 7 17:27:28 2024 +0800

HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

Signed-off-by: Duo Zhang 
Reviewed-by: Vineet Kumar Maheshwari 
(cherry picked from commit e5d59cadc5dc9ac7d6f1be1a8defc05862ad)
---
 .../hadoop/hbase/client/MutableRegionInfo.java |  2 +-
 .../master/procedure/CreateTableProcedure.java | 13 --
 .../master/procedure/TestCreateTableProcedure.java | 29 ++
 3 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index a9382f3a9be..4217201b85e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -96,7 +96,7 @@ class MutableRegionInfo implements RegionInfo {
 
   private static int checkReplicaId(int regionId) {
     if (regionId > MAX_REPLICA_ID) {
-      throw new IllegalArgumentException("ReplicaId cannot be greater than" + MAX_REPLICA_ID);
+      throw new IllegalArgumentException("ReplicaId cannot be greater than " + MAX_REPLICA_ID);
     }
     return regionId;
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 3f151dff2c0..36c8186672e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -54,6 +54,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.C
 public class CreateTableProcedure extends 
AbstractStateMachineTableProcedure<CreateTableState> {
   private static final Logger LOG = 
LoggerFactory.getLogger(CreateTableProcedure.class);
 
+  private static final int MAX_REGION_REPLICATION = 0x10000;
+
   private TableDescriptor tableDescriptor;
  private List<RegionInfo> newRegions;
 
@@ -82,10 +84,10 @@ public class CreateTableProcedure extends 
AbstractStateMachineTableProcedure<CreateTableState> {
+  if (tableDescriptor.getRegionReplication() > MAX_REGION_REPLICATION) {
+  setFailure("master-create-table", new IllegalArgumentException(
+"Region Replication cannot exceed " + MAX_REGION_REPLICATION + "."));
+  return false;
+}
+
 return true;
   }
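
For readers following along, the new guard above rejects a CreateTableProcedure whose descriptor asks for more region replicas than the master accepts, instead of leaving a half-created table behind (that is the "table already exists" symptom in the subject line). A minimal client-side sketch of how the rejection surfaces, assuming a running cluster and the stock HBase 2.x client API; the table name and the 0x10001 value are illustrative, mirroring the test added in this commit:

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

  public class RegionReplicationCapExample {
    // Illustrative only: 0x10001 exceeds the cap the procedure enforces.
    private static final int EXCEED_MAX_REGION_REPLICATION = 0x10001;

    static void tryCreate(Connection conn) throws IOException {
      TableName name = TableName.valueOf("replication_cap_demo");
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1"))
        .setRegionReplication(EXCEED_MAX_REGION_REPLICATION)
        .build();
      try (Admin admin = conn.getAdmin()) {
        admin.createTable(desc); // expected to fail fast on the master side
      } catch (IOException e) {
        // With this fix the failed attempt does not register the table, so a
        // retry with a sane replication count can succeed.
        System.out.println("create rejected: " + e.getMessage());
      }
    }
  }
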
 
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index c7b79558e8d..71f05d2c75f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -251,4 +252,32 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
   new CreateTableProcedureOnHDFSFailure(procExec.getEnvironment(), htd, 
regions));
 ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
   }
+
+  @Test
+  public void testCreateTableWithManyRegionReplication() throws IOException {
+final int EXCEED_MAX_REGION_REPLICATION = 0x10001;
+TableName tableName = TableName.valueOf(name.getMethodName());
+ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+TableDescriptor tableWithManyRegionReplication = 
TableDescriptorBuilder.newBuilder(tableName)
+  
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build())
+  .setRegionReplication(EXCEED_MAX_REGION_REPLICATION).build();
+RegionInfo[] regions01 =
+  ModifyRegionUtils.createRegionInfos(tableWithManyRegionReplication, 
null);
+long procId01 = ProcedureTestingUtility.submitAndWait(procExec, new 
CreateTableProcedure(
+  procExec.getEnvironment(), tableWithManyRegionReplication, regions01));
+Procedure<?> result01 = procExec.getResult(procId01);
+assertTrue(result01.getException().getCause() instanceof 
IllegalArgumentException);
+assertFalse(UTIL.getAdmin()

(hbase) branch master updated: HBASE-28481 Prompting table already exists after failing to create table with many region replications (#5789)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new e5d59cadc5d HBASE-28481 Prompting table already exists after failing 
to create table with many region replications (#5789)
e5d59cadc5d is described below

commit e5d59cadc5dc9ac7d6f1be1a8defc05862ad
Author: guluo 
AuthorDate: Sun Apr 7 17:27:28 2024 +0800

HBASE-28481 Prompting table already exists after failing to create table 
with many region replications (#5789)

Signed-off-by: Duo Zhang 
Reviewed-by: Vineet Kumar Maheshwari 
---
 .../hadoop/hbase/client/MutableRegionInfo.java |  2 +-
 .../master/procedure/CreateTableProcedure.java | 14 +--
 .../master/procedure/TestCreateTableProcedure.java | 29 ++
 3 files changed, 42 insertions(+), 3 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index a9382f3a9be..4217201b85e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -96,7 +96,7 @@ class MutableRegionInfo implements RegionInfo {
 
   private static int checkReplicaId(int regionId) {
 if (regionId > MAX_REPLICA_ID) {
-  throw new IllegalArgumentException("ReplicaId cannot be greater than" + 
MAX_REPLICA_ID);
+  throw new IllegalArgumentException("ReplicaId cannot be greater than " + 
MAX_REPLICA_ID);
 }
 return regionId;
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 533b6fffcc4..17998fec7bd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -56,6 +56,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.C
 public class CreateTableProcedure extends 
AbstractStateMachineTableProcedure<CreateTableState> {
   private static final Logger LOG = 
LoggerFactory.getLogger(CreateTableProcedure.class);
 
+  private static final int MAX_REGION_REPLICATION = 0x10000;
+
   private TableDescriptor tableDescriptor;
  private List<RegionInfo> newRegions;
 
@@ -84,10 +86,10 @@ public class CreateTableProcedure extends 
AbstractStateMachineTableProcedure<CreateTableState> {
+  if (tableDescriptor.getRegionReplication() > MAX_REGION_REPLICATION) {
+  setFailure("master-create-table", new IllegalArgumentException(
+"Region Replication cannot exceed " + MAX_REGION_REPLICATION + "."));
+  return false;
+}
+
 if (!tableName.isSystemTable()) {
   // do not check rs group for system tables as we may block the bootstrap.
   Supplier<String> forWhom = () -> "table " + tableName;
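
The cap itself follows from the replica id range enforced by MutableRegionInfo.checkReplicaId above: replica ids run from 0 up to MAX_REPLICA_ID inclusive, so a region can carry at most MAX_REPLICA_ID + 1 replicas. A small sketch of that arithmetic, assuming MAX_REPLICA_ID is 0xFFFF as defined on RegionInfo (check the constant in your tree):

  // Illustrative arithmetic only; 0xFFFF is assumed to match RegionInfo.MAX_REPLICA_ID.
  public class ReplicaIdBound {
    static final int MAX_REPLICA_ID = 0xFFFF;                     // highest legal replica id
    static final int MAX_REGION_REPLICATION = MAX_REPLICA_ID + 1; // ids 0..0xFFFF -> 0x10000 replicas

    public static void main(String[] args) {
      int requested = 0x10001; // the value used by the new test
      System.out.println(requested > MAX_REGION_REPLICATION);     // true -> create is rejected
    }
  }
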
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 618a4a45a04..bed41f4da86 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import static 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -284,4 +285,32 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
   new CreateTableProcedureOnHDFSFailure(procExec.getEnvironment(), htd, 
regions));
 ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
   }
+
+  @Test
+  public void testCreateTableWithManyRegionReplication() throws IOException {
+final int EXCEED_MAX_REGION_REPLICATION = 0x10001;
+TableName tableName = TableName.valueOf(name.getMethodName());
+ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+TableDescriptor tableWithManyRegionReplication = 
TableDescriptorBuilder.newBuilder(tableName)
+  
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build())
+  .setRegionReplication(EXCEED_MAX_REGION_REPLICATION).build();
+RegionInfo[] regions01 =
+  ModifyRegionUtils.createRegionInfos(tableWithManyRegionReplication, 
null);
+long procId01 = ProcedureTestingUtility.submitAndWait(procExec, new 
CreateTableProcedure(
+  procExec.getEnvironment(), tableWithManyRegionReplication, regions01));
+ 

(hbase) branch branch-3 updated: HBASE-28457 Introduce a version field in file based tracker record (#5784)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 7f93609adca HBASE-28457 Introduce a version field in file based 
tracker record (#5784)
7f93609adca is described below

commit 7f93609adca262229a447210f29850bde4954674
Author: Duo Zhang 
AuthorDate: Sun Apr 7 16:43:50 2024 +0800

HBASE-28457 Introduce a version field in file based tracker record (#5784)

Signed-off-by: Wellington Chevreuil 
(cherry picked from commit c1012a9ebec9bb9fcc09f2d6fdc78e74cc44d562)
---
 .../protobuf/server/region/StoreFileTracker.proto  |  1 +
 .../storefiletracker/StoreFileListFile.java| 62 --
 .../storefiletracker/TestStoreFileListFile.java| 17 ++
 3 files changed, 64 insertions(+), 16 deletions(-)

diff --git 
a/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto 
b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
index 2a269ea4ac4..001cb3ea233 100644
--- 
a/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
+++ 
b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
@@ -33,4 +33,5 @@ message StoreFileEntry {
 message StoreFileList {
   required uint64 timestamp = 1;
   repeated StoreFileEntry store_file = 2;
+  optional uint64 version = 3 [default = 1];
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
index 7a6938106d3..b6287b076b3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hbase.regionserver.storefiletracker;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
@@ -59,19 +61,28 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.
  * without error on partial bytes if you stop at some special points, but the 
return message will
  * have incorrect field value. We should try our best to prevent this happens 
because loading an
  * incorrect store file list file usually leads to data loss.
+ * <p/>
+ * To prevent failing silently while downgrading, where we may miss some newly 
introduced fields in
+ * {@link StoreFileList} which are necessary, we introduce a 'version' field in
+ * {@link StoreFileList}. If we find out that we are reading a {@link 
StoreFileList} with higher
+ * version, we will fail immediately and tell users that you need extra steps 
while downgrading, to
+ * prevent potential data loss.
  */
 @InterfaceAudience.Private
 class StoreFileListFile {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(StoreFileListFile.class);
 
+  // the current version for StoreFileList
+  static final long VERSION = 1;
+
   static final String TRACK_FILE_DIR = ".filelist";
 
-  private static final String TRACK_FILE_PREFIX = "f1";
+  static final String TRACK_FILE_PREFIX = "f1";
 
   private static final String TRACK_FILE_ROTATE_PREFIX = "f2";
 
-  private static final char TRACK_FILE_SEPARATOR = '.';
+  static final char TRACK_FILE_SEPARATOR = '.';
 
   static final Pattern TRACK_FILE_PATTERN = Pattern.compile("^f(1|2)\\.\\d+$");
 
@@ -114,7 +125,18 @@ class StoreFileListFile {
   throw new IOException(
 "Checksum mismatch, expected " + expectedChecksum + ", actual " + 
calculatedChecksum);
 }
-return StoreFileList.parseFrom(data);
+StoreFileList storeFileList = StoreFileList.parseFrom(data);
+if (storeFileList.getVersion() > VERSION) {
+  LOG.error(
+"The loaded store file list is in version {}, which is higher than 
expected"
+  + " version {}. Stop loading to prevent potential data loss. This 
usually because your"
+  + " cluster is downgraded from a newer version. You need extra steps 
before downgrading,"
+  + " like switching back to default store file tracker.",
+storeFileList.getVersion(), VERSION);
+  throw new IOException("Higher store file list version detected, expected 
" + VERSION
++ ", got " + storeFileList.getVersion());
+}
+return storeFileList;
   }
 
   StoreFileList load(Path path) throws IOException {
@@ -145,7 +167,7 @@ class StoreFileListFile {
 if (statuses == null || statuses.length == 0) {
   return Collections.emptyNavigableMap();
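
The 'default = 1' on the new proto field is what keeps existing store file lists readable: a record written before this change carries no version field, so getVersion() reports 1 and passes the guard, while a record written by a later release that bumps VERSION is refused at load time instead of being silently misread. A hedged sketch of that behaviour, assuming the standard builder methods protobuf generates for the StoreFileList message shown in the diff:

  import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileList;

  public class VersionFieldDemo {
    static final long READER_VERSION = 1; // mirrors StoreFileListFile.VERSION

    public static void main(String[] args) {
      // Old-style record: version never set, so the proto default of 1 applies.
      StoreFileList oldStyle = StoreFileList.newBuilder().setTimestamp(1L).build();
      System.out.println(oldStyle.getVersion()); // 1 -> accepted

      // Record as a hypothetical newer release might write it.
      StoreFileList newer = StoreFileList.newBuilder().setTimestamp(2L).setVersion(2).build();
      if (newer.getVersion() > READER_VERSION) {
        // StoreFileListFile logs and throws IOException here instead of
        // silently dropping fields it does not understand.
        System.out.println("refuse to load, version " + newer.getVersion());
      }
    }
  }
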

(hbase) branch branch-2 updated: HBASE-28457 Introduce a version field in file based tracker record (#5784)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new d668c581046 HBASE-28457 Introduce a version field in file based 
tracker record (#5784)
d668c581046 is described below

commit d668c58104640d7363a3c199779da337ea3dc2bb
Author: Duo Zhang 
AuthorDate: Sun Apr 7 16:43:50 2024 +0800

HBASE-28457 Introduce a version field in file based tracker record (#5784)

Signed-off-by: Wellington Chevreuil 
(cherry picked from commit c1012a9ebec9bb9fcc09f2d6fdc78e74cc44d562)
---
 .../src/main/protobuf/StoreFileTracker.proto   |  1 +
 .../storefiletracker/StoreFileListFile.java| 62 --
 .../storefiletracker/TestStoreFileListFile.java| 17 ++
 3 files changed, 64 insertions(+), 16 deletions(-)

diff --git a/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto 
b/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
index 2a269ea4ac4..001cb3ea233 100644
--- a/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
@@ -33,4 +33,5 @@ message StoreFileEntry {
 message StoreFileList {
   required uint64 timestamp = 1;
   repeated StoreFileEntry store_file = 2;
+  optional uint64 version = 3 [default = 1];
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
index 7a6938106d3..b6287b076b3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hbase.regionserver.storefiletracker;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
@@ -59,19 +61,28 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.
  * without error on partial bytes if you stop at some special points, but the 
return message will
  * have incorrect field value. We should try our best to prevent this happens 
because loading an
  * incorrect store file list file usually leads to data loss.
+ * <p/>
+ * To prevent failing silently while downgrading, where we may miss some newly 
introduced fields in
+ * {@link StoreFileList} which are necessary, we introduce a 'version' field in
+ * {@link StoreFileList}. If we find out that we are reading a {@link 
StoreFileList} with higher
+ * version, we will fail immediately and tell users that you need extra steps 
while downgrading, to
+ * prevent potential data loss.
  */
 @InterfaceAudience.Private
 class StoreFileListFile {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(StoreFileListFile.class);
 
+  // the current version for StoreFileList
+  static final long VERSION = 1;
+
   static final String TRACK_FILE_DIR = ".filelist";
 
-  private static final String TRACK_FILE_PREFIX = "f1";
+  static final String TRACK_FILE_PREFIX = "f1";
 
   private static final String TRACK_FILE_ROTATE_PREFIX = "f2";
 
-  private static final char TRACK_FILE_SEPARATOR = '.';
+  static final char TRACK_FILE_SEPARATOR = '.';
 
   static final Pattern TRACK_FILE_PATTERN = Pattern.compile("^f(1|2)\\.\\d+$");
 
@@ -114,7 +125,18 @@ class StoreFileListFile {
   throw new IOException(
 "Checksum mismatch, expected " + expectedChecksum + ", actual " + 
calculatedChecksum);
 }
-return StoreFileList.parseFrom(data);
+StoreFileList storeFileList = StoreFileList.parseFrom(data);
+if (storeFileList.getVersion() > VERSION) {
+  LOG.error(
+"The loaded store file list is in version {}, which is higher than 
expected"
+  + " version {}. Stop loading to prevent potential data loss. This 
usually because your"
+  + " cluster is downgraded from a newer version. You need extra steps 
before downgrading,"
+  + " like switching back to default store file tracker.",
+storeFileList.getVersion(), VERSION);
+  throw new IOException("Higher store file list version detected, expected 
" + VERSION
++ ", got " + storeFileList.getVersion());
+}
+return storeFileList;
   }
 
   StoreFileList load(Path path) throws IOException {
@@ -145,7 +167,7 @@ class StoreFileListFile {
 if (statuses == null || statuses.length == 0) {
   return Collections.emptyNavigableMap();
 }
-TreeMap<Long, List<Path>> map = new TreeMap<>((l1, l2) -> l2.compareTo(l1));
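
As a side note, the constants whose visibility is widened above encode the tracker's file naming scheme: the current and rotated lists are written as f1.<timestamp> and f2.<timestamp> under the .filelist directory, and TRACK_FILE_PATTERN is used to recognize those names. A quick standalone check of the pattern (the file names below are made up):

  import java.util.regex.Pattern;

  public class TrackFileNameCheck {
    // Same pattern as in the diff above.
    static final Pattern TRACK_FILE_PATTERN = Pattern.compile("^f(1|2)\\.\\d+$");

    public static void main(String[] args) {
      System.out.println(TRACK_FILE_PATTERN.matcher("f1.1712480630000").matches()); // true
      System.out.println(TRACK_FILE_PATTERN.matcher("f2.1712480631000").matches()); // true
      System.out.println(TRACK_FILE_PATTERN.matcher("f3.1712480632000").matches()); // false
    }
  }
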

(hbase) branch branch-2.6 updated: HBASE-28457 Introduce a version field in file based tracker record (#5784)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new a0b84d4c45c HBASE-28457 Introduce a version field in file based 
tracker record (#5784)
a0b84d4c45c is described below

commit a0b84d4c45ce7dae1509f0587bbe59690bc4e496
Author: Duo Zhang 
AuthorDate: Sun Apr 7 16:43:50 2024 +0800

HBASE-28457 Introduce a version field in file based tracker record (#5784)

Signed-off-by: Wellington Chevreuil 
(cherry picked from commit c1012a9ebec9bb9fcc09f2d6fdc78e74cc44d562)
---
 .../src/main/protobuf/StoreFileTracker.proto   |  1 +
 .../storefiletracker/StoreFileListFile.java| 62 --
 .../storefiletracker/TestStoreFileListFile.java| 17 ++
 3 files changed, 64 insertions(+), 16 deletions(-)

diff --git a/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto 
b/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
index 2a269ea4ac4..001cb3ea233 100644
--- a/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/StoreFileTracker.proto
@@ -33,4 +33,5 @@ message StoreFileEntry {
 message StoreFileList {
   required uint64 timestamp = 1;
   repeated StoreFileEntry store_file = 2;
+  optional uint64 version = 3 [default = 1];
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
index 7a6938106d3..b6287b076b3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hbase.regionserver.storefiletracker;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
@@ -59,19 +61,28 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.
  * without error on partial bytes if you stop at some special points, but the 
return message will
  * have incorrect field value. We should try our best to prevent this happens 
because loading an
  * incorrect store file list file usually leads to data loss.
+ * <p/>
+ * To prevent failing silently while downgrading, where we may miss some newly 
introduced fields in
+ * {@link StoreFileList} which are necessary, we introduce a 'version' field in
+ * {@link StoreFileList}. If we find out that we are reading a {@link 
StoreFileList} with higher
+ * version, we will fail immediately and tell users that you need extra steps 
while downgrading, to
+ * prevent potential data loss.
  */
 @InterfaceAudience.Private
 class StoreFileListFile {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(StoreFileListFile.class);
 
+  // the current version for StoreFileList
+  static final long VERSION = 1;
+
   static final String TRACK_FILE_DIR = ".filelist";
 
-  private static final String TRACK_FILE_PREFIX = "f1";
+  static final String TRACK_FILE_PREFIX = "f1";
 
   private static final String TRACK_FILE_ROTATE_PREFIX = "f2";
 
-  private static final char TRACK_FILE_SEPARATOR = '.';
+  static final char TRACK_FILE_SEPARATOR = '.';
 
   static final Pattern TRACK_FILE_PATTERN = Pattern.compile("^f(1|2)\\.\\d+$");
 
@@ -114,7 +125,18 @@ class StoreFileListFile {
   throw new IOException(
 "Checksum mismatch, expected " + expectedChecksum + ", actual " + 
calculatedChecksum);
 }
-return StoreFileList.parseFrom(data);
+StoreFileList storeFileList = StoreFileList.parseFrom(data);
+if (storeFileList.getVersion() > VERSION) {
+  LOG.error(
+"The loaded store file list is in version {}, which is higher than 
expected"
+  + " version {}. Stop loading to prevent potential data loss. This 
usually because your"
+  + " cluster is downgraded from a newer version. You need extra steps 
before downgrading,"
+  + " like switching back to default store file tracker.",
+storeFileList.getVersion(), VERSION);
+  throw new IOException("Higher store file list version detected, expected 
" + VERSION
++ ", got " + storeFileList.getVersion());
+}
+return storeFileList;
   }
 
   StoreFileList load(Path path) throws IOException {
@@ -145,7 +167,7 @@ class StoreFileListFile {
 if (statuses == null || statuses.length == 0) {
   return Collections.emptyNavigableMap();
 }
-TreeMap<Long, List<Path>> map = new TreeMap<>((l1, l2) -> l2.compareTo(l1));

(hbase) branch master updated: HBASE-28457 Introduce a version field in file based tracker record (#5784)

2024-04-07 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new c1012a9ebec HBASE-28457 Introduce a version field in file based 
tracker record (#5784)
c1012a9ebec is described below

commit c1012a9ebec9bb9fcc09f2d6fdc78e74cc44d562
Author: Duo Zhang 
AuthorDate: Sun Apr 7 16:43:50 2024 +0800

HBASE-28457 Introduce a version field in file based tracker record (#5784)

Signed-off-by: Wellington Chevreuil 
---
 .../protobuf/server/region/StoreFileTracker.proto  |  1 +
 .../storefiletracker/StoreFileListFile.java| 62 --
 .../storefiletracker/TestStoreFileListFile.java| 17 ++
 3 files changed, 64 insertions(+), 16 deletions(-)

diff --git 
a/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto 
b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
index 2a269ea4ac4..001cb3ea233 100644
--- 
a/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
+++ 
b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
@@ -33,4 +33,5 @@ message StoreFileEntry {
 message StoreFileList {
   required uint64 timestamp = 1;
   repeated StoreFileEntry store_file = 2;
+  optional uint64 version = 3 [default = 1];
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
index 7a6938106d3..b6287b076b3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hbase.regionserver.storefiletracker;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
@@ -59,19 +61,28 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.
  * without error on partial bytes if you stop at some special points, but the 
return message will
  * have incorrect field value. We should try our best to prevent this happens 
because loading an
  * incorrect store file list file usually leads to data loss.
+ * <p/>
+ * To prevent failing silently while downgrading, where we may miss some newly 
introduced fields in
+ * {@link StoreFileList} which are necessary, we introduce a 'version' field in
+ * {@link StoreFileList}. If we find out that we are reading a {@link 
StoreFileList} with higher
+ * version, we will fail immediately and tell users that you need extra steps 
while downgrading, to
+ * prevent potential data loss.
  */
 @InterfaceAudience.Private
 class StoreFileListFile {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(StoreFileListFile.class);
 
+  // the current version for StoreFileList
+  static final long VERSION = 1;
+
   static final String TRACK_FILE_DIR = ".filelist";
 
-  private static final String TRACK_FILE_PREFIX = "f1";
+  static final String TRACK_FILE_PREFIX = "f1";
 
   private static final String TRACK_FILE_ROTATE_PREFIX = "f2";
 
-  private static final char TRACK_FILE_SEPARATOR = '.';
+  static final char TRACK_FILE_SEPARATOR = '.';
 
   static final Pattern TRACK_FILE_PATTERN = Pattern.compile("^f(1|2)\\.\\d+$");
 
@@ -114,7 +125,18 @@ class StoreFileListFile {
   throw new IOException(
 "Checksum mismatch, expected " + expectedChecksum + ", actual " + 
calculatedChecksum);
 }
-return StoreFileList.parseFrom(data);
+StoreFileList storeFileList = StoreFileList.parseFrom(data);
+if (storeFileList.getVersion() > VERSION) {
+  LOG.error(
+"The loaded store file list is in version {}, which is higher than 
expected"
+  + " version {}. Stop loading to prevent potential data loss. This 
usually because your"
+  + " cluster is downgraded from a newer version. You need extra steps 
before downgrading,"
+  + " like switching back to default store file tracker.",
+storeFileList.getVersion(), VERSION);
+  throw new IOException("Higher store file list version detected, expected 
" + VERSION
++ ", got " + storeFileList.getVersion());
+}
+return storeFileList;
   }
 
   StoreFileList load(Path path) throws IOException {
@@ -145,7 +167,7 @@ class StoreFileListFile {
 if (statuses == null || statuses.length == 0) {
   return Collections.emptyNavigableMap();
 }
-TreeMap<Long, List<Path>> map = new TreeMap<>((l1, l2) -> l2.compareTo(l1));
+
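
The last hunk is cut off here, but the removed line together with the new java.util.Comparator import suggests the descending-by-timestamp map now uses Comparator.reverseOrder() in place of the hand-written lambda; the two orderings are equivalent for Long keys. A small sketch of that equivalence (the String values stand in for the real per-timestamp file lists):

  import java.util.Comparator;
  import java.util.TreeMap;

  public class ReverseOrderEquivalence {
    public static void main(String[] args) {
      TreeMap<Long, String> lambdaOrdered = new TreeMap<>((l1, l2) -> l2.compareTo(l1));
      TreeMap<Long, String> reverseOrdered = new TreeMap<>(Comparator.reverseOrder());
      for (long ts : new long[] { 3L, 1L, 2L }) {
        lambdaOrdered.put(ts, "f1." + ts);
        reverseOrdered.put(ts, "f1." + ts);
      }
      // Both iterate newest timestamp first: [3, 2, 1]
      System.out.println(lambdaOrdered.keySet());
      System.out.println(reverseOrdered.keySet());
    }
  }
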