This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
     new 36a0f01  HBASE-25656 Backport to branch-2.2: [HBASE-25548 Optionally allow sna… (#3055)
36a0f01 is described below

commit 36a0f01b5500b53d3d878025506e3217158c732c
Author: Wellington Ramos Chevreuil <wchevre...@apache.org>
AuthorDate: Thu Mar 18 15:50:18 2021 +0000

    HBASE-25656 Backport to branch-2.2: [HBASE-25548 Optionally allow sna… (#3055)
    
    Signed-off-by: Peter Somogyi <psomo...@apache.org>
    Signed-off-by: Michael Stack <st...@apache.org>
---
 .../java/org/apache/hadoop/hbase/client/Admin.java |  47 +++++++++
 .../hadoop/hbase/client/SnapshotDescription.java   |  44 ++++++--
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |   8 +-
 .../src/main/protobuf/Snapshot.proto               |   1 +
 .../hbase/master/snapshot/SnapshotManager.java     |   4 +
 .../hbase/master/snapshot/TakeSnapshotHandler.java |   9 +-
 .../hbase/client/TestSnapshotFromClient.java       |   2 +-
 .../master/snapshot/TestTakeSnapshotHandler.java   | 111 +++++++++++++++++++++
 hbase-shell/src/main/ruby/hbase/admin.rb           |   8 +-
 .../src/main/ruby/shell/commands/snapshot.rb       |   2 +-
 src/main/asciidoc/_chapters/ops_mgt.adoc           |  12 +++
 11 files changed, 234 insertions(+), 14 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 1746dd5..1e01cd1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1983,6 +1983,53 @@ public interface Admin extends Abortable, Closeable {
       IllegalArgumentException;
 
   /**
+   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
+   * snapshot</b>. Snapshots are taken sequentially even when requested concurrently, across
+   * all tables. Attempts to take a snapshot with the same name (even a different type or with
+   * different parameters) will fail with a {@link SnapshotCreationException} indicating the
+   * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
+   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
+   * A snapshot can optionally be given a time-to-live (TTL), in seconds, via snapshotProps.
+   *
+   * @param snapshotName  name to give the snapshot on the filesystem. Must be unique from all other
+   *                      snapshots stored on the cluster
+   * @param tableName     name of the table to snapshot
+   * @param type          type of snapshot to take
+   * @param snapshotProps additional snapshot properties, e.g. TTL
+   * @throws IOException               if we fail to reach the master
+   * @throws SnapshotCreationException if snapshot creation failed
+   * @throws IllegalArgumentException  if the snapshot request is formatted incorrectly
+   */
+  default void snapshot(String snapshotName, TableName tableName, SnapshotType type,
+    Map<String, Object> snapshotProps) throws IOException,
+    SnapshotCreationException, IllegalArgumentException {
+    snapshot(new SnapshotDescription(snapshotName, tableName, type, snapshotProps));
+  }
+
+  /**
+   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
+   * snapshot</b>. Snapshots are taken sequentially even when requested concurrently, across
+   * all tables. Attempts to take a snapshot with the same name (even a different type or with
+   * different parameters) will fail with a {@link SnapshotCreationException} indicating the
+   * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
+   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
+   * A snapshot can optionally be given a time-to-live (TTL), in seconds, via snapshotProps.
+   *
+   * @param snapshotName  name to give the snapshot on the filesystem. Must be unique from all other
+   *                      snapshots stored on the cluster
+   * @param tableName     name of the table to snapshot
+   * @param snapshotProps additional snapshot properties, e.g. TTL
+   * @throws IOException               if we fail to reach the master
+   * @throws SnapshotCreationException if snapshot creation failed
+   * @throws IllegalArgumentException  if the snapshot request is formatted incorrectly
+   */
+  default void snapshot(String snapshotName, TableName tableName,
+    Map<String, Object> snapshotProps) throws IOException,
+    SnapshotCreationException, IllegalArgumentException {
+    snapshot(new SnapshotDescription(snapshotName, tableName, SnapshotType.FLUSH, snapshotProps));
+  }
+
+  /**
    * Take a snapshot and wait for the server to complete that snapshot (blocking). Only a single
    * snapshot should be taken at a time for an instance of HBase, or results may be undefined (you
    * can tell multiple HBase clusters to snapshot at the same time, but only one at a time for a
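
For context, here is a minimal client-side sketch of the two Admin overloads added above (not part of the patch). It assumes a cluster reachable through the default configuration; the class name, table name, snapshot names and the 20 GB value are illustrative only.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SnapshotPropsExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Additional snapshot properties. Per TakeSnapshotHandler below, MAX_FILESIZE is only
          // applied when the snapshotted table does not define a MAX_FILESIZE of its own.
          Map<String, Object> props = new HashMap<>();
          props.put(TableDescriptorBuilder.MAX_FILESIZE, 21474836480L);

          // New overload that defaults to SnapshotType.FLUSH.
          admin.snapshot("example_snapshot", TableName.valueOf("example_table"), props);

          // New overload with an explicit snapshot type.
          admin.snapshot("example_snapshot_skipflush", TableName.valueOf("example_table"),
              SnapshotType.SKIPFLUSH, props);
        }
      }
    }

Both overloads simply delegate to snapshot(SnapshotDescription), carrying the properties through the new SnapshotDescription constructor introduced in the next file.
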
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
index 0b6f196..8de3bf4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.Map;
+
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -32,6 +35,8 @@ public class SnapshotDescription {
   private final long creationTime;
   private final int version;
 
+  private final long maxFileSize;
+
   public SnapshotDescription(String name) {
     this(name, (TableName)null);
   }
@@ -48,7 +53,7 @@ public class SnapshotDescription {
   }
 
   public SnapshotDescription(String name, TableName table) {
-    this(name, table, SnapshotType.DISABLED, null);
+    this(name, table, SnapshotType.DISABLED, null, -1, -1, null);
   }
 
   /**
@@ -63,7 +68,7 @@ public class SnapshotDescription {
   }
 
   public SnapshotDescription(String name, TableName table, SnapshotType type) {
-    this(name, table, type, null);
+    this(name, table, type, null, -1, -1, null);
   }
 
   /**
@@ -78,29 +83,49 @@ public class SnapshotDescription {
   }
 
  public SnapshotDescription(String name, TableName table, SnapshotType type, String owner) {
-    this(name, table, type, owner, -1, -1);
+    this(name, table, type, owner, -1, -1, null);
   }
 
   /**
    * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName
    *   instance instead.
-   * @see #SnapshotDescription(String, TableName, SnapshotType, String, long, int)
+   * @see #SnapshotDescription(String, String, SnapshotType, String, long, int)
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-16892">HBASE-16892</a>
    */
   @Deprecated
   public SnapshotDescription(String name, String table, SnapshotType type, String owner,
       long creationTime, int version) {
-    this(name, TableName.valueOf(table), type, owner, creationTime, version);
+    this(name, TableName.valueOf(table), type, owner, creationTime, version, null);
   }
 
  public SnapshotDescription(String name, TableName table, SnapshotType type, String owner,
-      long creationTime, int version) {
+      long creationTime, int version, Map<String, Object> snapshotProps) {
     this.name = name;
     this.table = table;
     this.snapShotType = type;
     this.owner = owner;
     this.creationTime = creationTime;
     this.version = version;
+    this.maxFileSize = getLongFromSnapshotProps(snapshotProps, TableDescriptorBuilder.MAX_FILESIZE);
+  }
+
+  private long getLongFromSnapshotProps(Map<String, Object> snapshotProps, String property) {
+    return MapUtils.getLongValue(snapshotProps, property, -1);
+  }
+
+
+
+  /**
+   * SnapshotDescription Parameterized Constructor
+   *
+   * @param snapshotName  Name of the snapshot
+   * @param tableName     TableName associated with the snapshot
+   * @param type          Type of the snapshot - enum SnapshotType
+   * @param snapshotProps Additional properties for snapshot e.g. TTL
+   */
+  public SnapshotDescription(String snapshotName, TableName tableName, SnapshotType type,
+                             Map<String, Object> snapshotProps) {
+    this(snapshotName, tableName, type, null, -1, -1, snapshotProps);
   }
 
   public String getName() {
@@ -143,11 +168,16 @@ public class SnapshotDescription {
     return this.version;
   }
 
+  public long getMaxFileSize() {
+    return maxFileSize;
+  }
+
   @Override
   public String toString() {
     return "SnapshotDescription: name = " + ((name != null) ? name : null) + "/table = "
         + ((table != null) ? table : null) + " /owner = " + ((owner != null) ? owner : null)
         + (creationTime != -1 ? ("/creationtime = " + creationTime) : "")
-        + (version != -1 ? ("/version = " + version) : "");
+        + (version != -1 ? ("/version = " + version) : "")
+        + (maxFileSize != -1 ? ("/maxFileSize = " + maxFileSize) : "");
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 9071321..79ef913 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -29,6 +29,7 @@ import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -2925,6 +2926,9 @@ public final class ProtobufUtil {
     if (snapshotDesc.getVersion() != -1) {
       builder.setVersion(snapshotDesc.getVersion());
     }
+    if (snapshotDesc.getMaxFileSize() != -1) {
+      builder.setMaxFileSize(snapshotDesc.getMaxFileSize());
+    }
     builder.setType(ProtobufUtil.createProtosSnapShotDescType(snapshotDesc.getType()));
     SnapshotProtos.SnapshotDescription snapshot = builder.build();
     return snapshot;
@@ -2939,10 +2943,12 @@ public final class ProtobufUtil {
    */
   public static SnapshotDescription
       createSnapshotDesc(SnapshotProtos.SnapshotDescription snapshotDesc) {
+    final Map<String, Object> snapshotProps = new HashMap<>();
+    snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, snapshotDesc.getMaxFileSize());
     return new SnapshotDescription(snapshotDesc.getName(),
         snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null,
         createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(),
-        snapshotDesc.getCreationTime(), snapshotDesc.getVersion());
+        snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps);
   }
 
   public static RegionLoadStats createRegionLoadStats(ClientProtos.RegionLoadStats stats) {
diff --git a/hbase-protocol-shaded/src/main/protobuf/Snapshot.proto b/hbase-protocol-shaded/src/main/protobuf/Snapshot.proto
index 479e33e..9f62f66 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Snapshot.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Snapshot.proto
@@ -44,6 +44,7 @@ message SnapshotDescription {
   optional int32 version = 5;
   optional string owner = 6;
   optional UsersAndPermissions users_and_permissions = 7;
+  optional int64 max_file_size = 8 [default = 0];
 }
 
 message SnapshotFileInfo {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index c1ae96d..5af56dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -149,6 +149,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   /** number of current operations running on the master */
   public static final int SNAPSHOT_POOL_THREADS_DEFAULT = 1;
 
+  /** Conf key for preserving original max file size configs */
+  public static final String SNAPSHOT_MAX_FILE_SIZE_PRESERVE =
+    "hbase.snapshot.max.filesize.preserve";
+
   private boolean stopped;
   private MasterServices master;  // Needed by TableEventHandlers
   private ProcedureCoordinator coordinator;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 95bc5cb..63f4a18 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.master.snapshot;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.util.HashSet;
@@ -33,6 +32,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -141,12 +141,17 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   }
 
   private TableDescriptor loadTableDescriptor()
-      throws FileNotFoundException, IOException {
+      throws IOException {
     TableDescriptor htd =
       this.master.getTableDescriptors().get(snapshotTable);
     if (htd == null) {
       throw new IOException("TableDescriptor missing for " + snapshotTable);
     }
+    if (htd.getMaxFileSize() == -1 &&
+        this.snapshot.getMaxFileSize() > 0) {
+      htd = TableDescriptorBuilder.newBuilder(htd).setValue(TableDescriptorBuilder.MAX_FILESIZE,
+        Long.toString(this.snapshot.getMaxFileSize())).build();
+    }
     return htd;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
index b46404f..bcc5aab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
@@ -227,7 +227,7 @@ public class TestSnapshotFromClient {
     final String SNAPSHOT_NAME = "offlineTableSnapshot";
     byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
 
-    admin.snapshot(new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME,
+    admin.snapshot(new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME.getNameAsString(),
         SnapshotType.DISABLED, null, -1, SnapshotManifestV1.DESCRIPTOR_VERSION));
     LOG.debug("Snapshot completed.");
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java
new file mode 100644
index 0000000..9000254
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+
+/**
+ * Unfortunately, TakeSnapshotHandler could not be tested using mocks, because it relies on TableLock,
+ * which is tightly coupled to the LockManager and LockProcedure classes; both are final, which
+ * prevents us from mocking their behaviour. It looks like overkill to have to emulate a
+ * whole cluster run for such a small optional property behaviour.
+ */
+@Category({ MediumTests.class})
+public class TestTakeSnapshotHandler {
+
+  private static HBaseTestingUtility UTIL;
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestTakeSnapshotHandler.class);
+
+  @Rule
+  public TestName name = new TestName();
+
+
+  @Before
+  public void setup()  {
+    UTIL = new HBaseTestingUtility();
+  }
+
+  public TableDescriptor createTableInsertDataAndTakeSnapshot(Map<String, Object> snapshotProps)
+      throws Exception {
+    TableDescriptor descriptor =
+      TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+        .setColumnFamily(
+          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).build()).build();
+    UTIL.getConnection().getAdmin().createTable(descriptor);
+    Table table = UTIL.getConnection().getTable(descriptor.getTableName());
+    Put put = new Put(Bytes.toBytes("1"));
+    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("v1"));
+    table.put(put);
+    String snapName = "snap" + name.getMethodName();
+    UTIL.getAdmin().snapshot(snapName, descriptor.getTableName(), snapshotProps);
+    TableName cloned = TableName.valueOf(name.getMethodName() + "clone");
+    UTIL.getAdmin().cloneSnapshot(snapName, cloned);
+    return descriptor;
+  }
+
+  @Test
+  public void testPreparePreserveMaxFileSizeEnabled() throws Exception {
+    UTIL.startMiniCluster();
+    Map<String, Object> snapshotProps = new HashMap<>();
+    snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, Long.parseLong("21474836480"));
+    TableDescriptor descriptor = createTableInsertDataAndTakeSnapshot(snapshotProps);
+    TableName cloned = TableName.valueOf(name.getMethodName() + "clone");
+    assertEquals(-1,
+      UTIL.getAdmin().getDescriptor(descriptor.getTableName()).getMaxFileSize());
+    assertEquals(21474836480L, UTIL.getAdmin().getDescriptor(cloned).getMaxFileSize());
+  }
+
+  @Test
+  public void testPreparePreserveMaxFileSizeDisabled() throws Exception {
+    UTIL.startMiniCluster();
+    TableDescriptor descriptor = createTableInsertDataAndTakeSnapshot(null);
+    TableName cloned = TableName.valueOf(name.getMethodName() + "clone");
+    assertEquals(-1,
+      UTIL.getAdmin().getDescriptor(descriptor.getTableName()).getMaxFileSize());
+    assertEquals(-1, UTIL.getAdmin().getDescriptor(cloned).getMaxFileSize());
+  }
+
+  @After
+  public void shutdown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+}
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 0ba57c4..aeb24a8 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1063,11 +1063,15 @@ module Hbase
         @admin.snapshot(snapshot_name, table_name)
       else
         args.each do |arg|
+          snapshot_props = java.util.HashMap.new
+          max_filesize = arg[MAX_FILESIZE]
+          max_filesize = max_filesize ? max_filesize.to_java(:long) : -1
+          snapshot_props.put("MAX_FILESIZE", max_filesize)
           if arg[SKIP_FLUSH] == true
             @admin.snapshot(snapshot_name, table_name,
-                            org.apache.hadoop.hbase.client.SnapshotType::SKIPFLUSH)
+                            org.apache.hadoop.hbase.client.SnapshotType::SKIPFLUSH, snapshot_props)
           else
-            @admin.snapshot(snapshot_name, table_name)
+            @admin.snapshot(snapshot_name, table_name, snapshot_props)
           end
         end
       end
diff --git a/hbase-shell/src/main/ruby/shell/commands/snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
index c591e12..9984494 100644
--- a/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
@@ -24,7 +24,7 @@ module Shell
 Take a snapshot of specified table. Examples:
 
   hbase> snapshot 'sourceTable', 'snapshotName'
-  hbase> snapshot 'namespace:sourceTable', 'snapshotName', {SKIP_FLUSH => true}
+  hbase> snapshot 'namespace:sourceTable', 'snapshotName', {SKIP_FLUSH => true, MAX_FILESIZE => 21474836480}
 EOF
       end
 
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index f94c84b..a8e5ff4 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -2830,6 +2830,18 @@ A snapshot is only a representation of a table during a window of time.
 The amount of time the snapshot operation will take to reach each Region Server may vary from a few seconds to a minute, depending on the resource load and speed of the hardware or network, among other factors.
 There is also no way to know whether a given insert or update is in memory or has been flushed.
 
+.Take a snapshot with custom MAX_FILESIZE
+
+Optionally, snapshots can be created with a custom max file size configuration that will be
+used by cloned tables, instead of the global `hbase.hregion.max.filesize` configuration property.
+This is mostly useful when exporting snapshots between different clusters. If the HBase cluster where
+the snapshot is originally taken has a much larger value set for `hbase.hregion.max.filesize` than
+one or more of the clusters the snapshot is being exported to, a storm of region splits may occur when
+restoring the snapshot on the destination clusters. Specifying `MAX_FILESIZE` in the properties passed to
+the `snapshot` command saves the supplied value into the table's `MAX_FILESIZE`
+descriptor at snapshot creation time. If the table already defines a `MAX_FILESIZE` descriptor,
+this property is ignored and has no effect.
+
 [[ops.snapshots.list]]
 === Listing Snapshots
 
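
As a usage sketch for the ops_mgt.adoc section above (the syntax mirrors the updated snapshot.rb help; table and snapshot names are made up):

    hbase> snapshot 'sourceTable', 'snapshotWithCustomMaxFilesize', {MAX_FILESIZE => 21474836480}
    hbase> clone_snapshot 'snapshotWithCustomMaxFilesize', 'clonedTable'

A table cloned or restored from such a snapshot should then carry MAX_FILESIZE => '21474836480' in its descriptor, while the source table keeps falling back to the global `hbase.hregion.max.filesize`, matching the assertions in TestTakeSnapshotHandler above.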
