HBASE-16904 Snapshot related changes for FS redo work

Signed-off-by: Sean Busbey <bus...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/159a67c6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/159a67c6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/159a67c6

Branch: refs/heads/hbase-14439
Commit: 159a67c6767585ed9fb7ee357fb579ce25c30a47
Parents: 6d1813a
Author: Umesh Agashe <uaga...@cloudera.com>
Authored: Tue Nov 1 10:32:01 2016 -0700
Committer: Sean Busbey <bus...@apache.org>
Committed: Tue Nov 29 01:27:05 2016 -0600

----------------------------------------------------------------------
 .../hbase/client/ClientSideRegionScanner.java   |    8 +-
 .../hbase/client/TableSnapshotScanner.java      |   51 +-
 .../apache/hadoop/hbase/fs/MasterStorage.java   |  286 ++++-
 .../apache/hadoop/hbase/fs/StorageContext.java  |    1 -
 .../hadoop/hbase/fs/legacy/LegacyLayout.java    |  131 ++-
 .../hbase/fs/legacy/LegacyMasterStorage.java    |  431 ++++++-
 .../fs/legacy/snapshot/ExportSnapshot.java      | 1102 ++++++++++++++++++
 .../legacy/snapshot/RestoreSnapshotHelper.java  |  689 +++++++++++
 .../fs/legacy/snapshot/SnapshotFileCache.java   |   11 +-
 .../legacy/snapshot/SnapshotHFileCleaner.java   |    3 +-
 .../fs/legacy/snapshot/SnapshotManifest.java    |  570 +++++++++
 .../fs/legacy/snapshot/SnapshotManifestV1.java  |  209 ++++
 .../fs/legacy/snapshot/SnapshotManifestV2.java  |  187 +++
 .../apache/hadoop/hbase/mapreduce/Driver.java   |    2 +-
 .../MultiTableSnapshotInputFormatImpl.java      |   43 +-
 .../mapreduce/TableSnapshotInputFormat.java     |    2 +-
 .../mapreduce/TableSnapshotInputFormatImpl.java |   47 +-
 .../procedure/CloneSnapshotProcedure.java       |   32 +-
 .../procedure/RestoreSnapshotProcedure.java     |   24 +-
 .../snapshot/DisabledTableSnapshotHandler.java  |   16 +-
 .../master/snapshot/MasterSnapshotVerifier.java |  137 +--
 .../hbase/master/snapshot/SnapshotManager.java  |  255 +---
 .../master/snapshot/TakeSnapshotHandler.java    |   80 +-
 .../hadoop/hbase/regionserver/HRegion.java      |   18 +-
 .../regionserver/DumpReplicationQueues.java     |    3 -
 .../hadoop/hbase/snapshot/ExportSnapshot.java   | 1084 -----------------
 .../hbase/snapshot/RestoreSnapshotHelper.java   |  823 -------------
 .../snapshot/SnapshotDescriptionUtils.java      |  239 +---
 .../hadoop/hbase/snapshot/SnapshotInfo.java     |  187 ++-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  570 ---------
 .../hbase/snapshot/SnapshotManifestV1.java      |  209 ----
 .../hbase/snapshot/SnapshotManifestV2.java      |  187 ---
 .../hbase/snapshot/SnapshotReferenceUtil.java   |  327 ++----
 .../snapshot/SnapshotRestoreMetaChanges.java    |  157 +++
 .../hbase/client/TestSnapshotFromClient.java    |    7 -
 .../fs/legacy/snapshot/TestExportSnapshot.java  |  384 ++++++
 .../snapshot/TestExportSnapshotHelpers.java     |   96 ++
 .../snapshot/TestRestoreSnapshotHelper.java     |  181 +++
 .../legacy/snapshot/TestSnapshotFileCache.java  |   30 +-
 .../legacy/snapshot/TestSnapshotManifest.java   |  146 +++
 .../hbase/snapshot/SnapshotTestingUtils.java    |    3 +
 .../hbase/snapshot/TestExportSnapshot.java      |  376 ------
 .../snapshot/TestExportSnapshotHelpers.java     |   96 --
 .../snapshot/TestRestoreSnapshotHelper.java     |  180 ---
 .../hbase/snapshot/TestSnapshotManifest.java    |  145 ---
 src/main/asciidoc/_chapters/ops_mgt.adoc        |   10 +-
 46 files changed, 5006 insertions(+), 4769 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index a643428..a7ea192 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -24,8 +24,6 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -33,6 +31,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 
@@ -49,7 +49,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
   RegionScanner scanner;
   List<Cell> values;
 
-  public ClientSideRegionScanner(Configuration conf, FileSystem fs,
+  public ClientSideRegionScanner(MasterStorage<? extends StorageIdentifier> masterStorage,
       Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
           throws IOException {
 
@@ -57,7 +57,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
 
     // open region from the snapshot directory
-    this.region = HRegion.openHRegion(fs, rootDir, hri, htd, conf);
+    this.region = HRegion.openHRegion(masterStorage, hri, htd);
 
     // create an internal region scanner
     this.scanner = region.getScanner(scan);
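With this change the scanner is opened against a MasterStorage handle instead of a raw
Configuration/FileSystem pair. A minimal sketch of the new call site, assuming the MasterStorage
instance, restore directory, table descriptor and region info were produced elsewhere (for example
by a prior copySnapshotForScanner() call); the helper name openRestoredRegion is illustrative only:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.ClientSideRegionScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.MasterStorage;
import org.apache.hadoop.hbase.fs.StorageIdentifier;

class RestoredRegionScanHelper {
  // Opens one restored snapshot region with the new constructor; scan metrics are optional (null here).
  static ClientSideRegionScanner openRestoredRegion(
      MasterStorage<? extends StorageIdentifier> masterStorage, Path restoreDir,
      HTableDescriptor htd, HRegionInfo hri, Scan scan) throws IOException {
    return new ClientSideRegionScanner(masterStorage, restoreDir, htd, hri, scan, null);
  }
}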

http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
index 4601ae4..2cf5ede 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
@@ -26,16 +26,16 @@ import java.util.UUID;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
+import org.apache.hadoop.hbase.snapshot.SnapshotRestoreMetaChanges;
+import org.apache.hadoop.hbase.fs.legacy.snapshot.RestoreSnapshotHelper;
 
 /**
  * A Scanner which performs a scan over snapshot files. Using this class requires copying the
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * <p>
  * This also allows one to run the scan from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool,
+ * {@link org.apache.hadoop.hbase.fs.legacy.snapshot.ExportSnapshot} tool,
  * to a pure-hdfs cluster, and this scanner can be used to
  * run the scan directly over the snapshot files. The snapshot should not be deleted while there
  * are open scanners reading from snapshot files.
@@ -70,10 +70,8 @@ public class TableSnapshotScanner extends AbstractClientScanner {
 
   private static final Log LOG = LogFactory.getLog(TableSnapshotScanner.class);
 
-  private Configuration conf;
+  private MasterStorage<? extends StorageIdentifier> masterStorage;
   private String snapshotName;
-  private FileSystem fs;
-  private Path rootDir;
   private Path restoreDir;
   private Scan scan;
   private ArrayList<HRegionInfo> regions;
@@ -84,7 +82,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
 
   /**
    * Creates a TableSnapshotScanner.
-   * @param conf the configuration
+   * @param masterStorage the {@link MasterStorage} to use
    * @param restoreDir a temporary directory to copy the snapshot files into. Current user should
    * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    * The scanner deletes the contents of the directory once the scanner is closed.
@@ -92,38 +90,20 @@ public class TableSnapshotScanner extends AbstractClientScanner {
    * @param scan a Scan representing scan parameters
    * @throws IOException in case of error
    */
-  public TableSnapshotScanner(Configuration conf, Path restoreDir,
-      String snapshotName, Scan scan) throws IOException {
-    this(conf, FSUtils.getRootDir(conf), restoreDir, snapshotName, scan);
-  }
-
-  /**
-   * Creates a TableSnapshotScanner.
-   * @param conf the configuration
-   * @param rootDir root directory for HBase.
-   * @param restoreDir a temporary directory to copy the snapshot files into. Current user should
-   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
-   * The scanner deletes the contents of the directory once the scanner is closed.
-   * @param snapshotName the name of the snapshot to read from
-   * @param scan a Scan representing scan parameters
-   * @throws IOException in case of error
-   */
-  public TableSnapshotScanner(Configuration conf, Path rootDir,
+  public TableSnapshotScanner(MasterStorage<? extends StorageIdentifier> masterStorage,
       Path restoreDir, String snapshotName, Scan scan) throws IOException {
-    this.conf = conf;
+    this.masterStorage = masterStorage;
     this.snapshotName = snapshotName;
-    this.rootDir = rootDir;
     // restoreDir will be deleted in close(), use a unique sub directory
     this.restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
     this.scan = scan;
-    this.fs = rootDir.getFileSystem(conf);
     init();
   }
 
   private void init() throws IOException {
-    final RestoreSnapshotHelper.RestoreMetaChanges meta =
-      RestoreSnapshotHelper.copySnapshotForScanner(
-        conf, fs, rootDir, restoreDir, snapshotName);
+    // TODO: what's needed is a temporary copy of a snapshot for scanning. Is a separate API required?
+    final SnapshotRestoreMetaChanges meta = RestoreSnapshotHelper.copySnapshotForScanner(
+        masterStorage, restoreDir, snapshotName);
     final List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();
 
     htd = meta.getTableDescriptor();
@@ -151,8 +131,8 @@ public class TableSnapshotScanner extends AbstractClientScanner {
         }
 
         HRegionInfo hri = regions.get(currentRegion);
-        currentRegionScanner = new ClientSideRegionScanner(conf, fs,
-          restoreDir, htd, hri, scan, scanMetrics);
+        currentRegionScanner = new ClientSideRegionScanner(masterStorage, restoreDir, htd, hri,
+            scan, scanMetrics);
         if (this.scanMetrics != null) {
           this.scanMetrics.countOfRegions.incrementAndGet();
         }
@@ -178,7 +158,8 @@ public class TableSnapshotScanner extends AbstractClientScanner {
       currentRegionScanner.close();
     }
     try {
-      fs.delete(this.restoreDir, true);
+      // TODO: same as above
+      masterStorage.getFileSystem().delete(this.restoreDir, true);
     } catch (IOException ex) {
       LOG.warn("Could not delete restore directory for the snapshot:" + ex);
     }
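For callers, the net effect is that a snapshot scan is driven off a MasterStorage handle. A hedged
usage sketch based on the new constructor signature; how the MasterStorage instance is obtained, and
the restore directory and snapshot name, are assumptions rather than part of this patch:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableSnapshotScanner;
import org.apache.hadoop.hbase.fs.MasterStorage;
import org.apache.hadoop.hbase.fs.StorageIdentifier;

class SnapshotScanSketch {
  // Counts the rows of a snapshot; close() also removes the per-scanner restore directory.
  static long countRows(MasterStorage<? extends StorageIdentifier> masterStorage,
      Path restoreDir, String snapshotName) throws IOException {
    TableSnapshotScanner scanner =
        new TableSnapshotScanner(masterStorage, restoreDir, snapshotName, new Scan());
    long rows = 0;
    try {
      for (Result r = scanner.next(); r != null; r = scanner.next()) {
        rows++;
      }
    } finally {
      scanner.close();
    }
    return rows;
  }
}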

http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
index 2f3b4a4..a62cbb7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -38,9 +39,16 @@ import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.fs.legacy.LegacyMasterStorage;
 import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.snapshot.SnapshotRestoreMetaChanges;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -83,22 +91,6 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
     return new ArrayList<>();
   }
 
-  /**
-   * This method should be called to prepare storage implementation/s for snapshots. The default
-   * implementation does nothing. MasterStorage subclasses need to override this method to
-   * provide specific preparatory steps.
-   */
-  public void enableSnapshots() {
-    return;
-  }
-
-  /**
-   * Returns true if MasterStorage is prepared for snapshots
-   */
-  public boolean isSnapshotsEnabled() {
-    return true;
-  }
-
   // ==========================================================================
   //  PUBLIC Interfaces - Visitors
   // ==========================================================================
@@ -114,6 +106,17 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
     void visitRegion(HRegionInfo regionInfo) throws IOException;
   }
 
+  public interface SnapshotVisitor {
+    void visitSnapshot(final String snapshotName, final SnapshotDescription snapshot,
+        StorageContext ctx);
+  }
+
+  public interface SnapshotStoreFileVisitor {
+    // TODO: Instead of SnapshotRegionManifest.StoreFile return common object across all
+    void visitSnapshotStoreFile(SnapshotDescription snapshot, StorageContext ctx, HRegionInfo hri,
+        String familyName, final SnapshotRegionManifest.StoreFile storeFile) throws IOException;
+  }
+
   // ==========================================================================
   //  PUBLIC Methods - Namespace related
   // ==========================================================================
@@ -199,7 +202,7 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
   }
 
   /**
-   * Archives specified table and all it's regions
+   * Archives a table and all its regions
    * @param tableName
    * @throws IOException
    */
@@ -208,7 +211,7 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
   }
 
   /**
-   * Archives specified table and all it's regions
+   * Archives a table and all its regions
    * @param ctx Storage context of the table.
    * @param tableName
    * @throws IOException
@@ -301,13 +304,234 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
   }
 
   /**
-   * Archives the specified region's storage artifacts (files, directories etc)
+   * Archives a region's storage artifacts (files, directories etc)
    * @param regionInfo
    * @throws IOException
    */
   public abstract void archiveRegion(HRegionInfo regionInfo) throws IOException;
 
   // ==========================================================================
+  //  PUBLIC Methods - Snapshot related
+  // ==========================================================================
+  /**
+   * This method should be called to prepare storage implementation/s for snapshots. The default
+   * implementation does nothing. MasterStorage subclasses need to override this method to
+   * provide specific preparatory steps.
+   */
+  public void enableSnapshots() throws IOException {
+    return;
+  }
+
+  /**
+   * Returns true if MasterStorage is prepared for snapshots
+   */
+  public boolean isSnapshotsEnabled() {
+    return true;
+  }
+
+  /**
+   * Gets the list of all snapshots.
+   * @return list of SnapshotDescriptions
+   * @throws IOException Storage exception
+   */
+  public List<SnapshotDescription> getSnapshots() throws IOException {
+    return getSnapshots(StorageContext.DATA);
+  }
+
+  public abstract List<SnapshotDescription> getSnapshots(StorageContext ctx) throws IOException;
+
+  /**
+   * Gets snapshot description of a snapshot
+   * @return Snapshot description of a snapshot if found, null otherwise
+   * @throws IOException
+   */
+  public SnapshotDescription getSnapshot(final String snapshotName)
+      throws IOException {
+    return getSnapshot(snapshotName, StorageContext.DATA);
+  }
+
+  public abstract SnapshotDescription getSnapshot(final String snapshotName, StorageContext ctx)
+    throws IOException;
+
+  /**
+   * @return {@link HTableDescriptor} for a snapshot
+   * @param snapshot
+   * @throws IOException if can't read from the storage
+   */
+  public HTableDescriptor getTableDescriptorForSnapshot(final SnapshotDescription snapshot)
+      throws IOException {
+    return getTableDescriptorForSnapshot(snapshot, StorageContext.DATA);
+  }
+
+  public abstract HTableDescriptor getTableDescriptorForSnapshot(final SnapshotDescription
+    snapshot, StorageContext ctx) throws IOException;
+
+  /**
+   * Returns all {@link HRegionInfo} for a snapshot
+   *
+   * @param snapshot
+   * @return
+   * @throws IOException
+   */
+  public Map<String, HRegionInfo> getSnapshotRegions(final SnapshotDescription snapshot)
+      throws IOException {
+    return getSnapshotRegions(snapshot, StorageContext.DATA);
+  }
+
+  public abstract Map<String, HRegionInfo> getSnapshotRegions(final SnapshotDescription snapshot,
+      StorageContext ctx) throws IOException;
+
+  /**
+   * Check to see if the snapshot is one of the current snapshots on the storage.
+   *
+   * @param snapshot
+   * @throws IOException
+   */
+  public boolean snapshotExists(SnapshotDescription snapshot) throws IOException {
+    return snapshotExists(snapshot, StorageContext.DATA);
+  }
+
+  public abstract boolean snapshotExists(SnapshotDescription snapshot, StorageContext ctx)
+      throws IOException;
+
+  public boolean snapshotExists(String snapshotName) throws IOException {
+    return snapshotExists(snapshotName, StorageContext.DATA);
+  }
+
+  public abstract boolean snapshotExists(String snapshotName, StorageContext ctx) throws
+      IOException;
+
+  /**
+   * Cleans up all snapshots.
+   *
+   * @throws IOException if can't reach the storage
+   */
+  public void deleteAllSnapshots() throws IOException {
+    deleteAllSnapshots(StorageContext.DATA);
+  }
+
+  public abstract void deleteAllSnapshots(StorageContext ctx) throws IOException;
+
+  /**
+   * Deletes a snapshot
+   * @param snapshot
+   * @throws SnapshotDoesNotExistException If the specified snapshot does not exist.
+   * @throws IOException For storage IOExceptions
+   */
+  public boolean deleteSnapshot(final SnapshotDescription snapshot) throws IOException {
+    return deleteSnapshot(snapshot, StorageContext.DATA) &&
+        deleteSnapshot(snapshot, StorageContext.TEMP);
+  }
+
+  public boolean deleteSnapshot(final String snapshotName) throws IOException {
+    return deleteSnapshot(snapshotName, StorageContext.DATA) &&
+        deleteSnapshot(snapshotName, StorageContext.TEMP);
+  }
+
+  public abstract boolean deleteSnapshot(final SnapshotDescription snapshot,
+      final StorageContext ctx) throws IOException;
+
+  public abstract boolean deleteSnapshot(final String snapshotName, final StorageContext ctx)
+      throws IOException;
+
+  /**
+   * Deletes old in-progress and/or completed snapshot and prepares for a new one with the same
+   * description
+   *
+   * @param snapshot
+   * @throws IOException for storage IOExceptions
+   */
+  public abstract void prepareSnapshot(SnapshotDescription snapshot) throws IOException;
+
+  /**
+   * In general, a snapshot is created with the following steps:
+   * <ul>
+   *   <li>Initiate a snapshot for a table in TEMP context</li>
+   *   <li>Snapshot and add regions to the snapshot in TEMP</li>
+   *   <li>Consolidate snapshot</li>
+   *   <li>Change context of a snapshot from TEMP to DATA</li>
+   * </ul>
+   * @param htd
+   * @param snapshot
+   * @param monitor
+   * @throws IOException
+   */
+  public void initiateSnapshot(HTableDescriptor htd, SnapshotDescription snapshot, final
+      ForeignExceptionSnare monitor) throws IOException {
+    initiateSnapshot(htd, snapshot, monitor, StorageContext.DATA);
+  }
+
+  public abstract void initiateSnapshot(HTableDescriptor htd, SnapshotDescription snapshot,
+                                        final ForeignExceptionSnare monitor, StorageContext ctx) throws IOException;
+
+  /**
+   * Consolidates added regions and verifies snapshot
+   * @param snapshot
+   * @throws IOException
+   */
+  public void consolidateSnapshot(SnapshotDescription snapshot) throws IOException {
+    consolidateSnapshot(snapshot, StorageContext.DATA);
+  }
+
+  public abstract void consolidateSnapshot(SnapshotDescription snapshot, StorageContext ctx)
+      throws IOException;
+
+  /**
+   * Changes {@link StorageContext} of a snapshot from src to dest
+   *
+   * @param snapshot
+   * @param src Source {@link StorageContext}
+   * @param dest Destination {@link StorageContext}
+   * @return
+   * @throws IOException
+   */
+  public abstract boolean changeSnapshotContext(SnapshotDescription snapshot, StorageContext src,
+                                                StorageContext dest) throws IOException;
+
+  /**
+   * Adds given region to the snapshot.
+   *
+   * @param snapshot
+   * @param hri
+   * @throws IOException
+   */
+  public void addRegionToSnapshot(SnapshotDescription snapshot, HRegionInfo hri)
+      throws IOException {
+    addRegionToSnapshot(snapshot, hri, StorageContext.DATA);
+  }
+
+  public abstract void addRegionToSnapshot(SnapshotDescription snapshot, HRegionInfo hri,
+      StorageContext ctx) throws IOException;
+
+  public void addRegionsToSnapshot(SnapshotDescription snapshot, Collection<HRegionInfo> regions)
+      throws IOException {
+    addRegionsToSnapshot(snapshot, regions, StorageContext.DATA);
+  }
+
+  public abstract void addRegionsToSnapshot(SnapshotDescription snapshot,
+      Collection<HRegionInfo> regions, StorageContext ctx) throws IOException;
+
+  /**
+   * Restores a snapshot to the dest table and returns an instance of {@link SnapshotRestoreMetaChanges}
+   * describing changes required for META.
+   * @param snapshot
+   * @param destHtd
+   * @param monitor
+   * @param status
+   * @return
+   * @throws IOException
+   */
+  public SnapshotRestoreMetaChanges restoreSnapshot(final SnapshotDescription snapshot,
+      final HTableDescriptor destHtd, final ForeignExceptionDispatcher monitor,
+      final MonitoredTask status) throws IOException {
+    return restoreSnapshot(snapshot, StorageContext.DATA, destHtd, monitor, status);
+  }
+
+  public abstract SnapshotRestoreMetaChanges restoreSnapshot(final SnapshotDescription snapshot,
+      final StorageContext snapshotCtx, final HTableDescriptor destHtd,
+      final ForeignExceptionDispatcher monitor, final MonitoredTask status) throws IOException;
+
+  // ==========================================================================
   // PUBLIC Methods - WAL
   // ==========================================================================
 
@@ -320,6 +544,8 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
   // ==========================================================================
   //  PUBLIC Methods - visitors
   // ==========================================================================
+  // TODO: remove implementations. How to visit store files is up to implementation, may use
+  // threadpool etc.
   public void visitStoreFiles(StoreFileVisitor visitor)
       throws IOException {
     visitStoreFiles(StorageContext.DATA, visitor);
@@ -356,6 +582,28 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
     }
   }
 
+  /**
+   * Visit all snapshots on a storage with visitor instance
+   * @param visitor
+   * @throws IOException
+   */
+  public abstract void visitSnapshots(final SnapshotVisitor visitor) throws IOException;
+
+  public abstract void visitSnapshots(StorageContext ctx, final SnapshotVisitor visitor)
+      throws IOException;
+
+  /**
+   * Visit all store files of a snapshot with visitor instance
+   *
+   * @param snapshot
+   * @param ctx
+   * @param visitor
+   * @throws IOException
+   */
+  public abstract void visitSnapshotStoreFiles(SnapshotDescription snapshot, StorageContext ctx,
+                                               SnapshotStoreFileVisitor visitor) throws IOException;
+
+
   // ==========================================================================
   //  PUBLIC Methods - bootstrap
   // ==========================================================================
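The javadoc on initiateSnapshot() above outlines the intended lifecycle: build the snapshot in the
TEMP context, add regions, consolidate, then promote TEMP to DATA. A minimal sketch of that
sequence against the abstract API, assuming a MasterStorage instance and region list are already
available; real callers (presumably the reworked TakeSnapshotHandler) add verification and error
handling:

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.fs.MasterStorage;
import org.apache.hadoop.hbase.fs.StorageContext;
import org.apache.hadoop.hbase.fs.StorageIdentifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

class SnapshotLifecycleSketch {
  static void takeSnapshot(MasterStorage<? extends StorageIdentifier> masterStorage,
      HTableDescriptor htd, SnapshotDescription snapshot,
      Collection<HRegionInfo> regions) throws IOException {
    // ForeignExceptionDispatcher implements ForeignExceptionSnare, so it can serve as the monitor.
    ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(snapshot.getName());

    masterStorage.prepareSnapshot(snapshot);                       // drop stale TEMP/DATA copies
    masterStorage.initiateSnapshot(htd, snapshot, monitor, StorageContext.TEMP);
    masterStorage.addRegionsToSnapshot(snapshot, regions, StorageContext.TEMP);
    masterStorage.consolidateSnapshot(snapshot, StorageContext.TEMP);

    // Promote the finished snapshot from the working (TEMP) context to the completed (DATA) context.
    if (!masterStorage.changeSnapshotContext(snapshot, StorageContext.TEMP, StorageContext.DATA)) {
      throw new IOException("Could not promote snapshot '" + snapshot.getName() + "'");
    }
  }
}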

http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java
index cc324a9..3b2cc9b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/StorageContext.java
@@ -23,6 +23,5 @@ public enum StorageContext {
   TEMP,
   DATA,
   ARCHIVE,
-  SNAPSHOT,
   SIDELINE,
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java
index 2906f91..a59edca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java
@@ -23,8 +23,66 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.fs.legacy.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.fs.legacy.snapshot.SnapshotManifestV2;
 import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 
+/**
+ * This class helps manage legacy layout of directories and files on HDFS for HBase. The directories
+ * are laid out on disk as below (Note: transient files and directories are enclosed with [],
+ * multiple directories, files for namespaces, tables, regions etc. at the same directory depth is
+ * indicated by ...):
+ * <p>
+ * <pre>
+ * Table data           ---&gt;  /hbase/{@value HConstants#BASE_NAMESPACE_DIR}/
+ * Default namespace    ---&gt;    default/
+ * System namespace     ---&gt;    hbase/
+ * Namespace            ---&gt;    ns1/
+ * Table                ---&gt;        table1/
+ * Table details        ---&gt;          {@value LegacyTableDescriptor#TABLEINFO_DIR}/
+ * Table info           ---&gt;            {@value LegacyTableDescriptor#TABLEINFO_FILE_PREFIX}.0000000003
+ * Region name          ---&gt;          region1/
+ * Region details       ---&gt;            {@value #REGION_INFO_FILE}
+ * Column family        ---&gt;            cf1/
+ * Store file           ---&gt;              file1
+ * Store files          ---&gt;              ...
+ * Column families      ---&gt;            .../
+ * Regions              ---&gt;          .../
+ * Tables               ---&gt;        .../
+ * Namespaces           ---&gt;    .../
+ * Temp                 ---&gt;  /hbase/{@value HConstants#HBASE_TEMP_DIRECTORY}/
+ * Base MOB             ---&gt;  /hbase/{@value MobConstants#MOB_DIR_NAME}/
+ * Snapshot             ---&gt;  /hbase/{@value HConstants#SNAPSHOT_DIR_NAME}/
+ * Working              ---&gt;    {@value #SNAPSHOT_TMP_DIR_NAME}/
+ * In progress snapshot ---&gt;      snap5/
+ * Snapshot descriptor  ---&gt;        {@value #SNAPSHOTINFO_FILE}
+ * Snapshot manifest    ---&gt;        {@value SnapshotManifest#DATA_MANIFEST_NAME}
+ * Region manifest      ---&gt;        [{@value SnapshotManifestV2#SNAPSHOT_MANIFEST_PREFIX}region51]
+ * Region manifests     ---&gt;        ...
+ * Snapshots            ---&gt;      .../
+ * Completed snapshot   ---&gt;    snap1/
+ * Snapshot descriptor  ---&gt;        {@value #SNAPSHOTINFO_FILE}
+ * Snapshot manifest    ---&gt;        {@value SnapshotManifest#DATA_MANIFEST_NAME}
+ * OLD snapshot layout  ---&gt;    snap_old/
+ * Snapshot descriptor  ---&gt;      {@value #SNAPSHOTINFO_FILE}
+ * Table details        ---&gt;      {@value LegacyTableDescriptor#TABLEINFO_DIR}/
+ * Table info           ---&gt;        {@value LegacyTableDescriptor#TABLEINFO_FILE_PREFIX}.0000000006
+ * Snapshot region      ---&gt;      region6/
+ * Region details       ---&gt;        {@value #REGION_INFO_FILE}
+ * Column family        ---&gt;        cf3/
+ * Store file           ---&gt;          file3
+ * Store files          ---&gt;          ...
+ * Column families      ---&gt;        .../
+ * Regions              ---&gt;      .../
+ * Logs                 ---&gt;      .logs/
+ * Server name          ---&gt;        server1/
+ * Log files            ---&gt;          logfile1
+ * Snapshots            ---&gt;    .../
+ * Archive              ---&gt;  /hbase/{@value HConstants#HFILE_ARCHIVE_DIRECTORY}/
+ * </pre>
+ * </p>
+ */
 public final class LegacyLayout {
   /** Name of the region info file that resides just under the region directory. */
   public final static String REGION_INFO_FILE = ".regioninfo";
@@ -38,22 +96,87 @@ public final class LegacyLayout {
   /** Temporary subdirectory of the region directory used for compaction output. */
   private static final String REGION_TEMP_DIR = ".tmp";
 
+  // snapshot directory constants
+  /**
+   * The file contains the snapshot basic information and it is under the directory of a snapshot.
+   */
+  public static final String SNAPSHOTINFO_FILE = ".snapshotinfo";
+
+  /** Temporary directory under the snapshot directory to store in-progress snapshots */
+  public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp";
+
   private LegacyLayout() {}
 
   public static Path getDataDir(final Path rootDir) {
     return new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
   }
 
-  public static Path getSidelineDir(Path rootDir) {
+  public static Path getSidelineDir(final Path rootDir) {
     return new Path(rootDir, HConstants.HBCK_SIDELINEDIR_NAME);
   }
 
-  public static Path getSnapshotDir(Path rootDir) {
+  /**
+   * Get the snapshot root directory. All the snapshots are kept under this directory, i.e.
+   * ${hbase.rootdir}/{@value HConstants#SNAPSHOT_DIR_NAME}
+   * @param rootDir hbase root directory
+   * @return the base directory in which all snapshots are kept
+   */
+  public static Path getSnapshotDir(final Path rootDir) {
     return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
   }
 
-  public static Path getSnapshotDir(Path baseSnapshotDir, String snapshotName) {
-    return new Path(baseSnapshotDir, snapshotName);
+  /**
+   * Get the directory for a completed snapshot. This directory is a sub-directory of snapshot root
+   * directory and all the data files for a snapshot are kept under this directory.
+   * @param rootDir hbase root directory
+   * @param snapshotName name of the snapshot being taken
+   * @return the final directory for the completed snapshot
+   */
+  public static Path getCompletedSnapshotDir(final Path rootDir, final String snapshotName) {
+    return new Path(getSnapshotDir(rootDir), snapshotName);
+  }
+
+  /**
+   * Get the directory for a specified snapshot. This directory is a sub-directory of snapshot root
+   * directory and all the data files for a snapshot are kept under this directory.
+   * @param rootDir hbase root directory
+   * @param snapshot snapshot description
+   * @return the final directory for the completed snapshot
+   */
+  public static Path getCompletedSnapshotDir(final Path rootDir,
+                                             final SnapshotDescription snapshot) {
+    return getCompletedSnapshotDir(rootDir, snapshot.getName());
+  }
+
+  /**
+   * Get the general working directory for snapshots - where they are built, where they are
+   * temporarily copied on export, etc.
+   * i.e. ${hbase.rootdir}/{@value HConstants#SNAPSHOT_DIR_NAME}/{@value #SNAPSHOT_TMP_DIR_NAME}
+   * @param rootDir root directory of the HBase installation
+   * @return Path to the snapshot tmp directory, relative to the passed root directory
+   */
+  public static Path getWorkingSnapshotDir(final Path rootDir) {
+    return new Path(getSnapshotDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
+  }
+
+  /**
+   * Get the directory to build a snapshot, before it is finalized
+   * @param rootDir root directory of the hbase installation
+   * @param snapshotName name of the snapshot
+   * @return {@link Path} where one can build a snapshot
+   */
+  public static Path getWorkingSnapshotDir(final Path rootDir, final String snapshotName) {
+    return new Path(getWorkingSnapshotDir(rootDir), snapshotName);
+  }
+
+  /**
+   * Get the directory to build a snapshot, before it is finalized
+   * @param rootDir root directory of the hbase installation
+   * @param snapshot snapshot that will be built
+   * @return {@link Path} where one can build a snapshot
+   */
+  public static Path getWorkingSnapshotDir(final Path rootDir, final SnapshotDescription snapshot) {
+    return getWorkingSnapshotDir(rootDir, snapshot.getName());
   }
 
   public static Path getArchiveDir(Path rootDir) {
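The new helpers simply compose paths under the snapshot root. A small sketch of the resulting
layout, using a hypothetical root directory; the printed paths follow from
HConstants.SNAPSHOT_DIR_NAME (".hbase-snapshot") and the SNAPSHOT_TMP_DIR_NAME (".tmp") constant
added above:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.fs.legacy.LegacyLayout;

class SnapshotLayoutSketch {
  public static void main(String[] args) {
    Path rootDir = new Path("hdfs://nn/hbase");  // hypothetical hbase.rootdir

    // Completed snapshots live directly under the snapshot root.
    Path completed = LegacyLayout.getCompletedSnapshotDir(rootDir, "snap1");
    // In-progress snapshots are built under the .tmp working directory.
    Path working = LegacyLayout.getWorkingSnapshotDir(rootDir, "snap5");

    System.out.println(completed);  // hdfs://nn/hbase/.hbase-snapshot/snap1
    System.out.println(working);    // hdfs://nn/hbase/.hbase-snapshot/.tmp/snap5
  }
}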

http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
index aa4de2c..043c0ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
@@ -24,7 +24,9 @@ import java.io.FileNotFoundException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -32,9 +34,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.ClusterId;
@@ -45,12 +49,24 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileLinkCleaner;
 import org.apache.hadoop.hbase.fs.legacy.cleaner.LogCleaner;
+import org.apache.hadoop.hbase.fs.legacy.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.fs.legacy.snapshot.SnapshotHFileCleaner;
+import org.apache.hadoop.hbase.fs.legacy.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.SnapshotRestoreMetaChanges;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.StorageContext;
@@ -65,10 +81,13 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 
 @InterfaceAudience.Private
 public class LegacyMasterStorage extends MasterStorage<LegacyPathIdentifier> {
+  // TODO: Modify all APIs to use ExecutorService and support parallel HDFS queries
+
   private static final Log LOG = LogFactory.getLog(LegacyMasterStorage.class);
 
   private final Path sidelineDir;
   private final Path snapshotDir;
+  private final Path tmpSnapshotDir;
   private final Path archiveDataDir;
   private final Path archiveDir;
   private final Path tmpDataDir;
@@ -102,6 +121,7 @@ public class LegacyMasterStorage extends MasterStorage<LegacyPathIdentifier> {
     // base directories
     this.sidelineDir = LegacyLayout.getSidelineDir(rootDir.path);
     this.snapshotDir = LegacyLayout.getSnapshotDir(rootDir.path);
+    this.tmpSnapshotDir = LegacyLayout.getWorkingSnapshotDir(rootDir.path);
     this.archiveDir = LegacyLayout.getArchiveDir(rootDir.path);
     this.archiveDataDir = LegacyLayout.getDataDir(this.archiveDir);
     this.dataDir = LegacyLayout.getDataDir(rootDir.path);
@@ -128,39 +148,6 @@ public class LegacyMasterStorage extends MasterStorage<LegacyPathIdentifier> {
     return chores;
   }
 
-  /**
-   * This method modifies chores configuration for snapshots. Please call this method before
-   * instantiating and scheduling list of chores with {@link #getChores(Stoppable, Map)}.
-   */
-  @Override
-  public void enableSnapshots() {
-    super.enableSnapshots();
-    if (!isSnapshotsEnabled()) {
-      // Extract cleaners from conf
-      Set<String> hfileCleaners = new HashSet<>();
-      String[] cleaners = getConfiguration().getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
-      if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
-
-      // add snapshot related cleaners
-      hfileCleaners.add(SnapshotHFileCleaner.class.getName());
-      hfileCleaners.add(HFileLinkCleaner.class.getName());
-
-      // Set cleaners conf
-      getConfiguration().setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
-          hfileCleaners.toArray(new String[hfileCleaners.size()]));
-    }
-  }
-
-  @Override
-  public boolean isSnapshotsEnabled() {
-    // Extract cleaners from conf
-    Set<String> hfileCleaners = new HashSet<>();
-    String[] cleaners = getConfiguration().getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
-    if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
-    return hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
-        hfileCleaners.contains(HFileLinkCleaner.class.getName());
-  }
-
   // ==========================================================================
   //  PUBLIC Methods - Namespace related
   // ==========================================================================
@@ -332,6 +319,367 @@ public class LegacyMasterStorage extends MasterStorage<LegacyPathIdentifier> {
   }
 
   // ==========================================================================
+  //  Methods - Snapshot related
+  // ==========================================================================
+
+  /**
+   * Filter that only accepts completed snapshot directories
+   */
+  public static class CompletedSnapshotDirectoriesFilter extends FSUtils.BlackListDirFilter {
+    /**
+     * @param fs
+     */
+    public CompletedSnapshotDirectoriesFilter(FileSystem fs) {
+      super(fs, Collections.singletonList(LegacyLayout.SNAPSHOT_TMP_DIR_NAME));
+    }
+  }
+
+  /**
+   * This method modifies chores configuration for snapshots. Please call this method before
+   * instantiating and scheduling list of chores with {@link #getChores(Stoppable, Map)}.
+   */
+  @Override
+  public void enableSnapshots() throws IOException {
+    super.enableSnapshots();
+
+    // check if an older version of snapshot directory was present
+    Path oldSnapshotDir = new Path(getRootContainer().path, HConstants.OLD_SNAPSHOT_DIR_NAME);
+    List<SnapshotDescription> oldSnapshots = getSnapshotDescriptions(oldSnapshotDir,
+        new CompletedSnapshotDirectoriesFilter(getFileSystem()));
+    if (oldSnapshots != null && !oldSnapshots.isEmpty()) {
+      LOG.error("Snapshots from an earlier release were found under '" + 
oldSnapshotDir + "'.");
+      LOG.error("Please rename the directory ");
+    }
+
+    // TODO: add check for old snapshot dir that existed just before HBASE-14439
+
+    if (!isSnapshotsEnabled()) {
+      // Extract cleaners from conf
+      Set<String> hfileCleaners = new HashSet<>();
+      String[] cleaners = getConfiguration().getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
+      if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
+
+      // add snapshot related cleaners
+      hfileCleaners.add(SnapshotHFileCleaner.class.getName());
+      hfileCleaners.add(HFileLinkCleaner.class.getName());
+
+      // Set cleaners conf
+      getConfiguration().setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
+          hfileCleaners.toArray(new String[hfileCleaners.size()]));
+    }
+  }
+
+  @Override
+  public boolean isSnapshotsEnabled() {
+    // Extract cleaners from conf
+    Set<String> hfileCleaners = new HashSet<>();
+    String[] cleaners = getConfiguration().getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
+    if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
+    return hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
+        hfileCleaners.contains(HFileLinkCleaner.class.getName());
+  }
+
+  private List<SnapshotDescription> getSnapshotDescriptions(final Path dir,
+      final PathFilter filter) throws IOException {
+    List<SnapshotDescription> snapshotDescs = new ArrayList<>();
+    if (!FSUtils.isExists(getFileSystem(), dir)) {
+      return snapshotDescs;
+    }
+
+    for (FileStatus fileStatus : FSUtils.listStatus(getFileSystem(), dir, filter)) {
+      Path info = new Path(fileStatus.getPath(), LegacyLayout.SNAPSHOTINFO_FILE);
+      if (!FSUtils.isExists(getFileSystem(), info)) {
+        LOG.error("Snapshot information for '" + fileStatus.getPath() + "' 
doesn't exist!");
+        continue;
+      }
+
+      FSDataInputStream in = null;
+      try {
+        in = getFileSystem().open(info);
+        SnapshotDescription desc = SnapshotDescription.parseFrom(in);
+        snapshotDescs.add(desc);
+      } catch (IOException e) {
+        LOG.warn("Found a corrupted snapshot '" + fileStatus.getPath() + "'.", 
e);
+      } finally {
+        if (in != null) {
+          in.close();
+        }
+      }
+    }
+    return snapshotDescs;
+  }
+
+  @Override
+  public List<SnapshotDescription> getSnapshots(StorageContext ctx) throws IOException {
+    return getSnapshotDescriptions(getSnapshotDirFromContext(ctx),
+        new CompletedSnapshotDirectoriesFilter(getFileSystem()));
+  }
+
+  @Override
+  public SnapshotDescription getSnapshot(String snapshotName, StorageContext ctx)
+      throws IOException {
+    SnapshotDescription retSnapshot = null;
+
+    Path snapshotDir = getSnapshotDirFromContext(ctx, snapshotName);
+    Path info = new Path(snapshotDir, LegacyLayout.SNAPSHOTINFO_FILE);
+    if (!FSUtils.isExists(getFileSystem(), info)) {
+      LOG.warn("Snapshot information for '" + snapshotName + "' doesn't 
exist!");
+      return retSnapshot;
+    }
+
+    FSDataInputStream in = null;
+    try {
+      in = getFileSystem().open(info);
+      retSnapshot = SnapshotDescription.parseFrom(in);
+    } catch (IOException e) {
+      LOG.warn("Found a corrupted snapshot '" + snapshotName + "'.", e);
+    } finally {
+      if (in != null) {
+        in.close();
+      }
+    }
+
+    return retSnapshot;
+  }
+
+  @Override
+  public void visitSnapshots(final SnapshotVisitor visitor) throws IOException {
+    visitSnapshots(StorageContext.DATA, visitor);
+  }
+
+  @Override
+  public void visitSnapshots(StorageContext ctx, final SnapshotVisitor visitor) throws IOException {
+    for (SnapshotDescription s : getSnapshots(ctx)) {
+      visitor.visitSnapshot(s.getName(), s, ctx);
+    }
+  }
+
+  private SnapshotManifest getSnapshotManifest(SnapshotDescription snapshot, StorageContext ctx)
+    throws IOException {
+    Path snapshotDir = getSnapshotDirFromContext(ctx, snapshot.getName());
+    return SnapshotManifest.open(getConfiguration(), getFileSystem(), snapshotDir, snapshot);
+  }
+
+  @Override
+  public HTableDescriptor getTableDescriptorForSnapshot(SnapshotDescription snapshot,
+      StorageContext ctx) throws IOException {
+    SnapshotManifest manifest = getSnapshotManifest(snapshot, ctx);
+    return manifest.getTableDescriptor();
+  }
+
+  private List<SnapshotRegionManifest> getSnapshotRegionManifests(SnapshotDescription snapshot,
+      StorageContext ctx) throws IOException {
+    SnapshotManifest manifest = getSnapshotManifest(snapshot, ctx);
+    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
+    if (regionManifests == null) {
+      regionManifests = new ArrayList<>();
+    }
+    return regionManifests;
+  }
+
+  @Override
+  public Map<String, HRegionInfo> getSnapshotRegions(SnapshotDescription snapshot,
+      StorageContext ctx) throws IOException {
+    Map<String, HRegionInfo> retRegions = new HashMap<>();
+    for (SnapshotRegionManifest regionManifest: getSnapshotRegionManifests(snapshot, ctx)) {
+      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
+      retRegions.put(hri.getEncodedName(), hri);
+    }
+    return retRegions;
+  }
+
+  /**
+   * Utility function for visiting/listing store files for a snapshot.
+   * @param snapshot
+   * @param ctx
+   * @param regionName If not null, then store files for the matching region are visited/returned
+   * @param familyName If not null, then store files for the matching family are visited/returned
+   * @param visitor If not null, visitor is called on each store file entry
+   * @return List of store files based on suggested filters
+   * @throws IOException
+   */
+  private List<SnapshotRegionManifest.StoreFile> visitAndGetSnapshotStoreFiles(
+      SnapshotDescription snapshot, StorageContext ctx, String regionName, String familyName,
+      SnapshotStoreFileVisitor visitor) throws IOException {
+    List<SnapshotRegionManifest.StoreFile> snapshotStoreFiles = new ArrayList<>();
+
+    for (SnapshotRegionManifest regionManifest: getSnapshotRegionManifests(snapshot, ctx)) {
+      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
+
+      // check for region name
+      if (regionName != null) {
+        if (!hri.getEncodedName().equals(regionName)) {
+          continue;
+        }
+      }
+
+      for (SnapshotRegionManifest.FamilyFiles familyFiles: regionManifest.getFamilyFilesList()) {
+        String family = familyFiles.getFamilyName().toStringUtf8();
+        // check for family name
+        if (familyName != null && !familyName.equals(family)) {
+          continue;
+        }
+
+        List<SnapshotRegionManifest.StoreFile> storeFiles = familyFiles.getStoreFilesList();
+        snapshotStoreFiles.addAll(storeFiles);
+
+        if (visitor != null) {
+          for(SnapshotRegionManifest.StoreFile storeFile: storeFiles) {
+            visitor.visitSnapshotStoreFile(snapshot, ctx, hri, family, storeFile);
+          }
+        }
+      }
+    }
+
+    return snapshotStoreFiles;
+  }
+
+  @Override
+  public void visitSnapshotStoreFiles(SnapshotDescription snapshot, StorageContext ctx,
+      SnapshotStoreFileVisitor visitor) throws IOException {
+    visitAndGetSnapshotStoreFiles(snapshot, ctx, null, null, visitor);
+  }
+
+  @Override
+  public boolean snapshotExists(SnapshotDescription snapshot, StorageContext ctx)
+      throws IOException {
+    return snapshotExists(snapshot.getName(), ctx);
+  }
+
+  @Override
+  public boolean snapshotExists(String snapshotName, StorageContext ctx) throws IOException {
+    return getSnapshot(snapshotName, ctx) != null;
+  }
+
+  @Override
+  public void deleteAllSnapshots(StorageContext ctx) throws IOException {
+    Path snapshotDir = getSnapshotDirFromContext(ctx);
+    if (!FSUtils.deleteDirectory(getFileSystem(), snapshotDir)) {
+      LOG.warn("Couldn't delete working snapshot directory '" + snapshotDir + 
".");
+    }
+  }
+
+  private void deleteSnapshotDir(Path snapshotDir) throws IOException {
+    LOG.debug("Deleting snapshot directory '" + snapshotDir + "'.");
+    if (!FSUtils.deleteDirectory(getFileSystem(), snapshotDir)) {
+      throw new HBaseSnapshotException("Failed to delete snapshot directory '" +
+          snapshotDir + "'.");
+    }
+  }
+
+  @Override
+  public boolean deleteSnapshot(final SnapshotDescription snapshot, final StorageContext ctx)
+      throws IOException {
+    return deleteSnapshot(snapshot.getName(), ctx);
+  }
+
+  @Override
+  public boolean deleteSnapshot(final String snapshotName, final StorageContext ctx)
+      throws IOException {
+    deleteSnapshotDir(getSnapshotDirFromContext(ctx, snapshotName));
+    return false;
+  }
+
+  @Override
+  public void prepareSnapshot(SnapshotDescription snapshot) throws IOException {
+    if (snapshot == null) return;
+    deleteSnapshot(snapshot);
+    Path snapshotDir = getSnapshotDirFromContext(StorageContext.TEMP, snapshot.getName());
+    if (getFileSystem().mkdirs(snapshotDir)) {
+      throw new SnapshotCreationException("Couldn't create working directory '" + snapshotDir +
+          "' for snapshot", ProtobufUtil.createSnapshotDesc(snapshot));
+    }
+  }
+
+  @Override
+  public void initiateSnapshot(HTableDescriptor htd, SnapshotDescription snapshot,
+                               final ForeignExceptionSnare monitor, StorageContext ctx) throws IOException {
+    Path snapshotDir = getSnapshotDirFromContext(ctx, snapshot.getName());
+
+    // write down the snapshot info in the working directory
+    writeSnapshotInfo(snapshot, snapshotDir);
+
+    // create manifest
+    SnapshotManifest manifest = SnapshotManifest.create(getConfiguration(), getFileSystem(),
+        snapshotDir, snapshot, monitor);
+    manifest.addTableDescriptor(htd);
+  }
+
+  @Override
+  public void consolidateSnapshot(SnapshotDescription snapshot, StorageContext ctx)
+      throws IOException {
+    SnapshotManifest manifest = getSnapshotManifest(snapshot, ctx);
+    manifest.consolidate();
+  }
+
+  @Override
+  public boolean changeSnapshotContext(SnapshotDescription snapshot, StorageContext src,
+      StorageContext dest) throws IOException {
+    Path srcDir = getSnapshotDirFromContext(src, snapshot.getName());
+    Path destDir = getSnapshotDirFromContext(dest, snapshot.getName());
+    return getFileSystem().rename(srcDir, destDir);
+  }
+
+  @Override
+  public void addRegionToSnapshot(SnapshotDescription snapshot, HRegionInfo hri,
+                                  StorageContext ctx) throws IOException {
+    SnapshotManifest manifest = getSnapshotManifest(snapshot, ctx);
+    Path tableDir = LegacyLayout.getTableDir(LegacyLayout.getDataDir(getRootContainer().path),
+        hri.getTable());
+    manifest.addRegion(tableDir, hri);
+  }
+
+  @Override
+  public void addRegionsToSnapshot(SnapshotDescription snapshot, Collection<HRegionInfo> regions,
+      StorageContext ctx) throws IOException {
+    // TODO: use ExecutorService to add regions
+    for (HRegionInfo r: regions) {
+      addRegionToSnapshot(snapshot, r, ctx);
+    }
+  }
+
+  @Override
+  public SnapshotRestoreMetaChanges restoreSnapshot(final SnapshotDescription snapshot,
+      final StorageContext snapshotCtx, final HTableDescriptor destHtd,
+      final ForeignExceptionDispatcher monitor, final MonitoredTask status) throws IOException {
+    // TODO: currently snapshotCtx is not used, modify RestoreSnapshotHelper to take ctx as an input
+    RestoreSnapshotHelper restoreSnapshotHelper = new RestoreSnapshotHelper(this, snapshot,
+        destHtd, monitor, status);
+    return restoreSnapshotHelper.restoreStorageRegions();
+  }
+
+  /**
+   * Write the snapshot description into the working directory of a snapshot
+   *
+   * @param snapshot description of the snapshot being taken
+   * @param workingDir working directory of the snapshot
+   * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
+   *           failure
+   */
+  // TODO: After ExportSnapshot refactoring make this private if not referred from outside package
+  public void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir)
+      throws IOException {
+    FsPermission perms = FSUtils.getFilePermissions(getFileSystem(), getFileSystem().getConf(),
+        HConstants.DATA_FILE_UMASK_KEY);
+    Path snapshotInfo = new Path(workingDir, LegacyLayout.SNAPSHOTINFO_FILE);
+    try {
+      FSDataOutputStream out = FSUtils.create(getFileSystem(), snapshotInfo, perms, true);
+      try {
+        snapshot.writeTo(out);
+      } finally {
+        out.close();
+      }
+    } catch (IOException e) {
+      // if we get an exception, try to remove the snapshot info
+      if (!getFileSystem().delete(snapshotInfo, false)) {
+        String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
+        LOG.error(msg);
+        throw new IOException(msg);
+      }
+    }
+  }
+
+  // ==========================================================================
   // PUBLIC - WAL
   // ==========================================================================
   @Override
@@ -653,12 +1001,27 @@ public class LegacyMasterStorage extends MasterStorage<LegacyPathIdentifier> {
     return new LegacyPathIdentifier(tmpDir);
   }
 
+  protected Path getSnapshotDirFromContext(StorageContext ctx, String snapshot) {
+    switch(ctx) {
+      case TEMP: return LegacyLayout.getWorkingSnapshotDir(getRootContainer().path, snapshot);
+      case DATA: return LegacyLayout.getCompletedSnapshotDir(getRootContainer().path, snapshot);
+      default: throw new RuntimeException("Invalid context: " + ctx);
+    }
+  }
+
+  protected Path getSnapshotDirFromContext(StorageContext ctx) {
+    switch (ctx) {
+      case TEMP: return tmpSnapshotDir;
+      case DATA: return snapshotDir;
+      default: throw new RuntimeException("Invalid context: " + ctx);
+    }
+  }
+
   protected Path getBaseDirFromContext(StorageContext ctx) {
     switch (ctx) {
       case TEMP: return tmpDataDir;
       case DATA: return dataDir;
       case ARCHIVE: return archiveDataDir;
-      case SNAPSHOT: return snapshotDir;
       case SIDELINE: return sidelineDir;
       default: throw new RuntimeException("Invalid context: " + ctx);
     }
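With getSnapshots() and visitSnapshots() implemented here, enumerating completed snapshots no
longer requires walking the filesystem directly. A hedged sketch of a caller-side listing using
the new visitor interface; it relies only on the signatures added in this patch and assumes the
MasterStorage instance is obtained elsewhere:

import java.io.IOException;
import org.apache.hadoop.hbase.fs.MasterStorage;
import org.apache.hadoop.hbase.fs.StorageContext;
import org.apache.hadoop.hbase.fs.StorageIdentifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

class ListSnapshotsSketch {
  // Prints the name and table of every completed (DATA context) snapshot.
  static void listSnapshots(MasterStorage<? extends StorageIdentifier> masterStorage)
      throws IOException {
    masterStorage.visitSnapshots(new MasterStorage.SnapshotVisitor() {
      @Override
      public void visitSnapshot(String snapshotName, SnapshotDescription snapshot,
          StorageContext ctx) {
        System.out.println(snapshotName + " (table=" + snapshot.getTable() + ", ctx=" + ctx + ")");
      }
    });
  }
}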
