http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
deleted file mode 100755
index 1dd50de..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ /dev/null
@@ -1,635 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.AbstractList;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.util.ReflectionUtils;
-
-/**
- * This class represents a warehouse where data of Hive tables is stored
- */
-public class Warehouse {
-  private Path whRoot;
-  private final Configuration conf;
-  private final String whRootString;
-
-  public static final Logger LOG = LoggerFactory.getLogger("hive.metastore.warehouse");
-
-  private MetaStoreFS fsHandler = null;
-  private boolean storageAuthCheck = false;
-  private ReplChangeManager cm = null;
-
-  public Warehouse(Configuration conf) throws MetaException {
-    this.conf = conf;
-    whRootString = HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE);
-    if (StringUtils.isBlank(whRootString)) {
-      throw new MetaException(HiveConf.ConfVars.METASTOREWAREHOUSE.varname
-          + " is not set in the config or blank");
-    }
-    fsHandler = getMetaStoreFsHandler(conf);
-    cm = ReplChangeManager.getInstance((HiveConf)conf);
-    storageAuthCheck = HiveConf.getBoolVar(conf,
-        HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS);
-  }
-
-  private MetaStoreFS getMetaStoreFsHandler(Configuration conf)
-      throws MetaException {
-    String handlerClassStr = HiveConf.getVar(conf,
-        HiveConf.ConfVars.HIVE_METASTORE_FS_HANDLER_CLS);
-    try {
-      Class<? extends MetaStoreFS> handlerClass = (Class<? extends MetaStoreFS>) Class
-          .forName(handlerClassStr, true, JavaUtils.getClassLoader());
-      MetaStoreFS handler = ReflectionUtils.newInstance(handlerClass, conf);
-      return handler;
-    } catch (ClassNotFoundException e) {
-      throw new MetaException("Error in loading MetaStoreFS handler."
-          + e.getMessage());
-    }
-  }
-
-
-  /**
-   * Helper functions to convert IOException to MetaException
-   */
-  public static FileSystem getFs(Path f, Configuration conf) throws MetaException {
-    try {
-      return f.getFileSystem(conf);
-    } catch (IOException e) {
-      MetaStoreUtils.logAndThrowMetaException(e);
-    }
-    return null;
-  }
-
-  public FileSystem getFs(Path f) throws MetaException {
-    return getFs(f, conf);
-  }
-
-
-  /**
-   * Hadoop File System reverse lookups paths with raw ip addresses The File
-   * System URI always contains the canonical DNS name of the Namenode.
-   * Subsequently, operations on paths with raw ip addresses cause an exception
-   * since they don't match the file system URI.
-   *
-   * This routine solves this problem by replacing the scheme and authority of a
-   * path with the scheme and authority of the FileSystem that it maps to.
-   *
-   * @param path
-   *          Path to be canonicalized
-   * @return Path with canonical scheme and authority
-   */
-  public static Path getDnsPath(Path path, Configuration conf) throws MetaException {
-    FileSystem fs = getFs(path, conf);
-    return (new Path(fs.getUri().getScheme(), fs.getUri().getAuthority(), path
-        .toUri().getPath()));
-  }
-
-  public Path getDnsPath(Path path) throws MetaException {
-    return getDnsPath(path, conf);
-  }
-
-  /**
-   * Resolve the configured warehouse root dir with respect to the configuration
-   * This involves opening the FileSystem corresponding to the warehouse root
-   * dir (but that should be ok given that this is only called during DDL
-   * statements for non-external tables).
-   */
-  public Path getWhRoot() throws MetaException {
-    if (whRoot != null) {
-      return whRoot;
-    }
-    whRoot = getDnsPath(new Path(whRootString));
-    return whRoot;
-  }
-
-  public Path getDatabasePath(Database db) throws MetaException {
-    if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
-      return getWhRoot();
-    }
-    return new Path(db.getLocationUri());
-  }
-
-  public Path getDefaultDatabasePath(String dbName) throws MetaException {
-    if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
-      return getWhRoot();
-    }
-    return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
-  }
-
-  /**
-   * Returns the default location of the table path using the parent database's location
-   * @param db Database where the table is created
-   * @param tableName table name
-   * @return
-   * @throws MetaException
-   */
-  public Path getDefaultTablePath(Database db, String tableName)
-      throws MetaException {
-    return getDnsPath(new Path(getDatabasePath(db), MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
-  }
-
-  public static String getQualifiedName(Table table) {
-    return table.getDbName() + "." + table.getTableName();
-  }
-
-  public static String getQualifiedName(Partition partition) {
-    return partition.getDbName() + "." + partition.getTableName() + partition.getValues();
-  }
-
-  public boolean mkdirs(Path f) throws MetaException {
-    FileSystem fs = null;
-    try {
-      fs = getFs(f);
-      return FileUtils.mkdir(fs, f, conf);
-    } catch (IOException e) {
-      MetaStoreUtils.logAndThrowMetaException(e);
-    }
-    return false;
-  }
-
-  public boolean renameDir(Path sourcePath, Path destPath, boolean needCmRecycle) throws MetaException {
-    try {
-      if (needCmRecycle) {
-        // Copy the source files to cmroot. As the client will move the source files to another
-        // location, we should make a copy of the files to cmroot instead of moving it.
-        cm.recycle(sourcePath, RecycleType.COPY, true);
-      }
-      FileSystem fs = getFs(sourcePath);
-      return FileUtils.rename(fs, sourcePath, destPath, conf);
-    } catch (Exception ex) {
-      MetaStoreUtils.logAndThrowMetaException(ex);
-    }
-    return false;
-  }
-
-  void addToChangeManagement(Path file) throws MetaException {
-    cm.recycle(file, RecycleType.COPY, true);
-  }
-
-  public boolean deleteDir(Path f, boolean recursive) throws MetaException {
-    return deleteDir(f, recursive, false);
-  }
-
-  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
-    cm.recycle(f, RecycleType.MOVE, ifPurge);
-    FileSystem fs = getFs(f);
-    return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
-  }
-
-  public void recycleDirToCmPath(Path f, boolean ifPurge) throws MetaException {
-    cm.recycle(f, RecycleType.MOVE, ifPurge);
-    return;
-  }
-
-  public boolean isEmpty(Path path) throws IOException, MetaException {
-    ContentSummary contents = getFs(path).getContentSummary(path);
-    if (contents != null && contents.getFileCount() == 0 && contents.getDirectoryCount() == 1) {
-      return true;
-    }
-    return false;
-  }
-
-  public boolean isWritable(Path path) throws IOException {
-    if (!storageAuthCheck) {
-      // no checks for non-secure hadoop installations
-      return true;
-    }
-    if (path == null) { //what??!!
-      return false;
-    }
-    final FileStatus stat;
-    final FileSystem fs;
-    try {
-      fs = getFs(path);
-      stat = fs.getFileStatus(path);
-      HdfsUtils.checkFileAccess(fs, stat, FsAction.WRITE);
-      return true;
-    } catch (FileNotFoundException fnfe){
-      // File named by path doesn't exist; nothing to validate.
-      return true;
-    } catch (Exception e) {
-      // all other exceptions are considered as emanating from
-      // unauthorized accesses
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Exception when checking if path (" + path + ")", e);
-      }
-      return false;
-    }
-  }
-
-  /*
-  // NOTE: This is for generating the internal path name for partitions. Users
-  // should always use the MetaStore API to get the path name for a partition.
-  // Users should not directly take partition values and turn it into a path
-  // name by themselves, because the logic below may change in the future.
-  //
-  // In the future, it's OK to add new chars to the escape list, and old data
-  // won't be corrupt, because the full path name in metastore is stored.
-  // In that case, Hive will continue to read the old data, but when it creates
-  // new partitions, it will use new names.
-  static BitSet charToEscape = new BitSet(128);
-  static {
-    for (char c = 0; c < ' '; c++) {
-      charToEscape.set(c);
-    }
-    char[] clist = new char[] { '"', '#', '%', '\'', '*', '/', ':', '=', '?',
-        '\\', '\u00FF' };
-    for (char c : clist) {
-      charToEscape.set(c);
-    }
-  }
-
-  static boolean needsEscaping(char c) {
-    return c >= 0 && c < charToEscape.size() && charToEscape.get(c);
-  }
-  */
-
-  static String escapePathName(String path) {
-    return FileUtils.escapePathName(path);
-  }
-
-  static String unescapePathName(String path) {
-    return FileUtils.unescapePathName(path);
-  }
-
-  /**
-   * Given a partition specification, return the path corresponding to the
-   * partition spec. By default, the specification does not include dynamic partitions.
-   * @param spec
-   * @return string representation of the partition specification.
-   * @throws MetaException
-   */
-  public static String makePartPath(Map<String, String> spec)
-      throws MetaException {
-    return makePartName(spec, true);
-  }
-
-  /**
-   * Makes a partition name from a specification
-   * @param spec
-   * @param addTrailingSeperator if true, adds a trailing separator e.g. 'ds=1/'
-   * @return partition name
-   * @throws MetaException
-   */
-  public static String makePartName(Map<String, String> spec,
-      boolean addTrailingSeperator)
-      throws MetaException {
-    StringBuilder suffixBuf = new StringBuilder();
-    int i = 0;
-    for (Entry<String, String> e : spec.entrySet()) {
-      if (e.getValue() == null || e.getValue().length() == 0) {
-        throw new MetaException("Partition spec is incorrect. " + spec);
-      }
-      if (i>0) {
-        suffixBuf.append(Path.SEPARATOR);
-      }
-      suffixBuf.append(escapePathName(e.getKey()));
-      suffixBuf.append('=');
-      suffixBuf.append(escapePathName(e.getValue()));
-      i++;
-    }
-    if (addTrailingSeperator) {
-      suffixBuf.append(Path.SEPARATOR);
-    }
-    return suffixBuf.toString();
-  }
-  /**
-   * Given a dynamic partition specification, return the path corresponding to the
-   * static part of partition specification. This is basically a copy of makePartName
-   * but we get rid of MetaException since it is not serializable.
-   * @param spec
-   * @return string representation of the static part of the partition specification.
-   */
-  public static String makeDynamicPartName(Map<String, String> spec) {
-    StringBuilder suffixBuf = new StringBuilder();
-    for (Entry<String, String> e : spec.entrySet()) {
-      if (e.getValue() != null && e.getValue().length() > 0) {
-        suffixBuf.append(escapePathName(e.getKey()));
-        suffixBuf.append('=');
-        suffixBuf.append(escapePathName(e.getValue()));
-        suffixBuf.append(Path.SEPARATOR);
-      } else { // stop once we see a dynamic partition
-        break;
-      }
-    }
-    return suffixBuf.toString();
-  }
-
-  static final Pattern pat = Pattern.compile("([^/]+)=([^/]+)");
-
-  private static final Pattern slash = Pattern.compile("/");
-
-  /**
-   * Extracts values from partition name without the column names.
-   * @param name Partition name.
-   * @param result The result. Must be pre-sized to the expected number of columns.
-   */
-  public static AbstractList<String> makeValsFromName(
-      String name, AbstractList<String> result) throws MetaException {
-    assert name != null;
-    String[] parts = slash.split(name, 0);
-    if (result == null) {
-      result = new ArrayList<>(parts.length);
-      for (int i = 0; i < parts.length; ++i) {
-        result.add(null);
-      }
-    } else if (parts.length != result.size()) {
-      throw new MetaException(
-          "Expected " + result.size() + " components, got " + parts.length + " (" + name + ")");
-    }
-    for (int i = 0; i < parts.length; ++i) {
-      int eq = parts[i].indexOf('=');
-      if (eq <= 0) {
-        throw new MetaException("Unexpected component " + parts[i]);
-      }
-      result.set(i, unescapePathName(parts[i].substring(eq + 1)));
-    }
-    return result;
-  }
-
-  public static LinkedHashMap<String, String> makeSpecFromName(String name)
-      throws MetaException {
-    if (name == null || name.isEmpty()) {
-      throw new MetaException("Partition name is invalid. " + name);
-    }
-    LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
-    makeSpecFromName(partSpec, new Path(name));
-    return partSpec;
-  }
-
-  public static void makeSpecFromName(Map<String, String> partSpec, Path currPath) {
-    List<String[]> kvs = new ArrayList<String[]>();
-    do {
-      String component = currPath.getName();
-      Matcher m = pat.matcher(component);
-      if (m.matches()) {
-        String k = unescapePathName(m.group(1));
-        String v = unescapePathName(m.group(2));
-        String[] kv = new String[2];
-        kv[0] = k;
-        kv[1] = v;
-        kvs.add(kv);
-      }
-      currPath = currPath.getParent();
-    } while (currPath != null && !currPath.getName().isEmpty());
-
-    // reverse the list since we checked the part from leaf dir to table's base dir
-    for (int i = kvs.size(); i > 0; i--) {
-      partSpec.put(kvs.get(i - 1)[0], kvs.get(i - 1)[1]);
-    }
-  }
-
-  public static Map<String, String> makeEscSpecFromName(String name) throws MetaException {
-
-    if (name == null || name.isEmpty()) {
-      throw new MetaException("Partition name is invalid. " + name);
-    }
-    LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
-
-    Path currPath = new Path(name);
-
-    List<String[]> kvs = new ArrayList<String[]>();
-    do {
-      String component = currPath.getName();
-      Matcher m = pat.matcher(component);
-      if (m.matches()) {
-        String k = m.group(1);
-        String v = m.group(2);
-        String[] kv = new String[2];
-        kv[0] = k;
-        kv[1] = v;
-        kvs.add(kv);
-      }
-      currPath = currPath.getParent();
-    } while (currPath != null && !currPath.getName().isEmpty());
-
-    // reverse the list since we checked the part from leaf dir to table's base dir
-    for (int i = kvs.size(); i > 0; i--) {
-      partSpec.put(kvs.get(i - 1)[0], kvs.get(i - 1)[1]);
-    }
-
-    return partSpec;
-  }
-
-  /**
-   * Returns the default partition path of a table within a given database and partition key value
-   * pairs. It uses the database location and appends it the table name and the partition key,value
-   * pairs to create the Path for the partition directory
-   *
-   * @param db - parent database which is used to get the base location of the partition directory
-   * @param tableName - table name for the partitions
-   * @param pm - Partition key value pairs
-   * @return
-   * @throws MetaException
-   */
-  public Path getDefaultPartitionPath(Database db, String tableName,
-      Map<String, String> pm) throws MetaException {
-    return getPartitionPath(getDefaultTablePath(db, tableName), pm);
-  }
-
-  /**
-   * Returns the path object for the given partition key-value pairs and the base location
-   *
-   * @param tblPath - the base location for the partitions. Typically the table location
-   * @param pm - Partition key value pairs
-   * @return
-   * @throws MetaException
-   */
-  public Path getPartitionPath(Path tblPath, Map<String, String> pm)
-      throws MetaException {
-    return new Path(tblPath, makePartPath(pm));
-  }
-
-  /**
-   * Given a database, a table and the partition key value pairs this method returns the Path object
-   * corresponding to the partition key value pairs. It uses the table location if available else
-   * uses the database location for constructing the path corresponding to the partition key-value
-   * pairs
-   *
-   * @param db - Parent database of the given table
-   * @param table - Table for which the partition key-values are given
-   * @param vals - List of values for the partition keys
-   * @return Path corresponding to the partition key-value pairs
-   * @throws MetaException
-   */
-  public Path getPartitionPath(Database db, Table table, List<String> vals)
-      throws MetaException {
-    List<FieldSchema> partKeys = table.getPartitionKeys();
-    if (partKeys == null || (partKeys.size() != vals.size())) {
-      throw new MetaException("Invalid number of partition keys found for " + table.getTableName());
-    }
-    Map<String, String> pm = new LinkedHashMap<>(vals.size());
-    int i = 0;
-    for (FieldSchema key : partKeys) {
-      pm.put(key.getName(), vals.get(i));
-      i++;
-    }
-
-    if (table.getSd().getLocation() != null) {
-      return getPartitionPath(getDnsPath(new Path(table.getSd().getLocation())), pm);
-    } else {
-      return getDefaultPartitionPath(db, table.getTableName(), pm);
-    }
-  }
-
-  public boolean isDir(Path f) throws MetaException {
-    FileSystem fs = null;
-    try {
-      fs = getFs(f);
-      FileStatus fstatus = fs.getFileStatus(f);
-      if (!fstatus.isDir()) {
-        return false;
-      }
-    } catch (FileNotFoundException e) {
-      return false;
-    } catch (IOException e) {
-      MetaStoreUtils.logAndThrowMetaException(e);
-    }
-    return true;
-  }
-
-  public static String makePartName(List<FieldSchema> partCols,
-      List<String> vals) throws MetaException {
-    return makePartName(partCols, vals, null);
-  }
-
-  /**
-   * @param desc
-   * @return array of FileStatus objects corresponding to the files
-   * making up the passed storage description
-   */
-  public FileStatus[] getFileStatusesForSD(StorageDescriptor desc)
-      throws MetaException {
-    return getFileStatusesForLocation(desc.getLocation());
-  }
-
-  /**
-   * @param location
-   * @return array of FileStatus objects corresponding to the files
-   * making up the passed storage description
-   */
-  public FileStatus[] getFileStatusesForLocation(String location)
-      throws MetaException {
-    try {
-      Path path = new Path(location);
-      FileSystem fileSys = path.getFileSystem(conf);
-      return HiveStatsUtils.getFileStatusRecurse(path, -1, fileSys);
-    } catch (IOException ioe) {
-      MetaStoreUtils.logAndThrowMetaException(ioe);
-    }
-    return null;
-  }
-
-  /**
-   * @param table
-   * @return array of FileStatus objects corresponding to the files making up the passed
-   * unpartitioned table
-   */
-  public FileStatus[] getFileStatusesForUnpartitionedTable(Database db, Table table)
-      throws MetaException {
-    Path tablePath = getDnsPath(new Path(table.getSd().getLocation()));
-    try {
-      FileSystem fileSys = tablePath.getFileSystem(conf);
-      return HiveStatsUtils.getFileStatusRecurse(tablePath, -1, fileSys);
-    } catch (IOException ioe) {
-      MetaStoreUtils.logAndThrowMetaException(ioe);
-    }
-    return null;
-  }
-
-  /**
-   * Makes a valid partition name.
-   * @param partCols The partition columns
-   * @param vals The partition values
-   * @param defaultStr
-   *    The default name given to a partition value if the respective value is empty or null.
-   * @return An escaped, valid partition name.
-   * @throws MetaException
-   */
-  public static String makePartName(List<FieldSchema> partCols,
-      List<String> vals, String defaultStr) throws MetaException {
-    if ((partCols.size() != vals.size()) || (partCols.size() == 0)) {
-      String errorStr = "Invalid partition key & values; keys [";
-      for (FieldSchema fs : partCols) {
-        errorStr += (fs.getName() + ", ");
-      }
-      errorStr += "], values [";
-      for (String val : vals) {
-        errorStr += (val + ", ");
-      }
-      throw new MetaException(errorStr + "]");
-    }
-    List<String> colNames = new ArrayList<String>();
-    for (FieldSchema col: partCols) {
-      colNames.add(col.getName());
-    }
-    return FileUtils.makePartName(colNames, vals, defaultStr);
-  }
-
-  public static List<String> getPartValuesFromPartName(String partName)
-      throws MetaException {
-    LinkedHashMap<String, String> partSpec = Warehouse.makeSpecFromName(partName);
-    List<String> values = new ArrayList<String>();
-    values.addAll(partSpec.values());
-    return values;
-  }
-
-  public static Map<String, String> makeSpecFromValues(List<FieldSchema> partCols,
-      List<String> values) {
-    Map<String, String> spec = new LinkedHashMap<String, String>();
-    for (int i = 0; i < values.size(); i++) {
-      spec.put(partCols.get(i).getName(), values.get(i));
-    }
-    return spec;
-  }
-}
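
For context, not part of the change itself: Warehouse.java is removed here from the metastore module, and the rest of the commit indicates it now lives with the standalone metastore, since the ql code below still imports org.apache.hadoop.hive.metastore.Warehouse and reads constants such as DEFAULT_DATABASE_NAME from it. A minimal sketch of how the partition-path helpers defined above behave; the class name PartPathExample and the spec values are illustrative only, assuming the public static signatures shown in the deleted file.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartPathExample {
  public static void main(String[] args) throws MetaException {
    // Build a static partition spec; LinkedHashMap preserves the key order.
    Map<String, String> spec = new LinkedHashMap<>();
    spec.put("ds", "2017-08-01");
    spec.put("hr", "12");

    // makePartPath escapes each key and value, joins them with '/', and adds a
    // trailing separator: "ds=2017-08-01/hr=12/"
    String partPath = Warehouse.makePartPath(spec);

    // makeSpecFromName walks the path from leaf dir back to the base dir and
    // rebuilds the spec, unescaping each component.
    Map<String, String> roundTrip = Warehouse.makeSpecFromName("ds=2017-08-01/hr=12");

    System.out.println(partPath + " -> " + roundTrip);
  }
}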

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 8aa2d90..714ea1f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2413,7 +2413,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       if (tbl.getStorageHandler() == null) {
        // If serialization.format property has the default value, it will not to be included in
         // SERDE properties
-        if (MetaStoreUtils.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(
+        if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(
             serdeConstants.SERIALIZATION_FORMAT))){
           serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT);
         }
@@ -4823,11 +4823,11 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     if (!sd.isSetLocation())
     {
       // Location is not set, leave it as-is if this is not a default DB
-      if (databaseName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME))
+      if (databaseName.equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME))
       {
        // Default database name path is always ignored, use METASTOREWAREHOUSE and object name
         // instead
-        path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE), MetaStoreUtils.encodeTableName(name.toLowerCase()));
+        path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE), org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(name.toLowerCase()));
       }
     }
     else
@@ -4855,7 +4855,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     if (crtIndex.getLocation() == null) {
      // Location is not set, leave it as-is if index doesn't belong to default DB
       // Currently all indexes are created in current DB only
-      if (Utilities.getDatabaseName(name).equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
+      if (Utilities.getDatabaseName(name).equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME)) {
        // Default database name path is always ignored, use METASTOREWAREHOUSE and object name
         // instead
         String warehouse = HiveConf.getVar(conf, ConfVars.METASTOREWAREHOUSE);
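
The same substitution recurs throughout the ql changes in this patch: default-database and serialization-format constants are read from Warehouse rather than the old metastore MetaStoreUtils, and encodeTableName is invoked through the fully qualified org.apache.hadoop.hive.metastore.utils.MetaStoreUtils, presumably to disambiguate it from the older org.apache.hadoop.hive.metastore.MetaStoreUtils. A minimal sketch of the resulting call pattern, mirroring the hunks above; the helper and variable names are illustrative, not code from the commit.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;

class DefaultDbPathSketch {
  // Illustrative helper, not part of this commit: resolves the path used for
  // an object in the default database, following the pattern in the diff.
  static Path defaultDbPath(HiveConf conf, String dbName, String objectName) {
    if (dbName.equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME)) {
      // Default database paths are built from METASTOREWAREHOUSE plus the
      // encoded, lower-cased object name.
      return new Path(HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE),
          org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(objectName.toLowerCase()));
    }
    // Non-default databases keep their configured location (see the diff above).
    return null;
  }
}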

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index c22d69b..4db6806 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -22,7 +22,6 @@ package org.apache.hadoop.hive.ql.exec;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Callable;
@@ -350,7 +349,7 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
       throws MetaException {
 
     // prefix is of the form dbName.tblName
-    String prefix = table.getDbName() + "." + MetaStoreUtils.encodeTableName(table.getTableName());
+    String prefix = table.getDbName() + "." + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(table.getTableName());
     if (partition != null) {
      return Utilities.join(prefix, Warehouse.makePartPath(partition.getSpec()));
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index acf9746..cef07ad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -18,10 +18,10 @@
 package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.table;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
index 9ffd152..2bf3784 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
@@ -18,10 +18,10 @@
 package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.table;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java
index 4569ed5..107ce68 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hive.ql.hooks;
 
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 
 import java.util.HashSet;
 import java.util.Set;

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
index 24df25b..9c9a3ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
@@ -22,10 +22,8 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.Context;
-import org.apache.hadoop.hive.ql.Driver.DriverState;
 import org.apache.hadoop.hive.ql.Driver.LockedDriverState;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
@@ -35,7 +33,6 @@ import org.apache.hadoop.hive.ql.metadata.DummyPartition;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.util.*;
@@ -345,7 +342,7 @@ class DummyTxnManager extends HiveTxnManagerImpl {
         try {
           locks.add(new HiveLockObj(
                      new HiveLockObject(new DummyPartition(p.getTable(), p.getTable().getDbName()
-                                                            + "/" + MetaStoreUtils.encodeTableName(p.getTable().getTableName())
+                                                            + "/" + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(p.getTable().getTableName())
                                                             + "/" + partialName,
                                                               partialSpec), lockData), mode));
           partialName += "/";

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
index a514339..eb66c31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
@@ -24,7 +24,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.common.StringInternUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -198,12 +197,12 @@ public class HiveLockObject {
   }
 
   public HiveLockObject(Table tbl, HiveLockObjectData lockData) {
-    this(new String[] {tbl.getDbName(), MetaStoreUtils.encodeTableName(tbl.getTableName())}, lockData);
+    this(new String[] {tbl.getDbName(), org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tbl.getTableName())}, lockData);
   }
 
   public HiveLockObject(Partition par, HiveLockObjectData lockData) {
     this(new String[] {par.getTable().getDbName(),
-        MetaStoreUtils.encodeTableName(par.getTable().getTableName()), par.getName()}, lockData);
+                       org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(par.getTable().getTableName()), par.getName()}, lockData);
   }
 
   public HiveLockObject(DummyPartition par, HiveLockObjectData lockData) {

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 4add836..5737c66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -101,7 +100,7 @@ public class HiveMetaStoreChecker {
       throws HiveException, IOException {
 
     if (dbName == null || "".equalsIgnoreCase(dbName)) {
-      dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+      dbName = Warehouse.DEFAULT_DATABASE_NAME;
     }
 
     try {

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
index dcea0e5..12e2e24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hive.ql.optimizer.FieldNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
@@ -181,7 +180,7 @@ public final class RewriteQueryUsingAggregateIndexCtx  implements NodeProcessorC
     TableScanDesc indexTableScanDesc = new TableScanDesc(indexTableHandle);
     indexTableScanDesc.setGatherStats(false);
 
-    String k = MetaStoreUtils.encodeTableName(indexTableName) + Path.SEPARATOR;
+    String k = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(indexTableName) + Path.SEPARATOR;
     indexTableScanDesc.setStatsAggPrefix(k);
     scanOperator.setConf(indexTableScanDesc);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index aa4c660..ed004fe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -839,7 +838,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         tblDesc.setLocation(
             wh.getDnsPath(new Path(
                 wh.getDefaultDatabasePath(tblDesc.getDatabaseName()),
-                MetaStoreUtils.encodeTableName(tblDesc.getTableName().toLowerCase())
+                org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tblDesc.getTableName().toLowerCase())
             )
         ).toString());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
index fe065f8..0ae95be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
@@ -28,11 +28,11 @@ import java.util.Set;
 import java.util.Stack;
 import java.util.LinkedHashSet;
 
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -170,7 +170,7 @@ public class MacroSemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private void addEntities() throws SemanticException {
-    Database database = getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME);
+    Database database = getDatabase(Warehouse.DEFAULT_DATABASE_NAME);
     // This restricts macro creation to privileged users.
     outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index db29e3a..7bdf39e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10506,7 +10506,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // db_name.table_name + partitionSec
       // as the prefix for easy of read during explain and debugging.
       // Currently, partition spec can only be static partition.
-      String k = MetaStoreUtils.encodeTableName(tblName) + Path.SEPARATOR;
+      String k = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tblName) + Path.SEPARATOR;
       tsDesc.setStatsAggPrefix(tab.getDbName()+"."+k);
 
       // set up WriteEntity for replication

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
index 462963a..6bd29b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
@@ -29,6 +29,7 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
@@ -274,7 +274,7 @@ public class SQLAuthorizationUtils {
       return userName.equals(thriftTableObj.getOwner());
     }
     case DATABASE: {
-      if (MetaStoreUtils.DEFAULT_DATABASE_NAME.equalsIgnoreCase(hivePrivObject.getDbname())) {
+      if (Warehouse.DEFAULT_DATABASE_NAME.equalsIgnoreCase(hivePrivObject.getDbname())) {
         return true;
       }
       Database db = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 8b64407..97c8124 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -17,7 +17,7 @@
  */
 
 package org.apache.hadoop.hive.ql.session;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 
 import java.io.File;
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index b4898e2..c0c496f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -26,13 +26,13 @@ import java.util.List;
 
 import junit.framework.TestCase;
 
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryState;
@@ -137,7 +137,7 @@ public class TestExecDriver extends TestCase {
       cols.add("key");
       cols.add("value");
       for (String src : srctables) {
-        db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
+        db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             HiveIgnoreKeyTextOutputFormat.class);
         db.loadTable(hadoopDataFile[i], src, false, true, false, false, false);
@@ -492,7 +492,7 @@ public class TestExecDriver extends TestCase {
   public void testMapPlan1() throws Exception {
 
     LOG.info("Beginning testMapPlan1");
-    populateMapPlan1(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src"));
+    populateMapPlan1(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src"));
     executePlan();
     fileDiff("lt100.txt.deflate", "mapplan1.out");
   }
@@ -500,7 +500,7 @@ public class TestExecDriver extends TestCase {
   public void testMapPlan2() throws Exception {
 
     LOG.info("Beginning testMapPlan2");
-    populateMapPlan2(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src"));
+    populateMapPlan2(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src"));
     executePlan();
     fileDiff("lt100.txt", "mapplan2.out");
   }
@@ -508,7 +508,7 @@ public class TestExecDriver extends TestCase {
   public void testMapRedPlan1() throws Exception {
 
     LOG.info("Beginning testMapRedPlan1");
-    populateMapRedPlan1(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    populateMapRedPlan1(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
         "src"));
     executePlan();
     fileDiff("kv1.val.sorted.txt", "mapredplan1.out");
@@ -517,7 +517,7 @@ public class TestExecDriver extends TestCase {
   public void testMapRedPlan2() throws Exception {
 
     LOG.info("Beginning testMapPlan2");
-    populateMapRedPlan2(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    populateMapRedPlan2(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
         "src"));
     executePlan();
     fileDiff("lt100.sorted.txt", "mapredplan2.out");
@@ -526,8 +526,8 @@ public class TestExecDriver extends TestCase {
   public void testMapRedPlan3() throws Exception {
 
     LOG.info("Beginning testMapPlan3");
-    populateMapRedPlan3(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
-        "src"), db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src2"));
+    populateMapRedPlan3(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
+        "src"), db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src2"));
     executePlan();
     fileDiff("kv1kv2.cogroup.txt", "mapredplan3.out");
   }
@@ -535,7 +535,7 @@ public class TestExecDriver extends TestCase {
   public void testMapRedPlan4() throws Exception {
 
     LOG.info("Beginning testMapPlan4");
-    populateMapRedPlan4(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    populateMapRedPlan4(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
         "src"));
     executePlan();
     fileDiff("kv1.string-sorted.txt", "mapredplan4.out");
@@ -544,7 +544,7 @@ public class TestExecDriver extends TestCase {
   public void testMapRedPlan5() throws Exception {
 
     LOG.info("Beginning testMapPlan5");
-    populateMapRedPlan5(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    populateMapRedPlan5(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
         "src"));
     executePlan();
     fileDiff("kv1.string-sorted.txt", "mapredplan5.out");
@@ -553,7 +553,7 @@ public class TestExecDriver extends TestCase {
   public void testMapRedPlan6() throws Exception {
 
     LOG.info("Beginning testMapPlan6");
-    populateMapRedPlan6(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    populateMapRedPlan6(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
         "src"));
     executePlan();
     fileDiff("lt100.sorted.txt", "mapredplan6.out");

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 91eb033..dc7d51d 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -118,13 +117,13 @@ public class TestHive extends TestCase {
       // create a simple table and test create, drop, get
       String tableName = "table_for_testtable";
       try {
-        hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+        hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       } catch (HiveException e1) {
         e1.printStackTrace();
         assertTrue("Unable to drop table", false);
       }
 
-      Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+      Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       List<FieldSchema> fields = tbl.getCols();
 
       fields.add(new FieldSchema("col1", serdeConstants.INT_TYPE_NAME, "int -- first column"));
@@ -184,9 +183,9 @@ public class TestHive extends TestCase {
       validateTable(tbl, tableName);
 
       try {
-        hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, true,
+        hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName, true,
             false);
-        Table ft2 = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+        Table ft2 = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME,
             tableName, false);
         assertNull("Unable to drop table ", ft2);
       } catch (HiveException e) {
@@ -216,12 +215,12 @@ public class TestHive extends TestCase {
     String tableName = "table_for_test_thrifttable";
     try {
       try {
-        hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+        hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       } catch (HiveException e1) {
         System.err.println(StringUtils.stringifyException(e1));
         assertTrue("Unable to drop table", false);
       }
-      Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+      Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       tbl.setInputFormatClass(SequenceFileInputFormat.class.getName());
       tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName());
       tbl.setSerializationLib(ThriftDeserializer.class.getName());
@@ -308,7 +307,7 @@ public class TestHive extends TestCase {
       // (create table sets it to empty (non null) structures)
       tbl.getTTable().setPrivilegesIsSet(false);
 
-      ft = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+      ft = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       assertNotNull("Unable to fetch table", ft);
       ft.checkValidity(hiveConf);
       assertEquals("Table names didn't match for table: " + tableName, tbl
@@ -526,7 +525,7 @@ public class TestHive extends TestCase {
    * @throws Exception on failure.
    */
   public void testDropPartitionsWithPurge() throws Exception {
-    String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+    String dbName = Warehouse.DEFAULT_DATABASE_NAME;
     String tableName = "table_for_testDropPartitionsWithPurge";
 
     try {
@@ -589,7 +588,7 @@ public class TestHive extends TestCase {
    */
   public void testAutoPurgeTablesAndPartitions() throws Throwable {
 
-    String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+    String dbName = Warehouse.DEFAULT_DATABASE_NAME;
     String tableName = "table_for_testAutoPurgeTablesAndPartitions";
     try {
 
@@ -643,7 +642,7 @@ public class TestHive extends TestCase {
     try {
       String tableName = "table_for_testpartition";
       try {
-        hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+        hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       } catch (HiveException e) {
         System.err.println(StringUtils.stringifyException(e));
         assertTrue("Unable to drop table: " + tableName, false);
@@ -664,7 +663,7 @@ public class TestHive extends TestCase {
       }
       Table tbl = null;
       try {
-        tbl = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+        tbl = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       } catch (HiveException e) {
         System.err.println(StringUtils.stringifyException(e));
         assertTrue("Unable to fetch table: " + tableName, false);
@@ -679,7 +678,7 @@ public class TestHive extends TestCase {
         System.err.println(StringUtils.stringifyException(e));
         assertTrue("Unable to create parition for table: " + tableName, false);
       }
-      hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+      hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testPartition() failed");
@@ -696,15 +695,15 @@ public class TestHive extends TestCase {
     try{
       // create a simple table
       String tableName = "table_for_testindex";
-      String qTableName = MetaStoreUtils.DEFAULT_DATABASE_NAME + "." + tableName;
+      String qTableName = Warehouse.DEFAULT_DATABASE_NAME + "." + tableName;
       try {
-        hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+        hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       } catch (HiveException e) {
         e.printStackTrace();
         assertTrue("Unable to drop table", false);
       }
 
-      Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+      Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName);
       List<FieldSchema> fields = tbl.getCols();
 
       fields.add(new FieldSchema("col1", serdeConstants.INT_TYPE_NAME, "int -- first column"));
@@ -731,7 +730,7 @@ public class TestHive extends TestCase {
       List<String> indexedCols = new ArrayList<String>();
       indexedCols.add("col1");
       String indexTableName = "index_on_table_for_testindex_table";
-      String qIndexTableName = MetaStoreUtils.DEFAULT_DATABASE_NAME + "." + indexTableName;
+      String qIndexTableName = Warehouse.DEFAULT_DATABASE_NAME + "." + indexTableName;
       boolean deferredRebuild = true;
       String inputFormat = SequenceFileInputFormat.class.getName();
       String outputFormat = SequenceFileOutputFormat.class.getName();
@@ -776,7 +775,7 @@ public class TestHive extends TestCase {
 
       // Drop index
       try {
-        hm.dropIndex(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, indexName, false, true);
+        hm.dropIndex(Warehouse.DEFAULT_DATABASE_NAME, tableName, indexName, false, true);
       } catch (HiveException e) {
         System.err.println(StringUtils.stringifyException(e));
         assertTrue("Unable to drop index: " + indexName, false);

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java b/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java
index fdebb94..64a90c5 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java
@@ -20,21 +20,18 @@ package org.apache.hadoop.hive.ql.session;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 
-import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
-import java.io.InputStreamReader;
 import java.lang.reflect.Method;
 import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hive.common.util.HiveTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -112,7 +109,7 @@ public class TestSessionState {
   @Test
   public void testgetDbName() throws Exception {
     //check that we start with default db
-    assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    assertEquals(Warehouse.DEFAULT_DATABASE_NAME,
         SessionState.get().getCurrentDatabase());
     final String newdb = "DB_2";
 
@@ -123,7 +120,7 @@ public class TestSessionState {
 
     //verify that a new sessionstate has default db
     SessionState.start(new HiveConf());
-    assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    assertEquals(Warehouse.DEFAULT_DATABASE_NAME,
         SessionState.get().getCurrentDatabase());
 
   }
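
The hunks above and in TestHive swap MetaStoreUtils.DEFAULT_DATABASE_NAME for Warehouse.DEFAULT_DATABASE_NAME, since the constant now lives in the standalone metastore's Warehouse class. A minimal sketch, not part of this commit, of a caller after the change (the class name is made up for illustration):

// Sketch only, not part of this commit: callers now read the default database
// constant from Warehouse instead of MetaStoreUtils.
import org.apache.hadoop.hive.metastore.Warehouse;

public class DefaultDbExample {
  public static void main(String[] args) {
    // Prints "default", the name of the warehouse's default database.
    System.out.println(Warehouse.DEFAULT_DATABASE_NAME);
  }
}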

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 5cdbbcb..d91b22d 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -63,6 +63,11 @@
       <version>${dropwizard.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+      <version>${commons-lang3.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop.version}</version>
@@ -80,6 +85,12 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <version>${hadoop.version}</version>
       <optional>true</optional>
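
Of the two dependencies added above, commons-lang3 is the one exercised directly by the new ReplChangeManager below: it provides BasicThreadFactory, used to run the CMClearer task on a named daemon thread. A minimal sketch, not part of this commit, of that pattern (class name and the printed message are made up):

// Sketch only, not part of this commit: the commons-lang3 dependency added above supplies
// BasicThreadFactory, which ReplChangeManager (next file) uses for its CMClearer thread.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.concurrent.BasicThreadFactory;

public class NamedDaemonSchedulerExample {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(
        new BasicThreadFactory.Builder()
            .namingPattern("cmclearer-%d") // worker threads get this name pattern
            .daemon(true)                  // daemon threads never block JVM shutdown
            .build());
    executor.scheduleAtFixedRate(() -> System.out.println("periodic task"), 0, 1, TimeUnit.SECONDS);
    Thread.sleep(3000); // keep the JVM alive long enough to observe a few runs
    executor.shutdown();
  }
}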

http://git-wip-us.apache.org/repos/asf/hive/blob/56083008/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
new file mode 100644
index 0000000..dd9296a
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.io.IOException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang3.concurrent.BasicThreadFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.utils.FileUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ReplChangeManager {
+  private static final Logger LOG = LoggerFactory.getLogger(ReplChangeManager.class);
+  static private ReplChangeManager instance;
+
+  private static boolean inited = false;
+  private static boolean enabled = false;
+  private static Path cmroot;
+  private static Configuration conf;
+  private String msUser;
+  private String msGroup;
+  private FileSystem fs;
+
+  private static final String ORIG_LOC_TAG = "user.original-loc";
+  static final String REMAIN_IN_TRASH_TAG = "user.remain-in-trash";
+  private static final String URI_FRAGMENT_SEPARATOR = "#";
+
+  public enum RecycleType {
+    MOVE,
+    COPY
+  }
+
+  public static ReplChangeManager getInstance(Configuration conf) throws MetaException {
+    if (instance == null) {
+      instance = new ReplChangeManager(conf);
+    }
+    return instance;
+  }
+
+  private ReplChangeManager(Configuration conf) throws MetaException {
+    try {
+      if (!inited) {
+        if (MetastoreConf.getBoolVar(conf, ConfVars.REPLCMENABLED)) {
+          ReplChangeManager.enabled = true;
+          ReplChangeManager.cmroot = new Path(MetastoreConf.getVar(conf, ConfVars.REPLCMDIR));
+          ReplChangeManager.conf = conf;
+
+          fs = cmroot.getFileSystem(conf);
+          // Create cmroot with permission 700 if not exist
+          if (!fs.exists(cmroot)) {
+            fs.mkdirs(cmroot);
+            fs.setPermission(cmroot, new FsPermission("700"));
+          }
+          UserGroupInformation usergroupInfo = UserGroupInformation.getCurrentUser();
+          msUser = usergroupInfo.getShortUserName();
+          msGroup = usergroupInfo.getPrimaryGroupName();
+        }
+        inited = true;
+      }
+    } catch (IOException e) {
+      throw new MetaException(StringUtils.stringifyException(e));
+    }
+  }
+
+  // Filter out files that start with ".". Note Hadoop considers files starting with
+  // "." or "_" as hidden, but we need to replicate files starting with "_".
+  // We know of at least 2 use cases:
+  // 1. For har files, _index and _masterindex are required files
+  // 2. The _success file is required for Oozie to indicate availability of a data source
+  private static final PathFilter hiddenFileFilter = new PathFilter(){
+    public boolean accept(Path p){
+      return !p.getName().startsWith(".");
+    }
+  };
+
+  /***
+   * Move a path into cmroot. If the path is a directory (of a partition, or of the table if
+   *   non-partitioned), recursively move the files inside the directory to cmroot. Note the
+   *   table must be a managed table.
+   * @param path a single file or directory
+   * @param type whether the files should be copied or moved to cmpath.
+   *             Copy is costlier but preserves the source file
+   * @param ifPurge whether the file should skip Trash when the source file is moved/deleted.
+   *                This is consulted only if type is MOVE.
+   * @return the number of files recycled
+   * @throws MetaException
+   */
+  int recycle(Path path, RecycleType type, boolean ifPurge) throws MetaException {
+    if (!enabled) {
+      return 0;
+    }
+
+    try {
+      int count = 0;
+
+      if (fs.isDirectory(path)) {
+        FileStatus[] files = fs.listStatus(path, hiddenFileFilter);
+        for (FileStatus file : files) {
+          count += recycle(file.getPath(), type, ifPurge);
+        }
+      } else {
+        String fileCheckSum = checksumFor(path, fs);
+        Path cmPath = getCMPath(conf, path.getName(), fileCheckSum);
+
+        // set the timestamp before moving to cmroot, so we can
+        // avoid a race condition where the CM cleaner removes the file
+        // before the timestamp is set
+        long now = System.currentTimeMillis();
+        fs.setTimes(path, now, -1);
+
+        boolean success = false;
+        if (fs.exists(cmPath) && fileCheckSum.equalsIgnoreCase(checksumFor(cmPath, fs))) {
+          // If a file with the same checksum already exists in cmPath, just skip the copy/move.
+          // Also, mark the operation as unsuccessful to signal that a file with the same name
+          // already exists, which ensures the timestamp of cmPath is updated to avoid clean-up
+          // by the CM cleaner.
+          success = false;
+        } else {
+          switch (type) {
+            case MOVE: {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Moving {} to {}", path.toString(), 
cmPath.toString());
+              }
+              // Rename fails if the file with same name already exist.
+              success = fs.rename(path, cmPath);
+              break;
+            }
+            case COPY: {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Copying {} to {}", path.toString(), 
cmPath.toString());
+              }
+              // It is possible to have a file with same checksum in cmPath 
but the content is
+              // partially copied or corrupted. In this case, just overwrite 
the existing file with
+              // new one.
+              success = FileUtils.copy(fs, path, fs, cmPath, false, true, 
conf);
+              break;
+            }
+            default:
+              // The operation fails due to invalid input
+              break;
+          }
+        }
+
+        // Ignore if a file with the same content already exists in cmroot
+        // We might want to setXAttr for the new location in the future
+        if (success) {
+          // set the file owner to hive (or the id the metastore runs as)
+          fs.setOwner(cmPath, msUser, msGroup);
+
+          // tag the original file name so we know where the file came from
+          // Note we currently only track the last known trace, as
+          // xattr has limited capacity. We shall revisit and store all original
+          // locations if orig-loc becomes important
+          try {
+            fs.setXAttr(cmPath, ORIG_LOC_TAG, path.toString().getBytes());
+          } catch (UnsupportedOperationException e) {
+            LOG.warn("Error setting xattr for {}", path.toString());
+          }
+
+          count++;
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("A file with the same content of {} already exists, 
ignore", path.toString());
+          }
+          // Need to extend the tenancy if we saw a newer file with the same 
content
+          fs.setTimes(cmPath, now, -1);
+        }
+
+        // Tag the file if we want it to remain in trash after deletion.
+        // If multiple files share the same content, then
+        // any file's claim to remain in trash is granted
+        if ((type == RecycleType.MOVE) && !ifPurge) {
+          try {
+            fs.setXAttr(cmPath, REMAIN_IN_TRASH_TAG, new byte[]{0});
+          } catch (UnsupportedOperationException e) {
+            LOG.warn("Error setting xattr for {}", cmPath.toString());
+          }
+        }
+      }
+      return count;
+    } catch (IOException e) {
+      throw new MetaException(StringUtils.stringifyException(e));
+    }
+  }
+
+  // Get checksum of a file
+  static public String checksumFor(Path path, FileSystem fs) throws IOException {
+    // TODO: fs checksum only available on hdfs, need to
+    //       find a solution for other fs (eg, local fs, s3, etc)
+    String checksumString = null;
+    FileChecksum checksum = fs.getFileChecksum(path);
+    if (checksum != null) {
+      checksumString = StringUtils.byteToHexString(
+          checksum.getBytes(), 0, checksum.getLength());
+    }
+    return checksumString;
+  }
+
+  static public void setCmRoot(Path cmRoot) {
+    ReplChangeManager.cmroot = cmRoot;
+  }
+
+  /***
+   * Convert the path of a file inside a partition or table (if non-partitioned)
+   *   to a deterministic location under cmroot, so the user can retrieve the file
+   *   back from the original location plus its checksum.
+   * @param conf
+   * @param name original filename
+   * @param checkSum checksum of the file, can be retrieved by {@link #checksumFor(Path, FileSystem)}
+   * @return Path
+   */
+  static Path getCMPath(Configuration conf, String name, String checkSum) throws IOException, MetaException {
+    String newFileName = name + "_" + checkSum;
+    int maxLength = conf.getInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
+        DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
+
+    if (newFileName.length() > maxLength) {
+      newFileName = newFileName.substring(0, maxLength-1);
+    }
+
+    return new Path(cmroot, newFileName);
+  }
+
+  /***
+   * Get the original file specified by src and checksumString. If the file exists and its checksum
+   * matches, return the file; otherwise, use checksumString to retrieve it from cmroot
+   * @param src Original file location
+   * @param checksumString Checksum of the original file
+   * @param conf
+   * @return Corresponding FileStatus object
+   */
+  static public FileStatus getFileStatus(Path src, String checksumString,
+      Configuration conf) throws MetaException {
+    try {
+      FileSystem srcFs = src.getFileSystem(conf);
+      if (checksumString == null) {
+        return srcFs.getFileStatus(src);
+      }
+
+      if (!srcFs.exists(src)) {
+        return srcFs.getFileStatus(getCMPath(conf, src.getName(), checksumString));
+      }
+
+      String currentChecksumString = checksumFor(src, srcFs);
+      if (currentChecksumString == null || checksumString.equals(currentChecksumString)) {
+        return srcFs.getFileStatus(src);
+      } else {
+        return srcFs.getFileStatus(getCMPath(conf, src.getName(), checksumString));
+      }
+    } catch (IOException e) {
+      throw new MetaException(StringUtils.stringifyException(e));
+    }
+  }
+
+  /***
+   * Concatenate filename and checksum with "#"
+   * @param fileUriStr Filename string
+   * @param fileChecksum Checksum string
+   * @return Concatenated Uri string
+   */
+  // TODO: this needs to be enhanced once change management based filesystem is implemented
+  // Currently using fileuri#checksum as the format
+  static public String encodeFileUri(String fileUriStr, String fileChecksum) {
+    if (fileChecksum != null) {
+      return fileUriStr + URI_FRAGMENT_SEPARATOR + fileChecksum;
+    } else {
+      return fileUriStr;
+    }
+  }
+
+  /***
+   * Split uri with fragment into file uri and checksum
+   * @param fileURIStr uri with fragment
+   * @return array of file name and checksum
+   */
+  static public String[] getFileWithChksumFromURI(String fileURIStr) {
+    String[] uriAndFragment = fileURIStr.split(URI_FRAGMENT_SEPARATOR);
+    String[] result = new String[2];
+    result[0] = uriAndFragment[0];
+    if (uriAndFragment.length>1) {
+      result[1] = uriAndFragment[1];
+    }
+    return result;
+  }
+
+  public static boolean isCMFileUri(Path fromPath, FileSystem srcFs) {
+    String[] result = getFileWithChksumFromURI(fromPath.toString());
+    return result[1] != null;
+  }
+
+  /**
+   * Thread to clear old files of cmroot recursively
+   */
+  static class CMClearer implements Runnable {
+    private Path cmroot;
+    private long secRetain;
+    private Configuration conf;
+
+    CMClearer(String cmrootString, long secRetain, Configuration conf) {
+      this.cmroot = new Path(cmrootString);
+      this.secRetain = secRetain;
+      this.conf = conf;
+    }
+
+    @Override
+    public void run() {
+      try {
+        LOG.info("CMClearer started");
+
+        long now = System.currentTimeMillis();
+        FileSystem fs = cmroot.getFileSystem(conf);
+        FileStatus[] files = fs.listStatus(cmroot);
+
+        for (FileStatus file : files) {
+          long modifiedTime = file.getModificationTime();
+          if (now - modifiedTime > secRetain*1000) {
+            try {
+              if (fs.getXAttrs(file.getPath()).containsKey(REMAIN_IN_TRASH_TAG)) {
+                boolean succ = Trash.moveToAppropriateTrash(fs, file.getPath(), conf);
+                if (succ) {
+                  if (LOG.isDebugEnabled()) {
+                    LOG.debug("Move " + file.toString() + " to trash");
+                  }
+                } else {
+                  LOG.warn("Fail to move " + file.toString() + " to trash");
+                }
+              } else {
+                boolean succ = fs.delete(file.getPath(), false);
+                if (succ) {
+                  if (LOG.isDebugEnabled()) {
+                    LOG.debug("Remove " + file.toString());
+                  }
+                } else {
+                  LOG.warn("Fail to remove " + file.toString());
+                }
+              }
+            } catch (UnsupportedOperationException e) {
+              LOG.warn("Error getting xattr for " + file.getPath().toString());
+            }
+          }
+        }
+      } catch (IOException e) {
+        LOG.error("Exception when clearing cmroot:" + 
StringUtils.stringifyException(e));
+      }
+    }
+  }
+
+  // Schedule CMClearer thread. Will be invoked by metastore
+  static void scheduleCMClearer(Configuration conf) {
+    if (MetastoreConf.getBoolVar(conf, ConfVars.REPLCMENABLED)) {
+      ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(
+          new BasicThreadFactory.Builder()
+          .namingPattern("cmclearer-%d")
+          .daemon(true)
+          .build());
+      executor.scheduleAtFixedRate(new CMClearer(MetastoreConf.getVar(conf, ConfVars.REPLCMDIR),
+          MetastoreConf.getTimeVar(conf, ConfVars.REPLCMRETIAN, TimeUnit.SECONDS), conf),
+          0, MetastoreConf.getTimeVar(conf, ConfVars.REPLCMINTERVAL, TimeUnit.SECONDS), TimeUnit.SECONDS);
+    }
+  }
+}
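
As a quick illustration of the fileuri#checksum convention documented in encodeFileUri and getFileWithChksumFromURI above, here is a minimal sketch, not part of this commit, that round-trips a URI through both helpers (the URI and checksum values are made up):

// Sketch only, not part of this commit: round-trips the fileuri#checksum encoding defined
// by ReplChangeManager above. Assumes the standalone-metastore jar is on the classpath.
import org.apache.hadoop.hive.metastore.ReplChangeManager;

public class CmUriExample {
  public static void main(String[] args) {
    String uri = "hdfs://nn:8020/warehouse/t1/000000_0"; // hypothetical file location
    String checksum = "abc123";                          // hypothetical checksum string

    // encodeFileUri appends "#<checksum>" only when a checksum is present.
    String encoded = ReplChangeManager.encodeFileUri(uri, checksum);

    // getFileWithChksumFromURI splits the result back into {file URI, checksum};
    // the checksum slot stays null when the uri carries no fragment.
    String[] parts = ReplChangeManager.getFileWithChksumFromURI(encoded);
    System.out.println(parts[0] + " -> " + parts[1]);
  }
}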
