http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index d18ddc8..b46cc38 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -463,6 +463,9 @@ public class MetastoreConf {
         "hive.metastore.event.message.factory",
         "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory",
         "Factory class for making encoding and decoding messages in the events 
generated."),
+    EVENT_DB_LISTENER_TTL("metastore.event.db.listener.timetolive",
+        "hive.metastore.event.db.listener.timetolive", 86400, TimeUnit.SECONDS,
+        "time after which events will be removed from the database listener 
queue"),
     
EVENT_DB_NOTIFICATION_API_AUTH("metastore.metastore.event.db.notification.api.auth",
         "hive.metastore.event.db.notification.api.auth", true,
         "Should metastore do authorization against database notification 
related APIs such as get_next_notification.\n" +
@@ -799,6 +802,19 @@ public class MetastoreConf {
         "internal use only, true when in testing tez"),
    // We need to track this as some listeners pass it through our config and we need to honor
    // the system properties.
+    HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
+        "hive.security.authorization.manager",
+        "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
+        "The Hive client authorization manager class name. The user defined authorization class should implement \n" +
+            "interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."),
+    HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
+        "hive.security.metastore.authenticator.manager",
+        "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
+        "authenticator manager class name to be used in the metastore for authentication. \n" +
+            "The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
+    HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads",
+        "hive.security.metastore.authorization.auth.reads", true,
+        "If this is true, metastore authorizer authorizes read actions on database, table"),
     HIVE_METASTORE_AUTHORIZATION_MANAGER(NO_SUCH_KEY,
         "hive.security.metastore.authorization.manager",
         "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider",

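The EVENT_DB_LISTENER_TTL entry above is a time-valued ConfVar, so consumers resolve it through MetastoreConf's time accessor rather than parsing the raw string. A minimal sketch of reading it (assuming the getTimeVar accessor MetastoreConf exposes for its other time-based ConfVars; the printout is illustrative):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    public class EventTtlExample {
      public static void main(String[] args) {
        Configuration conf = MetastoreConf.newMetastoreConf();
        // Resolves metastore.event.db.listener.timetolive; defaults to 86400 seconds (one day).
        long ttlSecs = MetastoreConf.getTimeVar(
            conf, MetastoreConf.ConfVars.EVENT_DB_LISTENER_TTL, TimeUnit.SECONDS);
        System.out.println("DB listener event TTL: " + ttlSecs + "s");
      }
    }
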
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
new file mode 100644
index 0000000..7d8c1d4
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.messaging;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
+import org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+public class EventUtils {
+
+  public interface NotificationFetcher {
+    int getBatchSize() throws IOException;
+    long getCurrentNotificationEventId() throws IOException;
+    long getDbNotificationEventsCount(long fromEventId, String dbName) throws IOException;
+    List<NotificationEvent> getNextNotificationEvents(
+        long pos, IMetaStoreClient.NotificationFilter filter) throws IOException;
+  }
+
+  // MetaStoreClient-based impl of NotificationFetcher
+  public static class MSClientNotificationFetcher implements NotificationFetcher {
+
+    private IMetaStoreClient msc = null;
+    private Integer batchSize = null;
+
+    public MSClientNotificationFetcher(IMetaStoreClient msc){
+      this.msc = msc;
+    }
+
+    @Override
+    public int getBatchSize() throws IOException {
+      if (batchSize == null) {
+        try {
+          batchSize = Integer.parseInt(
+            msc.getConfigValue(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.toString(), "50"));
+          // TODO: we're asking the metastore what its configuration for this var is - we may
+          // want to revisit to pull from client side instead. The reason I have it this way
+          // is because the metastore is more likely to have a reasonable config for this than
+          // an arbitrary client.
+        } catch (TException e) {
+          throw new IOException(e);
+        }
+      }
+      return batchSize;
+    }
+
+    @Override
+    public long getCurrentNotificationEventId() throws IOException {
+      try {
+        return msc.getCurrentNotificationEventId().getEventId();
+      } catch (TException e) {
+        throw new IOException(e);
+      }
+    }
+
+    @Override
+    public long getDbNotificationEventsCount(long fromEventId, String dbName) 
throws IOException {
+      try {
+        NotificationEventsCountRequest rqst
+                = new NotificationEventsCountRequest(fromEventId, dbName);
+        return msc.getNotificationEventsCount(rqst).getEventsCount();
+      } catch (TException e) {
+        throw new IOException(e);
+      }
+    }
+
+    @Override
+    public List<NotificationEvent> getNextNotificationEvents(
+        long pos, IMetaStoreClient.NotificationFilter filter) throws IOException {
+      try {
+        return msc.getNextNotification(pos, getBatchSize(), filter).getEvents();
+      } catch (TException e) {
+        throw new IOException(e);
+      }
+    }
+  }
+
+  public static class NotificationEventIterator implements Iterator<NotificationEvent> {
+
+    private NotificationFetcher nfetcher;
+    private IMetaStoreClient.NotificationFilter filter;
+    private int maxEvents;
+
+    private Iterator<NotificationEvent> batchIter = null;
+    private List<NotificationEvent> batch = null;
+    private long pos;
+    private long maxPos;
+    private int eventCount;
+
+    public NotificationEventIterator(
+        NotificationFetcher nfetcher, long eventFrom, int maxEvents,
+        String dbName, String tableName) throws IOException {
+      init(nfetcher, eventFrom, maxEvents, new DatabaseAndTableFilter(dbName, tableName));
+      // using init(..) instead of this(..) because the EventUtils.getDbTblNotificationFilter
+      // is an operation that needs to run before delegating to the other ctor, and this
+      // messes up chaining ctors
+    }
+
+    public NotificationEventIterator(
+        NotificationFetcher nfetcher, long eventFrom, int maxEvents,
+        IMetaStoreClient.NotificationFilter filter) throws IOException {
+      init(nfetcher, eventFrom, maxEvents, filter);
+    }
+
+    private void init(
+        NotificationFetcher nfetcher, long eventFrom, int maxEvents,
+        IMetaStoreClient.NotificationFilter filter) throws IOException {
+      this.nfetcher = nfetcher;
+      this.filter = filter;
+      this.pos = eventFrom;
+      if (maxEvents < 1){
+        // 0 or -1 implies fetch everything
+        this.maxEvents = Integer.MAX_VALUE;
+      } else {
+        this.maxEvents = maxEvents;
+      }
+
+      this.eventCount = 0;
+      this.maxPos = nfetcher.getCurrentNotificationEventId();
+    }
+
+    private void fetchNextBatch() throws IOException {
+      batch = nfetcher.getNextNotificationEvents(pos, filter);
+      int batchSize = nfetcher.getBatchSize();
+      while (((batch == null) || (batch.isEmpty())) && (pos < maxPos)) {
+        // no valid events this batch, but we're still not done processing events
+        pos += batchSize;
+        batch = nfetcher.getNextNotificationEvents(pos, filter);
+      }
+
+      if (batch == null) {
+        batch = new ArrayList<>();
+        // instantiate empty list so that we don't error out on iterator fetching.
+        // If we're here, then the next check of pos will show our caller that
+        // we've exhausted our event supply
+      }
+      batchIter = batch.iterator();
+    }
+
+    @Override
+    public boolean hasNext() {
+      if (eventCount >= maxEvents) {
+        // If we've already satisfied the number of events we were supposed to deliver, we end it.
+        return false;
+      }
+      if ((batchIter != null) && (batchIter.hasNext())) {
+        // If we have a valid batchIter and it has more elements, return them.
+        return true;
+      }
+      // If we're here, we want more events, and either batchIter is null, or batchIter
+      // has reached the end of the current batch. Let's fetch the next batch.
+      try {
+        fetchNextBatch();
+      } catch (IOException e) {
+        // Regrettable that we have to wrap the IOException into a RuntimeException,
+        // but throwing the exception is the appropriate result here, and hasNext()
+        // signature will only allow RuntimeExceptions. Iterator.hasNext() really
+        // should have allowed IOExceptions
+        throw new RuntimeException(e);
+      }
+      // New batch has been fetched. If it's not empty, we have more elements to process.
+      return !batch.isEmpty();
+    }
+
+    @Override
+    public NotificationEvent next() {
+      eventCount++;
+      NotificationEvent ev = batchIter.next();
+      pos = ev.getEventId();
+      return ev;
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException("remove() not supported on NotificationEventIterator");
+    }
+
+  }
+}

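The fetcher and iterator above compose as follows. A minimal sketch, assuming an already-connected IMetaStoreClient (the variable names, the db name "sales", and the starting event id are illustrative); per the init(..) logic above, a maxEvents of 0 means "fetch everything":

    import java.io.IOException;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.NotificationEvent;
    import org.apache.hadoop.hive.metastore.messaging.EventUtils;

    public class ReplayEvents {
      static void replay(IMetaStoreClient client) throws IOException {
        EventUtils.NotificationFetcher fetcher =
            new EventUtils.MSClientNotificationFetcher(client);
        // Iterate all events for db "sales" after event id 100; batching is handled internally.
        EventUtils.NotificationEventIterator it =
            new EventUtils.NotificationEventIterator(fetcher, 100L, 0, "sales", null);
        while (it.hasNext()) {
          NotificationEvent ev = it.next();
          System.out.println(ev.getEventId() + " " + ev.getEventType());
        }
      }
    }
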
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java
new file mode 100644
index 0000000..454d2cc
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.messaging.event.filters;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+public class AndFilter implements IMetaStoreClient.NotificationFilter {
+  final IMetaStoreClient.NotificationFilter[] filters;
+
+  public AndFilter(final IMetaStoreClient.NotificationFilter... filters) {
+    this.filters = filters;
+  }
+
+  @Override
+  public boolean accept(final NotificationEvent event) {
+    for (IMetaStoreClient.NotificationFilter filter : filters) {
+      if (!filter.accept(event)) {
+        return false;
+      }
+    }
+    return true;
+  }
+}

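AndFilter short-circuits on the first rejecting child, so cheaper filters should come first. A sketch composing it with the filters added below in this commit (the db name and id range are illustrative):

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter;
    import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
    import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter;

    // Accept only events for db "sales" whose ids fall within [100, 200].
    IMetaStoreClient.NotificationFilter filter = new AndFilter(
        new DatabaseAndTableFilter("sales", null), // null table => whole db
        new EventBoundaryFilter(100L, 200L));
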
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java
new file mode 100644
index 0000000..84302d6
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.messaging.event.filters;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient.NotificationFilter;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+public abstract class BasicFilter implements NotificationFilter {
+  @Override
+  public boolean accept(final NotificationEvent event) {
+    if (event == null) {
+      return false; // get rid of trivial case first, so that we can safely assume non-null
+    }
+    return shouldAccept(event);
+  }
+
+  abstract boolean shouldAccept(final NotificationEvent event);
+}

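Because shouldAccept(..) is package-private, concrete filters live in this same package, and BasicFilter guarantees they never see a null event. A hypothetical subclass matching one event type (the class name and the "CREATE_TABLE" literal are illustrative, not part of this commit):

    package org.apache.hadoop.hive.metastore.messaging.event.filters;

    import org.apache.hadoop.hive.metastore.api.NotificationEvent;

    public class EventTypeFilter extends BasicFilter {
      private final String eventType;

      public EventTypeFilter(final String eventType) {
        this.eventType = eventType; // e.g. "CREATE_TABLE"
      }

      @Override
      boolean shouldAccept(final NotificationEvent event) {
        // event is guaranteed non-null by BasicFilter.accept().
        return eventType.equalsIgnoreCase(event.getEventType());
      }
    }
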
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
new file mode 100644
index 0000000..0852abd
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.messaging.event.filters;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+/**
+ * Notification filter that matches events against a given db name and/or table name.
+ * If dbName == null, fetches all warehouse events.
+ * If dbName != null, but tableName == null, fetches all events for the db.
+ * If dbName != null &amp;&amp; tableName != null, fetches all events for the specified table.
+ */
+public class DatabaseAndTableFilter extends BasicFilter {
+  private final String databaseName, tableName;
+
+  public DatabaseAndTableFilter(final String databaseName, final String tableName) {
+    this.databaseName = databaseName;
+    this.tableName = tableName;
+  }
+
+  @Override
+  boolean shouldAccept(final NotificationEvent event) {
+    if (databaseName == null) {
+      return true; // if our dbName is null, we're interested in all wh events
+    }
+    if (databaseName.equalsIgnoreCase(event.getDbName())) {
+      if ((tableName == null)
+          // if our dbName is equal, but tableName is blank, we're interested in this db-level event
+          || (tableName.equalsIgnoreCase(event.getTableName()))) {
+        // table level event that matches us
+        return true;
+      }
+    }
+    return false;
+  }
+}

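The javadoc's three cases map directly onto the constructor arguments; for instance (names illustrative):

    new DatabaseAndTableFilter(null, null);        // all warehouse events
    new DatabaseAndTableFilter("sales", null);     // all events for db "sales"
    new DatabaseAndTableFilter("sales", "orders"); // only events on sales.orders
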
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java
new file mode 100644
index 0000000..988874d
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.messaging.event.filters;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+public class EventBoundaryFilter extends BasicFilter {
+  private final long eventFrom, eventTo;
+
+  public EventBoundaryFilter(final long eventFrom, final long eventTo) {
+    this.eventFrom = eventFrom;
+    this.eventTo = eventTo;
+  }
+
+  @Override
+  boolean shouldAccept(final NotificationEvent event) {
+    return eventFrom <= event.getEventId() && event.getEventId() <= eventTo;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java
new file mode 100644
index 0000000..b0ce3b9
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.messaging.event.filters;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+public class MessageFormatFilter extends BasicFilter {
+  private final String format;
+
+  public MessageFormatFilter(String format) {
+    this.format = format;
+  }
+
+  @Override
+  boolean shouldAccept(final NotificationEvent event) {
+    if (format == null) {
+      return true; // let's say that passing null in will not do any filtering.
+    }
+    return format.equalsIgnoreCase(event.getMessageFormat());
+  }
+}

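A filter per message encoding lets a consumer skip payloads it cannot decode. A sketch (the "json-0.2" literal is illustrative; the real value is whatever format string the message factory in use reports):

    import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter;

    // Keep only events whose payload was written by the JSON message factory.
    MessageFormatFilter jsonOnly = new MessageFormatFilter("json-0.2");
    // A null format deliberately filters nothing (see shouldAccept above).
    MessageFormatFilter anyFormat = new MessageFormatFilter(null);
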
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
new file mode 100644
index 0000000..f4eacd5
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
@@ -0,0 +1,484 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.tools;
+
+import java.net.URI;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.ObjectStore;
+
+/**
+ * This class provides Hive admins a tool to
+ * - execute JDOQL against the metastore using DataNucleus
+ * - perform HA name node upgrade
+ */
+
+public class HiveMetaTool {
+
+  private static final Logger LOG = LoggerFactory.getLogger(HiveMetaTool.class.getName());
+  private final Options cmdLineOptions = new Options();
+  private ObjectStore objStore;
+  private boolean isObjStoreInitialized;
+
+  public HiveMetaTool() {
+    this.isObjStoreInitialized = false;
+  }
+
+  @SuppressWarnings("static-access")
+  private void init() {
+
+    System.out.println("Initializing HiveMetaTool..");
+
+    Option help = new Option("help", "print this message");
+    Option listFSRoot = new Option("listFSRoot", "print the current FS root 
locations");
+    Option executeJDOQL =
+        OptionBuilder.withArgName("query-string")
+            .hasArgs()
+            .withDescription("execute the given JDOQL query")
+            .create("executeJDOQL");
+
+    /* Ideally we want to specify the different arguments to updateLocation as separate argNames.
+     * However if we did that, HelpFormatter swallows all but the last argument. Note that this is
+     * a known issue with the HelpFormatter class that has not been fixed. We specify all arguments
+     * with a single argName to work around this HelpFormatter bug.
+     */
+    Option updateFSRootLoc =
+        OptionBuilder
+            .withArgName("new-loc> " + "<old-loc")
+            .hasArgs(2)
+            .withDescription(
+                "Update FS root location in the metastore to new location. Both new-loc and " +
+                    "old-loc should be valid URIs with valid host names and schemes. " +
+                    "When run with the dryRun option changes are displayed but are not " +
+                    "persisted. When run with the serdepropKey/tablePropKey option " +
+                    "updateLocation looks for the serde-prop-key/table-prop-key that is " +
+                    "specified and updates its value if found.")
+                    .create("updateLocation");
+    Option dryRun = new Option("dryRun", "Perform a dry run of updateLocation changes. When " +
+      "run with the dryRun option updateLocation changes are displayed but not persisted. " +
+      "dryRun is valid only with the updateLocation option.");
+    Option serdePropKey =
+        OptionBuilder.withArgName("serde-prop-key")
+        .hasArgs()
+        .withValueSeparator()
+        .withDescription("Specify the key for serde property to be updated. serdePropKey option " +
+           "is valid only with updateLocation option.")
+        .create("serdePropKey");
+    Option tablePropKey =
+        OptionBuilder.withArgName("table-prop-key")
+        .hasArg()
+        .withValueSeparator()
+        .withDescription("Specify the key for table property to be updated. tablePropKey option " +
+          "is valid only with updateLocation option.")
+        .create("tablePropKey");
+
+    cmdLineOptions.addOption(help);
+    cmdLineOptions.addOption(listFSRoot);
+    cmdLineOptions.addOption(executeJDOQL);
+    cmdLineOptions.addOption(updateFSRootLoc);
+    cmdLineOptions.addOption(dryRun);
+    cmdLineOptions.addOption(serdePropKey);
+    cmdLineOptions.addOption(tablePropKey);
+  }
+
+  private void initObjectStore(Configuration conf) {
+    if (!isObjStoreInitialized) {
+      objStore = new ObjectStore();
+      objStore.setConf(conf);
+      isObjStoreInitialized = true;
+    }
+  }
+
+  private void shutdownObjectStore() {
+    if (isObjStoreInitialized) {
+      objStore.shutdown();
+      isObjStoreInitialized = false;
+    }
+  }
+
+  private void listFSRoot() {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    initObjectStore(conf);
+
+    Set<String> hdfsRoots = objStore.listFSRoots();
+    if (hdfsRoots != null) {
+      System.out.println("Listing FS Roots..");
+      for (String s : hdfsRoots) {
+        System.out.println(s);
+      }
+    } else {
+      System.err.println("Encountered error during listFSRoot - " +
+        "commit of JDO transaction failed");
+    }
+  }
+
+  private void executeJDOQLSelect(String query) {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    initObjectStore(conf);
+
+    System.out.println("Executing query: " + query);
+    try (ObjectStore.QueryWrapper queryWrapper = new ObjectStore.QueryWrapper()) {
+      Collection<?> result = objStore.executeJDOQLSelect(query, queryWrapper);
+      if (result != null) {
+        Iterator<?> iter = result.iterator();
+        while (iter.hasNext()) {
+          Object o = iter.next();
+          System.out.println(o.toString());
+        }
+      } else {
+        System.err.println("Encountered error during executeJDOQLSelect -" +
+            "commit of JDO transaction failed.");
+      }
+    }
+  }
+
+  private void executeJDOQLUpdate(String query) {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    initObjectStore(conf);
+
+    System.out.println("Executing query: " + query);
+    long numUpdated = objStore.executeJDOQLUpdate(query);
+    if (numUpdated >= 0) {
+      System.out.println("Number of records updated: " + numUpdated);
+    } else {
+      System.err.println("Encountered error during executeJDOQL -" +
+        "commit of JDO transaction failed.");
+    }
+  }
+
+  private int printUpdateLocations(Map<String, String> updateLocations) {
+    int count = 0;
+    for (String key: updateLocations.keySet()) {
+      String value = updateLocations.get(key);
+      System.out.println("old location: " + key + " new location: " + value);
+      count++;
+    }
+    return count;
+  }
+
+  private void printTblURIUpdateSummary(ObjectStore.UpdateMStorageDescriptorTblURIRetVal retVal,
+    boolean isDryRun) {
+    String tblName = "SDS";
+    String fieldName = "LOCATION";
+
+    if (retVal == null) {
+      System.err.println("Encountered error while executing updateMStorageDescriptorTblURI - " +
+          "commit of JDO transaction failed. Failed to update FSRoot locations in " +
+          fieldName + " field in " + tblName + " table.");
+    } else {
+      Map<String, String> updateLocations = retVal.getUpdateLocations();
+      if (isDryRun) {
+        System.out.println("Dry Run of updateLocation on table " + tblName + 
"..");
+      } else {
+        System.out.println("Successfully updated the following locations..");
+      }
+      int count = printUpdateLocations(updateLocations);
+      if (isDryRun) {
+        System.out.println("Found " + count + " records in " + tblName + " 
table to update");
+      } else {
+        System.out.println("Updated " + count + " records in " + tblName + " 
table");
+      }
+      List<String> badRecords = retVal.getBadRecords();
+      if (badRecords.size() > 0) {
+        System.err.println("Warning: Found records with bad " + fieldName +  " 
in " +
+          tblName + " table.. ");
+        for (String badRecord:badRecords) {
+          System.err.println("bad location URI: " + badRecord);
+        }
+      }
+      int numNullRecords = retVal.getNumNullRecords();
+      if (numNullRecords != 0) {
+        LOG.debug("Number of NULL location URI: " + numNullRecords +
+            ". This can happen for View or Index.");
+      }
+    }
+  }
+
+  private void printDatabaseURIUpdateSummary(ObjectStore.UpdateMDatabaseURIRetVal retVal,
+    boolean isDryRun) {
+    String tblName = "DBS";
+    String fieldName = "LOCATION_URI";
+
+    if (retVal == null) {
+      System.err.println("Encountered error while executing updateMDatabaseURI - " +
+          "commit of JDO transaction failed. Failed to update FSRoot locations in " +
+          fieldName + " field in " + tblName + " table.");
+    } else {
+      Map<String, String> updateLocations = retVal.getUpdateLocations();
+      if (isDryRun) {
+        System.out.println("Dry Run of updateLocation on table " + tblName + "..");
+      } else {
+        System.out.println("Successfully updated the following locations..");
+      }
+      int count = printUpdateLocations(updateLocations);
+      if (isDryRun) {
+        System.out.println("Found " + count + " records in " + tblName + " table to update");
+      } else {
+        System.out.println("Updated " + count + " records in " + tblName + " table");
+      }
+      List<String> badRecords = retVal.getBadRecords();
+      if (badRecords.size() > 0) {
+        System.err.println("Warning: Found records with bad " + fieldName + " in " +
+          tblName + " table.. ");
+        for (String badRecord : badRecords) {
+          System.err.println("bad location URI: " + badRecord);
+        }
+      }
+    }
+  }
+
+  private void printPropURIUpdateSummary(ObjectStore.UpdatePropURIRetVal retVal, String
+      tablePropKey, boolean isDryRun, String tblName, String methodName) {
+    if (retVal == null) {
+      System.err.println("Encountered error while executing " + methodName + " - " +
+          "commit of JDO transaction failed. Failed to update FSRoot locations in " +
+          "value field corresponding to " + tablePropKey + " in " + tblName + " table.");
+    } else {
+      Map<String, String> updateLocations = retVal.getUpdateLocations();
+      if (isDryRun) {
+        System.out.println("Dry Run of updateLocation on table " + tblName + "..");
+      } else {
+        System.out.println("Successfully updated the following locations..");
+      }
+      int count = printUpdateLocations(updateLocations);
+      if (isDryRun) {
+        System.out.println("Found " + count + " records in " + tblName + " table to update");
+      } else {
+        System.out.println("Updated " + count + " records in " + tblName + " table");
+      }
+      List<String> badRecords = retVal.getBadRecords();
+      if (badRecords.size() > 0) {
+        System.err.println("Warning: Found records with bad " + tablePropKey + " key in " +
+            tblName + " table.. ");
+        for (String badRecord : badRecords) {
+          System.err.println("bad location URI: " + badRecord);
+        }
+      }
+    }
+  }
+
+  private void printSerdePropURIUpdateSummary(ObjectStore.UpdateSerdeURIRetVal retVal,
+    String serdePropKey, boolean isDryRun) {
+    String tblName = "SERDE_PARAMS";
+
+    if (retVal == null) {
+      System.err.println("Encountered error while executing updateSerdeURI - " +
+        "commit of JDO transaction failed. Failed to update FSRoot locations in " +
+        "value field corresponding to " + serdePropKey + " in " + tblName + " table.");
+    } else {
+      Map<String, String> updateLocations = retVal.getUpdateLocations();
+      if (isDryRun) {
+        System.out.println("Dry Run of updateLocation on table " + tblName + "..");
+      } else {
+        System.out.println("Successfully updated the following locations..");
+      }
+      int count = printUpdateLocations(updateLocations);
+      if (isDryRun) {
+        System.out.println("Found " + count + " records in " + tblName + " table to update");
+      } else {
+        System.out.println("Updated " + count + " records in " + tblName + " table");
+      }
+      List<String> badRecords = retVal.getBadRecords();
+      if (badRecords.size() > 0) {
+        System.err.println("Warning: Found records with bad " + serdePropKey + " key in " +
+        tblName + " table.. ");
+        for (String badRecord : badRecords) {
+          System.err.println("bad location URI: " + badRecord);
+        }
+      }
+    }
+  }
+
+  public void updateFSRootLocation(URI oldURI, URI newURI, String serdePropKey, String
+      tablePropKey, boolean isDryRun) {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    initObjectStore(conf);
+
+    System.out.println("Looking for LOCATION_URI field in DBS table to update..");
+    ObjectStore.UpdateMDatabaseURIRetVal updateMDBURIRetVal =
+        objStore.updateMDatabaseURI(oldURI, newURI, isDryRun);
+    printDatabaseURIUpdateSummary(updateMDBURIRetVal, isDryRun);
+
+    System.out.println("Looking for LOCATION field in SDS table to update..");
+    ObjectStore.UpdateMStorageDescriptorTblURIRetVal updateTblURIRetVal =
+        objStore.updateMStorageDescriptorTblURI(oldURI, newURI, isDryRun);
+    printTblURIUpdateSummary(updateTblURIRetVal, isDryRun);
+
+    if (tablePropKey != null) {
+      System.out.println("Looking for value of " + tablePropKey + " key in TABLE_PARAMS table " +
+          "to update..");
+      ObjectStore.UpdatePropURIRetVal updateTblPropURIRetVal =
+          objStore.updateTblPropURI(oldURI, newURI, tablePropKey, isDryRun);
+      printPropURIUpdateSummary(updateTblPropURIRetVal, tablePropKey, isDryRun, "TABLE_PARAMS",
+          "updateTblPropURI");
+
+      System.out.println("Looking for value of " + tablePropKey + " key in SD_PARAMS table " +
+        "to update..");
+      ObjectStore.UpdatePropURIRetVal updatePropURIRetVal = objStore
+          .updateMStorageDescriptorTblPropURI(oldURI, newURI, tablePropKey, isDryRun);
+      printPropURIUpdateSummary(updatePropURIRetVal, tablePropKey, isDryRun, "SD_PARAMS",
+          "updateMStorageDescriptorTblPropURI");
+    }
+
+    if (serdePropKey != null) {
+      System.out.println("Looking for value of " + serdePropKey + " key in SERDE_PARAMS table " +
+        "to update..");
+      ObjectStore.UpdateSerdeURIRetVal updateSerdeURIretVal =
+          objStore.updateSerdeURI(oldURI, newURI, serdePropKey, isDryRun);
+      printSerdePropURIUpdateSummary(updateSerdeURIretVal, serdePropKey, isDryRun);
+    }
+  }
+
+  private static void printAndExit(HiveMetaTool metaTool) {
+    HelpFormatter formatter = new HelpFormatter();
+    formatter.printHelp("metatool", metaTool.cmdLineOptions);
+    System.exit(1);
+  }
+
+  public static void main(String[] args) {
+    HiveMetaTool metaTool = new HiveMetaTool();
+    metaTool.init();
+    CommandLineParser parser = new GnuParser();
+    CommandLine line = null;
+
+    try {
+      try {
+        line = parser.parse(metaTool.cmdLineOptions, args);
+      } catch (ParseException e) {
+        System.err.println("HiveMetaTool:Parsing failed.  Reason: " + 
e.getLocalizedMessage());
+        printAndExit(metaTool);
+      }
+
+      if (line.hasOption("help")) {
+        HelpFormatter formatter = new HelpFormatter();
+        formatter.printHelp("metatool", metaTool.cmdLineOptions);
+      } else if (line.hasOption("listFSRoot")) {
+        if (line.hasOption("dryRun")) {
+          System.err.println("HiveMetaTool: dryRun is not valid with 
listFSRoot");
+          printAndExit(metaTool);
+        } else if (line.hasOption("serdePropKey")) {
+          System.err.println("HiveMetaTool: serdePropKey is not valid with 
listFSRoot");
+          printAndExit(metaTool);
+        } else if (line.hasOption("tablePropKey")) {
+          System.err.println("HiveMetaTool: tablePropKey is not valid with 
listFSRoot");
+          printAndExit(metaTool);
+        }
+        metaTool.listFSRoot();
+      } else if (line.hasOption("executeJDOQL")) {
+        String query = line.getOptionValue("executeJDOQL");
+        if (line.hasOption("dryRun")) {
+          System.err.println("HiveMetaTool: dryRun is not valid with 
executeJDOQL");
+          printAndExit(metaTool);
+        } else if (line.hasOption("serdePropKey")) {
+          System.err.println("HiveMetaTool: serdePropKey is not valid with 
executeJDOQL");
+          printAndExit(metaTool);
+        } else if (line.hasOption("tablePropKey")) {
+          System.err.println("HiveMetaTool: tablePropKey is not valid with 
executeJDOQL");
+          printAndExit(metaTool);
+        }
+        if (query.toLowerCase().trim().startsWith("select")) {
+          metaTool.executeJDOQLSelect(query);
+        } else if (query.toLowerCase().trim().startsWith("update")) {
+          metaTool.executeJDOQLUpdate(query);
+        } else {
+          System.err.println("HiveMetaTool:Unsupported statement type");
+          printAndExit(metaTool);
+        }
+      } else if (line.hasOption("updateLocation")) {
+        String[] loc = line.getOptionValues("updateLocation");
+        boolean isDryRun = false;
+        String serdepropKey = null;
+        String tablePropKey = null;
+
+        if (loc.length != 2 && loc.length != 3) {
+          System.err.println("HiveMetaTool:updateLocation takes in 2 required 
and 1 " +
+              "optional arguments but " +
+              "was passed " + loc.length + " arguments");
+          printAndExit(metaTool);
+        }
+
+        Path newPath = new Path(loc[0]);
+        Path oldPath = new Path(loc[1]);
+
+        URI oldURI = oldPath.toUri();
+        URI newURI = newPath.toUri();
+
+        if (line.hasOption("dryRun")) {
+          isDryRun = true;
+        }
+
+        if (line.hasOption("serdePropKey")) {
+          serdepropKey = line.getOptionValue("serdePropKey");
+        }
+
+        if (line.hasOption("tablePropKey")) {
+          tablePropKey = line.getOptionValue("tablePropKey");
+        }
+
+        /*
+         * validate input - Both new and old URI should contain valid host names and valid schemes.
+         * port is optional in both the URIs since HDFS HA NN URI doesn't have a port.
+         */
+        if (oldURI.getHost() == null || newURI.getHost() == null) {
+          System.err.println("HiveMetaTool:A valid host is required in both old-loc and new-loc");
+        } else if (oldURI.getScheme() == null || newURI.getScheme() == null) {
+          System.err.println("HiveMetaTool:A valid scheme is required in both old-loc and new-loc");
+        } else {
+          metaTool.updateFSRootLocation(oldURI, newURI, serdepropKey, tablePropKey, isDryRun);
+        }
+      } else {
+        if (line.hasOption("dryRun")) {
+          System.err.println("HiveMetaTool: dryRun is not a valid standalone option");
+        } else if (line.hasOption("serdePropKey")) {
+          System.err.println("HiveMetaTool: serdePropKey is not a valid standalone option");
+        } else if (line.hasOption("tablePropKey")) {
+          System.err.println("HiveMetaTool: tablePropKey is not a valid standalone option");
+          printAndExit(metaTool);
+        } else {
+          System.err.print("HiveMetaTool:Parsing failed.  Reason: Invalid arguments: ");
+          for (String s : line.getArgs()) {
+            System.err.print(s + " ");
+          }
+          System.err.println();
+        }
+        printAndExit(metaTool);
+      }
+    } finally {
+      metaTool.shutdownObjectStore();
+    }
+  }
+}
\ No newline at end of file

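The option handling in main() is easiest to see from example invocations. A sketch driving the tool in-process (argument values are illustrative; updateLocation is shown with -dryRun so nothing is persisted, and note that printAndExit terminates the JVM on bad arguments):

    public class MetaToolDemo {
      public static void main(String[] args) throws Exception {
        // Print the FS root locations currently recorded in the metastore.
        org.apache.hadoop.hive.metastore.tools.HiveMetaTool.main(
            new String[] {"-listFSRoot"});
        // Preview an HA name-node migration without persisting any changes.
        org.apache.hadoop.hive.metastore.tools.HiveMetaTool.main(
            new String[] {"-updateLocation", "hdfs://nameservice1",
                "hdfs://oldnn:8020", "-dryRun"});
      }
    }
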
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
index 5dcedcd..b44ff8c 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
@@ -53,6 +53,16 @@ public class FileUtils {
       return !name.startsWith("_") && !name.startsWith(".");
     }
   };
+  /**
+   * Filter that filters out hidden files
+   */
+  private static final PathFilter hiddenFileFilter = new PathFilter() {
+    @Override
+    public boolean accept(Path p) {
+      String name = p.getName();
+      return !name.startsWith("_") && !name.startsWith(".");
+    }
+  };
 
   /**
    * Move a particular file or directory to the trash.
@@ -424,4 +434,23 @@ public class FileUtils {
       throw new MetaException("Unable to : " + path);
     }
   }
+
+  /**
+   * Utility method that determines if a specified directory already has
+   * contents (non-hidden files) or not - useful to determine if an
+   * immutable table already has contents, for example.
+   *
+   * @param fs filesystem the path lives on
+   * @param path directory to check
+   * @return true if the directory is missing or contains no non-hidden files
+   * @throws IOException
+   */
+  public static boolean isDirEmpty(FileSystem fs, Path path) throws IOException {
+
+    if (fs.exists(path)) {
+      FileStatus[] status = fs.globStatus(new Path(path, "*"), hiddenFileFilter);
+      if (status.length > 0) {
+        return false;
+      }
+    }
+    return true;
+  }
 }

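A typical caller of the new isDirEmpty guards a write into an immutable table location. A minimal sketch (paths are illustrative, and conf is assumed to be an existing Hadoop Configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.utils.FileUtils;

    public class ImmutableLoadCheck {
      static void check(Configuration conf) throws Exception {
        Path tableDir = new Path("/warehouse/sales.db/orders");
        FileSystem fs = tableDir.getFileSystem(conf);
        // Hidden files (leading '_' or '.') do not count as contents.
        if (!FileUtils.isDirEmpty(fs, tableDir)) {
          throw new IllegalStateException(tableDir + " already has non-hidden contents");
        }
      }
    }
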
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
index beee86f..cde34bc 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
@@ -52,7 +53,6 @@ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFa
 import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger;
 import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.security.SaslRpcServer;
@@ -64,9 +64,13 @@ import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
 import java.io.File;
+import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.math.BigDecimal;
 import java.math.BigInteger;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
 import java.net.URL;
 import java.net.URLClassLoader;
 import java.nio.charset.Charset;
@@ -81,6 +85,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.SortedMap;
 import java.util.SortedSet;
 import java.util.TreeMap;
@@ -298,8 +303,8 @@ public class MetaStoreUtils {
     }
   }
 
-  public static String getPartitionValWithInvalidCharacter(List<String> partVals,
-                                                           Pattern partitionValidationPattern) {
+  private static String getPartitionValWithInvalidCharacter(List<String> partVals,
+                                                            Pattern partitionValidationPattern) {
     if (partitionValidationPattern == null) {
       return null;
     }
@@ -499,8 +504,9 @@ public class MetaStoreUtils {
   }
 
   // check if stats need to be (re)calculated
-  public static boolean requireCalStats(Configuration hiveConf, Partition oldPart,
-    Partition newPart, Table tbl, EnvironmentContext environmentContext) {
+  public static boolean requireCalStats(Partition oldPart,
+                                        Partition newPart, Table tbl,
+                                        EnvironmentContext environmentContext) {
 
     if (environmentContext != null
         && environmentContext.isSetProperties()
@@ -792,9 +798,11 @@ public class MetaStoreUtils {
                 Configuration.class).newInstance(conf);
         listeners.add(listener);
       } catch (InvocationTargetException ie) {
+        LOG.error("Got InvocationTargetException", ie);
         throw new MetaException("Failed to instantiate listener named: "+
             listenerImpl + ", reason: " + ie.getCause());
       } catch (Exception e) {
+        LOG.error("Got Exception", e);
         throw new MetaException("Failed to instantiate listener named: "+
             listenerImpl + ", reason: " + e);
       }
@@ -961,13 +969,20 @@ public class MetaStoreUtils {
       ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
       ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName());
       if (statsObjOld != null) {
+        // Because we already confirmed that the stats are accurate, the column
+        // types cannot have changed while the column stats remained accurate.
+        assert (statsObjNew.getStatsData().getSetField() == statsObjOld.getStatsData()
+            .getSetField());
         // If statsObjOld is found, we can merge.
         ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew,
             statsObjOld);
         merger.merge(statsObjNew, statsObjOld);
       }
+      // If statsObjOld is not found, we just use statsObjNew as it is accurate.
       list.add(statsObjNew);
     }
+    // In all other cases, we cannot merge.
     csNew.setStatsObj(list);
   }
 
@@ -1064,15 +1079,485 @@ public class MetaStoreUtils {
     return machineList.includes(ipAddress);
   }
 
-  // TODO This should be moved to MetaStoreTestUtils once it is moved into standalone-metastore.
   /**
-   * Setup a configuration file for standalone mode.  There are a few config variables that have
-   * defaults that require parts of Hive that aren't present in standalone mode.  This method
-   * sets them to something that will work without the rest of Hive.
-   * @param conf Configuration object
+   * Convert FieldSchemas to Thrift DDL.
    */
-  public static void setConfForStandloneMode(Configuration conf) {
-    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TASK_THREADS_ALWAYS,
-        EventCleanerTask.class.getName());
+  public static String getDDLFromFieldSchema(String structName,
+                                             List<FieldSchema> fieldSchemas) {
+    StringBuilder ddl = new StringBuilder();
+    ddl.append("struct ");
+    ddl.append(structName);
+    ddl.append(" { ");
+    boolean first = true;
+    for (FieldSchema col : fieldSchemas) {
+      if (first) {
+        first = false;
+      } else {
+        ddl.append(", ");
+      }
+      ddl.append(ColumnType.typeToThriftType(col.getType()));
+      ddl.append(' ');
+      ddl.append(col.getName());
+    }
+    ddl.append("}");
+
+    LOG.trace("DDL: {}", ddl);
+    return ddl.toString();
+  }
+
+  public static Properties getTableMetadata(
+      org.apache.hadoop.hive.metastore.api.Table table) {
+    return MetaStoreUtils.getSchema(table.getSd(), table.getSd(), table
+        .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys());
+  }
+
+  public static Properties getPartitionMetadata(
+      org.apache.hadoop.hive.metastore.api.Partition partition,
+      org.apache.hadoop.hive.metastore.api.Table table) {
+    return MetaStoreUtils
+        .getSchema(partition.getSd(), partition.getSd(), partition
+                .getParameters(), table.getDbName(), table.getTableName(),
+            table.getPartitionKeys());
+  }
+
+  public static Properties getSchema(
+      org.apache.hadoop.hive.metastore.api.Partition part,
+      org.apache.hadoop.hive.metastore.api.Table table) {
+    return MetaStoreUtils.getSchema(part.getSd(), table.getSd(), table
+        .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys());
+  }
+
+  /**
+   * Get partition level schema from table level schema.
+   * This function will use the same column names, column types and partition keys for
+   * each partition Properties. Their values are copied from the table Properties. This
+   * is mainly to save CPU and memory. CPU is saved because the first time the
+   * StorageDescriptor column names are accessed, JDO needs to execute a SQL query to
+   * retrieve the data. If we know the data will be the same as the table level schema
+   * and they are immutable, we should just reuse the table level schema objects.
+   *
+   * @param sd The Partition level Storage Descriptor.
+   * @param parameters partition level parameters
+   * @param tblSchema The table level schema from which this partition should be copied.
+   * @return the properties
+   */
+  public static Properties getPartSchemaFromTableSchema(
+      StorageDescriptor sd,
+      Map<String, String> parameters,
+      Properties tblSchema) {
+
+    // Inherit most properties from the table level schema and overwrite some properties
+    // in the following code.
+    // This is mainly for saving CPU and memory to reuse the column names, types and
+    // partition columns in the table level schema.
+    Properties schema = (Properties) tblSchema.clone();
+
+    // InputFormat
+    String inputFormat = sd.getInputFormat();
+    if (inputFormat == null || inputFormat.length() == 0) {
+      String tblInput =
+          schema.getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT);
+      if (tblInput == null) {
+        inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName();
+      } else {
+        inputFormat = tblInput;
+      }
+    }
+    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT,
+        inputFormat);
+
+    // OutputFormat
+    String outputFormat = sd.getOutputFormat();
+    if (outputFormat == null || outputFormat.length() == 0) {
+      String tblOutput =
+          schema.getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT);
+      if (tblOutput == null) {
+        outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName();
+      } else {
+        outputFormat = tblOutput;
+      }
+    }
+    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT,
+        outputFormat);
+
+    // Location
+    if (sd.getLocation() != null) {
+      schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION,
+          sd.getLocation());
+    }
+
+    // Bucket count
+    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_COUNT,
+        Integer.toString(sd.getNumBuckets()));
+
+    if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) {
+      schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_FIELD_NAME,
+          sd.getBucketCols().get(0));
+    }
+
+    // SerdeInfo
+    if (sd.getSerdeInfo() != null) {
+
+      // We should not update the following 3 values if SerDeInfo contains these.
+      // This is to keep backward compatible with getSchema(), where these 3 keys
+      // are updated after SerDeInfo properties got copied.
+      String cols = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS;
+      String colTypes = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES;
+      String parts = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS;
+
+      for (Map.Entry<String,String> param : 
sd.getSerdeInfo().getParameters().entrySet()) {
+        String key = param.getKey();
+        if (schema.get(key) != null &&
+            (key.equals(cols) || key.equals(colTypes) || key.equals(parts))) {
+          continue;
+        }
+        schema.put(key, (param.getValue() != null) ? param.getValue() : StringUtils.EMPTY);
+      }
+
+      if (sd.getSerdeInfo().getSerializationLib() != null) {
+        schema.setProperty(ColumnType.SERIALIZATION_LIB, sd.getSerdeInfo().getSerializationLib());
+      }
+    }
+
+    // skipping columns since partition level field schemas are the same as table level's
+    // skipping partition keys since they are the same as the table level partition keys
+
+    if (parameters != null) {
+      for (Map.Entry<String, String> e : parameters.entrySet()) {
+        schema.setProperty(e.getKey(), e.getValue());
+      }
+    }
+
+    return schema;
+  }
+
+  private static Properties addCols(Properties schema, List<FieldSchema> cols) {
+
+    StringBuilder colNameBuf = new StringBuilder();
+    StringBuilder colTypeBuf = new StringBuilder();
+    StringBuilder colComment = new StringBuilder();
+
+    boolean first = true;
+    String columnNameDelimiter = getColumnNameDelimiter(cols);
+    for (FieldSchema col : cols) {
+      if (!first) {
+        colNameBuf.append(columnNameDelimiter);
+        colTypeBuf.append(":");
+        colComment.append('\0');
+      }
+      colNameBuf.append(col.getName());
+      colTypeBuf.append(col.getType());
+      colComment.append((null != col.getComment()) ? col.getComment() : StringUtils.EMPTY);
+      first = false;
+    }
+    schema.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS,
+        colNameBuf.toString());
+    schema.setProperty(ColumnType.COLUMN_NAME_DELIMITER, columnNameDelimiter);
+    String colTypes = colTypeBuf.toString();
+    schema.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES,
+        colTypes);
+    schema.setProperty("columns.comments", colComment.toString());
+
+    return schema;
+  }
+
+  public static Properties getSchemaWithoutCols(StorageDescriptor sd,
+                                                Map<String, String> parameters, String databaseName, String tableName,
+                                                List<FieldSchema> partitionKeys) {
+    Properties schema = new Properties();
+    String inputFormat = sd.getInputFormat();
+    if (inputFormat == null || inputFormat.length() == 0) {
+      inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class
+          .getName();
+    }
+    schema.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT,
+        inputFormat);
+    String outputFormat = sd.getOutputFormat();
+    if (outputFormat == null || outputFormat.length() == 0) {
+      outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class
+          .getName();
+    }
+    schema.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT,
+        outputFormat);
+
+    schema.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME,
+        databaseName + "." + tableName);
+
+    if (sd.getLocation() != null) {
+      schema.setProperty(
+          org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION,
+          sd.getLocation());
+    }
+    schema.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_COUNT,
+        Integer.toString(sd.getNumBuckets()));
+    if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) {
+      schema.setProperty(
+          org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_FIELD_NAME,
+          sd.getBucketCols().get(0));
+    }
+    if (sd.getSerdeInfo() != null) {
+      for (Map.Entry<String, String> param : sd.getSerdeInfo().getParameters().entrySet()) {
+        schema.put(param.getKey(), (param.getValue() != null) ? param.getValue() : StringUtils.EMPTY);
+      }
+
+      if (sd.getSerdeInfo().getSerializationLib() != null) {
+        schema.setProperty(ColumnType.SERIALIZATION_LIB, sd.getSerdeInfo().getSerializationLib());
+      }
+    }
+
+    if (sd.getCols() != null) {
+      schema.setProperty(ColumnType.SERIALIZATION_DDL, getDDLFromFieldSchema(tableName, sd.getCols()));
+    }
+
+    String partString = StringUtils.EMPTY;
+    String partStringSep = StringUtils.EMPTY;
+    String partTypesString = StringUtils.EMPTY;
+    String partTypesStringSep = StringUtils.EMPTY;
+    for (FieldSchema partKey : partitionKeys) {
+      partString = partString.concat(partStringSep);
+      partString = partString.concat(partKey.getName());
+      partTypesString = partTypesString.concat(partTypesStringSep);
+      partTypesString = partTypesString.concat(partKey.getType());
+      if (partStringSep.length() == 0) {
+        partStringSep = "/";
+        partTypesStringSep = ":";
+      }
+    }
+    if (partString.length() > 0) {
+      schema.setProperty(
+          org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS,
+          partString);
+      schema.setProperty(
+          org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES,
+          partTypesString);
+    }
+
+    if (parameters != null) {
+      for (Map.Entry<String, String> e : parameters.entrySet()) {
+        // add non-null parameters to the schema
+        if (e.getValue() != null) {
+          schema.setProperty(e.getKey(), e.getValue());
+        }
+      }
+    }
+
+    return schema;
+  }
+
+  public static Properties getSchema(
+      org.apache.hadoop.hive.metastore.api.StorageDescriptor sd,
+      org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd,
+      Map<String, String> parameters, String databaseName, String tableName,
+      List<FieldSchema> partitionKeys) {
+
+    return addCols(getSchemaWithoutCols(sd, parameters, databaseName, tableName, partitionKeys), tblsd.getCols());
+  }
+
+  public static String getColumnNameDelimiter(List<FieldSchema> fieldSchemas) {
+    // first check whether any field name contains a comma
+    for (int i = 0; i < fieldSchemas.size(); i++) {
+      if (fieldSchemas.get(i).getName().contains(",")) {
+        return String.valueOf(ColumnType.COLUMN_COMMENTS_DELIMITER);
+      }
+    }
+    return String.valueOf(',');
+  }
+
+  /**
+   * Convert FieldSchemas to columnNames.
+   */
+  public static String getColumnNamesFromFieldSchema(List<FieldSchema> fieldSchemas) {
+    String delimiter = getColumnNameDelimiter(fieldSchemas);
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < fieldSchemas.size(); i++) {
+      if (i > 0) {
+        sb.append(delimiter);
+      }
+      sb.append(fieldSchemas.get(i).getName());
+    }
+    return sb.toString();
+  }
+
+  /**
+   * Convert FieldSchemas to columnTypes.
+   */
+  public static String getColumnTypesFromFieldSchema(
+      List<FieldSchema> fieldSchemas) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < fieldSchemas.size(); i++) {
+      if (i > 0) {
+        sb.append(",");
+      }
+      sb.append(fieldSchemas.get(i).getType());
+    }
+    return sb.toString();
+  }
+
+  public static String getColumnCommentsFromFieldSchema(List<FieldSchema> fieldSchemas) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < fieldSchemas.size(); i++) {
+      if (i > 0) {
+        sb.append(ColumnType.COLUMN_COMMENTS_DELIMITER);
+      }
+      sb.append(fieldSchemas.get(i).getComment());
+    }
+    return sb.toString();
+  }
+
+  public static int startMetaStore() throws Exception {
+    return startMetaStore(HadoopThriftAuthBridge.getBridge(), null);
+  }
+
+  public static int startMetaStore(final HadoopThriftAuthBridge bridge, Configuration conf)
+      throws Exception {
+    int port = findFreePort();
+    startMetaStore(port, bridge, conf);
+    return port;
+  }
+
+  public static int startMetaStore(Configuration conf) throws Exception {
+    return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf);
+  }
+
+  public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception {
+    startMetaStore(port, bridge, null);
+  }
+
+  public static void startMetaStore(final int port,
+                                    final HadoopThriftAuthBridge bridge, Configuration hiveConf)
+      throws Exception {
+    if (hiveConf == null) {
+      hiveConf = MetastoreConf.newMetastoreConf();
+    }
+    final Configuration finalHiveConf = hiveConf;
+    Thread thread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          HiveMetaStore.startMetaStore(port, bridge, finalHiveConf);
+        } catch (Throwable e) {
+          LOG.error("Metastore Thrift Server threw an exception...", e);
+        }
+      }
+    });
+    thread.setDaemon(true);
+    thread.start();
+    loopUntilHMSReady(port);
+  }
+
+  /**
+   * A simple connect test to make sure that the metastore is up.
+   * @throws Exception if the metastore cannot be reached within the retry limit
+   */
+  private static void loopUntilHMSReady(int port) throws Exception {
+    int retries = 0;
+    Exception exc;
+    while (true) {
+      try {
+        Socket socket = new Socket();
+        socket.connect(new InetSocketAddress(port), 5000);
+        socket.close();
+        return;
+      } catch (Exception e) {
+        if (retries++ > 60) { // give up
+          exc = e;
+          break;
+        }
+        Thread.sleep(1000);
+      }
+    }
+    // something is preventing metastore from starting
+    // print the stack from all threads for debugging purposes
+    LOG.error("Unable to connect to metastore server: " + exc.getMessage());
+    LOG.info("Printing all thread stack traces for debugging before throwing exception.");
+    LOG.info(getAllThreadStacksAsString());
+    throw exc;
+  }
+
+  private static String getAllThreadStacksAsString() {
+    Map<Thread, StackTraceElement[]> threadStacks = Thread.getAllStackTraces();
+    StringBuilder sb = new StringBuilder();
+    for (Map.Entry<Thread, StackTraceElement[]> entry : threadStacks.entrySet()) {
+      Thread t = entry.getKey();
+      sb.append(System.lineSeparator());
+      sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState());
+      addStackString(entry.getValue(), sb);
+    }
+    return sb.toString();
+  }
+
+  private static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) {
+    sb.append(System.lineSeparator());
+    for (StackTraceElement stackElem : stackElems) {
+      sb.append(stackElem).append(System.lineSeparator());
+    }
+  }
+
+  /**
+   * Finds a free port on the machine.
+   *
+   * @return a port number that was free at the time of the check
+   * @throws IOException if no server socket could be opened
+   */
+  public static int findFreePort() throws IOException {
+    ServerSocket socket = new ServerSocket(0);
+    int port = socket.getLocalPort();
+    socket.close();
+    return port;
+  }
+
+  /**
+   * Finds a free port on the machine, while allowing the caller to specify
+   * one port number that must not be returned.
+   */
+  public static int findFreePortExcepting(int portToExclude) throws IOException {
+    ServerSocket socket1 = null;
+    ServerSocket socket2 = null;
+    try {
+      socket1 = new ServerSocket(0);
+      socket2 = new ServerSocket(0);
+      if (socket1.getLocalPort() != portToExclude) {
+        return socket1.getLocalPort();
+      }
+      // If we're here, then socket1.getLocalPort was the port to exclude
+      // Since both sockets were open together at a point in time, we're
+      // guaranteed that socket2.getLocalPort() is not the same.
+      return socket2.getLocalPort();
+    } finally {
+      if (socket1 != null) {
+        socket1.close();
+      }
+      if (socket2 != null) {
+        socket2.close();
+      }
+    }
+  }
+
+  public static String getIndexTableName(String dbName, String baseTblName, String indexName) {
+    return dbName + "__" + baseTblName + "_" + indexName + "__";
+  }
+
+  public static boolean isMaterializedViewTable(Table table) {
+    if (table == null) {
+      return false;
+    }
+    return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType());
+  }
+
+  public static List<String> getColumnNames(List<FieldSchema> schema) {
+    List<String> cols = new ArrayList<>(schema.size());
+    for (FieldSchema fs : schema) {
+      cols.add(fs.getName());
+    }
+    return cols;
   }
 }
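
Taken together, the helpers above derive a partition's Properties from its
StorageDescriptor while reusing the immutable table-level schema objects. A
minimal sketch of how they might be exercised follows; the MetaStoreUtils
class name and all of the setup values are illustrative assumptions, not part
of this patch:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Properties;

    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.SerDeInfo;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    public class PartSchemaSketch {
      public static void main(String[] args) {
        // Hypothetical table-level descriptor with a single column; the
        // input/output formats are left unset so the helpers fall back to
        // the SequenceFile formats.
        StorageDescriptor tblSd = new StorageDescriptor();
        tblSd.setCols(Collections.singletonList(new FieldSchema("id", "int", "row id")));
        SerDeInfo serde = new SerDeInfo();
        serde.setParameters(new HashMap<>());
        tblSd.setSerdeInfo(serde);
        tblSd.setNumBuckets(-1);

        Properties tblSchema = MetaStoreUtils.getSchema(tblSd, tblSd,
            new HashMap<>(), "default", "t1", Collections.emptyList());

        // Partition descriptor that only overrides the location; columns,
        // types and serde settings are inherited from the cloned table schema.
        StorageDescriptor partSd = new StorageDescriptor(tblSd);
        partSd.setLocation("/warehouse/t1/ds=2018-01-01");
        Properties partSchema = MetaStoreUtils.getPartSchemaFromTableSchema(
            partSd, new HashMap<>(), tblSchema);
        System.out.println(partSchema.getProperty(hive_metastoreConstants.META_TABLE_LOCATION));
      }
    }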

http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
index 41a18cb..0b0cfbd 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
@@ -36,11 +36,15 @@ import org.apache.zookeeper.client.ZooKeeperSaslClient;
 import javax.security.auth.login.AppConfigurationEntry;
 import org.apache.thrift.transport.TSSLTransportFactory;
 import org.apache.thrift.transport.TServerSocket;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
 import org.apache.thrift.transport.TTransportException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.net.ssl.SSLParameters;
 import javax.net.ssl.SSLServerSocket;
+import javax.net.ssl.SSLSocket;
 import javax.security.auth.login.LoginException;
 import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
 
@@ -284,4 +288,26 @@ public class SecurityUtils {
     }
     return thriftServerSocket;
   }
+
+  public static TTransport getSSLSocket(String host, int port, int loginTimeout,
+                                        String trustStorePath, String trustStorePassWord) throws TTransportException {
+    TSSLTransportFactory.TSSLTransportParameters params =
+        new TSSLTransportFactory.TSSLTransportParameters();
+    params.setTrustStore(trustStorePath, trustStorePassWord);
+    params.requireClientAuth(true);
+    // The underlying SSLSocket object is bound to host:port with the given SO_TIMEOUT and
+    // SSLContext created with the given params
+    TSocket tSSLSocket = TSSLTransportFactory.getClientSocket(host, port, loginTimeout, params);
+    return getSSLSocketWithHttps(tSSLSocket);
+  }
+
+  // Using the HTTPS endpoint identification algorithm enables verification of
+  // the server's CN/subjectAltName against the hostname we connected to.
+  private static TSocket getSSLSocketWithHttps(TSocket tSSLSocket) throws TTransportException {
+    SSLSocket sslSocket = (SSLSocket) tSSLSocket.getSocket();
+    SSLParameters sslParams = sslSocket.getSSLParameters();
+    sslParams.setEndpointIdentificationAlgorithm("HTTPS");
+    sslSocket.setSSLParameters(sslParams);
+    return new TSocket(sslSocket);
+  }
 }
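
The new getSSLSocket helper returns a client transport whose underlying
SSLSocket enforces hostname verification through the HTTPS endpoint
identification algorithm. A hedged usage sketch; the host, port, timeout, and
truststore values below are placeholders:

    import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
    import org.apache.thrift.transport.TTransport;
    import org.apache.thrift.transport.TTransportException;

    public class SslClientSketch {
      public static void main(String[] args) throws TTransportException {
        // Placeholder connection settings; in practice these come from the
        // metastore's SSL and truststore configuration.
        TTransport transport = SecurityUtils.getSSLSocket(
            "metastore.example.com", 9083, 10000,
            "/etc/hive/conf/truststore.jks", "changeit");
        try {
          // The returned transport is already connected; hand it to a Thrift
          // client. A certificate that does not match the hostname makes the
          // handshake fail rather than silently trusting the peer.
        } finally {
          transport.close();
        }
      }
    }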

http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java
new file mode 100644
index 0000000..bde29a1
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import javax.jdo.JDOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+
+/**
+ *
+ * AlternateFailurePreListener.
+ *
+ * An implementation of MetaStorePreEventListener which fails every other time it's invoked,
+ * starting with the first time.
+ *
+ * It also records and makes available the number of times it's been invoked.
+ */
+public class AlternateFailurePreListener extends MetaStorePreEventListener {
+
+  private static int callCount = 0;
+  private static boolean throwException = true;
+
+  public AlternateFailurePreListener(Configuration config) {
+    super(config);
+  }
+
+  @Override
+  public void onEvent(PreEventContext context) throws MetaException, NoSuchObjectException,
+      InvalidOperationException {
+
+    callCount++;
+    if (throwException) {
+      throwException = false;
+      throw new JDOException();
+    }
+
+    throwException = true;
+  }
+
+  public static int getCallCount() {
+    return callCount;
+  }
+}
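
To see this listener in action, a test registers it as a pre-event listener
before starting the metastore, so that every other call fails with a
JDOException and the retrying handler logic gets exercised. A sketch, assuming
the standard pre-event listener setting in MetastoreConf:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    public class AlternateFailureSetupSketch {
      // Builds a metastore Configuration with the failing pre-listener installed.
      public static Configuration newConf() {
        Configuration conf = MetastoreConf.newMetastoreConf();
        MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PRE_EVENT_LISTENERS,
            "org.apache.hadoop.hive.metastore.AlternateFailurePreListener");
        return conf;
      }
    }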

http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java
new file mode 100644
index 0000000..ca1f10b
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+
+
+/** A dummy implementation for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener}
+ * for testing purposes.
+ */
+public class DummyEndFunctionListener extends MetaStoreEndFunctionListener {
+
+  public static final List<String> funcNameList = new ArrayList<>();
+  public static final List<MetaStoreEndFunctionContext> contextList =
+    new ArrayList<>();
+
+  public DummyEndFunctionListener(Configuration config) {
+    super(config);
+  }
+
+  @Override
+  public void onEndFunction(String functionName, MetaStoreEndFunctionContext context) {
+    funcNameList.add(functionName);
+    contextList.add(context);
+  }
+
+}
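
A test that installs this listener through the end-function listener setting
can afterwards inspect what was recorded; a small sketch (the surrounding
client calls are elided):

    import org.apache.hadoop.hive.metastore.DummyEndFunctionListener;

    public class EndFunctionCheckSketch {
      public static void main(String[] args) {
        // After some metastore client calls have completed, each invoked
        // function name and its context are available for inspection.
        int n = DummyEndFunctionListener.funcNameList.size();
        if (n > 0) {
          System.out.println("last completed call: "
              + DummyEndFunctionListener.funcNameList.get(n - 1));
        }
      }
    }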

http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyListener.java
new file mode 100644
index 0000000..baecd12
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyListener.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+
+/** A dummy implementation for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener}
+ * for testing purposes.
+ */
+public class DummyListener extends MetaStoreEventListener {
+
+  public static final List<ListenerEvent> notifyList = new ArrayList<>();
+
+  /**
+   * @return The last event received, or null if no event was received.
+   */
+  public static ListenerEvent getLastEvent() {
+    if (notifyList.isEmpty()) {
+      return null;
+    } else {
+      return notifyList.get(notifyList.size() - 1);
+    }
+  }
+
+  public DummyListener(Configuration config) {
+    super(config);
+  }
+
+  @Override
+  public void onConfigChange(ConfigChangeEvent configChange) {
+    addEvent(configChange);
+  }
+
+  @Override
+  public void onAddPartition(AddPartitionEvent partition) throws MetaException {
+    addEvent(partition);
+  }
+
+  @Override
+  public void onCreateDatabase(CreateDatabaseEvent db) throws MetaException {
+    addEvent(db);
+  }
+
+  @Override
+  public void onCreateTable(CreateTableEvent table) throws MetaException {
+    addEvent(table);
+  }
+
+  @Override
+  public void onDropDatabase(DropDatabaseEvent db) throws MetaException {
+    addEvent(db);
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partition) throws MetaException {
+    addEvent(partition);
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent table) throws MetaException {
+    addEvent(table);
+  }
+
+  @Override
+  public void onAlterTable(AlterTableEvent event) throws MetaException {
+    addEvent(event);
+  }
+
+  @Override
+  public void onAlterPartition(AlterPartitionEvent event) throws MetaException {
+    addEvent(event);
+  }
+
+  @Override
+  public void onLoadPartitionDone(LoadPartitionDoneEvent partEvent) throws MetaException {
+    addEvent(partEvent);
+  }
+
+  @Override
+  public void onAddIndex(AddIndexEvent indexEvent) throws MetaException {
+    addEvent(indexEvent);
+  }
+
+  @Override
+  public void onDropIndex(DropIndexEvent indexEvent) throws MetaException {
+    addEvent(indexEvent);
+  }
+
+  @Override
+  public void onAlterIndex(AlterIndexEvent indexEvent) throws MetaException {
+    addEvent(indexEvent);
+  }
+
+  @Override
+  public void onCreateFunction(CreateFunctionEvent fnEvent) throws MetaException {
+    addEvent(fnEvent);
+  }
+
+  @Override
+  public void onDropFunction(DropFunctionEvent fnEvent) throws MetaException {
+    addEvent(fnEvent);
+  }
+
+  private void addEvent(ListenerEvent event) {
+    notifyList.add(event);
+  }
+}
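
Tests typically install this listener through the event-listener setting and
then assert on the recorded events, for example that a table creation produced
a CreateTableEvent. A sketch; the table-creation call itself is elided:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.DummyListener;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
    import org.apache.hadoop.hive.metastore.events.ListenerEvent;

    public class DummyListenerSketch {
      public static Configuration newConf() {
        Configuration conf = MetastoreConf.newMetastoreConf();
        MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EVENT_LISTENERS,
            DummyListener.class.getName());
        return conf;
      }

      public static void checkLastEvent() {
        // ... create a table through a metastore client here ...
        ListenerEvent last = DummyListener.getLastEvent();
        if (last instanceof CreateTableEvent) {
          System.out.println("table created: "
              + ((CreateTableEvent) last).getTable().getTableName());
        }
      }
    }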

http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java
new file mode 100644
index 0000000..0f2a3c7
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+
+/*
+ * An implementation of MetaStoreInitListener to verify onInit is called when
+ * HMSHandler is initialized
+ */
+public class DummyMetaStoreInitListener extends MetaStoreInitListener {
+
+  public static boolean wasCalled = false;
+  public DummyMetaStoreInitListener(Configuration config) {
+    super(config);
+  }
+
+  @Override
+  public void onInit(MetaStoreInitContext context) throws MetaException {
+    wasCalled = true;
+  }
+}
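
This pairs naturally with the startMetaStore helpers added earlier in this
patch: register the listener, start an HMS instance, and check that the flag
flipped. A sketch; the INIT_HOOKS key name is assumed from the standard
init-hook setting:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.DummyMetaStoreInitListener;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    public class InitListenerSketch {
      public static Configuration newConf() {
        Configuration conf = MetastoreConf.newMetastoreConf();
        MetastoreConf.setVar(conf, MetastoreConf.ConfVars.INIT_HOOKS,
            DummyMetaStoreInitListener.class.getName());
        // After starting the metastore with this conf (e.g. via the
        // startMetaStore(Configuration) helper from this patch),
        // DummyMetaStoreInitListener.wasCalled should be true.
        return conf;
      }
    }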

http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyPreListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyPreListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyPreListener.java
new file mode 100644
index 0000000..0a68bac
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyPreListener.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+
+/**
+ *
+ * DummyPreListener.
+ *
+ * An implementation of MetaStorePreEventListener which stores the events it has seen in a list.
+ */
+public class DummyPreListener extends MetaStorePreEventListener {
+
+  public static final List<PreEventContext> notifyList = new ArrayList<>();
+
+  public DummyPreListener(Configuration config) {
+    super(config);
+  }
+
+  @Override
+  public void onEvent(PreEventContext context) throws MetaException, NoSuchObjectException,
+      InvalidOperationException {
+    notifyList.add(context);
+  }
+
+}
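
A test reading the recorded pre-events can filter them by type; a short sketch
(the PreCreateTableEvent type is one of the standard pre-event contexts):

    import org.apache.hadoop.hive.metastore.DummyPreListener;
    import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
    import org.apache.hadoop.hive.metastore.events.PreEventContext;

    public class PreListenerCheckSketch {
      public static void main(String[] args) {
        // Scan the recorded pre-events for pending table creations.
        for (PreEventContext ctx : DummyPreListener.notifyList) {
          if (ctx instanceof PreCreateTableEvent) {
            System.out.println("about to create: "
                + ((PreCreateTableEvent) ctx).getTable().getTableName());
          }
        }
      }
    }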
