PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers (addendum) (James Taylor)

Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7bf23ead
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7bf23ead
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7bf23ead

Branch: refs/heads/4.x-cdh5.12
Commit: 7bf23ead336ffcea37a41ff1f5bdd6ef1286fde8
Parents: bb5af35
Author: Ankit Singhal <ankitsingha...@gmail.com>
Authored: Wed May 16 17:37:18 2018 -0700
Committer: James Taylor <jtay...@salesforce.com>
Committed: Wed May 16 21:49:01 2018 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/util/ServerUtil.java     | 23 +++++++++++++++-----
 .../java/org/apache/phoenix/query/BaseTest.java | 13 +++++++----
 2 files changed, 26 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bf23ead/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 891839a..09701c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -31,8 +31,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import javax.annotation.concurrent.GuardedBy;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,15 +41,14 @@ import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.CoprocessorHConnection;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.HTablePool;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.controller.InterRegionServerIndexRpcControllerFactory;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException;
@@ -318,8 +315,8 @@ public class ServerUtil {
         }
 
         @Override
-        public synchronized void shutdown() {
-            // We need not close the cached connections as they are shared across the server.
+        public void shutdown() {
+            ConnectionFactory.shutdown();
         }
 
         @Override
@@ -342,6 +339,20 @@ public class ServerUtil {
         private static Map<ConnectionType, ClusterConnection> connections =
                 new ConcurrentHashMap<ConnectionType, ClusterConnection>();
 
+        public static void shutdown() {
+            synchronized (CoprocessorHConnectionTableFactory.class) {
+                for (ClusterConnection connection : connections.values()) {
+                    try {
+                        connection.close();
+                    } catch (IOException e) {
+                        LOG.warn("Unable to close coprocessor connection", e);
+                    }
+                }
+                connections.clear();
+            }
+        }
+            
+        
         public static ClusterConnection getConnection(final ConnectionType connectionType, final Configuration conf, final HRegionServer server) throws IOException {
             ClusterConnection connection = null;
             if((connection = connections.get(connectionType)) == null) {

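For context on the pattern this hunk completes: ConnectionFactory lazily creates one ClusterConnection per ConnectionType, caches it for the life of the region server, and the new static shutdown() closes every cached connection and clears the map so nothing leaks when the server stops. Below is a minimal, self-contained sketch of that cache-and-shutdown idiom, not the actual Phoenix code: java.io.Closeable stands in for HBase's ClusterConnection, and the ConnectionType values and createConnection helper are illustrative.

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch of the cache-and-shutdown idiom in ServerUtil.ConnectionFactory.
    public class ConnectionFactorySketch {
        public enum ConnectionType { COMPACTION, INDEX_WRITER } // illustrative values

        private static final Map<ConnectionType, Closeable> connections =
                new ConcurrentHashMap<>();

        public static Closeable getConnection(ConnectionType type) throws IOException {
            Closeable connection = connections.get(type);
            if (connection == null) {
                synchronized (ConnectionFactorySketch.class) {
                    connection = connections.get(type); // re-check under the lock
                    if (connection == null) {
                        connection = createConnection(type); // hypothetical helper
                        connections.put(type, connection);
                    }
                }
            }
            return connection;
        }

        // Close every cached connection once, then empty the cache so a
        // restarted server can repopulate it; mirrors the shutdown() added above.
        public static void shutdown() {
            synchronized (ConnectionFactorySketch.class) {
                for (Closeable connection : connections.values()) {
                    try {
                        connection.close();
                    } catch (IOException e) {
                        // Warn and keep going so one failure cannot block shutdown.
                        System.err.println("Unable to close cached connection: " + e);
                    }
                }
                connections.clear();
            }
        }

        private static Closeable createConnection(ConnectionType type) {
            return () -> { /* release sockets, thread pools, etc. */ };
        }
    }

In this sketch, creation and shutdown synchronize on the same class lock, so no connection can be inserted into the cache while the close loop is draining it.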
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bf23ead/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 0ea63e7..f49d291 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -137,6 +137,7 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.ServerUtil.ConnectionFactory;
 import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
@@ -477,10 +478,14 @@ public abstract class BaseTest {
                             } catch (Throwable t) {
                                 logger.error("Exception caught when shutting down mini cluster", t);
                             } finally {
-                                logger.info(
-                                    "Time in seconds spent in shutting down mini cluster with "
-                                            + numTables + " tables: "
-                                            + (System.currentTimeMillis() - startTime) / 1000);
+                                try {
+                                    ConnectionFactory.shutdown();
+                                } finally {
+                                    logger.info(
+                                        "Time in seconds spent in shutting down mini cluster with "
+                                                + numTables + " tables: "
+                                                + (System.currentTimeMillis() - startTime) / 1000);
+                                }
                             }
                         }
                     }

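The BaseTest change applies the familiar nested try/finally teardown idiom: ConnectionFactory.shutdown() runs even if stopping the mini cluster throws, and the timing log is emitted even if shutdown() itself throws. A compact, self-contained illustration of that ordering (the class and helper names are placeholders, not the BaseTest API):

    // Each cleanup step runs even if an earlier one threw; the first
    // exception still propagates after the finally blocks complete.
    public class TeardownSketch {
        public static void tearDownMiniCluster(int numTables) {
            long startTime = System.currentTimeMillis();
            try {
                stopMiniCluster(); // may throw; the steps below still run
            } finally {
                try {
                    shutdownConnectionCache(); // stands in for ConnectionFactory.shutdown()
                } finally {
                    System.out.println("Time in seconds spent in shutting down mini cluster with "
                            + numTables + " tables: "
                            + (System.currentTimeMillis() - startTime) / 1000);
                }
            }
        }

        private static void stopMiniCluster() { /* placeholder */ }
        private static void shutdownConnectionCache() { /* placeholder */ }
    }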