This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
     new 2699e530e4 PHOENIX-7223 Make Sure Tools Always Close HBase Connections on Exit
2699e530e4 is described below

commit 2699e530e41a5c0b1f08b607d2c3aca9064aae81
Author: Istvan Toth <st...@apache.org>
AuthorDate: Tue Feb 20 13:01:19 2024 +0100

    PHOENIX-7223 Make Sure Tools Always Close HBase Connections on Exit
    
    * don't throw exceptions from Tools, log the error and return non-zero exit code
    * Close all Phoenix Connections in Tools
    * Close cached CQSI objects on PhoenixDriver.close()
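    
    As a rough illustration of the pattern these changes move the tools towards,
    a minimal sketch of a Tool written this way could look like the snippet
    below. The class name and the doWork() helper are hypothetical, used only
    for this example; QueryUtil.getConnection(), HBaseConfiguration, the Hadoop
    Tool/ToolRunner API and the SLF4J logger are the APIs the tools already use.
    
        import java.sql.Connection;
        import java.sql.SQLException;
    
        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.conf.Configured;
        import org.apache.hadoop.hbase.HBaseConfiguration;
        import org.apache.hadoop.util.Tool;
        import org.apache.hadoop.util.ToolRunner;
        import org.apache.phoenix.util.QueryUtil;
        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;
    
        // Hypothetical example tool, not part of this commit.
        public class ExampleCleanupTool extends Configured implements Tool {
    
            private static final Logger LOGGER =
                    LoggerFactory.getLogger(ExampleCleanupTool.class);
    
            @Override
            public int run(String[] args) {
                Configuration conf = HBaseConfiguration.addHbaseResources(getConf());
                // try-with-resources closes the Phoenix Connection (and the
                // underlying HBase connection) on every exit path
                try (Connection conn = QueryUtil.getConnection(conf)) {
                    doWork(conn, args);
                    return 0;
                } catch (Exception e) {
                    // log and return a non-zero exit code instead of throwing
                    LOGGER.error("Tool failed", e);
                    return -1;
                }
            }
    
            // hypothetical helper standing in for the tool-specific logic
            private void doWork(Connection conn, String[] args) throws SQLException {
            }
    
            public static void main(String[] args) throws Exception {
                System.exit(ToolRunner.run(new ExampleCleanupTool(), args));
            }
        }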
---
 .../apache/phoenix/end2end/CsvBulkLoadToolIT.java  |  40 +++---
 .../phoenix/end2end/RegexBulkLoadToolIT.java       |  20 +--
 .../org/apache/phoenix/jdbc/PhoenixDriver.java     |  13 +-
 .../phoenix/mapreduce/AbstractBulkLoadTool.java    | 157 ++++++++++++---------
 .../apache/phoenix/mapreduce/OrphanViewTool.java   |   2 +
 .../phoenix/mapreduce/index/IndexUpgradeTool.java  |  11 +-
 .../phoenix/schema/stats/UpdateStatisticsTool.java |  19 ++-
 .../org/apache/phoenix/schema/tool/SchemaTool.java |  29 ++--
 .../util/MergeViewIndexIdSequencesTool.java        |  17 +--
 9 files changed, 174 insertions(+), 134 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index c6e8246c0a..60d7071f89 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.FileAlreadyExistsException;
 import org.apache.phoenix.end2end.index.IndexTestUtil;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.CsvBulkLoadTool;
@@ -146,12 +145,10 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
                     "--table", "table1",
                     "--schema", "s",
                     "--zookeeper", zkQuorum});
-            fail("Bulk loading error should have happened earlier");
-        } catch (Exception e){
-            assertTrue(e.getMessage().contains("Bulk Loading error: Bulk loading is disabled for " +
-                    "non empty tables with global indexes, because it will corrupt " +
-                    "the global index table in most cases.\n" +
-                    "Use the --corruptindexes option to override this check."));
+            assertTrue("Bulk loading error should have happened earlier", exitCode != 0);
+        } catch (Exception e) {
+            fail("Tools should return non-zero exit codes on failure"
+                    + " instead of throwing an exception");
         }
 
         ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM s.table1 ORDER BY id");
@@ -393,7 +390,7 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
                 + " (FIRST_NAME ASC)"
                 + " INCLUDE (LAST_NAME)";
         stmt.execute(ddl);
-        
+
         FileSystem fs = FileSystem.get(getUtility().getConfiguration());
         FSDataOutputStream outputStream = fs.create(new Path("/tmp/input3.csv"));
         PrintWriter printWriter = new PrintWriter(outputStream);
@@ -518,17 +515,17 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
         CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
         csvBulkLoadTool.setConf(getUtility().getConfiguration());
         try {
-            csvBulkLoadTool.run(new String[] {
+            int exitCode = csvBulkLoadTool.run(new String[] {
                 "--input", "/tmp/input4.csv",
                 "--table", tableName,
                 "--zookeeper", zkQuorum });
-            fail(String.format("Table %s not created, hence should fail",tableName));
+            assertTrue(String.format("Table %s not created, hence should fail", tableName),
+                exitCode != 0);
         } catch (Exception ex) {
-            assertTrue(ex instanceof IllegalArgumentException); 
-            assertTrue(ex.getMessage().contains(String.format("Table %s not found", tableName)));
-        }
+            fail("Tools should return non-zero exit codes on failure"
+                    + " instead of throwing an exception");        }
     }
-    
+
     @Test
     public void testAlreadyExistsOutputPath() {
         String tableName = "TABLE9";
@@ -537,7 +534,7 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
             Statement stmt = conn.createStatement();
             stmt.execute("CREATE TABLE " + tableName + "(ID INTEGER NOT NULL PRIMARY KEY, "
                     + "FIRST_NAME VARCHAR, LAST_NAME VARCHAR)");
-            
+
             FileSystem fs = FileSystem.get(getUtility().getConfiguration());
             fs.create(new Path(outputPath));
             FSDataOutputStream outputStream = fs.create(new Path("/tmp/input9.csv"));
@@ -545,18 +542,21 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
             printWriter.println("1,FirstName 1,LastName 1");
             printWriter.println("2,FirstName 2,LastName 2");
             printWriter.close();
-            
+
             CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
             csvBulkLoadTool.setConf(getUtility().getConfiguration());
-            csvBulkLoadTool.run(new String[] {
+            int exitCode = csvBulkLoadTool.run(new String[] {
                 "--input", "/tmp/input9.csv",
                 "--output", outputPath,
                 "--table", tableName,
                 "--zookeeper", zkQuorum });
-            
-            fail(String.format("Output path %s already exists. hence, should fail",outputPath));
+
+            assertTrue(
+                String.format("Output path %s already exists. hence, should fail", outputPath),
+                exitCode != 0);
         } catch (Exception ex) {
-            assertTrue(ex instanceof FileAlreadyExistsException); 
+            fail("Tools should return non-zero exit codes when fail,"
+                + " instead of throwing an exception");
         }
     }
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexBulkLoadToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexBulkLoadToolIT.java
index 5c66e125f7..e96f97cd32 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexBulkLoadToolIT.java
@@ -33,7 +33,6 @@ import java.sql.Statement;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.FileAlreadyExistsException;
 import org.apache.phoenix.mapreduce.RegexBulkLoadTool;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -305,15 +304,16 @@ public class RegexBulkLoadToolIT extends BaseOwnClusterIT {
         RegexBulkLoadTool regexBulkLoadTool = new RegexBulkLoadTool();
         regexBulkLoadTool.setConf(getUtility().getConfiguration());
         try {
-            regexBulkLoadTool.run(new String[] {
+            int exitCode = regexBulkLoadTool.run(new String[] {
                 "--input", "/tmp/input4.csv",
                 "--table", tableName,
                 "--regex", "([^,]*),([^,]*),([^,]*)",
                 "--zookeeper", zkQuorum });
-            fail(String.format("Table %s not created, hence should fail",tableName));
+            assertTrue(String.format("Table %s not created, hence should fail", tableName),
+                exitCode != 0);
         } catch (Exception ex) {
-            assertTrue(ex instanceof IllegalArgumentException); 
-            assertTrue(ex.getMessage().contains(String.format("Table %s not found", tableName)));
+            fail("Tools should return non-zero exit codes on failure"
+                    + " instead of throwing an exception");
         }
     }
     
@@ -336,16 +336,18 @@ public class RegexBulkLoadToolIT extends BaseOwnClusterIT {
             
             RegexBulkLoadTool regexBulkLoadTool = new RegexBulkLoadTool();
             regexBulkLoadTool.setConf(getUtility().getConfiguration());
-            regexBulkLoadTool.run(new String[] {
+            int exitCode = regexBulkLoadTool.run(new String[] {
                 "--input", "/tmp/input9.csv",
                 "--output", outputPath,
                 "--table", tableName,
                 "--regex", "([^,]*),([^,]*),([^,]*)",
                 "--zookeeper", zkQuorum });
-            
-            fail(String.format("Output path %s already exists. hence, should fail",outputPath));
+            assertTrue(
+                String.format("Output path %s already exists. hence, should fail", outputPath),
+                exitCode != 0);
         } catch (Exception ex) {
-            assertTrue(ex instanceof FileAlreadyExistsException); 
+            fail("Tools should return non-zero exit codes on failure"
+                    + " instead of throwing an exception");
         }
     }
     
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 35079f1fd9..5f6abbbe86 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -140,6 +140,7 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
     }
 
     // One entry per cluster here
+    // TODO that's not true, we can have multiple connections with different configs / principals
     private final Cache<ConnectionInfo, ConnectionQueryServices> connectionQueryServicesCache =
         initializeConnectionCache();
 
@@ -311,8 +312,18 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
                 services = null;
             }
         }
+
+        if (connectionQueryServicesCache != null) {
+            try {
+                for (ConnectionQueryServices cqsi : connectionQueryServicesCache.asMap().values()) {
+                    cqsi.close();
+                }
+            } catch (Exception e) {
+                LOGGER.warn("Failed to close ConnectionQueryServices instance", e);
+            }
+        }
     }
-    
+
     private enum LockMode {
         READ, WRITE
     };
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index b8bc14a989..e0c01f2c0f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -183,7 +183,12 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
         } catch (IllegalStateException e) {
             printHelpAndExit(e.getMessage(), getOptions());
         }
-        return loadData(conf, cmdLine);
+        try {
+            return loadData(conf, cmdLine);
+        } catch (Exception e) {
+            e.printStackTrace();
+            return -1;
+        }
     }
 
 
@@ -216,85 +221,99 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             PhoenixTextInputFormat.setSkipHeader(conf);
         }
 
-        final Connection conn = QueryUtil.getConnection(conf);
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
-                    qualifiedTableName);
-        }
-        List<ColumnInfo> importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName);
-        Preconditions.checkNotNull(importColumns);
-        Preconditions.checkArgument(!importColumns.isEmpty(), "Column info list is empty");
-        FormatToBytesWritableMapper.configureColumnInfoList(conf, importColumns);
-        boolean ignoreInvalidRows = cmdLine.hasOption(IGNORE_ERRORS_OPT.getOpt());
-        conf.setBoolean(FormatToBytesWritableMapper.IGNORE_INVALID_ROW_CONFKEY, ignoreInvalidRows);
-        conf.set(FormatToBytesWritableMapper.TABLE_NAME_CONFKEY,
-                SchemaUtil.getEscapedFullTableName(qualifiedTableName));
-        // give subclasses their hook
-        configureOptions(cmdLine, importColumns, conf);
-        String sName = SchemaUtil.normalizeIdentifier(schemaName);
-        String tName = SchemaUtil.normalizeIdentifier(tableName);
-
-        String tn = SchemaUtil.getEscapedTableName(sName, tName);
-        ResultSet rsempty = conn.createStatement().executeQuery("SELECT * FROM " + tn + " LIMIT 1");
-        boolean tableNotEmpty = rsempty.next();
-        rsempty.close();
-
-        try {
-            validateTable(conn, sName, tName);
-        } finally {
-            conn.close();
-        }
-
         final String inputPaths = cmdLine.getOptionValue(INPUT_PATH_OPT.getOpt());
         final Path outputPath;
-        if (cmdLine.hasOption(OUTPUT_PATH_OPT.getOpt())) {
-            outputPath = new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPT.getOpt()));
-        } else {
-            outputPath = new Path("/tmp/" + UUID.randomUUID());
-        }
-
         List<TargetTableRef> tablesToBeLoaded = new ArrayList<TargetTableRef>();
-        PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName);
-        tablesToBeLoaded.add(new TargetTableRef(qualifiedTableName, table.getPhysicalName().getString()));
         boolean hasLocalIndexes = false;
-        boolean hasGlobalIndexes = false;
-        for(PTable index: table.getIndexes()) {
-            if (index.getIndexType() == IndexType.LOCAL) {
-                hasLocalIndexes =
-                        qualifiedIndexTableName == null ? true : index.getTableName().getString()
-                                .equals(qualifiedIndexTableName);
-                if (hasLocalIndexes && hasGlobalIndexes) break;
+
+        try (Connection conn = QueryUtil.getConnection(conf)) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
+                    qualifiedTableName);
             }
-            if (index.getIndexType() == IndexType.GLOBAL) {
-                hasGlobalIndexes = true;
-                if (hasLocalIndexes && hasGlobalIndexes) break;
+            List<ColumnInfo> importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName);
+            Preconditions.checkNotNull(importColumns);
+            Preconditions.checkArgument(!importColumns.isEmpty(), "Column info list is empty");
+            FormatToBytesWritableMapper.configureColumnInfoList(conf, importColumns);
+            boolean ignoreInvalidRows = cmdLine.hasOption(IGNORE_ERRORS_OPT.getOpt());
+            conf.setBoolean(FormatToBytesWritableMapper.IGNORE_INVALID_ROW_CONFKEY,
+                ignoreInvalidRows);
+            conf.set(FormatToBytesWritableMapper.TABLE_NAME_CONFKEY,
+                SchemaUtil.getEscapedFullTableName(qualifiedTableName));
+            // give subclasses their hook
+            configureOptions(cmdLine, importColumns, conf);
+            String sName = SchemaUtil.normalizeIdentifier(schemaName);
+            String tName = SchemaUtil.normalizeIdentifier(tableName);
+
+            String tn = SchemaUtil.getEscapedTableName(sName, tName);
+            ResultSet rsempty =
+                    conn.createStatement().executeQuery("SELECT * FROM " + tn + " LIMIT 1");
+            boolean tableNotEmpty = rsempty.next();
+            rsempty.close();
+
+            try {
+                validateTable(conn, sName, tName);
+            } finally {
+                conn.close();
             }
-        }
-
-        if(hasGlobalIndexes && tableNotEmpty && !cmdLine.hasOption(ENABLE_CORRUPT_INDEXES.getOpt())){
-            throw new IllegalStateException("Bulk Loading error: Bulk loading is disabled for non" +
-                    " empty tables with global indexes, because it will corrupt the global index table in most cases.\n" +
-                    "Use the --corruptindexes option to override this check.");
-        }
 
-        // using conn after it's been closed... o.O
-        tablesToBeLoaded.addAll(getIndexTables(conn, qualifiedTableName));
+            if (cmdLine.hasOption(OUTPUT_PATH_OPT.getOpt())) {
+                outputPath = new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPT.getOpt()));
+            } else {
+                outputPath = new Path("/tmp/" + UUID.randomUUID());
+            }
 
-        // When loading a single index table, check index table name is correct
-        if (qualifiedIndexTableName != null){
-            TargetTableRef targetIndexRef = null;
-            for (TargetTableRef tmpTable : tablesToBeLoaded){
-                if (tmpTable.getLogicalName().compareToIgnoreCase(qualifiedIndexTableName) == 0) {
-                    targetIndexRef = tmpTable;
-                    break;
+            PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName);
+            tablesToBeLoaded.add(
+                new TargetTableRef(qualifiedTableName, table.getPhysicalName().getString()));
+            boolean hasGlobalIndexes = false;
+            for (PTable index : table.getIndexes()) {
+                if (index.getIndexType() == IndexType.LOCAL) {
+                    hasLocalIndexes =
+                            qualifiedIndexTableName == null ? true
+                                    : index.getTableName().getString()
+                                            .equals(qualifiedIndexTableName);
+                    if (hasLocalIndexes && hasGlobalIndexes) {
+                        break;
+                    }
                 }
+                if (index.getIndexType() == IndexType.GLOBAL) {
+                    hasGlobalIndexes = true;
+                    if (hasLocalIndexes && hasGlobalIndexes) {
+                        break;
+                    }
+                }
+            }
+
+            if (hasGlobalIndexes && tableNotEmpty
+                    && !cmdLine.hasOption(ENABLE_CORRUPT_INDEXES.getOpt())) {
+                throw new IllegalStateException(
+                        "Bulk Loading error: Bulk loading is disabled for non"
+                                + " empty tables with global indexes, because it will corrupt the"
+                                + " global index table in most cases.\n"
+                                + "Use the --corruptindexes option to override this check.");
             }
-            if (targetIndexRef == null){
-                throw new IllegalStateException("Bulk Loader error: index table " +
-                        qualifiedIndexTableName + " doesn't exist");
+
+            // using conn after it's been closed... o.O
+            tablesToBeLoaded.addAll(getIndexTables(conn, qualifiedTableName));
+
+            // When loading a single index table, check index table name is correct
+            if (qualifiedIndexTableName != null) {
+                TargetTableRef targetIndexRef = null;
+                for (TargetTableRef tmpTable : tablesToBeLoaded) {
+                    if (tmpTable.getLogicalName()
+                            .compareToIgnoreCase(qualifiedIndexTableName) == 0) {
+                        targetIndexRef = tmpTable;
+                        break;
+                    }
+                }
+                if (targetIndexRef == null) {
+                    throw new IllegalStateException("Bulk Loader error: index table "
+                            + qualifiedIndexTableName + " doesn't exist");
+                }
+                tablesToBeLoaded.clear();
+                tablesToBeLoaded.add(targetIndexRef);
             }
-            tablesToBeLoaded.clear();
-            tablesToBeLoaded.add(targetIndexRef);
         }
 
         return submitJob(conf, tableName, inputPaths, outputPath, tablesToBeLoaded, hasLocalIndexes);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index efa57d7e04..162e4bacf2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -431,6 +431,7 @@ public class OrphanViewTool extends Configured implements Tool {
             }
         } finally {
             if (newConn) {
+                // TODO can this be rewritten with try-with-resources ?
                 tryClosingConnection(tenantConnection);
             }
         }
@@ -904,6 +905,7 @@ public class OrphanViewTool extends Configured implements Tool {
                     ExceptionUtils.getStackTrace(ex));
             return -1;
         } finally {
+            // TODO use try-with-resources at least for the Connection ?
             closeConnectionAndFiles(connection);
         }
     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java
index 11597f87e3..2cf35e8958 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java
@@ -207,9 +207,14 @@ public class IndexUpgradeTool extends Configured implements Tool {
         } catch (IllegalStateException e) {
             printHelpAndExit(e.getMessage(), getOptions());
         }
-        initializeTool(cmdLine);
-        prepareToolSetup();
-        executeTool();
+        try {
+            initializeTool(cmdLine);
+            prepareToolSetup();
+            executeTool();
+        } catch (Exception e) {
+            e.printStackTrace();
+            hasFailure = true;
+        }
         if (hasFailure) {
             return -1;
         } else {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
index e3f7d9aec4..fee32585fd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
@@ -96,13 +96,18 @@ public class UpdateStatisticsTool extends Configured implements Tool {
 
     @Override
     public int run(String[] args) throws Exception {
-        parseArgs(args);
-        preJobTask();
-        configureJob();
-        TableMapReduceUtil.initCredentials(job);
-        int ret = runJob();
-        postJobTask();
-        return ret;
+        try {
+            parseArgs(args);
+            preJobTask();
+            configureJob();
+            TableMapReduceUtil.initCredentials(job);
+            int ret = runJob();
+            postJobTask();
+            return ret;
+        } catch (Exception e) {
+            e.printStackTrace();
+            return -1;
+        }
     }
 
     /**
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tool/SchemaTool.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tool/SchemaTool.java
index 6bc4922f90..f000b98064 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tool/SchemaTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tool/SchemaTool.java
@@ -66,19 +66,24 @@ public class SchemaTool extends Configured implements Tool {
 
     @Override
     public int run(String[] args) throws Exception {
-        populateToolAttributes(args);
-        SchemaProcessor processor=null;
-        if(Mode.SYNTH.equals(mode)) {
-            processor = new SchemaSynthesisProcessor(ddlFile);
-        } else if(Mode.EXTRACT.equals(mode)) {
-            conf = HBaseConfiguration.addHbaseResources(getConf());
+                processor = new SchemaExtractionProcessor(tenantId, conf, pSchemaName, pTableName);
-        } else {
-            throw new Exception(mode+" is not accepted, provide [synth or extract]");
+        try {
+            populateToolAttributes(args);
+            SchemaProcessor processor=null;
+            if(Mode.SYNTH.equals(mode)) {
+                processor = new SchemaSynthesisProcessor(ddlFile);
+            } else if(Mode.EXTRACT.equals(mode)) {
+                conf = HBaseConfiguration.addHbaseResources(getConf());
+                processor = new SchemaExtractionProcessor(tenantId, conf, pSchemaName, pTableName);
+            } else {
+                throw new Exception(mode+" is not accepted, provide [synth or extract]");
+            }
+            output = processor.process();
+            LOGGER.info("Effective DDL with " + mode.toString() +": " + output);
+            return 0;
+        } catch (Exception e) {
+            e.printStackTrace();
+            return -1;
         }
-        output = processor.process();
-        LOGGER.info("Effective DDL with " + mode.toString() +": " + output);
-        return 0;
     }
 
     public String getOutput() {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MergeViewIndexIdSequencesTool.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MergeViewIndexIdSequencesTool.java
index 0bb0fb8549..3eb62ea086 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MergeViewIndexIdSequencesTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MergeViewIndexIdSequencesTool.java
@@ -91,25 +91,16 @@ public class MergeViewIndexIdSequencesTool extends Configured implements Tool {
     @Override
     public int run(String[] args) throws Exception {
         int status = 0;
-        PhoenixConnection conn = null;
-        try {
-            parseOptions(args);
-
-            final Configuration config = HBaseConfiguration.addHbaseResources(getConf());
-
-            conn = ConnectionUtil.getInputConnection(config).
-                    unwrap(PhoenixConnection.class);
+        parseOptions(args);
 
+        final Configuration config = HBaseConfiguration.addHbaseResources(getConf());
+        try (PhoenixConnection conn = ConnectionUtil.getInputConnection(config).
+                    unwrap(PhoenixConnection.class)) {
             UpgradeUtil.mergeViewIndexIdSequences(conn);
-
         } catch (Exception e) {
             LOGGER.error("Get an error while running MergeViewIndexIdSequencesTool, "
                     + e.getMessage());
             status = 1;
-        } finally {
-            if (conn != null) {
-                conn.close();
-            }
         }
         return status;
     }
