deniskuzZ commented on code in PR #4517:
URL: https://github.com/apache/hive/pull/4517#discussion_r1410566433


##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdate.java:
##########
@@ -0,0 +1,1541 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.UpdatePartitionColumnStatEvent;
+import org.apache.hadoop.hive.metastore.events.UpdatePartitionColumnStatEventBatch;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
+import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
+import org.apache.hadoop.hive.metastore.model.MStringList;
+import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.datanucleus.ExecutionContext;
+import org.datanucleus.api.jdo.JDOPersistenceManager;
+import org.datanucleus.metadata.AbstractClassMetaData;
+import org.datanucleus.metadata.IdentityType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
+
+import javax.jdo.PersistenceManager;
+import javax.jdo.datastore.JDOConnection;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hive.common.StatsSetupConst.COLUMN_STATS_ACCURATE;
+import static org.apache.hadoop.hive.metastore.HMSHandler.getPartValsFromName;
+import static org.apache.hadoop.hive.metastore.MetastoreDirectSqlUtils.executeWithArray;
+import static org.apache.hadoop.hive.metastore.MetastoreDirectSqlUtils.extractSqlInt;
+import static org.apache.hadoop.hive.metastore.MetastoreDirectSqlUtils.extractSqlLong;
+
+/**
+ * This class contains the optimizations for MetaStore that rely on direct SQL access to
+ * the underlying database. It should use ANSI SQL and be compatible with common databases
+ * such as MySQL (note that MySQL doesn't use full ANSI mode by default), Postgres, etc.
+ *
+ * This class separates out the statistics update part from the MetaStoreDirectSql class.
+ */
+class DirectSqlUpdate {
+  private static final Logger LOG = LoggerFactory.getLogger(DirectSqlUpdate.class.getName());
+  PersistenceManager pm;
+  Configuration conf;
+  DatabaseProduct dbType;
+  int maxBatchSize;
+  SQLGenerator sqlGenerator;
+  private static final ReentrantLock derbyLock = new ReentrantLock(true);
+  
+  public DirectSqlUpdate(PersistenceManager pm, Configuration conf,
+                                          DatabaseProduct dbType, int batchSize) {
+    this.pm = pm;
+    this.conf = conf;
+    this.dbType = dbType;
+    this.maxBatchSize = batchSize;
+    sqlGenerator = new SQLGenerator(dbType, conf);
+  }
+
+  /**
+   * {@link #lockInternal()} and {@link #unlockInternal()} are used to serialize those operations that require
+   * Select ... For Update to sequence operations properly.  In practice that means when running
+   * with Derby database.  See more notes at class level.
+   */
+  private void lockInternal() {
+    if(dbType.isDERBY()) {
+      derbyLock.lock();
+    }
+  }
+
+  private void unlockInternal() {
+    if(dbType.isDERBY()) {
+      derbyLock.unlock();
+    }
+  }
+
+  void rollbackDBConn(Connection dbConn) {
+    try {
+      if (dbConn != null && !dbConn.isClosed()) dbConn.rollback();
+    } catch (SQLException e) {
+      LOG.warn("Failed to rollback db connection ", e);
+    }
+  }
+
+  void closeDbConn(JDOConnection jdoConn) {
+    try {
+      if (jdoConn != null) {
+        jdoConn.close();
+      }
+    } catch (Exception e) {
+      LOG.warn("Failed to close db connection", e);
+    }
+  }
+
+  void closeStmt(Statement stmt) {
+    try {
+      if (stmt != null && !stmt.isClosed()) stmt.close();
+    } catch (SQLException e) {
+      LOG.warn("Failed to close statement ", e);
+    }
+  }
+
+  void close(ResultSet rs) {
+    try {
+      if (rs != null && !rs.isClosed()) {
+        rs.close();
+      }
+    }
+    catch(SQLException ex) {
+      LOG.warn("Failed to close result set ", ex);
+    }
+  }
+
+  static String quoteString(String input) {
+    return "'" + input + "'";
+  }
+
+  void close(ResultSet rs, Statement stmt, JDOConnection dbConn) {
+    close(rs);
+    closeStmt(stmt);
+    closeDbConn(dbConn);
+  }
+
+  private void populateInsertUpdateMap(Map<PartitionInfo, ColumnStatistics> statsPartInfoMap,
+                                       Map<PartColNameInfo, MPartitionColumnStatistics> updateMap,
+                                       Map<PartColNameInfo, MPartitionColumnStatistics> insertMap,
+                                       Connection dbConn, Table tbl) throws SQLException, MetaException, NoSuchObjectException {
+    StringBuilder prefix = new StringBuilder();
+    StringBuilder suffix = new StringBuilder();
+    Statement statement = null;
+    ResultSet rs = null;
+    List<String> queries = new ArrayList<>();
+    Set<PartColNameInfo> selectedParts = new HashSet<>();
+
+    List<Long> partIdList = statsPartInfoMap.keySet().stream().map(
+            e -> e.partitionId).collect(Collectors.toList()
+    );
+
+    prefix.append("select \"PART_ID\", \"COLUMN_NAME\" from \"PART_COL_STATS\" WHERE ");
+    TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix,
+            partIdList, "\"PART_ID\"", true, false);
+
+    for (String query : queries) {
+      try {
+        statement = dbConn.createStatement();
+        LOG.debug("Going to execute query " + query);
+        rs = statement.executeQuery(query);
+        while (rs.next()) {
+          selectedParts.add(new PartColNameInfo(rs.getLong(1), rs.getString(2)));
+        }
+      } finally {
+        close(rs, statement, null);
+      }
+    }
+
+    for (Map.Entry entry : statsPartInfoMap.entrySet()) {
+      PartitionInfo partitionInfo = (PartitionInfo) entry.getKey();
+      ColumnStatistics colStats = (ColumnStatistics) entry.getValue();
+      long partId = partitionInfo.partitionId;
+      ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+      if (!statsDesc.isSetCatName()) {
+        statsDesc.setCatName(tbl.getCatName());
+      }
+      for (ColumnStatisticsObj statisticsObj : colStats.getStatsObj()) {
+        PartColNameInfo temp = new PartColNameInfo(partId, statisticsObj.getColName());
+        if (selectedParts.contains(temp)) {
+          updateMap.put(temp, StatObjectConverter.
+                  convertToMPartitionColumnStatistics(null, statsDesc, statisticsObj, colStats.getEngine()));
+        } else {
+          insertMap.put(temp, StatObjectConverter.
+                  convertToMPartitionColumnStatistics(null, statsDesc, statisticsObj, colStats.getEngine()));
+        }
+      }
+    }
+  }
+
+  private void updatePartColStatTable(Map<PartColNameInfo, MPartitionColumnStatistics> updateMap,
+                                          Connection dbConn) throws SQLException, MetaException, NoSuchObjectException {
+    PreparedStatement pst = null;
+    for (Map.Entry entry : updateMap.entrySet()) {
+      PartColNameInfo partColNameInfo = (PartColNameInfo) entry.getKey();
+      Long partId = partColNameInfo.partitionId;
+      MPartitionColumnStatistics mPartitionColumnStatistics = (MPartitionColumnStatistics) entry.getValue();
+      String update = "UPDATE \"PART_COL_STATS\" SET ";
+      update += StatObjectConverter.getUpdatedColumnSql(mPartitionColumnStatistics);
+      update += " WHERE \"PART_ID\" = " + partId + " AND "
+              + " \"COLUMN_NAME\" = " + quoteString(mPartitionColumnStatistics.getColName());
+      try {
+        pst = dbConn.prepareStatement(update);
+        StatObjectConverter.initUpdatedColumnStatement(mPartitionColumnStatistics, pst);
+        LOG.debug("Going to execute update " + update);
+        int numUpdate = pst.executeUpdate();
+        if (numUpdate != 1) {
+          throw new MetaException("Invalid state of PART_COL_STATS for PART_ID " + partId);
+        }
+      } finally {
+        closeStmt(pst);
+      }
+    }
+  }
+
+  private void insertIntoPartColStatTable(Map<PartColNameInfo, MPartitionColumnStatistics> insertMap,
+                                          long maxCsId,
+                                          Connection dbConn) throws SQLException, MetaException, NoSuchObjectException {
+    PreparedStatement preparedStatement = null;
+    int numRows = 0;
+    String insert = "INSERT INTO \"PART_COL_STATS\" (\"CS_ID\", \"CAT_NAME\", \"DB_NAME\","
+            + "\"TABLE_NAME\", \"PARTITION_NAME\", \"COLUMN_NAME\", \"COLUMN_TYPE\", \"PART_ID\","
+            + " \"LONG_LOW_VALUE\", \"LONG_HIGH_VALUE\", \"DOUBLE_HIGH_VALUE\", \"DOUBLE_LOW_VALUE\","
+            + " \"BIG_DECIMAL_LOW_VALUE\", \"BIG_DECIMAL_HIGH_VALUE\", \"NUM_NULLS\", \"NUM_DISTINCTS\", \"BIT_VECTOR\","
+            + " \"HISTOGRAM\", \"AVG_COL_LEN\", \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", \"LAST_ANALYZED\", \"ENGINE\") values "
+            + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+
+    try {
+      preparedStatement = dbConn.prepareStatement(insert);
+      for (Map.Entry entry : insertMap.entrySet()) {
+        PartColNameInfo partColNameInfo = (PartColNameInfo) entry.getKey();
+        Long partId = partColNameInfo.partitionId;
+        MPartitionColumnStatistics mPartitionColumnStatistics = (MPartitionColumnStatistics) entry.getValue();
+
+        preparedStatement.setLong(1, maxCsId);
+        preparedStatement.setString(2, mPartitionColumnStatistics.getCatName());
+        preparedStatement.setString(3, mPartitionColumnStatistics.getDbName());
+        preparedStatement.setString(4, mPartitionColumnStatistics.getTableName());
+        preparedStatement.setString(5, mPartitionColumnStatistics.getPartitionName());
+        preparedStatement.setString(6, mPartitionColumnStatistics.getColName());
+        preparedStatement.setString(7, mPartitionColumnStatistics.getColType());
+        preparedStatement.setLong(8, partId);
+        preparedStatement.setObject(9, mPartitionColumnStatistics.getLongLowValue());
+        preparedStatement.setObject(10, mPartitionColumnStatistics.getLongHighValue());
+        preparedStatement.setObject(11, mPartitionColumnStatistics.getDoubleHighValue());
+        preparedStatement.setObject(12, mPartitionColumnStatistics.getDoubleLowValue());
+        preparedStatement.setString(13, mPartitionColumnStatistics.getDecimalLowValue());
+        preparedStatement.setString(14, mPartitionColumnStatistics.getDecimalHighValue());
+        preparedStatement.setObject(15, mPartitionColumnStatistics.getNumNulls());
+        preparedStatement.setObject(16, mPartitionColumnStatistics.getNumDVs());
+        preparedStatement.setObject(17, mPartitionColumnStatistics.getBitVector());
+        preparedStatement.setBytes(18, mPartitionColumnStatistics.getHistogram());
+        preparedStatement.setObject(19, mPartitionColumnStatistics.getAvgColLen());
+        preparedStatement.setObject(20, mPartitionColumnStatistics.getMaxColLen());
+        preparedStatement.setObject(21, mPartitionColumnStatistics.getNumTrues());
+        preparedStatement.setObject(22, mPartitionColumnStatistics.getNumFalses());
+        preparedStatement.setLong(23, mPartitionColumnStatistics.getLastAnalyzed());
+        preparedStatement.setString(24, mPartitionColumnStatistics.getEngine());
+
+        maxCsId++;
+        numRows++;
+        preparedStatement.addBatch();
+        if (numRows == maxBatchSize) {
+          preparedStatement.executeBatch();
+          numRows = 0;
+        }
+      }
+
+      if (numRows != 0) {
+        preparedStatement.executeBatch();
+      }
+    } finally {
+      closeStmt(preparedStatement);
+    }
+  }
+
+  private Map<Long, String> getParamValues(Connection dbConn, List<Long> partIdList) throws SQLException {
+    List<String> queries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+    StringBuilder suffix = new StringBuilder();
+    Statement statement = null;
+    ResultSet rs = null;
+
+    prefix.append("select \"PART_ID\", \"PARAM_VALUE\" "
+            + " from \"PARTITION_PARAMS\" where "
+            + " \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE' "
+            + " and ");
+    TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix,
+            partIdList, "\"PART_ID\"", true, false);
+
+    Map<Long, String> partIdToParaMap = new HashMap<>();
+    for (String query : queries) {
+      try {
+        statement = dbConn.createStatement();
+        LOG.debug("Going to execute query " + query);
+        rs = statement.executeQuery(query);
+        while (rs.next()) {
+          partIdToParaMap.put(rs.getLong(1), rs.getString(2));
+        }
+      } finally {
+        close(rs, statement, null);
+      }
+    }
+    return partIdToParaMap;
+  }
+
+  private void updateWriteIdForPartitions(Connection dbConn, long writeId, List<Long> partIdList) throws SQLException {
+    StringBuilder prefix = new StringBuilder();
+    List<String> queries = new ArrayList<>();
+    StringBuilder suffix = new StringBuilder();
+
+    prefix.append("UPDATE \"PARTITIONS\" set \"WRITE_ID\" = " + writeId + " where ");
+    TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix,
+            partIdList, "\"PART_ID\"", false, false);
+
+    Statement statement = null;
+    for (String query : queries) {
+      try {
+        statement = dbConn.createStatement();
+        LOG.debug("Going to execute update " + query);
+        statement.executeUpdate(query);
+      } finally {
+        closeStmt(statement);
+      }
+    }
+  }
+
+  private Map<String, Map<String, String>> updatePartitionParamTable(Connection dbConn,
+                                                                     Map<PartitionInfo, ColumnStatistics> partitionInfoMap,
+                                                                     String validWriteIds,
+                                                                     long writeId,
+                                                                     boolean isAcidTable)
+          throws SQLException, MetaException {
+    Map<String, Map<String, String>> result = new HashMap<>();
+    boolean areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED);
+    PreparedStatement statementInsert = null;
+    PreparedStatement statementDelete = null;
+    PreparedStatement statementUpdate = null;
+    String insert = "INSERT INTO \"PARTITION_PARAMS\" (\"PART_ID\", \"PARAM_KEY\", \"PARAM_VALUE\") "
+            + "VALUES( ? , 'COLUMN_STATS_ACCURATE'  , ? )";
+    String delete = "DELETE from \"PARTITION_PARAMS\" "
+            + " where \"PART_ID\" = ? "
+            + " and \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE'";
+    String update = "UPDATE \"PARTITION_PARAMS\" set \"PARAM_VALUE\" = ? "
+            + " where \"PART_ID\" = ? "
+            + " and \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE'";
+    int numInsert = 0;
+    int numDelete = 0;
+    int numUpdate = 0;
+
+    List<Long> partIdList = partitionInfoMap.keySet().stream().map(
+            e -> e.partitionId).collect(Collectors.toList()
+    );
+
+    // get the old parameters from PARTITION_PARAMS table.
+    Map<Long, String> partIdToParaMap = getParamValues(dbConn, partIdList);
+
+    try {
+      statementInsert = dbConn.prepareStatement(insert);
+      statementDelete = dbConn.prepareStatement(delete);
+      statementUpdate = dbConn.prepareStatement(update);
+      for (Map.Entry entry : partitionInfoMap.entrySet()) {
+        PartitionInfo partitionInfo = (PartitionInfo) entry.getKey();
+        ColumnStatistics colStats = (ColumnStatistics) entry.getValue();
+        List<String> colNames = colStats.getStatsObj().stream().map(e -> e.getColName()).collect(Collectors.toList());
+        long partWriteId = partitionInfo.writeId;
+        long partId = partitionInfo.partitionId;
+        Map<String, String> newParameter;
+
+        if (!partIdToParaMap.containsKey(partId)) {
+          newParameter = new HashMap<>();
+          newParameter.put(COLUMN_STATS_ACCURATE, "TRUE");
+          StatsSetupConst.setColumnStatsState(newParameter, colNames);
+          statementInsert.setLong(1, partId);
+          statementInsert.setString(2, newParameter.get(COLUMN_STATS_ACCURATE));
+          numInsert++;
+          statementInsert.addBatch();
+          if (numInsert == maxBatchSize) {
+            LOG.debug(" Executing insert " + insert);
+            statementInsert.executeBatch();
+            numInsert = 0;
+          }
+        } else {
+          String oldStats = partIdToParaMap.get(partId);
+
+          Map<String, String> oldParameter = new HashMap<>();
+          oldParameter.put(COLUMN_STATS_ACCURATE, oldStats);
+
+          newParameter = new HashMap<>();
+          newParameter.put(COLUMN_STATS_ACCURATE, oldStats);
+          StatsSetupConst.setColumnStatsState(newParameter, colNames);
+
+          if (isAcidTable) {
+            String errorMsg = ObjectStore.verifyStatsChangeCtx(
+                    colStats.getStatsDesc().getDbName() + "." + colStats.getStatsDesc().getTableName(),
+                    oldParameter, newParameter, writeId, validWriteIds, true);
+            if (errorMsg != null) {
+              throw new MetaException(errorMsg);
+            }
+          }
+
+          if (isAcidTable &&
+                  (!areTxnStatsSupported || !ObjectStore.isCurrentStatsValidForTheQuery(oldParameter, partWriteId,
+                          validWriteIds, true))) {
+            statementDelete.setLong(1, partId);
+            statementDelete.addBatch();
+            numDelete++;
+            if (numDelete == maxBatchSize) {
+              statementDelete.executeBatch();
+              numDelete = 0;
+              LOG.debug("Removed COLUMN_STATS_ACCURATE from the parameters of the partition "
+                      + colStats.getStatsDesc().getDbName() + "." + colStats.getStatsDesc().getTableName() + "."
+                      + colStats.getStatsDesc().getPartName());
+            }
+          } else {
+            statementUpdate.setString(1, newParameter.get(COLUMN_STATS_ACCURATE));
+            statementUpdate.setLong(2, partId);
+            statementUpdate.addBatch();
+            numUpdate++;
+            if (numUpdate == maxBatchSize) {
+              LOG.debug(" Executing update " + statementUpdate);
+              statementUpdate.executeBatch();
+              numUpdate = 0;
+            }
+          }
+        }
+        result.put(partitionInfo.partitionName, newParameter);
+      }
+
+      if (numInsert != 0) {
+        statementInsert.executeBatch();
+      }
+
+      if (numUpdate != 0) {
+        statementUpdate.executeBatch();
+      }
+
+      if (numDelete != 0) {
+        statementDelete.executeBatch();
+      }
+
+      if (isAcidTable) {
+        updateWriteIdForPartitions(dbConn, writeId, partIdList);
+      }
+      return result;
+    } finally {
+      closeStmt(statementInsert);
+      closeStmt(statementUpdate);
+      closeStmt(statementDelete);
+    }
+  }
+
+  private static class PartitionInfo {

Review Comment:
   add `final` and move to the end of the class
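   
   For example, a minimal sketch of the suggested shape (the field set is inferred from how `PartitionInfo` is used earlier in this diff; the actual class body is truncated here, and the move to the end of the class is not shown):
   ```java
   private static final class PartitionInfo {
     final long partitionId;
     final long writeId;
     final String partitionName;
   
     PartitionInfo(long partitionId, long writeId, String partitionName) {
       this.partitionId = partitionId;
       this.writeId = writeId;
       this.partitionName = partitionName;
     }
   }
   ```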



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
