pvargacl commented on a change in pull request #1087: URL: https://github.com/apache/hive/pull/1087#discussion_r450793830
########## File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java ########## @@ -2032,28 +2077,61 @@ public void seedWriteIdOnAcidConversion(InitializeTableWriteIdsRequest rqst) // The initial value for write id should be 1 and hence we add 1 with number of write ids // allocated here String s = "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (?, ?, " - + Long.toString(rqst.getSeeWriteId() + 1) + ")"; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, Arrays.asList(rqst.getDbName(), rqst.getTblName())); + + Long.toString(rqst.getSeedWriteId() + 1) + ")"; + pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, Arrays.asList(rqst.getDbName(), rqst.getTableName())); LOG.debug("Going to execute insert <" + s.replaceAll("\\?", "{}") + ">", - quoteString(rqst.getDbName()), quoteString(rqst.getTblName())); + quoteString(rqst.getDbName()), quoteString(rqst.getTableName())); pst.execute(); LOG.debug("Going to commit"); dbConn.commit(); } catch (SQLException e) { - LOG.debug("Going to rollback"); rollbackDBConn(dbConn); - checkRetryable(dbConn, e, "seedWriteIdOnAcidConversion(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " - + StringUtils.stringifyException(e)); + checkRetryable(dbConn, e, "seedWriteId(" + rqst + ")"); + throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); } finally { close(null, pst, dbConn); unlockInternal(); } } catch (RetryException e) { - seedWriteIdOnAcidConversion(rqst); + seedWriteId(rqst); } + } + + @Override + public void seedTxnId(SeedTxnIdRequest rqst) throws MetaException { + try { + Connection dbConn = null; + Statement stmt = null; + try { + lockInternal(); + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + stmt = dbConn.createStatement(); + /* + * Locking the txnLock an exclusive way, we do not want to set the txnId backward accidentally + 
* if there are concurrent open transactions + */ + acquireTxnLock(stmt, false); + long highWaterMark = getHighWaterMark(stmt); + if (highWaterMark >= rqst.getSeedTxnId()) { Review comment: This is not about the writeIds, it is about the txnId. If you have data from a database where there was a high number of transactions and compaction ran on the table, you will have some high txnId as the visibilityTxnId in the name of the compacted folder. If you then move this data to a cluster with fewer transactions (e.g. a test cluster) and you run msck repair, you have to skip the txnId forward so the next query will read the compacted folder. Here the race condition is that the txnId sequence somehow gets ahead of you between the check and the seeding of the value; in that case we throw this exception to avoid setting the sequence backward. Anyway, in this case if you run msck repair again it will succeed, since the txnId will be high enough. I think the writeId race condition won't cause a problem, since if some other transaction allocated the first writeId, seedWriteId will fail on the unique constraint on the table. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: gitbox-unsubscr...@hive.apache.org For additional commands, e-mail: gitbox-h...@hive.apache.org