This is an automated email from the ASF dual-hosted git repository.

vihangk1 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/branch-2.3 by this push:
     new 9fbcfdd  HIVE-24559: Fix some spelling issues (Ricky Ma reviewed by Vihang Karajgaonkar and Miklos Gergely)
9fbcfdd is described below

commit 9fbcfddd976c11e4ccc54ed1860362935cb34fac
Author: RickyMa <mhx8...@gmail.com>
AuthorDate: Tue Dec 29 02:49:09 2020 +0800

    HIVE-24559: Fix some spelling issues (Ricky Ma reviewed by Vihang Karajgaonkar and Miklos Gergely)

    Closes #1805
---
 .../src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java | 6 +++---
 ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java               | 4 ++--
 .../apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java | 2 +-
 ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java       | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index a310c27..3023083 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -626,7 +626,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
  * as a logical time counter. If S.commitTime < T.startTime, T and S do NOT overlap.
  *
  * Motivating example:
- * Suppose we have multi-statment transactions T and S both of which are attempting x = x + 1
+ * Suppose we have multi-statement transactions T and S both of which are attempting x = x + 1
  * In order to prevent lost update problem, the the non-overlapping txns must lock in the snapshot
  * that they read appropriately. In particular, if txns do not overlap, then one follows the other
  * (assumig they write the same entity), and thus the 2nd must see changes of the 1st. We ensure
@@ -855,7 +855,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
 /**
  * As much as possible (i.e. in absence of retries) we want both operations to be done on the same
  * connection (but separate transactions). This avoid some flakiness in BONECP where if you
- * perform an operation on 1 connection and immediately get another fron the pool, the 2nd one
+ * perform an operation on 1 connection and immediately get another from the pool, the 2nd one
  * doesn't see results of the first.
  *
  * Retry-by-caller note: If the call to lock is from a transaction, then in the worst case
@@ -2430,7 +2430,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
  * Lock acquisition is meant to be fair, so every lock can only block on some lock with smaller
  * hl_lock_ext_id by only checking earlier locks.
  *
- * For any given SQL statment all locks required by it are grouped under single extLockId and are
+ * For any given SQL statement all locks required by it are grouped under single extLockId and are
  * granted all at once or all locks wait.
  *
  * This is expected to run at READ_COMMITTED.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 6a43385..4f7698d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -463,8 +463,8 @@ public enum ErrorMsg {
   MATERIALIZED_VIEW_DEF_EMPTY(10403, "Query for the materialized view rebuild could not be retrieved"),
   MERGE_PREDIACTE_REQUIRED(10404, "MERGE statement with both UPDATE and DELETE clauses " +
     "requires \"AND <boolean>\" on the 1st WHEN MATCHED clause of <{0}>", true),
-  MERGE_TOO_MANY_DELETE(10405, "MERGE statment can have at most 1 WHEN MATCHED ... DELETE clause: <{0}>", true),
-  MERGE_TOO_MANY_UPDATE(10406, "MERGE statment can have at most 1 WHEN MATCHED ... UPDATE clause: <{0}>", true),
+  MERGE_TOO_MANY_DELETE(10405, "MERGE statement can have at most 1 WHEN MATCHED ... DELETE clause: <{0}>", true),
+  MERGE_TOO_MANY_UPDATE(10406, "MERGE statement can have at most 1 WHEN MATCHED ... UPDATE clause: <{0}>", true),
   INVALID_JOIN_CONDITION(10407, "Error parsing condition in outer join"),
   INVALID_TARGET_COLUMN_IN_SET_CLAUSE(10408, "Target column \"{0}\" of set clause is not found in table \"{1}\".", true),
   HIVE_GROUPING_FUNCTION_EXPR_NOT_IN_GROUPBY(10409, "Expression in GROUPING function not present in GROUP BY"),
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
index 0541a40..0aaf529 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
@@ -284,7 +284,7 @@ public class UpdateDeleteSemanticAnalyzer extends SemanticAnalyzer {
     }
   }
   /**
-   * Parse the newly generated SQL statment to get a new AST
+   * Parse the newly generated SQL statement to get a new AST
    */
   private ReparseResult parseRewrittenQuery(StringBuilder rewrittenQueryStr, String originalQuery) throws SemanticException {
     // Parse the rewritten query string
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
index a7ff9a3..b2f1f8a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
@@ -279,7 +279,7 @@ public class TestAcidUtils {
     delta_120_130 is from streaming ingest in which case 121 can be open
     (and thus 122-130 are open too)
     99 here would be Aborted since 121 is minOpenTxn, base_100 is still good
-    For multi-statment txns, see HIVE-13369*/
+    For multi-statement txns, see HIVE-13369*/
     dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("150:121:99:121"));
     assertEquals("mock:/tbl/part1/base_100", dir.getBaseDirectory().toString());
     assertEquals(1, dir.getCurrentDirectories().size());
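
A note for readers of the TxnHandler hunk above: the javadoc it touches describes when two transactions are considered non-overlapping (S.commitTime < T.startTime, with commit/start ids acting as a logical time counter). The sketch below is a minimal, self-contained illustration of that rule only; TxnWindow and overlaps() are hypothetical names for this example, not Hive's actual types or API.

// Minimal sketch of the non-overlap rule described in the TxnHandler javadoc above.
// Assumption: commit/start ids behave as a monotonically increasing logical clock.
// TxnWindow and overlaps() are illustrative names only, not part of Hive.
public final class TxnWindow {
  final long startTime;   // logical time at which the txn took its snapshot
  final long commitTime;  // logical time at which the txn committed

  TxnWindow(long startTime, long commitTime) {
    this.startTime = startTime;
    this.commitTime = commitTime;
  }

  /** Two txns do NOT overlap when one committed before the other started. */
  static boolean overlaps(TxnWindow t, TxnWindow s) {
    return !(s.commitTime < t.startTime || t.commitTime < s.startTime);
  }

  public static void main(String[] args) {
    TxnWindow t = new TxnWindow(10, 20);
    TxnWindow s = new TxnWindow(1, 5);    // S committed before T started
    // prints false: S precedes T, so T's snapshot must include S's write
    // (otherwise the x = x + 1 example in the javadoc would lose an update)
    System.out.println(overlaps(t, s));
  }
}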