deniskuzZ commented on code in PR #4166:
URL: https://github.com/apache/hive/pull/4166#discussion_r1154650166
##########
ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java:
##########
@@ -564,30 +582,38 @@ private void fixUpASTAggregateIncrementalRebuild(
HiveParser.TOK_INSERT, HiveParser.TOK_SELECT);
ASTNode selectExprNodeInputROJ = (ASTNode) ParseDriver.adaptor.create(
HiveParser.TOK_SELEXPR, "TOK_SELEXPR");
- ASTNode tableName = createRowIdNode(TableName.getDbTable(
+ astBuilder.createAcidSortNodes(TableName.getDbTable(
materializationNode.getChild(0).getText(),
- materializationNode.getChild(1).getText()));
- ParseDriver.adaptor.addChild(selectExprNodeInputROJ, tableName);
- ParseDriver.adaptor.addChild(selectNodeInputROJ, selectExprNodeInputROJ);
+ materializationNode.getChild(1).getText()))
+ .forEach(astNode ->
ParseDriver.adaptor.addChild(selectNodeInputROJ, astNode));
// 4) Transform first INSERT branch into an UPDATE
- // 4.1) Adding ROW__ID field
- ASTNode selectNodeInUpdate = (ASTNode) updateNode.getChild(1);
- if (selectNodeInUpdate.getType() != HiveParser.TOK_SELECT) {
- throw new SemanticException("TOK_SELECT expected in incremental
rewriting");
- }
- ASTNode selectExprNodeInUpdate = (ASTNode)
ParseDriver.adaptor.dupNode(selectExprNodeInputROJ);
- ParseDriver.adaptor.addChild(selectExprNodeInUpdate,
createRowIdNode((ASTNode) subqueryNodeInputROJ.getChild(1)));
- selectNodeInUpdate.insertChild(0, selectExprNodeInUpdate);
- // 4.2) Modifying filter condition.
+ // 4.1) Modifying filter condition.
ASTNode whereClauseInUpdate = findWhereClause(updateNode);
if (whereClauseInUpdate.getChild(0).getType() != HiveParser.KW_OR) {
throw new SemanticException("OR clause expected below TOK_WHERE in
incremental rewriting");
}
// We bypass the OR clause and select the first disjunct for the Update
branch
ParseDriver.adaptor.setChild(whereClauseInUpdate, 0,
disjuncts.get(Context.DestClausePrefix.UPDATE));
+ ASTNode updateDeleteNode = (ASTNode)
ParseDriver.adaptor.dupTree(updateNode);
+ // 4.2) Adding ROW__ID field
+ ASTNode selectNodeInUpdateDelete = (ASTNode) updateDeleteNode.getChild(1);
+ if (selectNodeInUpdateDelete.getType() != HiveParser.TOK_SELECT) {
+ throw new SemanticException("TOK_SELECT expected in incremental
rewriting");
+ }
+ // Remove children
+ while (selectNodeInUpdateDelete.getChildCount() > 0) {
+ selectNodeInUpdateDelete.deleteChild(0);
Review Comment:
Can we remove all of the children in one go, so that we avoid calling
freshenParentAndChildIndexes once per deleted child?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]