This is an automated email from the ASF dual-hosted git repository.

zabetak pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new fb5e8703d91 HIVE-28197: Add deserializer to convert JSON plans to 
RelNodes (#6312)
fb5e8703d91 is described below

commit fb5e8703d915ec045a3c0148a765c9554b22bfe9
Author: Stamatis Zampetakis <[email protected]>
AuthorDate: Sat Feb 28 10:10:54 2026 +0200

    HIVE-28197: Add deserializer to convert JSON plans to RelNodes (#6312)
---
 .../ql/optimizer/calcite/HiveRelJsonReader.java    |  68 ++++++
 .../optimizer/calcite/HiveRelJsonSchemaReader.java | 141 +++++++++++
 ...iveTypeFactory.java => HiveRexJsonBuilder.java} |  41 ++--
 .../hive/ql/optimizer/calcite/HiveTypeFactory.java |   7 +
 .../calcite/reloperators/HiveAggregate.java        |  28 +++
 .../calcite/reloperators/HiveAntiJoin.java         |  10 +
 .../optimizer/calcite/reloperators/HiveFilter.java |   5 +
 .../optimizer/calcite/reloperators/HiveJoin.java   |  14 ++
 .../calcite/reloperators/HiveProject.java          |   9 +
 .../calcite/reloperators/HiveSemiJoin.java         |  10 +
 .../calcite/reloperators/HiveSortExchange.java     |  18 +-
 .../calcite/reloperators/HiveSortLimit.java        |   5 +
 .../reloperators/HiveTableFunctionScan.java        |   5 +
 .../calcite/reloperators/HiveTableScan.java        |  24 +-
 .../calcite/reloperators/HiveTableSpool.java       |  12 +
 .../optimizer/calcite/reloperators/HiveUnion.java  |   5 +
 .../optimizer/calcite/reloperators/HiveValues.java |   5 +
 .../calcite/rules/HiveSubQueryRemoveRule.java      |  11 +-
 .../calcite/translator/HiveSqlOperatorTable.java   |  73 ++++++
 .../calcite/translator/SqlFunctionConverter.java   |  18 +-
 .../optimizer/calcite/TestHiveRelJsonReader.java   | 156 ++++++++++++
 .../calcite/TestHiveRelJsonSchemaReader.java       |  57 +++++
 .../hive/ql/optimizer/calcite/TpcdsTable.java      | 269 +++++++++++++++++++++
 23 files changed, 949 insertions(+), 42 deletions(-)

diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelJsonReader.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelJsonReader.java
new file mode 100644
index 00000000000..e8c051b18c1
--- /dev/null
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelJsonReader.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptSchema;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.externalize.RelJsonReader;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.conf.HiveConf;
+import 
org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveSqlOperatorTable;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Objects;
+
+import static org.apache.calcite.sql.util.SqlOperatorTables.chain;
+
+/**
+ * Reads a JSON plan and converts it to a Hive relational expression (RelNode).
+ */
[email protected]
+public class HiveRelJsonReader {
+  private final RelOptCluster cluster;
+
+  public HiveRelJsonReader(RelOptCluster cluster) {
+    this.cluster = Objects.requireNonNull(cluster);
+  }
+
+  public RelNode readFile(Path path) throws IOException {
+    return readJson(Files.readString(path, Charset.defaultCharset()));
+  }
+
+  public RelNode readJson(String json) throws IOException {
+    HiveConf conf = cluster.getPlanner().getContext().unwrap(HiveConf.class);
+    if (conf == null) {
+      conf = new HiveConf();
+    }
+    RelOptSchema schema = HiveRelJsonSchemaReader.read(json, conf, 
cluster.getTypeFactory());
+    RelJsonReader reader = new RelJsonReader(
+        cluster,
+        schema,
+        null,
+        t -> t.withOperatorTable(chain(new HiveSqlOperatorTable(), 
SqlStdOperatorTable.instance())));
+    // At the moment we assume that the JSON plan always has a top-level field 
"CBOPlan"
+    // that contains the actual plan that can be handled by RelJsonReader.
+    return reader.read(new 
ObjectMapper().readTree(json).get("CBOPlan").toString());
+  }
+}
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelJsonSchemaReader.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelJsonSchemaReader.java
new file mode 100644
index 00000000000..26a097ed27a
--- /dev/null
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelJsonSchemaReader.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelOptSchema;
+import org.apache.calcite.plan.RelOptTable;
+import org.apache.calcite.rel.externalize.RelJson;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.TableConstraintsInfo;
+import org.apache.hadoop.hive.ql.parse.QueryTables;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Reads a JSON plan and converts it to a RelOptSchema by finding all tables
+ * that are referenced in the plan. The schema is created exclusively from the
+ * JSON input so any information that is not there will not be available.
+ */
[email protected]
+public final class HiveRelJsonSchemaReader {
+
+  private HiveRelJsonSchemaReader() {
+    throw new IllegalStateException("Utility class");
+  }
+  /**
+   * Reads the schema from the JSON input using the specified configuration 
and type factory.
+   */
+  public static RelOptSchema read(String jsonInput, HiveConf conf, 
RelDataTypeFactory typeFactory) throws IOException {
+    JsonNode node = new ObjectMapper().readTree(jsonInput);
+    Map<List<String>, TableInfo> tables = new HashMap<>();
+    for (JsonNode scan : node.findParents("table")) {
+      List<String> names = new ArrayList<>();
+      for (JsonNode n : scan.get("table")) {
+        names.add(n.asText());
+      }
+      RelDataType type = readType(typeFactory, scan.get("rowType"));
+      JsonNode rowNode = scan.get("rowCount");
+      double rowCount = rowNode != null ? rowNode.asDouble() : 100.0;
+      tables.put(names, new TableInfo(type, rowCount));
+    }
+    return new MapRelOptSchema(conf, typeFactory, tables);
+  }
+
+  private static RelDataType readType(RelDataTypeFactory typeFactory, JsonNode 
typeNode) throws IOException {
+    ObjectMapper typeMapper = new ObjectMapper();
+    Object value;
+    if (typeNode.getNodeType() == JsonNodeType.OBJECT) {
+      value = typeMapper.treeToValue(typeNode, Map.class);
+    } else if (typeNode.getNodeType() == JsonNodeType.ARRAY) {
+      value = typeMapper.treeToValue(typeNode, List.class);
+    } else {
+      throw new IllegalStateException();
+    }
+    return RelJson.create().toType(typeFactory, value);
+  }
+
+  private record TableInfo(RelDataType rowType, double rowCount) {
+  }
+
+  private static final class MapRelOptSchema implements RelOptSchema {
+    private final HiveConf conf;
+    private final RelDataTypeFactory typeFactory;
+    private final Map<List<String>, TableInfo> tables;
+
+    MapRelOptSchema(HiveConf conf, RelDataTypeFactory typeFactory, 
Map<List<String>, TableInfo> tables) {
+      this.conf = conf;
+      this.typeFactory = typeFactory;
+      this.tables = tables;
+    }
+
+    @Override
+    public RelOptTable getTableForMember(List<String> names) {
+      TableInfo tableInfo = tables.get(names);
+      if (tableInfo == null) {
+        return null;
+      }
+      org.apache.hadoop.hive.metastore.api.Table mTable = new 
org.apache.hadoop.hive.metastore.api.Table();
+      mTable.setDbName(names.get(0));
+      mTable.setTableName(names.get(1));
+      Table metadataTable = new Table(mTable);
+      // Set info constraints as empty since we can't extract anything from 
+JSON,
+      // and we want to avoid lookups in the metastore that will anyway fail
+      // since tables are not expected to exist.
+      metadataTable.setTableConstraintsInfo(new TableConstraintsInfo());
+      RelOptHiveTable optTable = new RelOptHiveTable(
+          this,
+          typeFactory,
+          names,
+          tableInfo.rowType,
+          metadataTable,
+          new ArrayList<>(),
+          new ArrayList<>(),
+          new ArrayList<>(),
+          conf,
+          new QueryTables(true),
+          new HashMap<>(),
+          new HashMap<>(),
+          new AtomicInteger());
+      optTable.setRowCount(tableInfo.rowCount);
+      return optTable;
+    }
+
+    @Override
+    public RelDataTypeFactory getTypeFactory() {
+      return typeFactory;
+    }
+
+    @Override
+    public void registerRules(RelOptPlanner planner) {
+      // No need to register any rules in the planner
+    }
+  }
+}
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexJsonBuilder.java
similarity index 50%
copy from 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeFactory.java
copy to 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexJsonBuilder.java
index f0a7514b7ed..66257dab59a 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeFactory.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexJsonBuilder.java
@@ -17,38 +17,27 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite;
 
-import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.sql.type.SqlTypeName;
-import org.checkerframework.checker.nullness.qual.Nullable;
+import org.apache.hadoop.hive.ql.parse.type.RexNodeExprFactory;
 
-import java.util.List;
-
-public class HiveTypeFactory extends JavaTypeFactoryImpl {
-  public HiveTypeFactory() {
-    super(new HiveTypeSystemImpl());
-    if (Bug.CALCITE_6954_FIXED) {
-      throw new IllegalStateException("Class redundant once fix is merged");
-    }
-  }
-
-  @Override
-  protected @Nullable RelDataType leastRestrictiveArrayMultisetType(final 
List<RelDataType> types,
-      final SqlTypeName sqlTypeName) {
-    RelDataType type = super.leastRestrictiveArrayMultisetType(types, 
sqlTypeName);
-    if (type != null) {
-      return canonize(type);
-    }
-    return null;
+/**
+ * Factory for row expressions created from JSON files.
+ */
+class HiveRexJsonBuilder extends RexBuilder {
+  HiveRexJsonBuilder() {
+    super(new HiveTypeFactory());
   }
 
   @Override
-  protected @Nullable RelDataType leastRestrictiveMapType(final 
List<RelDataType> types,
-      final SqlTypeName sqlTypeName) {
-    RelDataType type = super.leastRestrictiveMapType(types, sqlTypeName);
-    if (type != null) {
-      return canonize(type);
+  public RexNode makeLiteral(Object value, RelDataType type, boolean 
allowCast, boolean trim) {
+    // VARCHAR will always come back as CHAR if we don't allow a cast
+    allowCast = SqlTypeName.VARCHAR.equals(type.getSqlTypeName()) || allowCast;
+    if (SqlTypeName.CHAR_TYPES.contains(type.getSqlTypeName())) {
+      value = RexNodeExprFactory.makeHiveUnicodeString((String) value);
     }
-    return null;
+    return super.makeLiteral(value, type, allowCast, trim);
   }
 }
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeFactory.java
index f0a7514b7ed..ebf81fdc897 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeFactory.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeFactory.java
@@ -20,8 +20,10 @@
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.ConversionUtil;
 import org.checkerframework.checker.nullness.qual.Nullable;
 
+import java.nio.charset.Charset;
 import java.util.List;
 
 public class HiveTypeFactory extends JavaTypeFactoryImpl {
@@ -51,4 +53,9 @@ public HiveTypeFactory() {
     }
     return null;
   }
+
+  @Override
+  public Charset getDefaultCharset() {
+    return Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME);
+  }
 }
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
index 8775361520a..52a11d4e8d9 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
@@ -24,6 +24,7 @@
 import org.apache.calcite.linq4j.Ord;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelShuttle;
 import org.apache.calcite.rel.core.Aggregate;
@@ -32,12 +33,15 @@
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.sql.SqlAggFunction;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.type.SqlTypeUtil;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelShuttle;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 
 import com.google.common.collect.Sets;
+import 
org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter;
 
 public class HiveAggregate extends Aggregate implements HiveRelNode {
 
@@ -51,6 +55,30 @@ public HiveAggregate(RelOptCluster cluster, RelTraitSet 
traitSet, RelNode child,
             groupSet, groupSets, aggCalls);
   }
 
+  public HiveAggregate(RelInput input) {
+    this(
+        input.getCluster(),
+        input.getTraitSet(),
+        input.getInput(),
+        input.getBitSet("group"),
+        input.getBitSetList("groups"),
+        input.getAggregateCalls("aggs")
+            .stream()
+            .map(call -> 
HiveAggregate.replaceAggFunction(input.getInput().getRowType(), call))
+            .toList());
+  }
+
+  private static AggregateCall replaceAggFunction(RelDataType rowType, 
AggregateCall aggCall) {
+    // Fix the return type of the agg function
+    SqlAggFunction aggFunction = SqlFunctionConverter.getCalciteAggFn(
+        aggCall.getAggregation().getName(),
+        SqlTypeUtil.projectTypes(rowType, aggCall.getArgList()),
+        aggCall.getType());
+    return AggregateCall.create(aggFunction, aggCall.isDistinct(), 
aggCall.isApproximate(), aggCall.ignoreNulls(),
+        aggCall.getArgList(), aggCall.filterArg, aggCall.distinctKeys, 
aggCall.getCollation(), aggCall.getType(),
+        aggCall.getName());
+  }
+
   @Override
   public Aggregate copy(RelTraitSet traitSet, RelNode input,
           ImmutableBitSet groupSet,
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAntiJoin.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAntiJoin.java
index 2c53979115b..5d59f4413b7 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAntiJoin.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAntiJoin.java
@@ -21,6 +21,7 @@
 import com.google.common.collect.Sets;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.JoinRelType;
@@ -68,6 +69,15 @@ protected HiveAntiJoin(RelOptCluster cluster,
             this.getCondition(), joinKeyExprs, filterNulls, null);
   }
 
+  public HiveAntiJoin(RelInput input) throws CalciteSemanticException {
+    this(
+        input.getCluster(),
+        input.getTraitSet(),
+        input.getInputs().get(0),
+        input.getInputs().get(1),
+        input.getExpression("condition"));
+  }
+
   public RexNode getJoinFilter() {
     return joinFilter;
   }
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
index 1a1f4d3d5dd..2b34f414af2 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
@@ -19,6 +19,7 @@
 
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelShuttle;
 import org.apache.calcite.rel.core.Filter;
@@ -71,6 +72,10 @@ public HiveFilter(RelOptCluster cluster, RelTraitSet traits, 
RelNode child, RexN
     this.correlationInfos = new CorrelationInfoSupplier(getCondition());
   }
 
+  public HiveFilter(RelInput input) {
+    this(input.getCluster(), input.getTraitSet(), input.getInput(), 
input.getExpression("condition"));
+  }
+
   @Override
   public Filter copy(RelTraitSet traitSet, RelNode input, RexNode condition) {
     assert traitSet.containsIfApplicable(HiveRelNode.CONVENTION);
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
index 6e13854d2b2..a24ad7c960e 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
@@ -22,6 +22,7 @@
 import java.util.List;
 import java.util.Set;
 
+import com.google.common.collect.ImmutableSet;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelTraitSet;
@@ -29,6 +30,7 @@
 import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelCollations;
 import org.apache.calcite.rel.RelDistribution;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelWriter;
 import org.apache.calcite.rel.core.Join;
@@ -93,6 +95,18 @@ protected HiveJoin(RelOptCluster cluster, RelTraitSet 
traits, RelNode left, RelN
     this.joinAlgorithm = joinAlgo;
   }
 
+  public HiveJoin(RelInput input) throws InvalidRelException, 
CalciteSemanticException {
+    this(
+        input.getCluster(),
+        input.getTraitSet(),
+        input.getInputs().get(0),
+        input.getInputs().get(1),
+        input.getExpression("condition"),
+        input.getEnum("joinType", JoinRelType.class),
+        ImmutableSet.of(),
+        null);
+  }
+
   @Override
   public final HiveJoin copy(RelTraitSet traitSet, RexNode conditionExpr, 
RelNode left,
       RelNode right, JoinRelType joinType, boolean semiJoinDone) {
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
index fa0bbb307a1..2a04c676f42 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
@@ -24,6 +24,7 @@
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelShuttle;
 import org.apache.calcite.rel.core.Project;
@@ -62,6 +63,14 @@ public HiveProject(RelOptCluster cluster, RelTraitSet 
traitSet, RelNode child, L
     assert traitSet.containsIfApplicable(HiveRelNode.CONVENTION);
   }
 
+  public HiveProject(RelInput input) {
+    this(input.getCluster(),
+        TraitsUtil.getDefaultTraitSet(input.getCluster()),
+        input.getInput(),
+        input.getExpressionList("exprs"),
+        input.getRowType("exprs", "fields"));
+  }
+
   /**
    * Creates a HiveProject with no sort keys.
    *
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
index a25e247eb85..f55c9ed5d65 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
@@ -23,6 +23,7 @@
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.InvalidRelException;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.JoinInfo;
@@ -71,6 +72,15 @@ protected HiveSemiJoin(RelOptCluster cluster,
             this.getCondition(), joinKeyExprs, filterNulls, null);
   }
 
+  public HiveSemiJoin(RelInput input) throws InvalidRelException, 
CalciteSemanticException {
+    this(
+        input.getCluster(),
+        input.getTraitSet(),
+        input.getInputs().get(0),
+        input.getInputs().get(1),
+        input.getExpression("condition"));
+  }
+
   public RexNode getJoinFilter() {
     return joinFilter;
   }
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortExchange.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortExchange.java
index 17fee95f43c..4da2ba9ac35 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortExchange.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortExchange.java
@@ -21,11 +21,11 @@
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelCollationImpl;
 import org.apache.calcite.rel.RelCollationTraitDef;
 import org.apache.calcite.rel.RelDistribution;
 import org.apache.calcite.rel.RelDistributionTraitDef;
 import org.apache.calcite.rel.RelFieldCollation;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.SortExchange;
 import org.apache.calcite.rex.RexNode;
@@ -48,6 +48,11 @@ private HiveSortExchange(RelOptCluster cluster, RelTraitSet 
traitSet,
     this.keys = new ImmutableList.Builder<RexNode>().addAll(keys).build();
   }
 
+  public HiveSortExchange(RelInput input) {
+    super(input);
+    this.keys = sortKeys(input.getCollation(), input.getInput());
+  }
+
   /**
    * Creates a HiveSortExchange.
    *
@@ -76,15 +81,16 @@ public static HiveSortExchange create(RelNode input,
     distribution = RelDistributionTraitDef.INSTANCE.canonize(distribution);
     collation = RelCollationTraitDef.INSTANCE.canonize(collation);
     RelTraitSet traitSet = getTraitSet(cluster, collation, distribution);
-    RelCollation canonizedCollation = 
traitSet.canonize(RelCollationImpl.of(collation.getFieldCollations()));
+    return new HiveSortExchange(cluster, traitSet, input, distribution, 
collation, sortKeys(collation, input));
+  }
 
+  private static ImmutableList<RexNode> sortKeys(RelCollation collation, 
RelNode input) {
     ImmutableList.Builder<RexNode> builder = ImmutableList.builder();
-    for (RelFieldCollation relFieldCollation : 
canonizedCollation.getFieldCollations()) {
+    for (RelFieldCollation relFieldCollation : collation.getFieldCollations()) 
{
       int index = relFieldCollation.getFieldIndex();
-      builder.add(cluster.getRexBuilder().makeInputRef(input, index));
+      builder.add(input.getCluster().getRexBuilder().makeInputRef(input, 
index));
     }
-
-    return new HiveSortExchange(cluster, traitSet, input, distribution, 
collation, builder.build());
+    return builder.build();
   }
 
   @Override
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortLimit.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortLimit.java
index 67484f44375..40a9645b42f 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortLimit.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortLimit.java
@@ -23,6 +23,7 @@
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelShuttle;
 import org.apache.calcite.rel.core.Sort;
@@ -51,6 +52,10 @@ public HiveSortLimit(RelOptCluster cluster, RelTraitSet 
traitSet, RelNode child,
         offset, fetch);
   }
 
+  public HiveSortLimit(RelInput input) {
+    super(input);
+  }
+
   /**
    * Creates a HiveSortLimit.
    *
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableFunctionScan.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableFunctionScan.java
index 881e57720f0..176d27e07d9 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableFunctionScan.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableFunctionScan.java
@@ -23,6 +23,7 @@
 
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.TableFunctionScan;
 import org.apache.calcite.rel.metadata.RelColumnMapping;
@@ -54,6 +55,10 @@ protected HiveTableFunctionScan(RelOptCluster cluster, 
RelTraitSet traitSet, Lis
     super(cluster, traitSet, inputs, rexCall, elementType, rowType, 
columnMappings);
   }
 
+  public HiveTableFunctionScan(RelInput input) {
+    super(input);
+  }
+
   public static HiveTableFunctionScan create(RelOptCluster cluster, 
RelTraitSet traitSet,
       List<RelNode> inputs, RexNode rexCall, Type elementType, RelDataType 
rowType,
       Set<RelColumnMapping> columnMappings) throws CalciteSemanticException {
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
index 05e8b448f62..9c50fb7c677 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
@@ -26,6 +26,7 @@
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelShuttle;
 import org.apache.calcite.rel.RelWriter;
@@ -63,6 +64,7 @@
  */
 public class HiveTableScan extends TableScan implements HiveRelNode {
 
+  private static final String QBID_TAG = "qbid:alias";
   public enum HiveTableScanTrait {
     /**
      * If this is a fully acid table scan fetch the deleted rows too.
@@ -139,6 +141,18 @@ public HiveTableScan(RelOptCluster cluster, RelTraitSet 
traitSet, RelOptHiveTabl
     this(cluster, traitSet, table, alias, concatQbIDAlias, table.getRowType(), 
useQBIdInDigest, insideView, null);
   }
 
+  public HiveTableScan(RelInput input) {
+    this(
+        input.getCluster(),
+        input.getTraitSet(),
+        (RelOptHiveTable) input.getTable("table"),
+        input.getString("table:alias"),
+        input.getString(QBID_TAG),
+        input.get(QBID_TAG) != null,
+        input.getBoolean("insideView", false),
+        createTableScanTrait(input));
+  }
+
   public HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, 
RelOptHiveTable table,
       String alias, String concatQbIDAlias, boolean useQBIdInDigest, boolean 
insideView,
       HiveTableScanTrait tableScanTrait) {
@@ -203,7 +217,7 @@ public HiveTableScan copyIncludingTable(RelDataType 
newRowtype) {
   // expression was already generated.
   @Override public RelWriter explainTerms(RelWriter pw) {
     return super.explainTerms(pw)
-      .itemIf("qbid:alias", concatQbIDAlias, this.useQBIdInDigest)
+      .itemIf(QBID_TAG, concatQbIDAlias, this.useQBIdInDigest)
       .itemIf("htColumns", this.neededColIndxsFrmReloptHT, pw.getDetailLevel() 
== SqlExplainLevel.DIGEST_ATTRIBUTES)
       .itemIf("insideView", this.isInsideView(), pw.getDetailLevel() == 
SqlExplainLevel.DIGEST_ATTRIBUTES)
       .itemIf("plKey", ((RelOptHiveTable) table).getPartitionListKey(), 
pw.getDetailLevel() == SqlExplainLevel.DIGEST_ATTRIBUTES)
@@ -325,6 +339,14 @@ public HiveTableScanTrait getTableScanTrait() {
     return tableScanTrait;
   }
 
+  private static HiveTableScanTrait createTableScanTrait(RelInput input) {
+    String enumName = input.getString("tableScanTrait");
+    if (enumName == null) {
+      return null;
+    }
+    return HiveTableScanTrait.valueOf(enumName);
+  }
+
   @Override
   public RelNode accept(RelShuttle shuttle) {
     if(shuttle instanceof HiveRelShuttle) {
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableSpool.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableSpool.java
index d5a39835b87..b09e42cf06f 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableSpool.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableSpool.java
@@ -18,17 +18,29 @@
 
 import org.apache.calcite.plan.RelOptTable;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.prepare.RelOptTableImpl;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelWriter;
 import org.apache.calcite.rel.core.Spool;
 import org.apache.calcite.rel.core.TableSpool;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 
+import java.util.List;
+
 public class HiveTableSpool extends TableSpool implements HiveRelNode {
   public HiveTableSpool(RelNode input, Type readType, Type writeType, 
RelOptTable table) {
     super(input.getCluster(), 
TraitsUtil.getDefaultTraitSet(input.getCluster()), input, readType, writeType, 
table);
   }
 
+  public HiveTableSpool(RelInput relInput) {
+    this(
+        relInput.getInput(),
+        Type.LAZY,
+        Type.LAZY,
+        RelOptTableImpl.create(null, relInput.getInput().getRowType(), 
(List<String>) relInput.get("table"), null));
+  }
+
   @Override
   protected Spool copy(RelTraitSet traitSet, RelNode input, Type readType, 
Type writeType) {
     return new HiveTableSpool(input, readType, writeType, table);
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
index 0e272f97335..e14069baf19 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
@@ -21,6 +21,7 @@
 
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelShuttle;
 import org.apache.calcite.rel.core.SetOp;
@@ -33,6 +34,10 @@ public HiveUnion(RelOptCluster cluster, RelTraitSet traits, 
List<RelNode> inputs
     super(cluster, traits, inputs, true);
   }
 
+  public HiveUnion(RelInput input) {
+    super(input);
+  }
+
   @Override
   public SetOp copy(RelTraitSet traitSet, List<RelNode> inputs, boolean all) {
     return new HiveUnion(this.getCluster(), traitSet, inputs);
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveValues.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveValues.java
index ecf2804c2b4..e479331e8b9 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveValues.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveValues.java
@@ -21,6 +21,7 @@
 import com.google.common.collect.ImmutableList;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Values;
 import org.apache.calcite.rel.type.RelDataType;
@@ -41,6 +42,10 @@ public HiveValues(RelOptCluster cluster, RelDataType 
rowType, ImmutableList<Immu
     super(cluster, rowType, tuples, traits);
   }
 
+  public HiveValues(RelInput input) {
+    super(input);
+  }
+
   @Override
   public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
     return new HiveValues(getCluster(), getRowType(), tuples, getTraitSet());
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
index 705b04728c4..9979991af62 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
@@ -79,6 +79,10 @@
  */
 public class HiveSubQueryRemoveRule extends RelOptRule {
 
+  public static final SqlFunction SQ_COUNT_CHECK =
+      new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION, 
ReturnTypes.BOOLEAN, InferTypes.RETURN_TYPE,
+          OperandTypes.NUMERIC, SqlFunctionCategory.USER_DEFINED_FUNCTION);
+
   public static RelOptRule forProject(HiveConf conf) {
     return new HiveSubQueryRemoveRule(
         RelOptRule.operandJ(HiveProject.class, null, 
RexUtil.SubQueryFinder::containsSubQuery, any()),
@@ -180,15 +184,10 @@ private RexNode rewriteScalar(RelMetadataQuery mq, 
RexSubQuery e, Set<Correlatio
       // returns single row/column
       builder.aggregate(builder.groupKey(), builder.count(false, "cnt"));
 
-      SqlFunction countCheck =
-          new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION, 
ReturnTypes.BOOLEAN,
-              InferTypes.RETURN_TYPE, OperandTypes.NUMERIC,
-              SqlFunctionCategory.USER_DEFINED_FUNCTION);
-
       //we create FILTER (sq_count_check(count())) instead of PROJECT because 
RelFieldTrimmer
       // ends up getting rid of Project since it is not used further up the 
tree
       //sq_count_check returns true when subquery returns single row, else it 
fails
-      builder.filter(builder.call(countCheck, builder.field("cnt")));
+      builder.filter(builder.call(SQ_COUNT_CHECK, builder.field("cnt")));
       if (!variablesSet.isEmpty()) {
         builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
       } else {
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveSqlOperatorTable.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveSqlOperatorTable.java
new file mode 100644
index 00000000000..c7e8cac2f3d
--- /dev/null
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveSqlOperatorTable.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlOperatorTable;
+import org.apache.calcite.sql.SqlSyntax;
+import org.apache.calcite.sql.validate.SqlNameMatcher;
+import org.apache.hadoop.hive.ql.exec.FunctionInfo;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Implementation of {@link SqlOperatorTable} for Hive operators.
+ * <p>Implementation details: contrary to other implementations of an operator 
table,
+ * this does not follow the SINGLETON pattern, since it is stateless and thus 
very cheap to create.
+ */
+public final class HiveSqlOperatorTable implements SqlOperatorTable {
+
+  @Override
+  public void lookupOperatorOverloads(SqlIdentifier opName, 
SqlFunctionCategory category, SqlSyntax syntax,
+      List<SqlOperator> operatorList, SqlNameMatcher nameMatcher) {
+    // If the operator is already registered, there is no need to go through the registry again
+    String name = opName.getSimple();
+    SqlOperator op = 
SqlFunctionConverter.hiveToCalcite.get(name.toLowerCase());
+    if (op != null) {
+      operatorList.add(op);
+      return;
+    }
+    FunctionInfo fi;
+    try {
+      fi = FunctionRegistry.getFunctionInfo(name);
+      if (fi == null) {
+        return;
+      }
+      if (fi.isGenericUDF()) {
+        operatorList.add(SqlFunctionConverter.getCalciteOperator(name, 
fi.getGenericUDF(), ImmutableList.of(), null));
+      }
+      if (fi.isGenericUDAF()) {
+        operatorList.add(SqlFunctionConverter.getCalciteAggFn(name, 
Collections.emptyList(), null));
+      }
+    } catch (SemanticException e) {
+      // If the lookup in the registry fails, just return as if the function is 
not found
+    }
+  }
+
+  @Override
+  public List<SqlOperator> getOperatorList() {
+    // Currently this method is not used, so for simplicity we just return an empty list.
+    return Collections.emptyList();
+  }
+}
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index a216bdfd00f..d6a93070ff0 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -37,6 +37,7 @@
 import org.apache.calcite.sql.type.SqlOperandTypeInference;
 import org.apache.calcite.sql.type.SqlReturnTypeInference;
 import org.apache.calcite.sql.type.SqlTypeFamily;
+import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.util.Util;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hive.ql.exec.DataSketchesFunctions;
@@ -58,12 +59,14 @@
 import 
org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExtractDate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFloorDate;
 import 
org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFromUnixTimeSqlOperator;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIn;
 import 
org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSqlFunction;
 import 
org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveToDateSqlOperator;
 import 
org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTruncSqlOperator;
 import 
org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveToUnixTimestampSqlOperator;
 import 
org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnixTimestampSqlOperator;
+import 
org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSubQueryRemoveRule;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.ParseDriver;
@@ -436,6 +439,9 @@ private static class StaticBlockBuilder {
       registerFunction("concat", HiveConcat.INSTANCE,
           hToken(HiveParser.Identifier, "concat")
       );
+      registerDuplicateFunction("||", HiveConcat.INSTANCE,
+          hToken(HiveParser.Identifier, "concat")
+      );
       registerFunction("substring", SqlStdOperatorTable.SUBSTRING,
           hToken(HiveParser.Identifier, "substring")
       );
@@ -467,7 +473,9 @@ private static class StaticBlockBuilder {
           hToken(HiveParser.Identifier, "from_unixtime"));
       registerFunction("date_add", HiveDateAddSqlOperator.INSTANCE, 
hToken(HiveParser.Identifier, "date_add"));
       registerFunction("date_sub", HiveDateSubSqlOperator.INSTANCE, 
hToken(HiveParser.Identifier, "date_sub"));
-
+      registerFunction("sq_count_check", HiveSubQueryRemoveRule.SQ_COUNT_CHECK,
+          hToken(HiveParser.Identifier, "sq_count_check"));
+      hiveToCalcite.put("grouping__id", HiveGroupingID.INSTANCE);
       registerPlugin(DataSketchesFunctions.INSTANCE);
     }
 
@@ -524,7 +532,11 @@ private static CalciteUDFInfo getUDFInfo(String 
hiveUdfName,
       List<RelDataType> calciteArgTypes, RelDataType calciteRetType) {
     CalciteUDFInfo udfInfo = new CalciteUDFInfo();
     udfInfo.udfName = hiveUdfName;
-    udfInfo.returnTypeInference = ReturnTypes.explicit(calciteRetType);
+    if (calciteRetType == null) {
+      udfInfo.returnTypeInference = opBinding -> 
opBinding.getTypeFactory().createSqlType(SqlTypeName.UNKNOWN);
+    } else {
+      udfInfo.returnTypeInference = ReturnTypes.explicit(calciteRetType);
+    }
     udfInfo.operandTypeInference = InferTypes.explicit(calciteArgTypes);
     ImmutableList.Builder<SqlTypeFamily> typeFamilyBuilder = new 
ImmutableList.Builder<SqlTypeFamily>();
     for (RelDataType at : calciteArgTypes) {
@@ -565,7 +577,7 @@ public static SqlOperator getCalciteFn(String hiveUdfName,
     return calciteOp;
   }
 
-  public static SqlAggFunction getCalciteAggFn(String hiveUdfName, 
ImmutableList<RelDataType> calciteArgTypes,
+  public static SqlAggFunction getCalciteAggFn(String hiveUdfName, 
List<RelDataType> calciteArgTypes,
       RelDataType calciteRetType) {
     SqlAggFunction calciteAggFn = (SqlAggFunction) 
hiveToCalcite.get(hiveUdfName);
 
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestHiveRelJsonReader.java
 
b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestHiveRelJsonReader.java
new file mode 100644
index 00000000000..89c2606e04e
--- /dev/null
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestHiveRelJsonReader.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.calcite.plan.Contexts;
+import org.apache.calcite.plan.ConventionTraitDef;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.volcano.VolcanoPlanner;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.testutils.HiveTestEnvSetup;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.UnaryOperator;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.junit.jupiter.api.Assertions.assertLinesMatch;
+import static org.junit.jupiter.api.Assertions.fail;
+
+class TestHiveRelJsonReader {
+  private static final Path TPCDS_RESULTS_PATH =
+      Paths.get(HiveTestEnvSetup.HIVE_ROOT, 
"ql/src/test/results/clientpositive/perf/tpcds30tb/");
+
+  static Stream<Path> inputJsonFiles() throws IOException {
+    return Files.list(TPCDS_RESULTS_PATH.resolve("json"));
+  }
+
+  @ParameterizedTest
+  @MethodSource("inputJsonFiles")
+  void testReadJson(Path jsonFile) throws IOException {
+    String jsonContent =
+        Files.readAllLines(jsonFile).stream().filter(line -> 
!line.startsWith("Warning")).collect(Collectors.joining());
+    // Use VolcanoPlanner to be able to set the ConventionTraitDef in the 
cluster,
+    // otherwise all trait sets will have an empty set of traits, which may lead 
to
+    // assertion failures when creating Hive operators.
+    VolcanoPlanner planner = new VolcanoPlanner(Contexts.of(new HiveConf()));
+    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
+    RelOptCluster cluster = RelOptCluster.create(planner, new 
HiveRexJsonBuilder());
+    String actualPlan = RelOptUtil.toString(new 
HiveRelJsonReader(cluster).readJson(jsonContent));
+    String expectedPlan = readExpectedPlan(jsonFile);
+    assertLinesMatch(normalize(jsonFile, expectedPlan), normalize(jsonFile, 
actualPlan), "Failed for: " + jsonFile);
+  }
+
+  private static String readExpectedPlan(Path jsonFile) throws IOException {
+    String cboFileName = jsonFile.getFileName().toString().replace("query", 
"cbo_query");
+    Path cboFile = TPCDS_RESULTS_PATH.resolve("tez").resolve(cboFileName);
+    if (!Files.exists(cboFile)) {
+      fail("CBO file not found for JSON file: " + jsonFile + ", expected at: " 
+ cboFile);
+    }
+    return new String(Files.readAllBytes(cboFile), Charset.defaultCharset());
+  }
+
+  private static Stream<String> normalize(Path file, String plan) {
+    Function<String, String> normalizer = Function.identity();
+    for (Normalizer i : Normalizer.values()) {
+      if (i.affects(file.getFileName().toString())) {
+        normalizer = normalizer.andThen(i::apply);
+      }
+    }
+    return Arrays.stream(plan.split("\\R"))
+        .filter(line -> !line.startsWith("Warning"))
+        .filter(line -> !line.startsWith("CBO PLAN:"))
+        .filter(line -> !line.trim().isEmpty())
+        .map(normalizer);
+  }
+
+  /**
+   * Normalization logic for addressing small discrepancies between plans.
+   */
+  private enum Normalizer {
+    /**
+     * A normalizer for scientific notation discrepancies.
+     * <p>
+     * Normalizes the input line by converting occurrences of scientific 
notation from numbers.
+     */
+    SCIENTIFIC_NOTATION(
+        Pattern.compile("\\d+(\\.\\d+)?E\\d+"),
+        num -> new BigDecimal(num).toPlainString(),
+        "query34.q.out",
+        "query39.q.out",
+        "query73.q.out",
+        "query83.q.out"),
+    /**
+     * A normalizer for COUNT function discrepancies. At the moment the 
deserializer fails to
+     * distinguish between COUNT (from {@link 
org.apache.calcite.sql.fun.SqlStdOperatorTable}) and
+     * count (from {@link 
org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlCountAggFunction})
+     * operators. The deserializer always picks the latter no matter which one 
is really present
+     * in the original plan.
+     * <p>
+     * Normalizes the input line by converting occurrences of the COUNT 
aggregate function name to lowercase implicitly
+     * converting the SQL standard operator to Hive built-in.
+     */
+    COUNT(
+        Pattern.compile("COUNT\\(", Pattern.CASE_INSENSITIVE),
+        String::toLowerCase,
+        "query6.q.out",
+        "query14.q.out",
+        "query44.q.out",
+        "query54.q.out",
+        "query58.q.out");
+
+    private final Pattern pattern;
+    private final UnaryOperator<String> replacer;
+    private final Set<String> affectedFiles;
+
+    Normalizer(Pattern pattern, UnaryOperator<String> replacer, String... 
files) {
+      this.pattern = pattern;
+      this.replacer = replacer;
+      this.affectedFiles = ImmutableSet.copyOf(files);
+    }
+
+    boolean affects(String fileName) {
+      return affectedFiles.contains(fileName);
+    }
+
+    String apply(String line) {
+      Matcher matcher = pattern.matcher(line);
+      StringBuilder sb = new StringBuilder();
+      while (matcher.find()) {
+        matcher.appendReplacement(sb, replacer.apply(matcher.group()));
+      }
+      matcher.appendTail(sb);
+      return sb.toString();
+    }
+
+  }
+}
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestHiveRelJsonSchemaReader.java
 
b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestHiveRelJsonSchemaReader.java
new file mode 100644
index 00000000000..291b0b172cd
--- /dev/null
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestHiveRelJsonSchemaReader.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.calcite.plan.RelOptSchema;
+import org.apache.calcite.plan.RelOptTable;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.testutils.HiveTestEnvSetup;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.Set;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+class TestHiveRelJsonSchemaReader {
+  private static final Path TPCDS_JSON_PATH =
+      Paths.get(HiveTestEnvSetup.HIVE_ROOT, 
"ql/src/test/results/clientpositive/perf/tpcds30tb/json");
+
+  @Test
+  void testReadSchemaFromTpcdsQuery1() throws IOException {
+    Path iFile = TPCDS_JSON_PATH.resolve("query1.q.out");
+    String jsonContent = new String(Files.readAllBytes(iFile), 
Charset.defaultCharset());
+    RelOptSchema schema = HiveRelJsonSchemaReader.read(jsonContent, new 
HiveConf(), new HiveTypeFactory());
+    Set<TpcdsTable> validTables =
+        ImmutableSet.of(TpcdsTable.CUSTOMER, TpcdsTable.STORE, 
TpcdsTable.DATE_DIM, TpcdsTable.STORE_RETURNS);
+    for (TpcdsTable t : validTables) {
+      RelOptTable table = schema.getTableForMember(Arrays.asList("default", 
t.name().toLowerCase()));
+      assertNotNull(table, "Table " + t.name() + " not found in schema");
+      assertEquals(t.type(schema.getTypeFactory()), table.getRowType());
+      assertEquals(t.rowCount(), table.getRowCount(), 1d);
+    }
+  }
+
+}
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TpcdsTable.java 
b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TpcdsTable.java
new file mode 100644
index 00000000000..0ce12395615
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TpcdsTable.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelProtoDataType;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+public enum TpcdsTable {
+  CUSTOMER(
+      80000000,
+      f -> f.builder()
+          .add("c_customer_sk", SqlTypeName.BIGINT)
+          .nullable(false)
+          .add("c_customer_id", SqlTypeName.CHAR, 16)
+          .nullable(false)
+          .add("c_current_cdemo_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("c_current_hdemo_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("c_current_addr_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("c_first_shipto_date_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("c_first_sales_date_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("c_salutation", SqlTypeName.CHAR, 10)
+          .nullable(true)
+          .add("c_first_name", SqlTypeName.CHAR, 20)
+          .nullable(true)
+          .add("c_last_name", SqlTypeName.CHAR, 30)
+          .nullable(true)
+          .add("c_preferred_cust_flag", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .add("c_birth_day", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("c_birth_month", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("c_birth_year", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("c_birth_country", SqlTypeName.VARCHAR, 20)
+          .nullable(true)
+          .add("c_login", SqlTypeName.CHAR, 13)
+          .nullable(true)
+          .add("c_email_address", SqlTypeName.CHAR, 50)
+          .nullable(true)
+          .add("c_last_review_date_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .addAll(systemColumns(f).getFieldList())
+          .build()), STORE_RETURNS(
+      8332595709d,
+      f -> f.builder()
+          .add("sr_return_time_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("sr_item_sk", SqlTypeName.BIGINT)
+          .nullable(false)
+          .add("sr_customer_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("sr_cdemo_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("sr_hdemo_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("sr_addr_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("sr_store_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("sr_reason_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("sr_ticket_number", SqlTypeName.BIGINT)
+          .nullable(false)
+          .add("sr_return_quantity", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("sr_return_amt", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_return_tax", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_return_amt_inc_tax", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_fee", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_return_ship_cost", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_refunded_cash", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_reversed_charge", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_store_credit", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_net_loss", SqlTypeName.DECIMAL, 7, 2)
+          .nullable(true)
+          .add("sr_returned_date_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .addAll(systemColumns(f).getFieldList())
+          .build()), DATE_DIM(
+      73049,
+      f -> f.builder()
+          .add("d_date_sk", SqlTypeName.BIGINT)
+          .nullable(false)
+          .add("d_date_id", SqlTypeName.VARCHAR, Integer.MAX_VALUE)
+          .nullable(false)
+          .add("d_date", SqlTypeName.DATE)
+          .nullable(true)
+          .add("d_month_seq", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_week_seq", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_quarter_seq", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_year", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_dow", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_moy", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_dom", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_qoy", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_fy_year", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_fy_quarter_seq", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_fy_week_seq", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_day_name", SqlTypeName.CHAR, 9)
+          .nullable(true)
+          .add("d_quarter_name", SqlTypeName.CHAR, 6)
+          .nullable(true)
+          .add("d_holiday", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .add("d_weekend", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .add("d_following_holiday", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .add("d_first_dom", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_last_dom", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_same_day_ly", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_same_day_lq", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("d_current_day", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .add("d_current_week", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .add("d_current_month", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .add("d_current_quarter", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .add("d_current_year", SqlTypeName.CHAR, 1)
+          .nullable(true)
+          .addAll(systemColumns(f).getFieldList())
+          .build()), STORE(
+      1704,
+      f -> f.builder()
+          .add("s_store_sk", SqlTypeName.BIGINT)
+          .nullable(false)
+          .add("s_store_id", SqlTypeName.VARCHAR, Integer.MAX_VALUE)
+          .nullable(false)
+          .add("s_rec_start_date", SqlTypeName.DATE)
+          .nullable(true)
+          .add("s_rec_end_date", SqlTypeName.DATE)
+          .nullable(true)
+          .add("s_closed_date_sk", SqlTypeName.BIGINT)
+          .nullable(true)
+          .add("s_store_name", SqlTypeName.VARCHAR, 50)
+          .nullable(true)
+          .add("s_number_employees", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("s_floor_space", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("s_hours", SqlTypeName.CHAR, 20)
+          .nullable(true)
+          .add("s_manager", SqlTypeName.VARCHAR, 40)
+          .nullable(true)
+          .add("s_market_id", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("s_geography_class", SqlTypeName.VARCHAR, 100)
+          .nullable(true)
+          .add("s_market_desc", SqlTypeName.VARCHAR, 100)
+          .nullable(true)
+          .add("s_market_manager", SqlTypeName.VARCHAR, 40)
+          .nullable(true)
+          .add("s_division_id", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("s_division_name", SqlTypeName.VARCHAR, 50)
+          .nullable(true)
+          .add("s_company_id", SqlTypeName.INTEGER)
+          .nullable(true)
+          .add("s_company_name", SqlTypeName.VARCHAR, 50)
+          .nullable(true)
+          .add("s_street_number", SqlTypeName.VARCHAR, 10)
+          .nullable(true)
+          .add("s_street_name", SqlTypeName.VARCHAR, 60)
+          .nullable(true)
+          .add("s_street_type", SqlTypeName.CHAR, 15)
+          .nullable(true)
+          .add("s_suite_number", SqlTypeName.CHAR, 10)
+          .nullable(true)
+          .add("s_city", SqlTypeName.VARCHAR, 60)
+          .nullable(true)
+          .add("s_county", SqlTypeName.VARCHAR, 30)
+          .nullable(true)
+          .add("s_state", SqlTypeName.CHAR, 2)
+          .nullable(true)
+          .add("s_zip", SqlTypeName.CHAR, 10)
+          .nullable(true)
+          .add("s_country", SqlTypeName.VARCHAR, 20)
+          .nullable(true)
+          .add("s_gmt_offset", SqlTypeName.DECIMAL, 5, 2)
+          .nullable(true)
+          .add("s_tax_percentage", SqlTypeName.DECIMAL, 5, 2)
+          .nullable(true)
+          .addAll(systemColumns(f).getFieldList())
+          .build());
+  private final RelProtoDataType protoType;
+  private final double rowCount;
+
+  TpcdsTable(double rowCount, RelProtoDataType protoType) {
+    this.protoType = protoType;
+    this.rowCount = rowCount;
+  }
+
+  public RelDataType type(RelDataTypeFactory typeFactory) {
+    return protoType.apply(typeFactory);
+  }
+
+  public double rowCount() {
+    return rowCount;
+  }
+
+  private static RelDataType systemColumns(RelDataTypeFactory factory) {
+    RelDataType rowId = factory.builder()
+        .add("writeid", SqlTypeName.BIGINT)
+        .nullable(true)
+        .add("bucketid", SqlTypeName.INTEGER)
+        .nullable(true)
+        .add("rowid", SqlTypeName.BIGINT)
+        .nullable(true)
+        .build();
+    return factory.builder()
+        .add("BLOCK__OFFSET__INSIDE__FILE", SqlTypeName.BIGINT)
+        .nullable(true)
+        .add("INPUT__FILE__NAME", SqlTypeName.VARCHAR, Integer.MAX_VALUE)
+        .nullable(true)
+        .add("ROW__ID", rowId)
+        .nullable(true)
+        .add("ROW__IS__DELETED", SqlTypeName.BOOLEAN)
+        .nullable(true)
+        .build();
+  }
+}

Reply via email to