This is an automated email from the ASF dual-hosted git repository.
caogaofei pushed a commit to branch ty/packageRefactor
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/ty/packageRefactor by this
push:
new aaded1d60d5 fix idea refactor error
new aa34908a5f8 Merge branch 'ty/packageRefactor' of
github.com:apache/iotdb into ty/packageRefactor
aaded1d60d5 is described below
commit aaded1d60d5cba72c3ba65e9eb5b5411d2449097
Author: Beyyes <[email protected]>
AuthorDate: Tue Jun 27 12:12:29 2023 +0800
fix idea refactor error
---
.../db/queryengine/common/MPPQueryContext.java | 2 +-
.../common/header/ColumnHeaderConstant.java | 8 +--
.../queryengine/common/header/DatasetHeader.java | 2 +-
.../common/schematree/ClusterSchemaTree.java | 2 +-
.../common/schematree/MeasurementSchemaInfo.java | 8 +--
.../timerangeiterator/ITimeRangeIterator.java | 2 +-
.../SingleTimeWindowIterator.java | 2 +-
.../metric/QueryExecutionMetricSet.java | 6 +--
.../iotdb/db/queryengine/plan/Coordinator.java | 2 +-
.../db/queryengine/plan/analyze/Analysis.java | 12 ++---
.../queryengine/plan/analyze/AnalyzeVisitor.java | 62 +++++++++++-----------
.../plan/analyze/ClusterPartitionFetcher.java | 4 +-
.../plan/analyze/ExpressionAnalyzer.java | 36 ++++++-------
.../plan/analyze/GroupByLevelController.java | 2 +-
.../plan/analyze/IPartitionFetcher.java | 16 +++---
.../analyze/cache/partition/PartitionCache.java | 10 ++--
.../analyze/cache/schema/DataNodeSchemaCache.java | 14 ++---
.../schema/DeviceUsingTemplateSchemaCache.java | 4 +-
.../cache/schema/TimeSeriesSchemaCache.java | 2 +-
.../schema/lastcache/DataNodeLastCacheManager.java | 6 +--
.../cache/schema/lastcache/LastCacheContainer.java | 2 +-
.../analyze/schema/AutoCreateSchemaExecutor.java | 2 +-
.../analyze/schema/ClusterSchemaFetchExecutor.java | 8 +--
.../plan/analyze/schema/ClusterSchemaFetcher.java | 18 +++----
.../plan/analyze/schema/ISchemaAutoCreation.java | 4 +-
.../plan/analyze/schema/ISchemaComputation.java | 20 +++----
.../plan/analyze/schema/ISchemaFetcher.java | 24 ++++-----
.../plan/analyze/schema/ISchemaValidation.java | 4 +-
.../plan/analyze/schema/NormalSchemaFetcher.java | 28 +++++-----
.../plan/analyze/schema/TemplateSchemaFetcher.java | 24 ++++-----
.../queryengine/plan/execution/QueryExecution.java | 30 +++++------
.../config/executor/ClusterConfigTaskExecutor.java | 18 +++----
.../visitor/ReplaceLogicalViewVisitor.java | 2 +-
.../plan/optimization/LimitOffsetPushDown.java | 4 +-
.../db/queryengine/plan/parser/ASTVisitor.java | 14 ++---
.../plan/parser/StatementGenerator.java | 8 +--
.../plan/planner/LocalExecutionPlanContext.java | 8 +--
.../plan/planner/LocalExecutionPlanner.java | 4 +-
.../plan/planner/LogicalPlanVisitor.java | 4 +-
.../queryengine/plan/planner/LogicalPlanner.java | 2 +-
.../distribution/DistributionPlanContext.java | 2 +-
.../plan/planner/distribution/SourceRewriter.java | 12 ++---
.../plan/planner/plan/LogicalQueryPlan.java | 2 +-
.../plan/planner/plan/PlanFragment.java | 2 +-
.../plan/planner/plan/node/PlanNode.java | 2 +-
.../node/metedata/read/SchemaFetchMergeNode.java | 2 +-
.../node/metedata/read/SchemaFetchScanNode.java | 2 +-
.../write/CreateAlignedTimeSeriesNode.java | 3 +-
.../planner/plan/node/process/DeviceViewNode.java | 8 +--
.../planner/plan/node/process/GroupByTagNode.java | 2 +-
.../plan/node/process/last/LastQueryNode.java | 2 +-
.../node/source/SeriesAggregationSourceNode.java | 2 +-
.../plan/scheduler/ClusterScheduler.java | 6 +--
.../scheduler/FixedRateFragInsStateTracker.java | 2 +-
.../plan/scheduler/SimpleQueryTerminator.java | 8 +--
.../plan/statement/component/OrderByComponent.java | 2 +-
.../plan/statement/component/ResultColumn.java | 2 +-
.../plan/statement/component/ResultSetFormat.java | 2 +-
.../plan/statement/crud/InsertBaseStatement.java | 2 +-
.../plan/statement/crud/QueryStatement.java | 10 ++--
.../metadata/CreateContinuousQueryStatement.java | 10 ++--
.../MultiInputColumnIntermediateLayer.java | 6 +--
...InputColumnMultiReferenceIntermediateLayer.java | 8 +--
...nputColumnSingleReferenceIntermediateLayer.java | 8 +--
.../transformer/unary/TransparentTransformer.java | 2 +-
.../row/ElasticSerializableRowRecordList.java | 2 +-
.../row/SerializableRowRecordList.java | 2 +-
.../tv/ElasticSerializableBinaryTVList.java | 2 +-
.../datastructure/tv/SerializableTVList.java | 2 +-
69 files changed, 273 insertions(+), 274 deletions(-)
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/MPPQueryContext.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/MPPQueryContext.java
index c8fb2018f5b..f6908cae4a1 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/MPPQueryContext.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/MPPQueryContext.java
@@ -26,7 +26,7 @@ import java.util.LinkedList;
import java.util.List;
/**
- * This class is used to record the context of a read including QueryId, read
statement, session
+ * This class is used to record the context of a query including QueryId,
query statement, session
* info and so on
*/
public class MPPQueryContext {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/ColumnHeaderConstant.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/ColumnHeaderConstant.java
index 7f4d2448887..66abe3abc20 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/ColumnHeaderConstant.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/ColumnHeaderConstant.java
@@ -31,13 +31,13 @@ public class ColumnHeaderConstant {
// forbidding instantiation
}
- // column names for read statement
+ // column names for query statement
public static final String TIME = "Time";
public static final String ENDTIME = "__endTime";
public static final String VALUE = "Value";
public static final String DEVICE = "Device";
- // column names for schemaengine statement
+ // column names for schema statement
public static final String DATABASE = "Database";
public static final String TIMESERIES = "Timeseries";
public static final String ALIAS = "Alias";
@@ -141,7 +141,7 @@ public class ColumnHeaderConstant {
public static final String SCHEMA_REGION_NUM = "SchemaRegionNum";
public static final String DATA_REGION_NUM = "DataRegionNum";
- // column names for show schemaengine template statement
+ // column names for show schema template statement
public static final String TEMPLATE_NAME = "TemplateName";
// column names for show pipe sink
@@ -165,7 +165,7 @@ public class ColumnHeaderConstant {
public static final String CQID = "CQId";
public static final String QUERY = "Query";
- // column names for show read processlist
+ // column names for show query processlist
public static final String QUERY_ID = "QueryId";
public static final String ELAPSED_TIME = "ElapsedTime";
public static final String STATEMENT = "Statement";
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/DatasetHeader.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/DatasetHeader.java
index 9178d8dc5c4..c4d6e740b6a 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/DatasetHeader.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/DatasetHeader.java
@@ -31,7 +31,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
-/** The header of read result dataset. */
+/** The header of query result dataset. */
public class DatasetHeader {
public static final DatasetHeader EMPTY_HEADER = new DatasetHeader(new
ArrayList<>(), false);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ClusterSchemaTree.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ClusterSchemaTree.java
index a00093eabf4..aedb70cd430 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ClusterSchemaTree.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ClusterSchemaTree.java
@@ -59,7 +59,7 @@ public class ClusterSchemaTree implements ISchemaTree {
private final SchemaNode root;
- /** a flag recording whether there is logical view in this schemaengine
tree. */
+ /** a flag recording whether there is logical view in this schema tree. */
private boolean hasLogicalMeasurementPath = false;
public ClusterSchemaTree() {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/MeasurementSchemaInfo.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/MeasurementSchemaInfo.java
index 3caedbad546..989a08dd23b 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/MeasurementSchemaInfo.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/MeasurementSchemaInfo.java
@@ -24,10 +24,10 @@ import
org.apache.iotdb.tsfile.write.schema.IMeasurementSchema;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
/**
- * This class acts as common measurement schemaengine format during system
module interactions,
- * mainly in analyzer and SchemaFetcher. Currently, this class cooperates with
DeviceSchemaInfo and
- * wraps measurement name, alias and MeasurementSchema, which are necessary to
construct schemaTree
- * for Query and Insertion.
+ * This class acts as common measurement schema format during system module
interactions, mainly in
+ * analyzer and SchemaFetcher. Currently, this class cooperates with
DeviceSchemaInfo and wraps
+ * measurement name, alias and MeasurementSchema, which are necessary to
construct schemaTree for
+ * Query and Insertion.
*/
public class MeasurementSchemaInfo implements IMeasurementSchemaInfo {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/ITimeRangeIterator.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/ITimeRangeIterator.java
index 2a35ef9caba..e33a053ce9b 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/ITimeRangeIterator.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/ITimeRangeIterator.java
@@ -22,7 +22,7 @@ package
org.apache.iotdb.db.queryengine.execution.aggregation.timerangeiterator;
import org.apache.iotdb.tsfile.read.common.TimeRange;
/**
- * This interface used for iteratively generating aggregated time windows in
GROUP BY read.
+ * This interface used for iteratively generating aggregated time windows in
GROUP BY query.
*
* <p>It will return a leftCloseRightClose time window, by decreasing maxTime
if leftCloseRightOpen
* and increasing minTime if leftOpenRightClose.
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/SingleTimeWindowIterator.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/SingleTimeWindowIterator.java
index 073dd5823a0..9137abb3794 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/SingleTimeWindowIterator.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/SingleTimeWindowIterator.java
@@ -24,7 +24,7 @@ import org.apache.iotdb.tsfile.read.common.TimeRange;
/** Used for aggregation with only one time window. i.e. Aggregation without
group by. */
public class SingleTimeWindowIterator implements ITimeRangeIterator {
- // total read [startTime, endTime)
+ // total query [startTime, endTime)
private final long startTime;
private final long endTime;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/QueryExecutionMetricSet.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/QueryExecutionMetricSet.java
index 6a4bf3e7006..91d7857aa58 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/QueryExecutionMetricSet.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/QueryExecutionMetricSet.java
@@ -38,7 +38,7 @@ public class QueryExecutionMetricSet implements IMetricSet {
// empty constructor
}
- // region read dispatch
+ // region query dispatch
public static final String WAIT_FOR_DISPATCH = "wait_for_dispatch";
public static final String DISPATCH_READ = "dispatch_read";
private Timer waitForDispatchTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
@@ -71,7 +71,7 @@ public class QueryExecutionMetricSet implements IMetricSet {
}
// endregion
- // region read execution
+ // region query execution
public static final String LOCAL_EXECUTION_PLANNER =
"local_execution_planner";
public static final String QUERY_RESOURCE_INIT = "query_resource_init";
public static final String GET_QUERY_RESOURCE_FROM_MEM =
"get_query_resource_from_mem";
@@ -137,7 +137,7 @@ public class QueryExecutionMetricSet implements IMetricSet {
}
// endregion
- // region read aggregation
+ // region query aggregation
public static final String AGGREGATION_FROM_RAW_DATA =
"aggregation_from_raw_data";
public static final String AGGREGATION_FROM_STATISTICS =
"aggregation_from_statistics";
private Timer aggregationFromRawDataTimer =
DoNothingMetricManager.DO_NOTHING_TIMER;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java
index 0e2406d94ee..042a1527be1 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java
@@ -55,7 +55,7 @@ import java.util.concurrent.ScheduledExecutorService;
/**
* The coordinator for MPP. It manages all the queries which are executed in
current Node. And it
- * will be responsible for the lifecycle of a read. A read request will be
represented as a
+ * will be responsible for the lifecycle of a query. A query request will be
represented as a
* QueryExecution.
*/
public class Coordinator {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java
index 3bda71bb662..60947aee37a 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java
@@ -56,7 +56,7 @@ import java.util.Set;
import static com.google.common.base.Preconditions.checkArgument;
-/** Analysis used for planning a read. TODO: This class may need to store more
info for a read. */
+/** Analysis used for planning a query. TODO: This class may need to store
more info for a query. */
public class Analysis {
/////////////////////////////////////////////////////////////////////////////////////////////////
@@ -112,7 +112,7 @@ public class Analysis {
tag values -> (grouped expression -> output expressions)
For different combination of tag keys, the grouped expression may be
different. Let's say there
are 3 timeseries root.sg.d1.temperature, root.sg.d1.status,
root.sg.d2.temperature, and their
- tags are [k1=v1], [k1=v1] and [k1=v2] respectively. For read "SELECT
last_value(**) FROM root
+ tags are [k1=v1], [k1=v1] and [k1=v2] respectively. For query "SELECT
last_value(**) FROM root
GROUP BY k1", timeseries are grouped by their tags into 2 buckets. Bucket
[v1] has
[root.sg.d1.temperature, root.sg.d1.status], while bucket [v2] has
[root.sg.d2.temperature].
Thus, the aggregation results of bucket [v1] and [v2] are different. Bucket
[v1] has 2
@@ -132,7 +132,7 @@ public class Analysis {
// input expressions of aggregations to be calculated
private Map<String, Set<Expression>> deviceToSourceTransformExpressions;
- // map from device name to read filter under this device
+ // map from device name to query filter under this device
private Map<String, Expression> deviceToWhereExpression;
// all aggregations that need to be calculated
@@ -150,7 +150,7 @@ public class Analysis {
// the sortItems used in order by push down of align by device
private Map<String, List<SortItem>> deviceToSortItems;
- // e.g. [s1,s2,s3] is read, but [s1, s3] exists in device1, then device1 ->
[1, 3], s1 is 1 but
+ // e.g. [s1,s2,s3] is query, but [s1, s3] exists in device1, then device1 ->
[1, 3], s1 is 1 but
// not 0 because device is the first column
private Map<String, List<Integer>> deviceViewInputIndexesMap;
@@ -195,7 +195,7 @@ public class Analysis {
private OrderByParameter mergeOrderParameter;
- // This field will be set and used when the order by in last read only
indicates the ordering of
+ // This field will be set and used when the order by in last query only
indicates the ordering of
// timeseries, otherwise it will be null
private Ordering timeseriesOrderingForLastQuery = null;
@@ -228,7 +228,7 @@ public class Analysis {
// devicePath -> <template, paths set template>
private Map<PartialPath, Pair<Template, PartialPath>>
deviceTemplateSetInfoMap;
- // potential template used in timeseries read or fetch
+ // potential template used in timeseries query or fetch
private Map<Integer, Template> relatedTemplateInfo;
// generated by combine the input path pattern and template set path
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java
index 35723547f1b..85be6a86b4a 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java
@@ -253,7 +253,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
(QueryStatement) new ConcatPathRewriter().rewrite(queryStatement,
patternTree);
analysis.setStatement(queryStatement);
- // request schemaengine fetch API
+ // request schema fetch API
long startTime = System.nanoTime();
ISchemaTree schemaTree;
try {
@@ -263,7 +263,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
} else {
schemaTree = schemaFetcher.fetchSchema(patternTree, context);
}
- // If there is no leaf node in the schemaengine tree, the read should
be completed
+ // If there is no leaf node in the schema tree, the query should be
completed
// immediately
if (schemaTree.isEmpty()) {
return finishQuery(queryStatement, analysis);
@@ -273,10 +273,10 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
updateSchemaTreeByViews(analysis, schemaTree);
if (analysis.useLogicalView()) {
if (queryStatement.isAlignByDevice()) {
- throw new SemanticException("Views cannot be used in ALIGN BY
DEVICE read yet.");
+ throw new SemanticException("Views cannot be used in ALIGN BY
DEVICE query yet.");
}
if (queryStatement.isGroupByTag()) {
- throw new SemanticException("Views cannot be used in GROUP BY TAGS
read yet.");
+ throw new SemanticException("Views cannot be used in GROUP BY TAGS
query yet.");
}
}
} finally {
@@ -286,12 +286,12 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
analysis.setSchemaTree(schemaTree);
- // extract global time filter from read filter and determine if there is
a value filter
+ // extract global time filter from query filter and determine if there
is a value filter
analyzeGlobalTimeFilter(analysis, queryStatement);
if (queryStatement.isLastQuery()) {
if (analysis.hasValueFilter()) {
- throw new SemanticException("Only time filters are supported in LAST
read");
+ throw new SemanticException("Only time filters are supported in LAST
query");
}
analyzeOrderBy(analysis, queryStatement);
@@ -382,7 +382,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
} catch (StatementAnalyzeException e) {
throw new StatementAnalyzeException(
- "Meet error when analyzing the read statement: " + e.getMessage());
+ "Meet error when analyzing the query statement: " + e.getMessage());
}
return analysis;
}
@@ -449,7 +449,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
ExpressionAnalyzer.bindSchemaForExpression(selectExpression,
schemaTree)) {
if (!(sourceExpression instanceof TimeSeriesOperand)) {
throw new SemanticException(
- "Views with functions and expressions cannot be used in LAST
read");
+ "Views with functions and expressions cannot be used in LAST
query");
}
sourceExpressions.add(sourceExpression);
}
@@ -869,7 +869,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
/**
- * This method is used to analyze GROUP BY TAGS read.
+ * This method is used to analyze GROUP BY TAGS query.
*
* <p>TODO: support slimit/soffset/value filter
*/
@@ -881,7 +881,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
return;
}
if (analysis.hasValueFilter()) {
- throw new SemanticException("Only time filters are supported in GROUP BY
TAGS read");
+ throw new SemanticException("Only time filters are supported in GROUP BY
TAGS query");
}
List<String> tagKeys =
queryStatement.getGroupByTagComponent().getTagKeys();
@@ -1305,7 +1305,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
analysis.setRespDatasetHeader(new DatasetHeader(columnHeaders,
isIgnoreTimestamp));
}
- // For last read
+ // For last query
private void analyzeOrderBy(Analysis analysis, QueryStatement
queryStatement) {
if (!queryStatement.hasOrderBy()) return;
@@ -1319,7 +1319,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
if (!lastQueryColumnNames.contains(sortKey.toUpperCase())) {
throw new SemanticException(
String.format(
- "%s in order by clause doesn't exist in the result of last
read.", sortKey));
+ "%s in order by clause doesn't exist in the result of last
query.", sortKey));
}
}
}
@@ -1611,7 +1611,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
if (!queryStatement.isCqQueryBody()
&& (groupByTimeComponent.getStartTime() == 0 &&
groupByTimeComponent.getEndTime() == 0)) {
throw new SemanticException(
- "The read time range should be specified in the GROUP BY TIME
clause.");
+ "The query time range should be specified in the GROUP BY TIME
clause.");
}
analysis.setGroupByTimeParameter(new
GroupByTimeParameter(groupByTimeComponent));
}
@@ -1813,7 +1813,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
deviceViewIntoPathDescriptor.validate();
- // fetch schemaengine of target paths
+ // fetch schema of target paths
long startTime = System.nanoTime();
ISchemaTree targetSchemaTree = schemaFetcher.fetchSchema(targetPathTree,
null);
QueryPlanCostMetricSet.getInstance()
@@ -1867,7 +1867,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
intoPathDescriptor.validate();
- // fetch schemaengine of target paths
+ // fetch schema of target paths
long startTime = System.nanoTime();
ISchemaTree targetSchemaTree = schemaFetcher.fetchSchema(targetPathTree,
null);
updateSchemaTreeByViews(analysis, targetSchemaTree);
@@ -2392,7 +2392,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
autoCreateAndVerifySchema(loadTsFileStatement, device2Schemas,
device2IsAligned);
- // load function will read data partition in scheduler
+ // load function will query data partition in scheduler
Analysis analysis = new Analysis();
analysis.setStatement(loadTsFileStatement);
return analysis;
@@ -2416,16 +2416,16 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
ISchemaTree schemaTree =
autoCreateSchema(
device2Schemas,
- device2IsAligned); // schemaengine fetcher will not auto create
if config set
+ device2IsAligned); // schema fetcher will not auto create if
config set
// isAutoCreateSchemaEnabled is false.
if (loadTsFileStatement.isVerifySchema()) {
verifySchema(schemaTree, device2Schemas, device2IsAligned);
}
} catch (Exception e) {
- logger.warn("Auto create or verify schemaengine error.", e);
+ logger.warn("Auto create or verify schema error.", e);
throw new SemanticException(
String.format(
- "Auto create or verify schemaengine error when executing
statement %s.",
+ "Auto create or verify schema error when executing statement
%s.",
loadTsFileStatement));
} finally {
device2Schemas.clear();
@@ -2462,7 +2462,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
if (IoTDBDescriptor.getInstance().getConfig().isAutoCreateSchemaEnabled()
|| statement.isVerifySchema()) {
- // construct schemaengine
+ // construct schema
for (Map.Entry<String, List<TimeseriesMetadata>> entry :
device2Metadata.entrySet()) {
String device = entry.getKey();
List<TimeseriesMetadata> timeseriesMetadataList = entry.getValue();
@@ -2706,7 +2706,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
if (showTimeSeriesStatement.isOrderByHeat()) {
patternTree.constructTree();
- // request schemaengine fetch API
+ // request schema fetch API
logger.debug("[StartFetchSchema]");
ISchemaTree schemaTree = schemaFetcher.fetchSchema(patternTree, context);
updateSchemaTreeByViews(analysis, schemaTree);
@@ -3280,7 +3280,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
// region view
/**
- * Compute how many paths exist, get the schemaengine tree and the number of
existed paths.
+ * Compute how many paths exist, get the schema tree and the number of
existed paths.
*
* @return a pair of ISchemaTree, and the number of exist paths.
*/
@@ -3288,7 +3288,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
List<PartialPath> pathList, Analysis analysis, MPPQueryContext context) {
ISchemaTree schemaTree = analysis.getSchemaTree();
if (schemaTree == null) {
- // source is not represented by read, thus has not done fetch
schemaengine.
+ // source is not represented by query, thus has not done fetch schema.
PathPatternTree pathPatternTree = new PathPatternTree();
for (PartialPath path : pathList) {
pathPatternTree.appendPathPattern(path);
@@ -3307,9 +3307,9 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
/**
* @param pathList the paths you want to check
- * @param schemaTree the given schemaengine tree
- * @return if all paths you give can be found in schemaengine tree, return a
pair of view paths
- * and null; else return view paths and the non-exist path.
+ * @param schemaTree the given schema tree
+ * @return if all paths you give can be found in schema tree, return a pair
of view paths and
+ * null; else return view paths and the non-exist path.
*/
private Pair<List<PartialPath>, PartialPath> findAllViewsInPaths(
List<PartialPath> pathList, ISchemaTree schemaTree) {
@@ -3345,7 +3345,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
analysis.setFailStatus(
RpcUtils.getStatus(
TSStatusCode.UNSUPPORTED_OPERATION.getStatusCode(),
- "Columns in the read statement is empty. Please check your
SQL."));
+ "Columns in the query statement is empty. Please check your
SQL."));
return new Pair<>(null, analysis);
}
if (queryAnalysis.useLogicalView()) {
@@ -3353,7 +3353,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
analysis.setFailStatus(
RpcUtils.getStatus(
TSStatusCode.UNSUPPORTED_OPERATION.getStatusCode(),
- "Can not create a view based on existing views. Check the read
in your SQL."));
+ "Can not create a view based on existing views. Check the query
in your SQL."));
return new Pair<>(null, analysis);
}
List<Expression> expressionList = new ArrayList<>();
@@ -3374,7 +3374,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
Pair<ISchemaTree, Integer> schemaOfNeedToCheck =
fetchSchemaOfPathsAndCount(pathsNeedCheck, analysis, context);
if (schemaOfNeedToCheck.right != pathsNeedCheck.size()) {
- // some source paths is not exist, and could not fetch schemaengine.
+ // some source paths is not exist, and could not fetch schema.
analysis.setFinishQueryAfterAnalyze(true);
analysis.setFailStatus(
RpcUtils.getStatus(
@@ -3465,7 +3465,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
analysis.setStatement(createLogicalViewStatement);
if (createLogicalViewStatement.getViewExpression() == null) {
- // analyze read in statement
+ // analyze query in statement
QueryStatement queryStatement =
createLogicalViewStatement.getQueryStatement();
if (queryStatement != null) {
Pair<List<Expression>, Analysis> queryAnalysisPair =
@@ -3500,7 +3500,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
return analysis;
}
- // set schemaengine partition info, this info will be used to split
logical plan node.
+ // set schema partition info, this info will be used to split logical plan
node.
PathPatternTree patternTree = new PathPatternTree();
for (PartialPath thisFullPath :
createLogicalViewStatement.getTargetPathList()) {
patternTree.appendFullPath(thisFullPath);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java
index f174c5f7851..c13d30e780c 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java
@@ -293,7 +293,7 @@ public class ClusterPartitionFetcher implements
IPartitionFetcher {
partitionCache.invalidAllCache();
}
- /** split data partition read param by database */
+ /** split data partition query param by database */
private Map<String, List<DataPartitionQueryParam>>
splitDataPartitionQueryParam(
List<DataPartitionQueryParam> dataPartitionQueryParams, boolean
isAutoCreate) {
List<String> devicePaths = new ArrayList<>();
@@ -393,7 +393,7 @@ public class ClusterPartitionFetcher implements
IPartitionFetcher {
return new TDataPartitionReq(partitionSlotsMap);
}
- /** For read, DataPartitionQueryParam is shared by each device */
+ /** For query, DataPartitionQueryParam is shared by each device */
private TDataPartitionReq constructDataPartitionReqForQuery(
Map<String, List<DataPartitionQueryParam>> sgNameToQueryParamsMap) {
Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap =
new HashMap<>();
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionAnalyzer.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionAnalyzer.java
index d35d154e575..98a58242a8e 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionAnalyzer.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionAnalyzer.java
@@ -88,7 +88,7 @@ public class ExpressionAnalyzer {
/**
* Check if all suffix paths in expression are measurements or one-level
wildcards, used in ALIGN
- * BY DEVICE read or GroupByLevel read. If not, throw a {@link
SemanticException}.
+ * BY DEVICE query or GroupByLevel query. If not, throw a {@link
SemanticException}.
*
* @param expression expression to be checked
*/
@@ -270,7 +270,7 @@ public class ExpressionAnalyzer {
*
* @param expression expression in SELECT or WITHOUT NULL clause which may
include suffix paths
* @param prefixPaths prefix paths in the FROM clause
- * @param patternTree a PathPatternTree contains all paths to read
+ * @param patternTree a PathPatternTree contains all paths to query
* @return the concatenated expression list
*/
public static List<Expression> concatExpressionWithSuffixPaths(
@@ -358,7 +358,7 @@ public class ExpressionAnalyzer {
*
* @param predicate expression in WHERE clause
* @param prefixPaths prefix paths in the FROM clause
- * @param patternTree a PathPatternTree contains all paths to read
+ * @param patternTree a PathPatternTree contains all paths to query
*/
public static void constructPatternTreeFromExpression(
Expression predicate, List<PartialPath> prefixPaths, PathPatternTree
patternTree) {
@@ -407,11 +407,11 @@ public class ExpressionAnalyzer {
}
/**
- * Bind schemaengine ({@link PartialPath} -> {@link MeasurementPath}) and
removes wildcards in
+ * Bind schema ({@link PartialPath} -> {@link MeasurementPath}) and removes
wildcards in
* Expression. And all logical view will be replaced.
*
- * @param schemaTree interface for querying schemaengine information
- * @return the expression list after binding schemaengine and whether there
is logical view in
+ * @param schemaTree interface for querying schema information
+ * @return the expression list after binding schema and whether there is
logical view in
* expressions
*/
public static List<Expression> bindSchemaForExpression(
@@ -421,12 +421,12 @@ public class ExpressionAnalyzer {
/**
* Concat suffix path in WHERE and HAVING clause with the prefix path in the
FROM clause. And
- * then, bind schemaengine ({@link PartialPath} -> {@link MeasurementPath})
and removes wildcards
- * in Expression. Logical view will be replaced.
+ * then, bind schema ({@link PartialPath} -> {@link MeasurementPath}) and
removes wildcards in
+ * Expression. Logical view will be replaced.
*
* @param prefixPaths prefix paths in the FROM clause
- * @param schemaTree interface for querying schemaengine information
- * @return the expression list with full path and after binding schemaengine
+ * @param schemaTree interface for querying schema information
+ * @return the expression list with full path and after binding schema
*/
public static List<Expression> bindSchemaForPredicate(
Expression predicate, List<PartialPath> prefixPaths, ISchemaTree
schemaTree, boolean isRoot) {
@@ -442,12 +442,12 @@ public class ExpressionAnalyzer {
}
/**
- * Concat expression with the device path in the FROM clause.And then, bind
schemaengine ({@link
+ * Concat expression with the device path in the FROM clause. And then, bind
schema ({@link
* PartialPath} -> {@link MeasurementPath}) and removes wildcards in
Expression. This method used
- * in ALIGN BY DEVICE read.
+ * in ALIGN BY DEVICE query.
*
* @param devicePath device path in the FROM clause
- * @return expression list with full path and after binding schemaengine
+ * @return expression list with full path and after binding schema
*/
public static List<Expression> concatDeviceAndBindSchemaForExpression(
Expression expression, PartialPath devicePath, ISchemaTree schemaTree) {
@@ -458,10 +458,10 @@ public class ExpressionAnalyzer {
}
/**
- * Concat measurement in WHERE and HAVING clause with device path. And then,
bind schemaengine
- * ({@link PartialPath} -> {@link MeasurementPath}) and removes wildcards.
+ * Concat measurement in WHERE and HAVING clause with device path. And then,
bind schema ({@link
+ * PartialPath} -> {@link MeasurementPath}) and removes wildcards.
*
- * @return the expression list with full path and after binding schemaengine
+ * @return the expression list with full path and after binding schema
*/
public static List<Expression> concatDeviceAndBindSchemaForPredicate(
Expression predicate, PartialPath devicePath, ISchemaTree schemaTree,
boolean isWhere) {
@@ -473,9 +473,9 @@ public class ExpressionAnalyzer {
}
/**
- * Extract global time filter from read filter.
+ * Extract global time filter from query filter.
*
- * @param predicate raw read filter
+ * @param predicate raw query filter
* @param canRewrite determined by the father of current expression
* @param isFirstOr whether it is the first LogicOrExpression encountered
* @return global time filter
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/GroupByLevelController.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/GroupByLevelController.java
index 99ac9a87c2f..e63c87c5fac 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/GroupByLevelController.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/GroupByLevelController.java
@@ -39,7 +39,7 @@ import java.util.stream.Collectors;
import static com.google.common.base.Preconditions.checkState;
/**
- * This class is used to control the row number of group by level read. For
example, selected
+ * This class is used to control the row number of group by level query. For
example, selected
* series[root.sg.d1.s1, root.sg.d2.s1, root.sg2.d1.s1], level = 1; the result
rows will be
* [root.sg.*.s1, root.sg2.*.s1], sLimit and sOffset will be used to control
the result numbers
* rather than the selected series.
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IPartitionFetcher.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IPartitionFetcher.java
index 06bd7d3b061..8930dccb21d 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IPartitionFetcher.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IPartitionFetcher.java
@@ -31,25 +31,25 @@ import java.util.Map;
public interface IPartitionFetcher {
- /** Get schemaengine partition without automatically create, used in write
and read scenarios. */
+ /** Get schema partition without automatically create, used in write and
query scenarios. */
SchemaPartition getSchemaPartition(PathPatternTree patternTree);
/**
- * Get or create schemaengine partition, used in insertion with
enable_auto_create_schema is true.
- * if schemaPartition does not exist, then automatically create.
+ * Get or create schema partition, used in insertion when
enable_auto_create_schema is true. If
+ * schemaPartition does not exist, then automatically create it.
*/
SchemaPartition getOrCreateSchemaPartition(PathPatternTree patternTree);
/**
- * Get data partition, used in read scenarios.
+ * Get data partition, used in query scenarios.
*
* @param sgNameToQueryParamsMap database name -> the list of
DataPartitionQueryParams
*/
DataPartition getDataPartition(Map<String, List<DataPartitionQueryParam>>
sgNameToQueryParamsMap);
/**
- * Get data partition, used in read scenarios which contains time filter
like: time < XX or time >
- * XX
+ * Get data partition, used in query scenarios which contain a time filter
like: time < XX or time
+ * > XX
*
* @return sgNameToQueryParamsMap database name -> the list of
DataPartitionQueryParams
*/
@@ -73,13 +73,13 @@ public interface IPartitionFetcher {
*/
DataPartition getOrCreateDataPartition(List<DataPartitionQueryParam>
dataPartitionQueryParams);
- /** Get schemaengine partition and matched nodes according to path pattern
tree. */
+ /** Get schema partition and matched nodes according to path pattern tree. */
default SchemaNodeManagementPartition getSchemaNodeManagementPartition(
PathPatternTree patternTree) {
return getSchemaNodeManagementPartitionWithLevel(patternTree, null);
}
- /** Get schemaengine partition and matched nodes according to path pattern
tree and node level. */
+ /** Get schema partition and matched nodes according to path pattern tree
and node level. */
SchemaNodeManagementPartition getSchemaNodeManagementPartitionWithLevel(
PathPatternTree patternTree, Integer level);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java
index 27b219bc418..478033b335e 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java
@@ -471,7 +471,7 @@ public class PartitionCache {
// endregion
- // region schemaengine partition cache
+ // region schema partition cache
/**
* get schemaPartition
@@ -589,7 +589,7 @@ public class PartitionCache {
// region data partition cache
/**
- * get dataPartition by read param map
+ * get dataPartition by query param map
*
* @param storageGroupToQueryParamsMap database to dataPartitionQueryParam
map
* @return DataPartition of storageGroupToQueryParamsMap
@@ -628,7 +628,7 @@ public class PartitionCache {
*
* @param dataPartitionMap result
* @param storageGroupName database that need to get
- * @param dataPartitionQueryParams specific read params of data partition
+ * @param dataPartitionQueryParams specific query params of data partition
* @return whether hit
*/
private boolean getStorageGroupDataPartition(
@@ -663,7 +663,7 @@ public class PartitionCache {
* get dataPartition from device
*
* @param seriesSlotToTimePartitionMap result
- * @param dataPartitionQueryParam specific read param of data partition
+ * @param dataPartitionQueryParam specific query param of data partition
* @param cachedStorageGroupPartitionMap all cached data partition map of
related database
* @return whether hit
*/
@@ -694,7 +694,7 @@ public class PartitionCache {
cachedSeriesPartitionTable.getSeriesPartitionMap();
Map<TTimePartitionSlot, List<TRegionReplicaSet>> timePartitionSlotListMap =
seriesSlotToTimePartitionMap.computeIfAbsent(seriesPartitionSlot, k ->
new HashMap<>());
- // Notice: when read all time partition, then miss
+ // Notice: when querying all time partitions (empty time partition slot list), treat it as a miss
if (dataPartitionQueryParam.getTimePartitionSlotList().isEmpty()) {
return false;
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DataNodeSchemaCache.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DataNodeSchemaCache.java
index fec04efd9bc..90cebfee2bf 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DataNodeSchemaCache.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DataNodeSchemaCache.java
@@ -104,7 +104,7 @@ public class DataNodeSchemaCache {
}
/**
- * Get SchemaEntity info without auto create schemaengine
+ * Get SchemaEntity info without auto create schema
*
* @param devicePath should not be measurementPath or AlignedPath
* @param measurements
@@ -134,10 +134,10 @@ public class DataNodeSchemaCache {
}
/**
- * This function is used to process logical view schemaengine list in
statement. It will try to
- * find the source paths of those views in cache. If it found sources,
measurement schemas of
- * sources will be recorded in measurement schemaengine list; else the views
will be recorded as
- * missed. The indexes of missed views and full paths of their source paths
will be returned.
+ * This function is used to process logical view schema list in statement.
It will try to find the
+ * source paths of those views in cache. If it found sources, measurement
schemas of sources will
+ * be recorded in measurement schema list; else the views will be recorded
as missed. The indexes
+ * of missed views and full paths of their source paths will be returned.
*
* @param schemaComputation the statement you want to process
* @return The indexes of missed views and full paths of their source paths
will be returned.
@@ -155,8 +155,8 @@ public class DataNodeSchemaCache {
}
/**
- * Store the fetched schemaengine in either the schemaCache or
templateSchemaCache, depending on
- * its associated device.
+ * Store the fetched schema in either the schemaCache or
templateSchemaCache, depending on its
+ * associated device.
*/
public void put(ClusterSchemaTree tree) {
Optional<Pair<Template, ?>> templateInfo;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DeviceUsingTemplateSchemaCache.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DeviceUsingTemplateSchemaCache.java
index 4b1849bea01..ac62c1d1331 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DeviceUsingTemplateSchemaCache.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DeviceUsingTemplateSchemaCache.java
@@ -126,8 +126,8 @@ public class DeviceUsingTemplateSchemaCache {
/**
* CONFORM indicates that the provided devicePath had been cached as a
template activated path,
- * ensuring that the alignment of the device, as well as the name and
schemaengine of every
- * measurement are consistent with the cache.
+ * ensuring that the alignment of the device, as well as the name and schema
of every measurement
+ * are consistent with the cache.
*
* @param computation
* @return true if conform to template cache, which means no need to fetch
or create anymore
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/TimeSeriesSchemaCache.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/TimeSeriesSchemaCache.java
index 1e9480d93a8..4e3edadac6b 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/TimeSeriesSchemaCache.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/TimeSeriesSchemaCache.java
@@ -77,7 +77,7 @@ public class TimeSeriesSchemaCache {
}
/**
- * Get SchemaEntity info without auto create schemaengine
+ * Get SchemaEntity info without auto create schema
*
* @param devicePath should not be measurementPath or AlignedPath
* @param measurements
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/lastcache/DataNodeLastCacheManager.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/lastcache/DataNodeLastCacheManager.java
index c9352d06dd0..bc1551236fc 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/lastcache/DataNodeLastCacheManager.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/lastcache/DataNodeLastCacheManager.java
@@ -35,7 +35,7 @@ public class DataNodeLastCacheManager {
/**
* get the last cache value from time series
*
- * @param entry schemaengine cache entry in DataNodeSchemaCache
+ * @param entry schema cache entry in DataNodeSchemaCache
* @return the last cache value
*/
public static TimeValuePair getLastCache(SchemaCacheEntry entry) {
@@ -49,7 +49,7 @@ public class DataNodeLastCacheManager {
/**
* update the last cache value of time series
*
- * @param entry schemaengine cache entry in DataNodeSchemaCache
+ * @param entry schema cache entry in DataNodeSchemaCache
* @param timeValuePair the latest point value
* @param highPriorityUpdate the last value from insertPlan is high priority
* @param latestFlushedTime latest flushed time
@@ -69,7 +69,7 @@ public class DataNodeLastCacheManager {
/**
* reset the last cache value of time series
*
- * @param entry schemaengine cache entry in DataNodeSchemaCache
+ * @param entry schema cache entry in DataNodeSchemaCache
*/
public static void resetLastCache(SchemaCacheEntry entry) {
if (!CACHE_ENABLED || null == entry) {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/lastcache/LastCacheContainer.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/lastcache/LastCacheContainer.java
index 40c91393b21..c3d2c73d207 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/lastcache/LastCacheContainer.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/lastcache/LastCacheContainer.java
@@ -46,7 +46,7 @@ public class LastCacheContainer implements
ILastCacheContainer {
}
if (lastCacheValue == null) {
- // If no cached last, (1) a last read (2) an unseq insertion or (3) a
seq insertion will
+ // If no cached last, (1) a last query, (2) an unseq insertion, or (3) a
seq insertion will
// update cache.
if (!highPriorityUpdate || latestFlushedTime <=
timeValuePair.getTimestamp()) {
lastCacheValue = new LastCacheValue(timeValuePair.getTimestamp(),
timeValuePair.getValue());
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/AutoCreateSchemaExecutor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/AutoCreateSchemaExecutor.java
index a31fcfe0cd6..b4207af4f2f 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/AutoCreateSchemaExecutor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/AutoCreateSchemaExecutor.java
@@ -407,7 +407,7 @@ class AutoCreateSchemaExecutor {
}
}
- // try to create the target timeseries and merge schemaengine of
successfully created
+ // try to create the target timeseries and merge schema of successfully
created
// timeseries and existing timeseries into given schemaTree
private void internalCreateTimeSeries(
ClusterSchemaTree schemaTree,
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetchExecutor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetchExecutor.java
index 0d5e367bc04..1056d121c96 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetchExecutor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetchExecutor.java
@@ -90,7 +90,7 @@ class ClusterSchemaFetchExecutor {
*
* @param fullPathList all the fullPath without wildcard split from
rawPatternTree
* @param rawPatternTree the pattern tree consisting of the fullPathList
- * @return fetched schemaengine
+ * @return fetched schema
*/
ClusterSchemaTree fetchSchemaOfPreciseMatchOrPreciseDeviceUsingTemplate(
List<PartialPath> fullPathList, PathPatternTree rawPatternTree) {
@@ -176,14 +176,14 @@ class ClusterSchemaFetchExecutor {
if (executionResult.status.getCode() !=
TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new RuntimeException(
String.format(
- "cannot fetch schemaengine, status is: %s, msg is: %s",
+ "cannot fetch schema, status is: %s, msg is: %s",
executionResult.status.getCode(),
executionResult.status.getMessage()));
}
try (SetThreadName threadName = new
SetThreadName(executionResult.queryId.getId())) {
ClusterSchemaTree result = new ClusterSchemaTree();
Set<String> databaseSet = new HashSet<>();
while (coordinator.getQueryExecution(queryId).hasNextResult()) {
- // The read will be transited to FINISHED when invoking
getBatchResult() at the last time
+ // The query will be transited to FINISHED when invoking
getBatchResult() at the last time
// So we don't need to clean up it manually
Optional<TsBlock> tsBlock;
try {
@@ -225,7 +225,7 @@ class ClusterSchemaFetchExecutor {
resultSchemaTree.mergeSchemaTree(ClusterSchemaTree.deserialize(inputStream));
} else {
throw new RuntimeException(
- new MetadataException("Failed to fetch schemaengine because of
unrecognized data"));
+ new MetadataException("Failed to fetch schema because of
unrecognized data"));
}
} catch (Exception e) {
throw new RuntimeException(e);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java
index 390fdcdb8c5..d2ff9293fef 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java
@@ -125,7 +125,7 @@ public class ClusterSchemaFetcher implements ISchemaFetcher
{
return clusterSchemaFetchExecutor.fetchSchemaOfFuzzyMatch(patternTree,
false);
}
- // The schemaengine cache R/W and fetch operation must be locked together
thus the cache clean
+ // The schema cache R/W and fetch operation must be locked together thus
the cache clean
// operation executed by delete timeseries will be effective.
schemaCache.takeReadLock();
try {
@@ -182,7 +182,7 @@ public class ClusterSchemaFetcher implements ISchemaFetcher
{
@Override
public void fetchAndComputeSchemaWithAutoCreate(
ISchemaComputationWithAutoCreation schemaComputationWithAutoCreation) {
- // The schemaengine cache R/W and fetch operation must be locked together
thus the cache clean
+ // The schema cache R/W and fetch operation must be locked together thus
the cache clean
// operation executed by delete timeseries will be effective.
schemaCache.takeReadLock();
try {
@@ -200,12 +200,12 @@ public class ClusterSchemaFetcher implements
ISchemaFetcher {
templateSetInfo, schemaComputationWithAutoCreation);
}
- // all schemaengine has been taken and processed
+ // all schema has been taken and processed
if (indexOfMissingMeasurements.isEmpty()) {
return;
}
- // offer null for the rest missing schemaengine processing
+ // offer null for the rest missing schema processing
for (int index : indexOfMissingMeasurements) {
schemaComputationWithAutoCreation.computeMeasurement(index, null);
}
@@ -217,7 +217,7 @@ public class ClusterSchemaFetcher implements ISchemaFetcher
{
@Override
public void fetchAndComputeSchemaWithAutoCreate(
List<? extends ISchemaComputationWithAutoCreation>
schemaComputationWithAutoCreationList) {
- // The schemaengine cache R/W and fetch operation must be locked together
thus the cache clean
+ // The schema cache R/W and fetch operation must be locked together thus
the cache clean
// operation executed by delete timeseries will be effective.
schemaCache.takeReadLock();
try {
@@ -258,7 +258,7 @@ public class ClusterSchemaFetcher implements ISchemaFetcher
{
List<TSEncoding[]> encodingsList,
List<CompressionType[]> compressionTypesList,
List<Boolean> isAlignedList) {
- // The schemaengine cache R/W and fetch operation must be locked together
thus the cache clean
+ // The schema cache R/W and fetch operation must be locked together thus
the cache clean
// operation executed by delete timeseries will be effective.
schemaCache.takeReadLock();
try {
@@ -275,12 +275,12 @@ public class ClusterSchemaFetcher implements
ISchemaFetcher {
}
}
- // all schemaengine can be taken from cache
+ // all schema can be taken from cache
if (indexOfDevicesWithMissingMeasurements.isEmpty()) {
return schemaTree;
}
- // try fetch the missing schemaengine from remote and cache fetched
schemaengine
+ // try fetch the missing schema from remote and cache fetched schema
ClusterSchemaTree remoteSchemaTree =
clusterSchemaFetchExecutor.fetchSchemaOfMultiDevices(
devicePathList,
@@ -295,7 +295,7 @@ public class ClusterSchemaFetcher implements ISchemaFetcher
{
return schemaTree;
}
- // auto create the still missing schemaengine and merge them into
schemaTree
+ // auto create the still missing schema and merge them into schemaTree
List<Integer> indexOfDevicesNeedAutoCreateSchema = new ArrayList<>();
List<List<Integer>> indexOfMeasurementsNeedAutoCreate = new
ArrayList<>();
List<Integer> indexOfMissingMeasurements;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaAutoCreation.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaAutoCreation.java
index da14c96f29b..9aca5dd3fbe 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaAutoCreation.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaAutoCreation.java
@@ -25,8 +25,8 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
/**
- * This interface defines the required info provided for schemaengine auto
creation, which is
- * executed schemaengine fetcher.
+ * This interface defines the required info provided for schema auto creation,
which is executed
+ * by the schema fetcher.
*/
public interface ISchemaAutoCreation {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaComputation.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaComputation.java
index 98134d0544f..981ac9cffa5 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaComputation.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaComputation.java
@@ -27,8 +27,8 @@ import org.apache.iotdb.tsfile.utils.Pair;
import java.util.List;
/**
- * This interface defines the required behaviour invoked during schemaengine
fetch/computation,
- * which is executed by schemaengine fetcher.
+ * This interface defines the required behaviour invoked during schema
fetch/computation, which is
+ * executed by schema fetcher.
*/
public interface ISchemaComputation {
@@ -41,7 +41,7 @@ public interface ISchemaComputation {
/**
* @param index the index of fetched measurement in array returned by
getMeasurements
- * @param measurementSchemaInfo the measurement schemaengine of fetched
measurement
+ * @param measurementSchemaInfo the measurement schema of fetched measurement
*/
void computeMeasurement(int index, IMeasurementSchemaInfo
measurementSchemaInfo);
@@ -49,8 +49,8 @@ public interface ISchemaComputation {
boolean hasLogicalViewNeedProcess();
/**
- * @return the logical view schemaengine list recorded by this statement. It
may be NULL if it is
- * not used before.
+ * @return the logical view schema list recorded by this statement. It may
be NULL if it has not
+ * been used before.
*/
List<LogicalViewSchema> getLogicalViewSchemaList();
@@ -62,18 +62,18 @@ public interface ISchemaComputation {
List<Integer> getIndexListOfLogicalViewPaths();
/**
- * Record the beginning and ending of logical schemaengine list. After
calling this interface, the
- * range should be record. For example, the range is [0,4) which means 4
schemas exist. Later,
- * more 3 schemas are added, this function is called, then it records [4,7).
+ * Record the beginning and ending of logical schema list. After calling
this interface, the range
+ * should be recorded. For example, the range is [0,4) which means 4 schemas
exist. Later, 3 more
+ * schemas are added, this function is called, then it records [4,7).
*/
void recordRangeOfLogicalViewSchemaListNow();
- /** @return the recorded range of logical view schemaengine list. */
+ /** @return the recorded range of logical view schema list. */
Pair<Integer, Integer> getRangeOfLogicalViewSchemaListRecorded();
/**
* @param index the index of fetched measurement in array returned by
getMeasurements
- * @param measurementSchemaInfo the measurement schemaengine of source of
the logical view
+ * @param measurementSchemaInfo the measurement schema of source of the
logical view
* @param isAligned whether the source of this view is aligned.
*/
void computeMeasurementOfView(
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaFetcher.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaFetcher.java
index 7b677157378..1308fb30102 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaFetcher.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaFetcher.java
@@ -38,26 +38,26 @@ import java.util.Map;
public interface ISchemaFetcher {
/**
- * Fetch all the schemaengine of existing timeseries matched by the given
patternTree
+ * Fetch all the schema of existing timeseries matched by the given
patternTree
*
* @param patternTree used for matching the timeseries
- * @return the matched timeseries schemaengine organized as tree structure
logically
+ * @return the matched timeseries schema organized as tree structure
logically
*/
ISchemaTree fetchSchema(PathPatternTree patternTree, MPPQueryContext
context);
/**
- * Fetch all the schemaengine with tags of existing timeseries matched by
the given patternTree
+ * Fetch all the schema with tags of existing timeseries matched by the
given patternTree
*
* @param patternTree used for matching the timeseries
- * @return the matched timeseries schemaengine organized as tree structure
logically
+ * @return the matched timeseries schema organized as tree structure
logically
*/
ISchemaTree fetchSchemaWithTags(PathPatternTree patternTree);
/**
- * Fetch and compute the schemaengine of target timeseries, with device and
measurement defined in
- * given schemaComputationWithAutoCreation. The computation defined in given
- * schemaComputationWithAutoCreation will be executed during scanning the
fetched schemaengine. If
- * some target timeseries doesn't exist, they will be auto created.
+ * Fetch and compute the schema of target timeseries, with device and
measurement defined in given
+ * schemaComputationWithAutoCreation. The computation defined in given
+ * schemaComputationWithAutoCreation will be executed during scanning the
fetched schema. If some
+ * target timeseries don't exist, they will be auto created.
*
* @param schemaComputationWithAutoCreation define the target device,
measurements and computation
*/
@@ -65,10 +65,10 @@ public interface ISchemaFetcher {
ISchemaComputationWithAutoCreation schemaComputationWithAutoCreation);
/**
- * Fetch and compute the schemaengine of target timeseries, with device and
measurement defined in
- * given schemaComputationWithAutoCreation. The computation defined in given
- * schemaComputationWithAutoCreation will be executed during scanning the
fetched schemaengine. If
- * some target timeseries doesn't exist, they will be auto created.
+ * Fetch and compute the schema of target timeseries, with device and
measurement defined in given
+ * schemaComputationWithAutoCreation. The computation defined in given
+ * schemaComputationWithAutoCreation will be executed during scanning the
fetched schema. If some
+ * target timeseries don't exist, they will be auto created.
*
* @param schemaComputationWithAutoCreationList define the target devices,
measurements and
* computation
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaValidation.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaValidation.java
index f72c9c2a3bb..658cd2ef79d 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaValidation.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ISchemaValidation.java
@@ -21,7 +21,7 @@ package org.apache.iotdb.db.queryengine.plan.analyze.schema;
import
org.apache.iotdb.db.queryengine.common.schematree.IMeasurementSchemaInfo;
-/** This interface defines the info and behaviour of a schemaengine validation
task. */
+/** This interface defines the info and behaviour of a schema validation task.
*/
public interface ISchemaValidation extends ISchemaComputationWithAutoCreation {
@Override
@@ -44,7 +44,7 @@ public interface ISchemaValidation extends
ISchemaComputationWithAutoCreation {
* Record the real value of <code>isAligned</code> of this device. This will
change the value of
* <code>isAligned</code> in this insert statement.
*
- * @param isAligned The real value of attribute <code>isAligned</code> of
this device schemaengine
+ * @param isAligned The real value of attribute <code>isAligned</code> of
this device schema
*/
void validateDeviceSchema(boolean isAligned);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/NormalSchemaFetcher.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/NormalSchemaFetcher.java
index 0ca7cd9c996..b6d3b4b629b 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/NormalSchemaFetcher.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/NormalSchemaFetcher.java
@@ -115,13 +115,13 @@ class NormalSchemaFetcher {
schemaCache.computeSourceOfLogicalView(schemaComputationWithAutoCreation);
List<Integer> indexOfMissingLogicalView = missedIndexAndPathString.left;
List<String> missedPathStringOfLogicalView =
missedIndexAndPathString.right;
- // all schemaengine can be taken from cache
+ // all schema can be taken from cache
if (indexOfMissingMeasurements.isEmpty() &&
indexOfMissingLogicalView.isEmpty()) {
return indexOfMissingMeasurements;
}
- // [Step 3] Fetch 1. fetch schemaengine from remote. Process logical view
first; then process
+ // [Step 3] Fetch 1. fetch schema from remote. Process logical view first;
then process
// measurements.
- // try fetch the missing raw schemaengine from remote and cache fetched
schemaengine
+ // try fetch the missing raw schema from remote and cache fetched schema
ClusterSchemaTree remoteSchemaTree;
if (missedPathStringOfLogicalView.isEmpty()) {
remoteSchemaTree =
@@ -141,7 +141,7 @@ class NormalSchemaFetcher {
// make sure all missed views are computed.
remoteSchemaTree.computeSourceOfLogicalView(
schemaComputationWithAutoCreation, indexOfMissingLogicalView);
- // check and compute the fetched schemaengine
+ // check and compute the fetched schema
indexOfMissingMeasurements =
remoteSchemaTree.compute(schemaComputationWithAutoCreation,
indexOfMissingMeasurements);
schemaComputationWithAutoCreation.recordRangeOfLogicalViewSchemaListNow();
@@ -158,12 +158,12 @@ class NormalSchemaFetcher {
schemaComputationWithAutoCreation, indexOfMissingLogicalView);
}
- // all schemaengine has been taken and processed
+ // all schema has been taken and processed
if (indexOfMissingMeasurements.isEmpty()) {
return indexOfMissingMeasurements;
}
- // [Step 5] Auto Create and process the missing schemaengine
+ // [Step 5] Auto Create and process the missing schema
if (config.isAutoCreateSchemaEnabled()) {
// Check the isAligned value. If the input value is different from the
actual value of the
// existing device, throw exception.
@@ -222,13 +222,13 @@ class NormalSchemaFetcher {
}
missedIndexAndPathStringOfViewList.add(missedIndexAndPathString);
}
- // all schemaengine can be taken from cache
+ // all schema can be taken from cache
if (indexOfDevicesWithMissingMeasurements.isEmpty() &&
(!hasUnFetchedLogicalView)) {
return;
}
- // [Step 3] Fetch 1.fetch schemaengine from remote. Process logical view
first; then process
+ // [Step 3] Fetch 1.fetch schema from remote. Process logical view first;
then process
// measurements.
- // try fetch the missing schemaengine from remote
+ // try fetch the missing schema from remote
ISchemaComputationWithAutoCreation schemaComputationWithAutoCreation;
ClusterSchemaTree remoteSchemaTree;
if (!hasUnFetchedLogicalView) {
@@ -266,7 +266,7 @@ class NormalSchemaFetcher {
remoteSchemaTree.computeSourceOfLogicalView(
schemaComputationWithAutoCreation,
missedIndexAndPathStringOfViewList.get(i).left);
}
- // check and compute the fetched schemaengine
+ // check and compute the fetched schema
List<Integer> indexOfDevicesNeedAutoCreateSchema = new ArrayList<>();
List<List<Integer>> indexOfMeasurementsNeedAutoCreate = new ArrayList<>();
for (int i = 0; i < indexOfDevicesWithMissingMeasurements.size(); i++) {
@@ -307,12 +307,12 @@ class NormalSchemaFetcher {
}
}
- // all schemaengine has been taken and processed
+ // all schema has been taken and processed
if (indexOfDevicesNeedAutoCreateSchema.isEmpty()) {
return;
}
- // [Step 5] Auto Create and process the missing schemaengine
+ // [Step 5] Auto Create and process the missing schema
if (config.isAutoCreateSchemaEnabled()) {
List<PartialPath> devicePathList =
schemaComputationWithAutoCreationList.stream()
@@ -360,7 +360,7 @@ class NormalSchemaFetcher {
}
}
- // all schemaengine has been taken and processed
+ // all schema has been taken and processed
if (indexOfDevicesWithMissingMeasurements.isEmpty()) {
return;
}
@@ -369,7 +369,7 @@ class NormalSchemaFetcher {
indexOfMissingMeasurementsList = indexOfMeasurementsNeedAutoCreate;
}
- // offer null for the rest missing schemaengine processing
+ // offer null for the rest missing schema processing
for (int i = 0; i < indexOfDevicesWithMissingMeasurements.size(); i++) {
schemaComputationWithAutoCreation =
schemaComputationWithAutoCreationList.get(indexOfDevicesWithMissingMeasurements.get(i));
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/TemplateSchemaFetcher.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/TemplateSchemaFetcher.java
index 8208dc3dbaf..36d54117cc7 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/TemplateSchemaFetcher.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/TemplateSchemaFetcher.java
@@ -77,7 +77,7 @@ class TemplateSchemaFetcher {
List<Integer> indexOfMissingMeasurements =
templateSchemaCache.computeWithTemplate(schemaComputationWithAutoCreation);
- // all schemaengine can be taken from cache
+ // all schema can be taken from cache
if (indexOfMissingMeasurements.isEmpty()) {
return indexOfMissingMeasurements;
}
@@ -88,24 +88,24 @@ class TemplateSchemaFetcher {
}
// not activated or not cached
- // try fetch the missing schemaengine from remote and cache fetched
schemaengine
+ // try fetch the missing schema from remote and cache fetched schema
ClusterSchemaTree remoteSchemaTree =
clusterSchemaFetchExecutor.fetchSchemaOfOneDevice(
schemaComputationWithAutoCreation.getDevicePath(),
schemaComputationWithAutoCreation.getMeasurements(),
indexOfMissingMeasurements);
- // check and compute the fetched schemaengine
+ // check and compute the fetched schema
indexOfMissingMeasurements =
remoteSchemaTree.compute(schemaComputationWithAutoCreation,
indexOfMissingMeasurements);
- // all schemaengine has been taken and processed
+ // all schema has been taken and processed
if (indexOfMissingMeasurements.isEmpty()) {
// already activated
return indexOfMissingMeasurements;
}
// not activated
- // auto create and process the missing schemaengine
+ // auto create and process the missing schema
if (config.isAutoCreateSchemaEnabled()) {
ClusterSchemaTree schemaTree = new ClusterSchemaTree();
autoCreateSchemaExecutor.autoActivateTemplate(schemaTree, devicePath,
template.getId());
@@ -160,12 +160,12 @@ class TemplateSchemaFetcher {
}
}
- // all schemaengine can be taken from cache
+ // all schema can be taken from cache
if (indexOfDevicesWithMissingMeasurements.isEmpty()) {
return;
}
- // try fetch the missing schemaengine from remote
+ // try fetch the missing schema from remote
ClusterSchemaTree remoteSchemaTree =
clusterSchemaFetchExecutor.fetchSchemaOfMultiDevices(
schemaComputationWithAutoCreationList.stream()
@@ -176,7 +176,7 @@ class TemplateSchemaFetcher {
.collect(Collectors.toList()),
indexOfDevicesWithMissingMeasurements,
indexOfMissingMeasurementsList);
- // check and compute the fetched schemaengine
+ // check and compute the fetched schema
List<Integer> indexOfDevicesNeedAutoCreateSchema = new ArrayList<>();
List<List<Integer>> indexOfMeasurementsNeedAutoCreate = new ArrayList<>();
for (int i = 0; i < indexOfDevicesWithMissingMeasurements.size(); i++) {
@@ -191,12 +191,12 @@ class TemplateSchemaFetcher {
}
}
- // all schemaengine has been taken and processed
+ // all schema has been taken and processed
if (indexOfDevicesNeedAutoCreateSchema.isEmpty()) {
return;
}
- // auto create and process the missing schemaengine
+ // auto create and process the missing schema
if (config.isAutoCreateSchemaEnabled()) {
ClusterSchemaTree schemaTree = new ClusterSchemaTree();
autoCreateSchemaExecutor.autoActivateTemplate(
@@ -221,7 +221,7 @@ class TemplateSchemaFetcher {
}
}
- // all schemaengine has been taken and processed
+ // all schema has been taken and processed
if (indexOfDevicesWithMissingMeasurements.isEmpty()) {
return;
}
@@ -230,7 +230,7 @@ class TemplateSchemaFetcher {
indexOfMissingMeasurementsList = indexOfMeasurementsNeedAutoCreate;
}
- // offer null for the rest missing schemaengine processing
+ // offer null for the rest missing schema processing
for (int i = 0; i < indexOfDevicesWithMissingMeasurements.size(); i++) {
schemaComputationWithAutoCreation =
schemaComputationWithAutoCreationList.get(indexOfDevicesWithMissingMeasurements.get(i));
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java
index 1399b2299c4..c02b07cd11a 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java
@@ -92,10 +92,10 @@ import static
org.apache.iotdb.db.queryengine.metric.QueryExecutionMetricSet.WAI
import static
org.apache.iotdb.db.queryengine.metric.QueryPlanCostMetricSet.DISTRIBUTION_PLANNER;
/**
- * QueryExecution stores all the status of a read which is being prepared or
running inside the MPP
- * frame. It takes three main responsibilities: 1. Prepare a read. Transform a
read from statement
+ * QueryExecution stores all the status of a query which is being prepared or
running inside the MPP
+ * frame. It takes three main responsibilities: 1. Prepare a query. Transform
a query from statement
* to DistributedQueryPlan with fragment instances. 2. Dispatch all the
fragment instances to
- * corresponding physical nodes. 3. Collect and monitor the progress/states of
this read.
+ * corresponding physical nodes. 3. Collect and monitor the progress/states of
this query.
*/
public class QueryExecution implements IQueryExecution {
private static final Logger logger =
LoggerFactory.getLogger(QueryExecution.class);
@@ -181,7 +181,7 @@ public class QueryExecution implements IQueryExecution {
if (!state.isDone()) {
return;
}
- // TODO: (xingtanzjr) If the read is in abnormal state, the
releaseResource() should be
+ // TODO: (xingtanzjr) If the query is in abnormal state, the
releaseResource() should be
// invoked
if (state == QueryState.FAILED
|| state == QueryState.ABORTED
@@ -215,7 +215,7 @@ public class QueryExecution implements IQueryExecution {
return;
}
- // check timeout for read first
+ // check timeout for query first
checkTimeOutForQuery();
doLogicalPlan();
doDistributedPlan();
@@ -240,7 +240,7 @@ public class QueryExecution implements IQueryExecution {
}
private void checkTimeOutForQuery() {
- // only check read operation's timeout because we will never limit write
operation's execution
+ // only check query operation's timeout because we will never limit write
operation's execution
// time
if (isQuery()) {
long currentTime = System.currentTimeMillis();
@@ -257,7 +257,7 @@ public class QueryExecution implements IQueryExecution {
stateMachine.transitionToFailed();
return getStatus();
}
- logger.warn("error when executing read. {}",
stateMachine.getFailureMessage());
+ logger.warn("error when executing query. {}",
stateMachine.getFailureMessage());
// stop and clean up resources the QueryExecution used
this.stopAndCleanup(stateMachine.getFailureException());
logger.info("[WaitBeforeRetry] wait {}ms.", RETRY_INTERVAL_IN_MS);
@@ -277,7 +277,7 @@ public class QueryExecution implements IQueryExecution {
// re-stop
this.stopped.compareAndSet(true, false);
this.resultHandleCleanUp.compareAndSet(true, false);
- // re-analyze the read
+ // re-analyze the query
this.analysis = analyze(rawStatement, context, partitionFetcher,
schemaFetcher);
// re-start the QueryExecution
this.start();
@@ -297,7 +297,7 @@ public class QueryExecution implements IQueryExecution {
this.analysis.setRespDatasetHeader(memorySource.getDatasetHeader());
}
- // Analyze the statement in QueryContext. Generate the analysis this read
need
+ // Analyze the statement in QueryContext. Generate the analysis this query
need
private Analysis analyze(
Statement statement,
MPPQueryContext context,
@@ -327,7 +327,7 @@ public class QueryExecution implements IQueryExecution {
return;
}
- // TODO: (xingtanzjr) initialize the read scheduler according to
configuration
+ // TODO: (xingtanzjr) initialize the query scheduler according to
configuration
this.scheduler =
new ClusterScheduler(
context,
@@ -343,7 +343,7 @@ public class QueryExecution implements IQueryExecution {
PERFORMANCE_OVERVIEW_METRICS.recordScheduleCost(System.nanoTime() -
startTime);
}
- // Use LogicalPlanner to do the logical read plan and logical optimization
+ // Use LogicalPlanner to do the logical query plan and logical optimization
public void doLogicalPlan() {
LogicalPlanner planner = new LogicalPlanner(this.context,
this.planOptimizers);
this.logicalPlan = planner.plan(this.analysis);
@@ -384,7 +384,7 @@ public class QueryExecution implements IQueryExecution {
return ret.toString();
}
- // Stop the workers for this read
+ // Stop the workers for this query
public void stop(Throwable t) {
// only stop once
if (stopped.compareAndSet(false, true) && this.scheduler != null) {
@@ -392,7 +392,7 @@ public class QueryExecution implements IQueryExecution {
}
}
- // Stop the read and clean up all the resources this read occupied
+ // Stop the query and clean up all the resources this query occupied
public void stopAndCleanup() {
stop(null);
releaseResource();
@@ -437,7 +437,7 @@ public class QueryExecution implements IQueryExecution {
}
}
- // Stop the read and clean up all the resources this read occupied
+ // Stop the query and clean up all the resources this query occupied
public void stopAndCleanup(Throwable t) {
stop(t);
releaseResource(t);
@@ -465,7 +465,7 @@ public class QueryExecution implements IQueryExecution {
/**
* This method will be called by the request thread from client connection.
This method will block
* until one of these conditions occurs: 1. There is a batch of result 2.
There is no more result
- * 3. The read has been cancelled 4. The read is timeout This method will
fetch the result from
+ * 3. The query has been cancelled 4. The query is timeout This method will
fetch the result from
* DataStreamManager use the virtual ResultOperator's ID (This part will be
designed and
* implemented with DataStreamManager)
*/
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java
index 863ae295d98..75d657c2383 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java
@@ -1056,7 +1056,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) {
final TSStatus executionStatus = client.killQuery(queryId, dataNodeId);
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() !=
executionStatus.getCode()) {
- LOGGER.warn("Failed to kill read [{}], because {}", queryId,
executionStatus.message);
+ LOGGER.warn("Failed to kill query [{}], because {}", queryId,
executionStatus.message);
future.setException(new IoTDBException(executionStatus.message,
executionStatus.code));
} else {
future.set(new ConfigTaskResult(TSStatusCode.SUCCESS_STATUS));
@@ -1241,7 +1241,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
// Get response or throw exception
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != tsStatus.getCode()) {
LOGGER.warn(
- "Failed to execute create schemaengine template {} in config node,
status is {}.",
+ "Failed to execute create schema template {} in config node,
status is {}.",
createSchemaTemplateStatement.getName(),
tsStatus);
future.setException(new IoTDBException(tsStatus.getMessage(),
tsStatus.getCode()));
@@ -1352,7 +1352,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != tsStatus.getCode()) {
LOGGER.warn(
- "Failed to execute deactivate schemaengine template {} from {} in
config node, status is {}.",
+ "Failed to execute deactivate schema template {} from {} in config
node, status is {}.",
deactivateTemplateStatement.getTemplateName(),
deactivateTemplateStatement.getPathPatternList(),
tsStatus);
@@ -1378,7 +1378,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
// Get response or throw exception
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != tsStatus.getCode()) {
LOGGER.warn(
- "Failed to execute drop schemaengine template {} in config node,
status is {}.",
+ "Failed to execute drop schema template {} in config node, status
is {}.",
dropSchemaTemplateStatement.getTemplateName(),
tsStatus);
future.setException(new IoTDBException(tsStatus.message,
tsStatus.code));
@@ -1407,7 +1407,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
future.setException(
new MetadataException(
String.format(
- "Duplicated measurement [%s] in schemaengine template
alter request",
+ "Duplicated measurement [%s] in schema template alter
request",
duplicateMeasurement)));
return future;
}
@@ -1439,7 +1439,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != tsStatus.getCode()) {
LOGGER.warn(
- "Failed to alter schemaengine template {} in config node, status
is {}.",
+ "Failed to alter schema template {} in config node, status is {}.",
alterSchemaTemplateStatement.getTemplateAlterInfo().getTemplateName(),
tsStatus);
future.setException(new IoTDBException(tsStatus.getMessage(),
tsStatus.getCode()));
@@ -1496,7 +1496,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != tsStatus.getCode()) {
LOGGER.warn(
- "Failed to execute unset schemaengine template {} from {} in
config node, status is {}.",
+ "Failed to execute unset schema template {} from {} in config
node, status is {}.",
unsetSchemaTemplateStatement.getTemplateName(),
unsetSchemaTemplateStatement.getPath(),
tsStatus);
@@ -2022,7 +2022,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
final TSStatus executionStatus = client.createCQ(tCreateCQReq);
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() !=
executionStatus.getCode()) {
LOGGER.warn(
- "[{}] Failed to create continuous read {}. TSStatus is {}",
+ "[{}] Failed to create continuous query {}. TSStatus is {}",
executionStatus,
createContinuousQueryStatement.getCqId(),
executionStatus.message);
@@ -2043,7 +2043,7 @@ public class ClusterConfigTaskExecutor implements
IConfigTaskExecutor {
CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) {
final TSStatus executionStatus = client.dropCQ(new TDropCQReq(cqId));
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() !=
executionStatus.getCode()) {
- LOGGER.warn("[{}] Failed to drop continuous read {}.",
executionStatus, cqId);
+ LOGGER.warn("[{}] Failed to drop continuous query {}.",
executionStatus, cqId);
future.setException(new IoTDBException(executionStatus.message,
executionStatus.code));
} else {
future.set(new ConfigTaskResult(TSStatusCode.SUCCESS_STATUS));
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/expression/visitor/ReplaceLogicalViewVisitor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/expression/visitor/ReplaceLogicalViewVisitor.java
index 3741836145d..508d40b7724 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/expression/visitor/ReplaceLogicalViewVisitor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/expression/visitor/ReplaceLogicalViewVisitor.java
@@ -83,7 +83,7 @@ public class ReplaceLogicalViewVisitor extends
ExpressionVisitor<Expression, Lis
// step 2. replace that TimeSeriesOperand with expression recorded in
LogicalViewSchema (view
// expression).
// step 3. record paths that appeared in view expression. They should be
fetched, then you can
- // use fetched schemaengine
+ // use fetched schema
// to complete new added TimeSeriesOperand.
int oldSize = this.newAddedPathList.size();
Expression result = this.process(expression, this.newAddedPathList);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDown.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDown.java
index 72a20ad9f09..52c9b8514fe 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDown.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDown.java
@@ -44,8 +44,8 @@ import
org.apache.iotdb.db.queryengine.plan.statement.crud.QueryStatement;
*
* <p><b>Rule:</b> The LIMIT OFFSET condition can be pushed down to the
SeriesScanNode, when the
* following conditions are met:
- * <li>Time series read (not aggregation read).
- * <li>The read expressions are all scalar expression.
+ * <li>Time series query (not aggregation query).
+ * <li>The query expressions are all scalar expression.
* <li>Functions that need to be calculated based on before or after values
are not used, such as
* trend functions, FILL(previous), FILL(linear).
* <li>Only one scan node is included in the distributed plan. That is, only
one single series or a
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java
index 8bd1e92cf7d..83dc516aa9f 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java
@@ -1530,7 +1530,7 @@ public class ASTVisitor extends
IoTDBSqlParserBaseVisitor<Statement> {
}
/**
- * parse time interval or sliding step in group by read.
+ * parse time interval or sliding step in group by query.
*
* @param duration represent duration string like: 12d8m9ns, 1y1d, etc.
* @return time in milliseconds, microseconds, or nanoseconds depending on
the profile
@@ -3138,7 +3138,7 @@ public class ASTVisitor extends
IoTDBSqlParserBaseVisitor<Statement> {
return new KillQueryStatement();
}
- // show read processlist
+ // show query processlist
@Override
public Statement visitShowQueries(IoTDBSqlParser.ShowQueriesContext ctx) {
@@ -3229,7 +3229,7 @@ public class ASTVisitor extends
IoTDBSqlParserBaseVisitor<Statement> {
return new ShowConfigNodesStatement();
}
- // schemaengine template
+ // schema template
@Override
public Statement
visitCreateSchemaTemplate(IoTDBSqlParser.CreateSchemaTemplateContext ctx) {
@@ -3315,7 +3315,7 @@ public class ASTVisitor extends
IoTDBSqlParserBaseVisitor<Statement> {
List<TSEncoding> encodings,
List<CompressionType> compressors) {
if (ctx.aliasNodeName() != null) {
- throw new SemanticException("schemaengine template: alias is not
supported yet.");
+ throw new SemanticException("schema template: alias is not supported
yet.");
}
TSDataType dataType = parseDataTypeAttribute(ctx);
@@ -3372,15 +3372,15 @@ public class ASTVisitor extends
IoTDBSqlParserBaseVisitor<Statement> {
}
if (props.size() > 0) {
- throw new SemanticException("schemaengine template: property is not
supported yet.");
+ throw new SemanticException("schema template: property is not supported
yet.");
}
if (ctx.tagClause() != null) {
- throw new SemanticException("schemaengine template: tag is not supported
yet.");
+ throw new SemanticException("schema template: tag is not supported
yet.");
}
if (ctx.attributeClause() != null) {
- throw new SemanticException("schemaengine template: attribute is not
supported yet.");
+ throw new SemanticException("schema template: attribute is not supported
yet.");
}
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/StatementGenerator.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/StatementGenerator.java
index 74082be86b8..8a2e98d3be2 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/StatementGenerator.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/StatementGenerator.java
@@ -135,7 +135,7 @@ public class StatementGenerator {
public static Statement createStatement(TSRawDataQueryReq rawDataQueryReq,
ZoneId zoneId)
throws IllegalPathException {
final long startTime = System.nanoTime();
- // construct read statement
+ // construct query statement
SelectComponent selectComponent = new SelectComponent(zoneId);
FromComponent fromComponent = new FromComponent();
WhereCondition whereCondition = new WhereCondition();
@@ -154,7 +154,7 @@ public class StatementGenerator {
new ResultColumn(
new TimeSeriesOperand(new PartialPath("", false)),
ResultColumn.ColumnType.RAW));
- // set read filter
+ // set query filter
GreaterEqualExpression leftPredicate =
new GreaterEqualExpression(
new TimestampOperand(),
@@ -177,7 +177,7 @@ public class StatementGenerator {
public static Statement createStatement(TSLastDataQueryReq lastDataQueryReq,
ZoneId zoneId)
throws IllegalPathException {
final long startTime = System.nanoTime();
- // construct read statement
+ // construct query statement
SelectComponent selectComponent = new SelectComponent(zoneId);
FromComponent fromComponent = new FromComponent();
@@ -197,7 +197,7 @@ public class StatementGenerator {
new ResultColumn(
new TimeSeriesOperand(new PartialPath("", false)),
ResultColumn.ColumnType.RAW));
- // set read filter
+ // set query filter
WhereCondition whereCondition = new WhereCondition();
GreaterEqualExpression predicate =
new GreaterEqualExpression(
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanContext.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanContext.java
index d78334815df..077b7aeaab5 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanContext.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanContext.java
@@ -71,10 +71,10 @@ public class LocalExecutionPlanContext {
private List<TSDataType> cachedDataTypes;
- // left is cached last value in last read
+ // left is cached last value in last query
// right is full path for each cached last value
private List<Pair<TimeValuePair, Binary>> cachedLastValueAndPathList;
- // timeFilter for last read
+ // timeFilter for last query
private Filter lastQueryTimeFilter;
// whether we need to update last cache
private boolean needUpdateLastCache;
@@ -107,7 +107,7 @@ public class LocalExecutionPlanContext {
parentContext.getDriverContext().createSubDriverContext(getNextPipelineId());
}
- // for schemaengine region
+ // for schema region
public LocalExecutionPlanContext(
FragmentInstanceContext instanceContext, ISchemaRegion schemaRegion) {
this.allSensorsMap = new ConcurrentHashMap<>();
@@ -115,7 +115,7 @@ public class LocalExecutionPlanContext {
this.nextOperatorId = new AtomicInteger(0);
this.nextPipelineId = new AtomicInteger(0);
- // there is no ttl in schemaengine region, so we don't care this field
+ // there is no ttl in schema region, so we don't care this field
this.dataRegionTTL = Long.MAX_VALUE;
this.driverContext =
new SchemaDriverContext(instanceContext, schemaRegion,
getNextPipelineId());
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanner.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanner.java
index 58f094b5354..39542e1c6d0 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanner.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanner.java
@@ -68,7 +68,7 @@ public class LocalExecutionPlanner {
// TODO Replace operator with operatorFactory to build multiple driver for
one pipeline
Operator root = plan.accept(new OperatorTreeGenerator(), context);
- // check whether current free memory is enough to execute current read
+ // check whether current free memory is enough to execute current query
long estimatedMemorySize = checkMemory(root,
instanceContext.getStateMachine());
context.addPipelineDriverFactory(root, context.getDriverContext(),
estimatedMemorySize);
@@ -89,7 +89,7 @@ public class LocalExecutionPlanner {
Operator root = plan.accept(new OperatorTreeGenerator(), context);
- // check whether current free memory is enough to execute current read
+ // check whether current free memory is enough to execute current query
checkMemory(root, instanceContext.getStateMachine());
context.addPipelineDriverFactory(root, context.getDriverContext(), 0);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java
index 82221a2d56b..44efc6dc080 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java
@@ -228,7 +228,7 @@ public class LogicalPlanVisitor extends
StatementVisitor<PlanNode, MPPQueryConte
LogicalPlanBuilder planBuilder = new LogicalPlanBuilder(analysis, context);
if (aggregationExpressions == null) {
- // raw data read
+ // raw data query
planBuilder =
planBuilder
.planRawDataSource(
@@ -242,7 +242,7 @@ public class LogicalPlanVisitor extends
StatementVisitor<PlanNode, MPPQueryConte
queryStatement.getSelectComponent().getZoneId(),
queryStatement.getResultTimeOrder());
} else {
- // aggregation read
+ // aggregation query
boolean isRawDataSource =
analysis.hasValueFilter()
|| analysis.hasGroupByParameter()
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanner.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanner.java
index ea7ca5c67eb..e1960ed3c7d 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanner.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanner.java
@@ -44,7 +44,7 @@ public class LogicalPlanner {
long startTime = System.nanoTime();
PlanNode rootNode = new
LogicalPlanVisitor(analysis).process(analysis.getStatement(), context);
- // optimize the read logical plan
+ // optimize the query logical plan
if (analysis.getStatement().isQuery()) {
QueryPlanCostMetricSet.getInstance()
.recordPlanCost(LOGICAL_PLANNER, System.nanoTime() - startTime);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/DistributionPlanContext.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/DistributionPlanContext.java
index e5b9e6d65bd..87efdf508b7 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/DistributionPlanContext.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/DistributionPlanContext.java
@@ -31,7 +31,7 @@ public class DistributionPlanContext {
// That the variable is true means there is some source series which is
// distributed in multi DataRegions
protected boolean oneSeriesInMultiRegion;
- // That the variable is true means this read will be distributed in multi
+ // That the variable is true means this query will be distributed in multi
// DataRegions
protected boolean queryMultiRegion;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java
index 236d817cd8d..d1420c79112 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java
@@ -612,7 +612,7 @@ public class SourceRewriter extends
SimplePlanNodeRewriter<DistributionPlanConte
@Override
public List<PlanNode> visitLastQuery(LastQueryNode node,
DistributionPlanContext context) {
- // For last read, we need to keep every FI's root node is
LastQueryMergeNode. So we
+ // For last query, we need to make sure every FI's root node is
LastQueryMergeNode. So we
// force every region group have a parent node even if there is only 1
child for it.
context.setForceAddParent(true);
PlanNode root = processRawMultiChildNode(node, context);
@@ -762,7 +762,7 @@ public class SourceRewriter extends
SimplePlanNodeRewriter<DistributionPlanConte
// Process the other children which are not SeriesSourceNode
for (PlanNode child : node.getChildren()) {
if (!(child instanceof SeriesSourceNode)) {
- // In a general logical read plan, the children of TimeJoinNode should
only be
+ // In a general logical query plan, the children of TimeJoinNode
should only be
// SeriesScanNode or SeriesAggregateScanNode
// So this branch should not be touched.
List<PlanNode> children = visit(child, context);
@@ -939,7 +939,7 @@ public class SourceRewriter extends
SimplePlanNodeRewriter<DistributionPlanConte
root.getChildren().size() == 1
&& root.getChildren().get(0) instanceof
SlidingWindowAggregationNode;
- // TODO: use 2 phase aggregation to optimize the read
+ // TODO: use 2 phase aggregation to optimize the query
return Collections.singletonList(
containsSlidingWindow
? groupSourcesForGroupByTagWithSlidingWindow(
@@ -950,9 +950,9 @@ public class SourceRewriter extends
SimplePlanNodeRewriter<DistributionPlanConte
: groupSourcesForGroupByTag(root, sourceGroup, context));
}
- // If the Aggregation Query contains value filter, we need to use the naive
read plan
- // for it. That is, do the raw data read and then do the aggregation
operation.
- // Currently, the method to judge whether the read should use naive read
plan is whether
+ // If the Aggregation Query contains value filter, we need to use the naive
query plan
+ // for it. That is, do the raw data query and then do the aggregation
operation.
+ // Currently, the method to judge whether the query should use naive query
plan is whether
// AggregationNode is contained in the PlanNode tree of logical plan.
private boolean shouldUseNaiveAggregation(PlanNode root) {
if (root instanceof AggregationNode) {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/LogicalQueryPlan.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/LogicalQueryPlan.java
index 3f43023b04b..a1c994a037c 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/LogicalQueryPlan.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/LogicalQueryPlan.java
@@ -22,7 +22,7 @@ import org.apache.iotdb.db.queryengine.common.MPPQueryContext;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
/**
- * LogicalQueryPlan represents a logical read plan. It stores the root node of
corresponding read
+ * LogicalQueryPlan represents a logical query plan. It stores the root node
of corresponding query
* plan node tree.
*/
public class LogicalQueryPlan {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/PlanFragment.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/PlanFragment.java
index 1c8253e65fc..658c39b26ab 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/PlanFragment.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/PlanFragment.java
@@ -35,7 +35,7 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Objects;
-/** PlanFragment contains a sub-read of distributed read. */
+/** PlanFragment contains a sub-query of distributed query. */
public class PlanFragment {
// TODO once you add field for this class you need to change the serialize
and deserialize methods
private final PlanFragmentId id;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNode.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNode.java
index f9b75cba0bd..9dac211fb02 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNode.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNode.java
@@ -34,7 +34,7 @@ import java.util.Objects;
import static java.util.Objects.requireNonNull;
-/** The base class of read logical plan nodes, which is used to compose
logical read plan. */
+/** The base class of query logical plan nodes, which is used to compose
logical query plan. */
public abstract class PlanNode implements IConsensusRequest {
private static final Logger LOGGER = LoggerFactory.getLogger(PlanNode.class);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/read/SchemaFetchMergeNode.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/read/SchemaFetchMergeNode.java
index f15143551f6..591feb67e5f 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/read/SchemaFetchMergeNode.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/read/SchemaFetchMergeNode.java
@@ -31,7 +31,7 @@ import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
-/** This class defines the scanned result merge task of schemaengine fetcher.
*/
+/** This class defines the scanned result merge task of schema fetcher. */
public class SchemaFetchMergeNode extends AbstractSchemaMergeNode {
private List<String> storageGroupList;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/read/SchemaFetchScanNode.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/read/SchemaFetchScanNode.java
index 00a156757a7..d1e4c877ff3 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/read/SchemaFetchScanNode.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/read/SchemaFetchScanNode.java
@@ -42,7 +42,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-/** This class defines the scan task of schemaengine fetcher. */
+/** This class defines the scan task of schema fetcher. */
public class SchemaFetchScanNode extends SourceNode {
private final PartialPath storageGroup;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/write/CreateAlignedTimeSeriesNode.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/write/CreateAlignedTimeSeriesNode.java
index 127e13400b4..0d463c12e42 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/write/CreateAlignedTimeSeriesNode.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/metedata/write/CreateAlignedTimeSeriesNode.java
@@ -57,8 +57,7 @@ public class CreateAlignedTimeSeriesNode extends WritePlanNode
private List<Map<String, String>> attributesList;
// only used inside schemaRegion to be serialized to mlog, no need to be
serialized for
- // queryengine
- // transport
+ // mpp transport
private List<Long> tagOffsets = null;
private TRegionReplicaSet regionReplicaSet;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/DeviceViewNode.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/DeviceViewNode.java
index 5d1f5849c49..4d4b9b67049 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/DeviceViewNode.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/DeviceViewNode.java
@@ -38,9 +38,9 @@ import java.util.Objects;
* DeviceViewNode is responsible for constructing a device-based view of a set
of series. And output
* the result with specific order. The order could be 'order by device' or
'order by timestamp'
*
- * <p>Each output from its children should have the same schemaengine. That
means, the columns
- * should be same between these TsBlocks. If the input TsBlock contains n
columns, the device-based
- * view will contain n+1 columns where the new column is Device column.
+ * <p>Each output from its children should have the same schema. That means,
the columns should be
+ * same between these TsBlocks. If the input TsBlock contains n columns, the
device-based view will
+ * contain n+1 columns where the new column is Device column.
*/
public class DeviceViewNode extends MultiChildProcessNode {
@@ -54,7 +54,7 @@ public class DeviceViewNode extends MultiChildProcessNode {
// Device column and measurement columns in result output
private final List<String> outputColumnNames;
- // e.g. [s1,s2,s3] is read, but [s1, s3] exists in device1, then device1 ->
[1, 3], s1 is 1 but
+ // e.g. [s1,s2,s3] is queried, but [s1, s3] exists in device1, then device1 ->
[1, 3], s1 is 1 but
// not 0 because device is the first column
private final Map<String, List<Integer>> deviceToMeasurementIndexesMap;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/GroupByTagNode.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/GroupByTagNode.java
index 4b6914a5987..b86d4c9e74c 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/GroupByTagNode.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/GroupByTagNode.java
@@ -222,7 +222,7 @@ public class GroupByTagNode extends MultiChildProcessNode {
* <li>root.sg.d3.s1(k1=v2)
* </ul>
*
- * Then the read <code>
+ * Then the query <code>
* SELECT avg(s1), avg(s2) FROM root.sg.** GROUP BY TAGS(k1)
* </code>will generate a {@link GroupByTagNode} with the
<code>TagValuesToAggregationDescriptors
* </code> as below: <code>
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/last/LastQueryNode.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/last/LastQueryNode.java
index 619d13434e9..a47810a0076 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/last/LastQueryNode.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/process/last/LastQueryNode.java
@@ -42,7 +42,7 @@ public class LastQueryNode extends MultiChildProcessNode {
private final Filter timeFilter;
- // the ordering of timeseries in the result of last read
+ // the ordering of timeseries in the result of last query
// which is set to null if there is no need to sort
private Ordering timeseriesOrdering;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/source/SeriesAggregationSourceNode.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/source/SeriesAggregationSourceNode.java
index dbb38d7094d..10141648aa2 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/source/SeriesAggregationSourceNode.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/source/SeriesAggregationSourceNode.java
@@ -44,7 +44,7 @@ public abstract class SeriesAggregationSourceNode extends
SeriesSourceNode {
// time filter for current series, could be null if it doesn't exist
@Nullable protected Filter timeFilter;
- // push-downing read filter for current series, could be null if it doesn't
exist
+ // push-downing query filter for current series, could be null if it doesn't
exist
@Nullable protected Filter valueFilter;
// The parameter of `group by time`
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/ClusterScheduler.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/ClusterScheduler.java
index e1dbc878b6d..62c23cd5096 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/ClusterScheduler.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/ClusterScheduler.java
@@ -46,8 +46,8 @@ import java.util.concurrent.ScheduledExecutorService;
import static
org.apache.iotdb.db.queryengine.metric.QueryExecutionMetricSet.WAIT_FOR_DISPATCH;
/**
- * QueryScheduler is used to dispatch the fragment instances of a read to
target nodes. And it will
- * continue to collect and monitor the read execution before the read is
finished.
+ * QueryScheduler is used to dispatch the fragment instances of a query to
target nodes. And it will
+ * continue to collect and monitor the query execution before the query is
finished.
*
* <p>Later, we can add more control logic for a QueryExecution such as retry,
kill and so on by
* this scheduler.
@@ -129,7 +129,7 @@ public class ClusterScheduler implements IScheduler {
return;
}
} catch (InterruptedException | ExecutionException e) {
- // If the dispatch request cannot be sent or TException is caught, we
will retry this read.
+ // If the dispatch request cannot be sent or TException is caught, we
will retry this query.
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/FixedRateFragInsStateTracker.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/FixedRateFragInsStateTracker.java
index 5c5830c9613..5666a5e1882 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/FixedRateFragInsStateTracker.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/FixedRateFragInsStateTracker.java
@@ -131,7 +131,7 @@ public class FixedRateFragInsStateTracker extends
AbstractFragInsStateTracker {
}
} catch (ClientManagerException | TException e) {
// TODO: do nothing ?
- logger.warn("error happened while fetching read state", e);
+ logger.warn("error happened while fetching query state", e);
}
}
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/SimpleQueryTerminator.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/SimpleQueryTerminator.java
index 2e425fc6f58..55134bd5f29 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/SimpleQueryTerminator.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/SimpleQueryTerminator.java
@@ -97,7 +97,7 @@ public class SimpleQueryTerminator implements
IQueryTerminator {
public Boolean syncTerminate() {
boolean succeed = true;
for (TEndPoint endPoint : relatedHost) {
- // we only send cancel read request if there is remaining unfinished FI
in that node
+ // we only send cancel query request if there is remaining unfinished FI
in that node
List<FragmentInstanceId> unfinishedFIs =
stateTracker.filterUnFinishedFIs(ownedFragmentInstance.get(endPoint));
if (unfinishedFIs.isEmpty()) {
@@ -128,7 +128,7 @@ public class SimpleQueryTerminator implements
IQueryTerminator {
// we shouldn't return here and need to cancel queryTasks in other
nodes
succeed = false;
} catch (TException t) {
- logger.warn("cancel read {} on node {} failed.", queryId.getId(),
endPoint, t);
+ logger.warn("cancel query {} on node {} failed.", queryId.getId(),
endPoint, t);
// we shouldn't return here and need to cancel queryTasks in other
nodes
succeed = false;
}
@@ -139,7 +139,7 @@ public class SimpleQueryTerminator implements
IQueryTerminator {
public Boolean syncTerminateThrowable() {
boolean succeed = true;
for (TEndPoint endPoint : relatedHost) {
- // we only send cancel read request if there is remaining unfinished FI
in that node
+ // we only send cancel query request if there is remaining unfinished FI
in that node
List<FragmentInstanceId> unfinishedFIs =
stateTracker.filterUnFinishedFIs(ownedFragmentInstance.get(endPoint));
if (unfinishedFIs.isEmpty()) {
@@ -170,7 +170,7 @@ public class SimpleQueryTerminator implements
IQueryTerminator {
// we shouldn't return here and need to cancel queryTasks in other
nodes
succeed = false;
} catch (TException t) {
- logger.warn("cancel read {} on node {} failed.", queryId.getId(),
endPoint, t);
+ logger.warn("cancel query {} on node {} failed.", queryId.getId(),
endPoint, t);
// we shouldn't return here and need to cancel queryTasks in other
nodes
succeed = false;
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/OrderByComponent.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/OrderByComponent.java
index 9c742682d25..4aa7a202aae 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/OrderByComponent.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/OrderByComponent.java
@@ -27,7 +27,7 @@ import java.util.List;
import static com.google.common.base.Preconditions.checkState;
-/** The order of read result set */
+/** The order of query result set */
public class OrderByComponent extends StatementNode {
private final List<SortItem> sortItemList;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/ResultColumn.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/ResultColumn.java
index 85c00890732..e8db3f8a5ee 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/ResultColumn.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/ResultColumn.java
@@ -25,7 +25,7 @@ import
org.apache.iotdb.db.queryengine.plan.statement.StatementNode;
import java.util.Objects;
/**
- * This class is used to represent a result column of a read.
+ * This class is used to represent a result column of a query.
*
* <p>Assume that we have time series in db as follows: <br>
* [ root.sg.d.a, root.sg.d.b, root.sg.e.a, root.sg.e.b ]
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/ResultSetFormat.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/ResultSetFormat.java
index 8130fc8243c..ddfbd22fda3 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/ResultSetFormat.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/component/ResultSetFormat.java
@@ -19,7 +19,7 @@
package org.apache.iotdb.db.queryengine.plan.statement.component;
-/** The alignment of read result set */
+/** The alignment of query result set */
public enum ResultSetFormat {
ALIGN_BY_TIME,
ALIGN_BY_DEVICE,
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/InsertBaseStatement.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/InsertBaseStatement.java
index b1410845a76..d118884cef6 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/InsertBaseStatement.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/InsertBaseStatement.java
@@ -64,7 +64,7 @@ public abstract class InsertBaseStatement extends Statement {
// region params used by analyzing logical views.
- /** This param records the logical view schemaengine appeared in this
statement. */
+ /** This param records the logical view schema appeared in this statement. */
List<LogicalViewSchema> logicalViewSchemaList;
/**
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/QueryStatement.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/QueryStatement.java
index fe855936308..8e575156a9a 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/QueryStatement.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/QueryStatement.java
@@ -375,7 +375,7 @@ public class QueryStatement extends Statement {
return orderByComponent.getDeviceOrder();
}
- // push down only support raw data read currently
+ // push down only support raw data query currently
public boolean needPushDownSort() {
return !isAggregationQuery() && hasOrderByExpression() &&
isOrderByBasedOnDevice();
}
@@ -470,7 +470,7 @@ public class QueryStatement extends Statement {
}
private static final String RAW_AGGREGATION_HYBRID_QUERY_ERROR_MSG =
- "Raw data and aggregation hybrid read is not supported.";
+ "Raw data and aggregation hybrid query is not supported.";
public void semanticCheck() {
if (isAggregationQuery()) {
@@ -503,7 +503,7 @@ public class QueryStatement extends Statement {
}
if (isGroupByTag()) {
if (hasHaving()) {
- throw new SemanticException("Having clause is not supported yet in
GROUP BY TAGS read");
+ throw new SemanticException("Having clause is not supported yet in
GROUP BY TAGS query");
}
for (String s : getGroupByTagComponent().getTagKeys()) {
if (outputColumn.contains(s)) {
@@ -590,10 +590,10 @@ public class QueryStatement extends Statement {
if (isLastQuery()) {
if (isAlignByDevice()) {
- throw new SemanticException("Last read doesn't support align by
device.");
+ throw new SemanticException("Last query doesn't support align by
device.");
}
if (disableAlign()) {
- throw new SemanticException("Disable align cannot be applied to LAST
read.");
+ throw new SemanticException("Disable align cannot be applied to LAST
query.");
}
for (ResultColumn resultColumn : selectComponent.getResultColumns()) {
Expression expression = resultColumn.getExpression();
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/metadata/CreateContinuousQueryStatement.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/metadata/CreateContinuousQueryStatement.java
index 9f45dc81105..dc5f7e9ec94 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/metadata/CreateContinuousQueryStatement.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/metadata/CreateContinuousQueryStatement.java
@@ -39,16 +39,16 @@ public class CreateContinuousQueryStatement extends
Statement implements IConfig
private String cqId;
- // The read execution time interval, default value is group_by_interval in
group by clause.
+ // The query execution time interval, default value is group_by_interval in
group by clause.
private long everyInterval;
// A date that represents the execution time of a certain cq task, default
value is 0.
private long boundaryTime = 0;
- // The start time of each read execution, default value is every_interval
+ // The start time of each query execution, default value is every_interval
private long startTimeOffset;
- // The end time of each read execution, default value is 0.
+ // The end time of each query execution, default value is 0.
private long endTimeOffset = 0;
// Specify how we deal with the cq task whose previous time interval
execution is not finished
@@ -197,7 +197,7 @@ public class CreateContinuousQueryStatement extends
Statement implements IConfig
}
if (!queryBodyStatement.isSelectInto()) {
- throw new SemanticException("CQ: The read body misses an INTO clause.");
+ throw new SemanticException("CQ: The query body misses an INTO clause.");
}
GroupByTimeComponent groupByTimeComponent =
queryBodyStatement.getGroupByTimeComponent();
if (groupByTimeComponent != null
@@ -208,7 +208,7 @@ public class CreateContinuousQueryStatement extends
Statement implements IConfig
if (queryBodyStatement.getWhereCondition() != null
&& ExpressionAnalyzer.checkIfTimeFilterExist(
queryBodyStatement.getWhereCondition().getPredicate())) {
- throw new SemanticException("CQ: Specifying time filters in the read
body is prohibited.");
+ throw new SemanticException("CQ: Specifying time filters in the query
body is prohibited.");
}
}
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/MultiInputColumnIntermediateLayer.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/MultiInputColumnIntermediateLayer.java
index 88fd554007a..9ecc8bc08f6 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/MultiInputColumnIntermediateLayer.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/MultiInputColumnIntermediateLayer.java
@@ -525,7 +525,7 @@ public class MultiInputColumnIntermediateLayer extends
IntermediateLayer
if (yieldableState != YieldableState.YIELDABLE) {
return yieldableState;
}
- // display window begin should be set to the same as the min
timestamp of the read
+ // display window begin should be set to the same as the min
timestamp of the query
// result set
nextWindowTimeBegin = rowRecordList.getTime(0);
}
@@ -591,7 +591,7 @@ public class MultiInputColumnIntermediateLayer extends
IntermediateLayer
if (rowRecordList.size() == 0
&& LayerCacheUtils.cacheRow(udfInputDataSet, rowRecordList)
&& nextWindowTimeBegin == Long.MIN_VALUE) {
- // display window begin should be set to the same as the min
timestamp of the read
+ // display window begin should be set to the same as the min
timestamp of the query
// result set
nextWindowTimeBegin = rowRecordList.getTime(0);
}
@@ -748,7 +748,7 @@ public class MultiInputColumnIntermediateLayer extends
IntermediateLayer
nextIndexBegin = i;
break;
}
- // The first window's beginning time is greater than all the
timestamp of the read result
+ // The first window's beginning time is greater than all the
timestamp of the query result
// set
if (i == rowRecordList.size() - 1) {
return YieldableState.NOT_YIELDABLE_NO_MORE_DATA;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/SingleInputColumnMultiReferenceIntermediateLayer.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/SingleInputColumnMultiReferenceIntermediateLayer.java
index 817bb8234d9..c6cb5f17a11 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/SingleInputColumnMultiReferenceIntermediateLayer.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/SingleInputColumnMultiReferenceIntermediateLayer.java
@@ -413,7 +413,7 @@ public class
SingleInputColumnMultiReferenceIntermediateLayer extends Intermedia
}
}
if (nextWindowTimeBeginGivenByStrategy == Long.MIN_VALUE) {
- // display window begin should be set to the same as the min
timestamp of the read
+ // display window begin should be set to the same as the min
timestamp of the query
// result
// set
nextWindowTimeBegin = tvList.getTime(0);
@@ -483,7 +483,7 @@ public class
SingleInputColumnMultiReferenceIntermediateLayer extends Intermedia
&& LayerCacheUtils.cachePoint(
parentLayerPointReaderDataType, parentLayerPointReader,
tvList)
&& nextWindowTimeBeginGivenByStrategy == Long.MIN_VALUE) {
- // display window begin should be set to the same as the min
timestamp of the read
+ // display window begin should be set to the same as the min
timestamp of the query
// result
// set
nextWindowTimeBegin = tvList.getTime(0);
@@ -638,7 +638,7 @@ public class
SingleInputColumnMultiReferenceIntermediateLayer extends Intermedia
nextIndexBegin = i;
break;
}
- // The first window's beginning time is greater than all the
timestamp of the read result
+ // The first window's beginning time is greater than all the
timestamp of the query result
// set
if (i == tvList.size() - 1) {
return YieldableState.NOT_YIELDABLE_NO_MORE_DATA;
@@ -756,7 +756,7 @@ public class
SingleInputColumnMultiReferenceIntermediateLayer extends Intermedia
nextIndexBegin = i;
break;
}
- // The first window's beginning time is greater than all the
timestamp of the read result
+ // The first window's beginning time is greater than all the
timestamp of the query result
// set
if (i == tvList.size() - 1) {
return YieldableState.NOT_YIELDABLE_NO_MORE_DATA;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/SingleInputColumnSingleReferenceIntermediateLayer.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/SingleInputColumnSingleReferenceIntermediateLayer.java
index 55e422bf536..ec443005d89 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/SingleInputColumnSingleReferenceIntermediateLayer.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/intermediate/SingleInputColumnSingleReferenceIntermediateLayer.java
@@ -287,7 +287,7 @@ public class
SingleInputColumnSingleReferenceIntermediateLayer extends Intermedi
}
}
if (nextWindowTimeBegin == Long.MIN_VALUE) {
- // display window begin should be set to the same as the min
timestamp of the read
+ // display window begin should be set to the same as the min
timestamp of the query
// result set
nextWindowTimeBegin = tvList.getTime(0);
}
@@ -355,7 +355,7 @@ public class
SingleInputColumnSingleReferenceIntermediateLayer extends Intermedi
if (tvList.size() == 0
&& LayerCacheUtils.cachePoint(dataType, parentLayerPointReader,
tvList)
&& nextWindowTimeBegin == Long.MIN_VALUE) {
- // display window begin should be set to the same as the min
timestamp of the read
+ // display window begin should be set to the same as the min
timestamp of the query
// result set
nextWindowTimeBegin = tvList.getTime(0);
}
@@ -510,7 +510,7 @@ public class
SingleInputColumnSingleReferenceIntermediateLayer extends Intermedi
nextIndexBegin = i;
break;
}
- // The first window's beginning time is greater than all the
timestamp of the read result
+ // The first window's beginning time is greater than all the
timestamp of the query result
// set
if (i == tvList.size() - 1) {
return YieldableState.NOT_YIELDABLE_NO_MORE_DATA;
@@ -628,7 +628,7 @@ public class
SingleInputColumnSingleReferenceIntermediateLayer extends Intermedi
nextIndexBegin = i;
break;
}
- // The first window's beginning time is greater than all the
timestamp of the read result
+ // The first window's beginning time is greater than all the
timestamp of the query result
// set
if (i == tvList.size() - 1) {
return YieldableState.NOT_YIELDABLE_NO_MORE_DATA;
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/transformer/unary/TransparentTransformer.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/transformer/unary/TransparentTransformer.java
index 69b66965533..6f146a35862 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/transformer/unary/TransparentTransformer.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/transformer/unary/TransparentTransformer.java
@@ -30,7 +30,7 @@ import java.io.IOException;
*
* <p>i.e. it's just the function f(x) = x.
*
- * <p>It's mainly used for a UDF with aggregation read as its parameters.
+ * <p>It's mainly used for a UDF with aggregation query as its parameters.
*/
public class TransparentTransformer extends UnaryTransformer {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/row/ElasticSerializableRowRecordList.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/row/ElasticSerializableRowRecordList.java
index e7fa205d01e..356648e1e5d 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/row/ElasticSerializableRowRecordList.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/row/ElasticSerializableRowRecordList.java
@@ -241,7 +241,7 @@ public class ElasticSerializableRowRecordList {
return;
}
- throw new QueryProcessException("Memory is not enough for current read.");
+ throw new QueryProcessException("Memory is not enough for current query.");
}
protected void applyNewMemoryControlParameters(
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/row/SerializableRowRecordList.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/row/SerializableRowRecordList.java
index 9299b797c9f..b220d190749 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/row/SerializableRowRecordList.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/row/SerializableRowRecordList.java
@@ -89,7 +89,7 @@ public class SerializableRowRecordList implements
SerializableList {
// 1 extra bit for null fields mark in bitMap
int size = (int) (memoryLimitInMB * MB / 2 / (rowLength +
ReadWriteIOUtils.BIT_LEN));
if (size <= 0) {
- throw new QueryProcessException("Memory is not enough for current
read.");
+ throw new QueryProcessException("Memory is not enough for current
query.");
}
return size;
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/tv/ElasticSerializableBinaryTVList.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/tv/ElasticSerializableBinaryTVList.java
index f0e78c03f6f..7ec007e9e96 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/tv/ElasticSerializableBinaryTVList.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/tv/ElasticSerializableBinaryTVList.java
@@ -97,7 +97,7 @@ public class ElasticSerializableBinaryTVList extends
ElasticSerializableTVList {
return;
}
- throw new RuntimeException("Memory is not enough for current read.");
+ throw new RuntimeException("Memory is not enough for current query.");
}
protected void applyNewMemoryControlParameters(
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/tv/SerializableTVList.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/tv/SerializableTVList.java
index f9722bfd5ad..12d0a63c4fc 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/tv/SerializableTVList.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/datastructure/tv/SerializableTVList.java
@@ -74,7 +74,7 @@ public abstract class SerializableTVList extends BatchData
implements Serializab
}
if (size <= 0) {
- throw new RuntimeException("Memory is not enough for current read.");
+ throw new RuntimeException("Memory is not enough for current query.");
}
return size;
}