This is an automated email from the ASF dual-hosted git repository.
jackietien pushed a commit to branch ty/packageRefactor
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/ty/packageRefactor by this
push:
new 1de33839ec2 fix ci
1de33839ec2 is described below
commit 1de33839ec232c03047b7206aa44fd606344e763
Author: JackieTien97 <[email protected]>
AuthorDate: Tue Jun 27 01:23:50 2023 +0800
fix ci
---
.../resources/conf/iotdb-common.properties | 52 +++++++++++-----------
.../commons/auth/role/LocalFileRoleAccessor.java | 14 +++---
.../commons/auth/user/LocalFileUserAccessor.java | 4 +-
.../apache/iotdb/commons/cluster/NodeStatus.java | 2 +-
.../apache/iotdb/commons/cluster/RegionStatus.java | 2 +-
.../apache/iotdb/commons/conf/CommonConfig.java | 4 +-
.../apache/iotdb/commons/conf/IoTDBConstant.java | 4 +-
.../commons/enums/HandleSystemErrorStrategy.java | 4 +-
.../iotdb/commons/exception/MetadataException.java | 2 +-
.../commons/partition/DataPartitionQueryParam.java | 4 +-
.../commons/partition/DataPartitionTable.java | 4 +-
.../iotdb/commons/partition/QueryExecutor.java | 2 +-
.../iotdb/commons/partition/StorageExecutor.java | 2 +-
.../iotdb/commons/schema/filter/SchemaFilter.java | 2 +-
.../service/metric/PerformanceOverviewMetrics.java | 6 +--
.../iotdb/commons/service/metric/enums/Metric.java | 6 +--
.../iotdb/commons/udf/service/UDFClassLoader.java | 2 +-
.../commons/udf/service/UDFClassLoaderManager.java | 2 +-
.../apache/iotdb/commons/utils/StatusUtils.java | 2 +-
.../tsfile/common/constant/JsonFormatConstant.java | 4 +-
.../apache/iotdb/tsfile/compress/ICompressor.java | 2 +-
.../tsfile/encoding/decoder/BitmapDecoder.java | 2 +-
.../iotdb/tsfile/read/TsFileSequenceReader.java | 10 ++---
.../apache/iotdb/tsfile/read/common/BatchData.java | 2 +-
.../org/apache/iotdb/tsfile/read/common/Chunk.java | 2 +-
.../tsfile/read/controller/IMetadataQuerier.java | 2 +-
.../read/expression/util/ExpressionOptimizer.java | 4 +-
.../iotdb/tsfile/read/filter/TimeFilter.java | 2 +-
.../query/dataset/DataSetWithTimeGenerator.java | 2 +-
.../tsfile/read/query/dataset/QueryDataSet.java | 8 ++--
.../query/executor/ExecutorWithTimeGenerator.java | 2 +-
.../tsfile/read/query/executor/TsFileExecutor.java | 6 +--
.../tsfile/read/reader/page/AlignedPageReader.java | 4 +-
.../reader/series/AbstractFileSeriesReader.java | 2 +-
.../read/reader/series/FileSeriesReader.java | 4 +-
.../reader/series/FileSeriesReaderByTimestamp.java | 4 +-
.../apache/iotdb/tsfile/write/TsFileWriter.java | 10 ++---
.../tsfile/write/chunk/AlignedChunkWriterImpl.java | 8 ++--
.../iotdb/tsfile/write/chunk/ChunkWriterImpl.java | 4 +-
.../apache/iotdb/tsfile/write/schema/Schema.java | 6 +--
.../write/writer/RestorableTsFileIOWriter.java | 4 +-
.../iotdb/tsfile/read/ReadInPartitionTest.java | 10 ++---
.../apache/iotdb/tsfile/read/TsFileReaderTest.java | 24 +++++-----
.../org/apache/iotdb/tsfile/utils/RecordUtils.java | 2 +-
.../tsfile/write/MetadataIndexConstructorTest.java | 2 +-
.../iotdb/tsfile/write/TsFileIOWriterTest.java | 2 +-
.../tsfile/write/TsFileIntegrityCheckingTool.java | 2 +-
.../iotdb/tsfile/write/TsFileReadWriteTest.java | 6 +--
48 files changed, 131 insertions(+), 131 deletions(-)
diff --git
a/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-common.properties
b/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-common.properties
index 11f0a8daf50..ffd682c0e68 100644
---
a/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-common.properties
+++
b/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-common.properties
@@ -36,7 +36,7 @@ cluster_name=defaultCluster
# Datatype: string
#
config_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus
-# Default number of schemaengine replicas
+# Default number of schema replicas
# Can not be changed after the first start
# Datatype: int
# schema_replication_factor=1
@@ -100,7 +100,7 @@ cluster_name=defaultCluster
# Only take effect when set schema_region_group_extension_policy=AUTO.
# This parameter is the maximum number of SchemaRegions expected to be managed
by each DataNode.
-# Notice: Since each Database requires at least one SchemaRegionGroup to
manage its schemaengine,
+# Notice: Since each Database requires at least one SchemaRegionGroup to
manage its schema,
# this parameter doesn't limit the upper bound of cluster SchemaRegions when
there are too many Databases.
# Default is equal to the schema_replication_factor to ensure each DataNode
will have a SchemaRegionGroupLeader.
# Datatype: Double
@@ -240,7 +240,7 @@ cluster_name=defaultCluster
# Datatype: int
# io_task_queue_size_for_flushing=10
-# If true, we will estimate each read's possible memory footprint before
executing it and deny it if its estimated memory exceeds current free memory
+# If true, we will estimate each query's possible memory footprint before
executing it and deny it if its estimated memory exceeds current free memory
# Datatype: bool
# enable_query_memory_estimation=true
@@ -248,7 +248,7 @@ cluster_name=defaultCluster
### Schema Engine Configuration
####################
-# The schemaengine management mode of schemaengine storageengine. Currently
support Memory and PBTree.
+# The schema management mode of schema engine. Currently support Memory and
PBTree.
# This config of all DataNodes in one cluster must keep same.
# Datatype: string
# schema_engine_mode=Memory
@@ -295,24 +295,24 @@ cluster_name=defaultCluster
# Set the value to either 'device' or 'timeseries' based on your desired
control level.
# cluster_schema_limit_level=timeseries
-# This configuration parameter sets the maximum number of schemaengine allowed
in the cluster.
+# This configuration parameter sets the maximum number of schema allowed in
the cluster.
# The value should be a positive integer representing the desired threshold.
# When the threshold is reached, users will be prohibited from creating new
time series.
-# Set the value based on the desired maximum number of schemaengine for your
IoTDB cluster.
+# Set the value based on the desired maximum number of schema for your IoTDB
cluster.
# -1 means the system does not impose a limit on the maximum number of time
series.
# cluster_schema_limit_threshold=-1
####################
-### Configurations for creating schemaengine automatically
+### Configurations for creating schema automatically
####################
-# Whether creating schemaengine automatically is enabled
+# Whether creating schema automatically is enabled
# If true, then create database and timeseries automatically when not exists
in insertion
# Or else, user need to create database and timeseries before insertion.
# Datatype: boolean
# enable_auto_create_schema=true
-# Database level when creating schemaengine automatically is enabled
+# Database level when creating schema automatically is enabled
# e.g. root.sg0.d1.s2
# we will set root.sg0 as the database if database level is 1
# Datatype: int
@@ -341,27 +341,27 @@ cluster_name=defaultCluster
# Datatype: TSDataType
# nan_string_infer_type=DOUBLE
-# BOOLEAN encoding when creating schemaengine automatically is enabled
+# BOOLEAN encoding when creating schema automatically is enabled
# Datatype: TSEncoding
# default_boolean_encoding=RLE
-# INT32 encoding when creating schemaengine automatically is enabled
+# INT32 encoding when creating schema automatically is enabled
# Datatype: TSEncoding
# default_int32_encoding=RLE
-# INT64 encoding when creating schemaengine automatically is enabled
+# INT64 encoding when creating schema automatically is enabled
# Datatype: TSEncoding
# default_int64_encoding=RLE
-# FLOAT encoding when creating schemaengine automatically is enabled
+# FLOAT encoding when creating schema automatically is enabled
# Datatype: TSEncoding
# default_float_encoding=GORILLA
-# DOUBLE encoding when creating schemaengine automatically is enabled
+# DOUBLE encoding when creating schema automatically is enabled
# Datatype: TSEncoding
# default_double_encoding=GORILLA
-# TEXT encoding when creating schemaengine automatically is enabled
+# TEXT encoding when creating schema automatically is enabled
# Datatype: TSEncoding
# default_text_encoding=PLAIN
@@ -412,11 +412,11 @@ cluster_name=defaultCluster
# Datatype: int
# max_tsblock_line_number=1000
-# Time cost(ms) threshold for slow read
+# Time cost(ms) threshold for slow query
# Datatype: long
# slow_query_threshold=30000
-# The max executing time of read. unit: ms
+# The max executing time of query. unit: ms
# Datatype: int
# query_timeout_threshold=60000
@@ -424,7 +424,7 @@ cluster_name=defaultCluster
# Datatype: int
# max_allowed_concurrent_queries=1000
-# How many threads can concurrently execute read statement. When <= 0, use CPU
core number.
+# How many threads can concurrently execute query statement. When <= 0, use
CPU core number.
# Datatype: int
# query_thread_count=0
@@ -466,14 +466,14 @@ cluster_name=defaultCluster
# max_waiting_time_when_insert_blocked=10000
# Add a switch to drop out-of-order data
-# Out-of-order data will impact the aggregation read a lot. Users may not care
about discarding some out-of-order data.
+# Out-of-order data will impact the aggregation query a lot. Users may not
care about discarding some out-of-order data.
# Datatype: boolean
# enable_discard_out_of_order_data=false
# What will the system do when unrecoverable error occurs.
# Datatype: String
# Optional strategies are as follows:
-# 1. CHANGE_TO_READ_ONLY: set system status to read-only and the system only
accepts read operations.
+# 1. CHANGE_TO_READ_ONLY: set system status to read-only and the system only
accepts query operations.
# 2. SHUTDOWN: the system will be shutdown.
# handle_system_error=CHANGE_TO_READ_ONLY
@@ -807,7 +807,7 @@ cluster_name=defaultCluster
# And it is also used as the default compressor of time column in aligned
timeseries.
# compressor=SNAPPY
-# time interval in minute for calculating read frequency
+# time interval in minute for calculating query frequency
# Datatype: int
# frequency_interval_in_minute=1
@@ -848,13 +848,13 @@ cluster_name=defaultCluster
### UDF Configuration
####################
-# Used to estimate the memory usage of text fields in a UDF read.
+# Used to estimate the memory usage of text fields in a UDF query.
# It is recommended to set this value to be slightly larger than the average
length of all text
# records.
# Datatype: int
# udf_initial_byte_array_length_for_memory_control=48
-# How much memory may be used in ONE UDF read (in MB).
+# How much memory may be used in ONE UDF query (in MB).
# The upper limit is 20% of allocated memory for read.
# Datatype: float
# udf_memory_budget_in_mb=30.0
@@ -922,11 +922,11 @@ cluster_name=defaultCluster
### Continuous Query Configuration
####################
-# The number of threads in the scheduled thread pool that submit continuous
read tasks periodically
+# The number of threads in the scheduled thread pool that submit continuous
query tasks periodically
# Datatype: int
# continuous_query_submit_thread_count=2
-# The minimum value of the continuous read execution time interval
+# The minimum value of the continuous query execution time interval
# Datatype: long(duration)
# continuous_query_min_every_interval_in_ms=1000
@@ -1123,7 +1123,7 @@ cluster_name=defaultCluster
# Whether to display rest service interface information through swagger. eg:
http://ip:port/swagger.json
# enable_swagger=false
-# the default row limit to a REST read response when the rowSize parameter is
not given in request
+# the default row limit to a REST query response when the rowSize parameter is
not given in request
# rest_query_default_row_size_limit=10000
# the expiration time of the user login information cache (in seconds)
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/role/LocalFileRoleAccessor.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/role/LocalFileRoleAccessor.java
index 787f67f533f..98a5fcaf785 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/role/LocalFileRoleAccessor.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/role/LocalFileRoleAccessor.java
@@ -44,13 +44,13 @@ import java.util.Set;
import java.util.UUID;
/**
- * This class store each role in a separate sequential file. Role file
schemaengine : Int32 role
- * name size Utf-8 role name bytes Int32 seriesPath privilege number n Int32
seriesPath[1] size
- * Utf-8 seriesPath[1] bytes Int32 privilege num k1 Int32 privilege[1][1]
Int32 privilege[1][2] ...
- * Int32 privilege[1][k1] Int32 seriesPath[2] size Utf-8 seriesPath[2] bytes
Int32 privilege num yk2
- * Int32 privilege[2][1] Int32 privilege[2][2] ... Int32 privilege[2][k2] ...
Int32 seriesPath[n]
- * size Utf-8 seriesPath[n] bytes Int32 privilege num kn Int32 privilege[n][1]
Int32 privilege[n][2]
- * ... Int32 privilege[n][kn]
+ * This class store each role in a separate sequential file. Role file schema
: Int32 role name size
+ * Utf-8 role name bytes Int32 seriesPath privilege number n Int32
seriesPath[1] size Utf-8
+ * seriesPath[1] bytes Int32 privilege num k1 Int32 privilege[1][1] Int32
privilege[1][2] ... Int32
+ * privilege[1][k1] Int32 seriesPath[2] size Utf-8 seriesPath[2] bytes Int32
privilege num yk2 Int32
+ * privilege[2][1] Int32 privilege[2][2] ... Int32 privilege[2][k2] ... Int32
seriesPath[n] size
+ * Utf-8 seriesPath[n] bytes Int32 privilege num kn Int32 privilege[n][1]
Int32 privilege[n][2] ...
+ * Int32 privilege[n][kn]
*/
public class LocalFileRoleAccessor implements IRoleAccessor {
private static final Logger logger =
LoggerFactory.getLogger(LocalFileRoleAccessor.class);
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/user/LocalFileUserAccessor.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/user/LocalFileUserAccessor.java
index 03e7651f489..38345f5603b 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/user/LocalFileUserAccessor.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/user/LocalFileUserAccessor.java
@@ -47,8 +47,8 @@ import java.util.UUID;
/**
* This class loads a user's information from the corresponding file. The user
file is a sequential
- * file. User file schemaengine: Int32 username bytes size Utf-8 username
bytes Int32 Password bytes
- * size Utf-8 password bytes Int32 seriesPath privilege number n Int32
seriesPath[1] size Utf-8
+ * file. User file schema: Int32 username bytes size Utf-8 username bytes
Int32 Password bytes size
+ * Utf-8 password bytes Int32 seriesPath privilege number n Int32
seriesPath[1] size Utf-8
* seriesPath[1] bytes Int32 privilege num k1 Int32 privilege[1][1] Int32
privilege[1][2] ... Int32
* privilege[1][k1] Int32 seriesPath[2] size Utf-8 seriesPath[2] bytes Int32
privilege num k2 Int32
* privilege[2][1] Int32 privilege[2][2] ... Int32 privilege[2][k2] ... Int32
seriesPath[n] size
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/cluster/NodeStatus.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/cluster/NodeStatus.java
index 095d9592dab..32f083dd042 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/cluster/NodeStatus.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/cluster/NodeStatus.java
@@ -30,7 +30,7 @@ public enum NodeStatus {
/** Node is in removing */
Removing("Removing"),
- /** Only read statements are permitted */
+ /** Only query statements are permitted */
ReadOnly("ReadOnly");
public static final String DISK_FULL = "DiskFull";
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/cluster/RegionStatus.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/cluster/RegionStatus.java
index 34243df24d8..ade6136fb24 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/cluster/RegionStatus.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/cluster/RegionStatus.java
@@ -30,7 +30,7 @@ public enum RegionStatus {
/** Region is in removing */
Removing("Removing"),
- /** Only read statements are permitted */
+ /** Only query statements are permitted */
ReadOnly("ReadOnly");
private final String status;
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
index 6867f94cfce..f84931263ba 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
@@ -165,7 +165,7 @@ public class CommonConfig {
private long pipeMetaSyncerInitialSyncDelayMinutes = 3;
private long pipeMetaSyncerSyncIntervalMinutes = 3;
- /** whether to use persistent schemaengine mode. */
+ /** whether to use persistent schema mode. */
private String schemaEngineMode = "Memory";
/** Whether to enable Last cache. */
@@ -379,7 +379,7 @@ public class CommonConfig {
switch (newStatus) {
case ReadOnly:
- logger.warn("Change system status to ReadOnly! Only read statements
are permitted!");
+ logger.warn("Change system status to ReadOnly! Only query statements
are permitted!");
break;
case Removing:
logger.info(
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java
index 1f788497521..4763c6f08e6 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java
@@ -227,10 +227,10 @@ public class IoTDBConstant {
// system folder name
public static final String SYSTEM_FOLDER_NAME = "system";
- public static final String SCHEMA_FOLDER_NAME = "schemaengine";
+ public static final String SCHEMA_FOLDER_NAME = "schema";
public static final String LOAD_TSFILE_FOLDER_NAME = "load";
public static final String SYNC_FOLDER_NAME = "sync";
- public static final String QUERY_FOLDER_NAME = "read";
+ public static final String QUERY_FOLDER_NAME = "query";
public static final String EXT_FOLDER_NAME = "ext";
public static final String UDF_FOLDER_NAME = "udf";
public static final String TRIGGER_FOLDER_NAME = "trigger";
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/enums/HandleSystemErrorStrategy.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/enums/HandleSystemErrorStrategy.java
index 8bf0819effe..c176826c063 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/enums/HandleSystemErrorStrategy.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/enums/HandleSystemErrorStrategy.java
@@ -25,7 +25,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public enum HandleSystemErrorStrategy {
- /** set system status to read-only and the system only accepts read
operations */
+ /** set system status to read-only and the system only accepts query
operations */
CHANGE_TO_READ_ONLY,
/** the system will be shutdown */
SHUTDOWN;
@@ -35,7 +35,7 @@ public enum HandleSystemErrorStrategy {
public void handle() {
if (this == HandleSystemErrorStrategy.CHANGE_TO_READ_ONLY) {
logger.error(
- "Unrecoverable error occurs! Change system status to read-only
because handle_system_error is CHANGE_TO_READ_ONLY. Only read statements are
permitted!",
+ "Unrecoverable error occurs! Change system status to read-only
because handle_system_error is CHANGE_TO_READ_ONLY. Only query statements are
permitted!",
new RuntimeException("System mode is set to READ_ONLY"));
CommonDescriptor.getInstance().getConfig().setNodeStatus(NodeStatus.ReadOnly);
} else if (this == HandleSystemErrorStrategy.SHUTDOWN) {
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/exception/MetadataException.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/exception/MetadataException.java
index 8b767215efd..3a83a6310f5 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/exception/MetadataException.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/exception/MetadataException.java
@@ -21,7 +21,7 @@ package org.apache.iotdb.commons.exception;
import org.apache.iotdb.rpc.TSStatusCode;
/**
- * If read metadata constructs schemaengine but passes illegal parameters to
EncodingConvertor or
+ * If query metadata constructs schema but passes illegal parameters to
EncodingConvertor or
* DataTypeConverter, this exception will be thrown.
*/
public class MetadataException extends IoTDBException {
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartitionQueryParam.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartitionQueryParam.java
index b688bf679c8..4ed3cd8dc45 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartitionQueryParam.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartitionQueryParam.java
@@ -28,11 +28,11 @@ public class DataPartitionQueryParam {
private String devicePath;
private List<TTimePartitionSlot> timePartitionSlotList = new ArrayList<>();
- // it will be set to true in read when there exist filter like: time <= XXX
+ // it will be set to true in query when there exist filter like: time <= XXX
// (-oo, timePartitionSlotList.get(0))
private boolean needLeftAll = false;
- // it will be set to true read when there exist filter like: time >= XXX
+ // it will be set to true query when there exist filter like: time >= XXX
// (timePartitionSlotList.get(timePartitionSlotList.size() - 1), +oo)
private boolean needRightAll = false;
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartitionTable.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartitionTable.java
index ffcf11f3137..979b594a690 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartitionTable.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartitionTable.java
@@ -206,7 +206,7 @@ public class DataPartitionTable {
public List<TTimePartitionSlot> getTimeSlotList(
TSeriesPartitionSlot seriesSlotId, TConsensusGroupId regionId, long
startTime, long endTime) {
if (seriesSlotId.getSlotId() == -1) {
- // read timePartition of specific database or region
+ // query timePartition of specific database or region
List<TTimePartitionSlot> timePartitionSlots = new ArrayList<>();
dataPartitionMap.forEach(
(seriesPartitionSlot, seriesPartitionTable) ->
@@ -216,7 +216,7 @@ public class DataPartitionTable {
} else if (!dataPartitionMap.containsKey(seriesSlotId)) {
return new ArrayList<>();
} else {
- // read timePartition of specific seriesPartition
+ // query timePartition of specific seriesPartition
SeriesPartitionTable seriesPartitionTable =
dataPartitionMap.get(seriesSlotId);
return seriesPartitionTable.getTimeSlotList(regionId, startTime,
endTime);
}
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/QueryExecutor.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/QueryExecutor.java
index 2ecf8591d49..55439beffd8 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/QueryExecutor.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/QueryExecutor.java
@@ -23,7 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import java.util.Objects;
-/** QueryExecutor indicates this read can execute directly without data from
StorageEngine */
+/** QueryExecutor indicates this query can execute directly without data from
StorageEngine */
public class QueryExecutor implements ExecutorType {
TDataNodeLocation dataNodeLocation;
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/StorageExecutor.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/StorageExecutor.java
index 6f9302f6ffb..a99b4dea07e 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/StorageExecutor.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/StorageExecutor.java
@@ -26,7 +26,7 @@ import javax.annotation.Nonnull;
import java.util.Objects;
-/** StorageExecutor indicates execution of this read need data from
StorageEngine */
+/** StorageExecutor indicates execution of this query need data from
StorageEngine */
public class StorageExecutor implements ExecutorType {
private final TRegionReplicaSet regionReplicaSet;
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/filter/SchemaFilter.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/filter/SchemaFilter.java
index b24abe09fc3..21189924030 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/filter/SchemaFilter.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/filter/SchemaFilter.java
@@ -67,7 +67,7 @@ public abstract class SchemaFilter {
case AND:
return new AndFilter(byteBuffer);
default:
- throw new IllegalArgumentException("Unsupported schemaengine filter
type: " + type);
+ throw new IllegalArgumentException("Unsupported schema filter type: "
+ type);
}
}
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/PerformanceOverviewMetrics.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/PerformanceOverviewMetrics.java
index 4a94fd02256..1efbe626335 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/PerformanceOverviewMetrics.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/PerformanceOverviewMetrics.java
@@ -173,7 +173,7 @@ public class PerformanceOverviewMetrics implements
IMetricSet {
private Timer triggerTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
private Timer storageTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
- /** Record the time cost of schemaengine validate stage in local schedule. */
+ /** Record the time cost of schema validate stage in local schedule. */
public void recordScheduleSchemaValidateCost(long costTimeInNanos) {
schemaValidateTimer.updateNanos(costTimeInNanos);
}
@@ -193,7 +193,7 @@ public class PerformanceOverviewMetrics implements
IMetricSet {
// region storage
private static final String PERFORMANCE_OVERVIEW_STORAGE_DETAIL =
Metric.PERFORMANCE_OVERVIEW_STORAGE_DETAIL.toString();
- private static final String ENGINE = "storageengine";
+ private static final String ENGINE = "engine";
static {
metricInfoMap.put(
@@ -210,7 +210,7 @@ public class PerformanceOverviewMetrics implements
IMetricSet {
// endregion
- // region storageengine
+ // region engine
private static final String PERFORMANCE_OVERVIEW_ENGINE_DETAIL =
Metric.PERFORMANCE_OVERVIEW_ENGINE_DETAIL.toString();
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
index 2bc8cc6b6a4..ae8fef76ec5 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
@@ -45,7 +45,7 @@ public enum Metric {
IOT_CONSENSUS("iot_consensus"),
RATIS_CONSENSUS_WRITE("ratis_consensus_write"),
RATIS_CONSENSUS_READ("ratis_consensus_read"),
- // storage storageengine related
+ // storage engine related
POINTS("points"),
COST_TASK("cost_task"),
QUEUE("queue"),
@@ -62,14 +62,14 @@ public enum Metric {
DATA_WRITTEN("data_written"),
DATA_READ("data_read"),
COMPACTION_TASK_COUNT("compaction_task_count"),
- // schemaengine storageengine related
+ // schema engine related
MEM("mem"),
CACHE("cache"),
CACHE_HIT_RATE("cache_hit"),
QUANTITY("quantity"),
SCHEMA_REGION("schema_region"),
SCHEMA_ENGINE("schema_engine"),
- // read storageengine related
+ // query engine related
QUERY_PLAN_COST("query_plan_cost"),
OPERATOR_EXECUTION_COST("operator_execution_cost"),
OPERATOR_EXECUTION_COUNT("operator_execution_count"),
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/service/UDFClassLoader.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/service/UDFClassLoader.java
index 380dbed977e..df5dd977b5d 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/service/UDFClassLoader.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/service/UDFClassLoader.java
@@ -35,7 +35,7 @@ public class UDFClassLoader extends URLClassLoader {
private final String libRoot;
/**
- * If activeQueriesCount is equals to 0, it means that there is no read
using this classloader.
+ * If activeQueriesCount is equals to 0, it means that there is no query
using this classloader.
* This classloader can only be closed when activeQueriesCount is equals to
0.
*/
private final AtomicLong activeQueriesCount;
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/service/UDFClassLoaderManager.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/service/UDFClassLoaderManager.java
index ea420a49558..1ee3f4f78f9 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/service/UDFClassLoaderManager.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/service/UDFClassLoaderManager.java
@@ -37,7 +37,7 @@ public class UDFClassLoaderManager implements IService {
private final String libRoot;
- /** The keys in the map are the read IDs of the UDF queries being executed.
*/
+ /** The keys in the map are the query IDs of the UDF queries being executed.
*/
private final Map<String, UDFClassLoader> queryIdToUDFClassLoaderMap;
/**
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java
index 5ad4b7e63f5..6cb401b56fa 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java
@@ -91,7 +91,7 @@ public class StatusUtils {
status.setMessage("Database processor related error.");
break;
case STORAGE_ENGINE_ERROR:
- status.setMessage("Storage storageengine related error.");
+ status.setMessage("Storage engine related error.");
break;
case TSFILE_PROCESSOR_ERROR:
status.setMessage("TsFile processor related error.");
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/common/constant/JsonFormatConstant.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/common/constant/JsonFormatConstant.java
index 98c62d7bb36..f48615a81cb 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/common/constant/JsonFormatConstant.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/common/constant/JsonFormatConstant.java
@@ -19,9 +19,9 @@
package org.apache.iotdb.tsfile.common.constant;
-/** This class define several constant string variables used in tsfile
schemaengine's keys. */
+/** This class define several constant string variables used in tsfile
schema's keys. */
public class JsonFormatConstant {
- public static final String JSON_SCHEMA = "schemaengine";
+ public static final String JSON_SCHEMA = "schema";
public static final String DELTA_TYPE = "delta_type";
public static final String MEASUREMENT_UID = "measurement_id";
public static final String DATA_TYPE = "data_type";
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/compress/ICompressor.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/compress/ICompressor.java
index fc280edfb58..ed6d9bdcc59 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/compress/ICompressor.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/compress/ICompressor.java
@@ -45,7 +45,7 @@ import static
org.apache.iotdb.tsfile.file.metadata.enums.CompressionType.LZMA2;
import static
org.apache.iotdb.tsfile.file.metadata.enums.CompressionType.SNAPPY;
import static org.apache.iotdb.tsfile.file.metadata.enums.CompressionType.ZSTD;
-/** compress data according to type in schemaengine. */
+/** compress data according to type in schema. */
public interface ICompressor extends Serializable {
static ICompressor getCompressor(String name) {
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/BitmapDecoder.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/BitmapDecoder.java
index 825a38ba7da..b274b0e8d21 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/BitmapDecoder.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/BitmapDecoder.java
@@ -179,7 +179,7 @@ public class BitmapDecoder extends Decoder {
}
/**
- * In current version, boolean value is equal to Enums value in schemaengine.
+ * In current version, boolean value is equal to Enums value in schema.
*
* @param buffer : decoded data saved in InputStream
* @throws TsFileDecodingException cannot read next value
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
index 7c8a8c9eba1..56602d07b09 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
@@ -1335,7 +1335,7 @@ public class TsFileSequenceReader implements
AutoCloseable {
return
ChunkHeader.deserializeCompressionTypeAndEncoding(tsFileInput.wrapAsInputStream());
}
- /** Get measurement schemaengine by chunkMetadatas. */
+ /** Get measurement schema by chunkMetadatas. */
public MeasurementSchema getMeasurementSchema(List<IChunkMetadata>
chunkMetadataList)
throws IOException {
if (chunkMetadataList.isEmpty()) {
@@ -1491,7 +1491,7 @@ public class TsFileSequenceReader implements
AutoCloseable {
/**
* Self Check the file and return the position before where the data is safe.
*
- * @param newSchema the schemaengine on each time series in the file
+ * @param newSchema the schema on each time series in the file
* @param chunkGroupMetadataList ChunkGroupMetadata List
* @param fastFinish if true and the file is complete, then newSchema and
chunkGroupMetadataList
* parameter will be not modified.
@@ -1701,7 +1701,7 @@ public class TsFileSequenceReader implements
AutoCloseable {
// because we can not guarantee the correctness of the deviceId.
truncatedSize = this.position() - 1;
if (lastDeviceId != null) {
- // schemaengine of last chunk group
+ // schema of last chunk group
if (newSchema != null) {
for (IMeasurementSchema tsSchema : measurementSchemaList) {
newSchema.putIfAbsent(
@@ -1720,7 +1720,7 @@ public class TsFileSequenceReader implements
AutoCloseable {
case MetaMarker.OPERATION_INDEX_RANGE:
truncatedSize = this.position() - 1;
if (lastDeviceId != null) {
- // schemaengine of last chunk group
+ // schema of last chunk group
if (newSchema != null) {
for (IMeasurementSchema tsSchema : measurementSchemaList) {
newSchema.putIfAbsent(
@@ -1743,7 +1743,7 @@ public class TsFileSequenceReader implements
AutoCloseable {
// now we read the tail of the data section, so we are sure that the last
// ChunkGroupFooter is complete.
if (lastDeviceId != null) {
- // schemaengine of last chunk group
+ // schema of last chunk group
if (newSchema != null) {
for (IMeasurementSchema tsSchema : measurementSchemaList) {
newSchema.putIfAbsent(
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/BatchData.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/BatchData.java
index 5f3319f0973..4d57583d80a 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/BatchData.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/BatchData.java
@@ -46,7 +46,7 @@ import java.util.List;
* <p>This class records a time list and a value list, which could be replaced
by TVList in the
* future
*
- * <p>When you use BatchData in read process, it does not contain duplicated
timestamps. The batch
+ * <p>When you use BatchData in query process, it does not contain duplicated
timestamps. The batch
* data may be empty.
*
* <p>If you get a batch data, you can iterate the data as the following codes:
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/Chunk.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/Chunk.java
index f4882fde382..039257e3bdf 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/Chunk.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/Chunk.java
@@ -29,7 +29,7 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
-/** used in read. */
+/** used in query. */
public class Chunk {
private ChunkHeader chunkHeader;
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IMetadataQuerier.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IMetadataQuerier.java
index 526475960af..d3f1c9cf675 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IMetadataQuerier.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IMetadataQuerier.java
@@ -55,7 +55,7 @@ public interface IMetadataQuerier {
/**
* Convert the space partition constraint to the time partition constraint.
*
- * @param paths selected paths in a read expression
+ * @param paths selected paths in a query expression
* @param spacePartitionStartPos the start position of the space partition
* @param spacePartitionEndPos the end position of the space partition
* @return the converted time partition constraint
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/util/ExpressionOptimizer.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/util/ExpressionOptimizer.java
index bf217c489f1..accd871cc45 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/util/ExpressionOptimizer.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/util/ExpressionOptimizer.java
@@ -46,7 +46,7 @@ public class ExpressionOptimizer {
*
* @param expression IExpression to be transferred
* @param selectedSeries selected series
- * @return an executable read filter, whether a GlobalTimeExpression or All
leaf nodes are
+ * @return an executable query filter, whether a GlobalTimeExpression or All
leaf nodes are
* SingleSeriesExpression
*/
public IExpression optimize(IExpression expression, List<Path>
selectedSeries)
@@ -175,7 +175,7 @@ public class ExpressionOptimizer {
* QueryFilterOR( SingleSeriesExpression(path1, timeFilter),
SingleSeriesExpression(path2,
* timeFilter) ), SingleSeriesExpression(path3, timeFilter) )
*
- * @return a DNF read filter without GlobalTimeExpression
+ * @return a DNF query filter without GlobalTimeExpression
*/
private IExpression pushGlobalTimeFilterToAllSeries(
GlobalTimeExpression timeFilter, List<Path> selectedSeries)
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/filter/TimeFilter.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/filter/TimeFilter.java
index 47c005c6e69..04970f079bb 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/filter/TimeFilter.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/filter/TimeFilter.java
@@ -220,7 +220,7 @@ public class TimeFilter {
}
/**
- * returns a default time filter by whether it's an ascending read.
+ * returns a default time filter by whether it's an ascending query.
*
* <p>If the data is read in descending order, we use the largest timestamp
to set to the filter,
* so the filter should be TimeLtEq. If the data is read in ascending order,
we use the smallest
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/dataset/DataSetWithTimeGenerator.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/dataset/DataSetWithTimeGenerator.java
index 2b9824cd429..d9addd6af2f 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/dataset/DataSetWithTimeGenerator.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/dataset/DataSetWithTimeGenerator.java
@@ -30,7 +30,7 @@ import java.io.IOException;
import java.util.List;
/**
- * read processing: (1) generate time by series that has filter (2) get value
of series that does
+ * query processing: (1) generate time by series that has filter (2) get value
of series that does
* not have filter (3) construct RowRecord.
*/
public class DataSetWithTimeGenerator extends QueryDataSet {
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/dataset/QueryDataSet.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/dataset/QueryDataSet.java
index cf2725de0c9..e1d83652512 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/dataset/QueryDataSet.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/dataset/QueryDataSet.java
@@ -39,9 +39,9 @@ public abstract class QueryDataSet {
protected int fetchSize = 10000;
protected boolean ascending;
/*
- * whether current data group has data for read.
+ * whether current data group has data for query.
* If not null(must be in cluster mode),
- * we need to redirect the read to any data group which has some data to
speed up read.
+ * we need to redirect the query to any data group which has some data to
speed up query.
*/
protected EndPoint endPoint = null;
@@ -56,7 +56,7 @@ public abstract class QueryDataSet {
protected int columnNum;
- /** For redirect read. Need keep consistent with EndPoint in rpc.thrift. */
+ /** For redirect query. Need keep consistent with EndPoint in rpc.thrift. */
public static class EndPoint {
private String ip = null;
private int port = 0;
@@ -182,7 +182,7 @@ public abstract class QueryDataSet {
public abstract boolean hasNextWithoutConstraint() throws IOException;
- /** This method is used for batch read, return RowRecord. */
+ /** This method is used for batch query, return RowRecord. */
public RowRecord next() throws IOException {
if (rowLimit > 0) {
alreadyReturnedRowNum++;
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/ExecutorWithTimeGenerator.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/ExecutorWithTimeGenerator.java
index c44bb9d1151..d0c5b62b887 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/ExecutorWithTimeGenerator.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/ExecutorWithTimeGenerator.java
@@ -51,7 +51,7 @@ public class ExecutorWithTimeGenerator implements
QueryExecutor {
/**
* All leaf nodes of queryFilter in queryExpression are SeriesFilters, We
use a TimeGenerator to
- * control read processing. for more information, see
DataSetWithTimeGenerator
+ * control query processing. for more information, see
DataSetWithTimeGenerator
*
* @return DataSet with TimeGenerator
*/
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/TsFileExecutor.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/TsFileExecutor.java
index 2cd161a7497..08160ef6969 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/TsFileExecutor.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/TsFileExecutor.java
@@ -99,7 +99,7 @@ public class TsFileExecutor implements QueryExecutor {
/**
* Query with the space partition constraint.
*
- * @param queryExpression read expression
+ * @param queryExpression query expression
* @param spacePartitionStartPos the start position of the space partition
* @param spacePartitionEndPos the end position of the space partition
* @return QueryDataSet
@@ -128,7 +128,7 @@ public class TsFileExecutor implements QueryExecutor {
BinaryExpression.or(addTimeExpression,
resTimeRanges.get(i).getExpression());
}
- // combine the original read expression and the additional time filter
+ // combine the original query expression and the additional time filter
if (queryExpression.hasQueryFilter()) {
IExpression combinedExpression =
BinaryExpression.and(queryExpression.getExpression(),
addTimeExpression);
@@ -138,7 +138,7 @@ public class TsFileExecutor implements QueryExecutor {
}
// Having converted the space partition constraint to an additional time
filter, we can now
- // read as normal.
+ // query as normal.
return execute(queryExpression);
}
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/AlignedPageReader.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/AlignedPageReader.java
index c59399910b1..8064db9748f 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/AlignedPageReader.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/AlignedPageReader.java
@@ -122,10 +122,10 @@ public class AlignedPageReader implements IPageReader,
IAlignedPageReader {
// TODO accept valueStatisticsList to filter
return filter.satisfy(getStatistics());
} else {
- // For aligned series, When we only read some measurements under an
aligned device, if the
+ // For aligned series, When we only query some measurements under an
aligned device, if the
// values of these queried measurements at a timestamp are all null, the
timestamp will not be
// selected.
- // NOTE: if we change the read semantic in the future for aligned
series, we need to remove
+ // NOTE: if we change the query semantic in the future for aligned
series, we need to remove
// this check here.
long rowCount = getTimeStatistics().getCount();
for (Statistics statistics : getValueStatisticsList()) {
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/AbstractFileSeriesReader.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/AbstractFileSeriesReader.java
index 15907fc7294..24ff3fdb390 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/AbstractFileSeriesReader.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/AbstractFileSeriesReader.java
@@ -29,7 +29,7 @@ import org.apache.iotdb.tsfile.read.reader.IChunkReader;
import java.io.IOException;
import java.util.List;
-/** Series reader is used to read one series of one tsfile. */
+/** Series reader is used to query one series of one tsfile. */
public abstract class AbstractFileSeriesReader implements IBatchReader {
protected IChunkLoader chunkLoader;
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/FileSeriesReader.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/FileSeriesReader.java
index ae70ace33a2..222cc59a5c9 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/FileSeriesReader.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/FileSeriesReader.java
@@ -33,8 +33,8 @@ import java.util.ArrayList;
import java.util.List;
/**
- * Series reader is used to read one series of one TsFile, and this reader has
a filter operating on
- * the same series.
+ * Series reader is used to query one series of one TsFile, and this reader
has a filter operating
+ * on the same series.
*/
public class FileSeriesReader extends AbstractFileSeriesReader {
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/FileSeriesReaderByTimestamp.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/FileSeriesReaderByTimestamp.java
index deb80c2e5fa..c9e1ee88a12 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/FileSeriesReaderByTimestamp.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/FileSeriesReaderByTimestamp.java
@@ -35,8 +35,8 @@ import java.util.ArrayList;
import java.util.List;
/**
- * Series reader is used to read one series of one tsfile, using this reader
to read the value of a
- * series with given timestamps.
+ * Series reader is used to query one series of one tsfile, using this reader
to query the value of
+ * a series with given timestamps.
*/
public class FileSeriesReaderByTimestamp {
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/TsFileWriter.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/TsFileWriter.java
index 9be0b1303ce..2ab6780f184 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/TsFileWriter.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/TsFileWriter.java
@@ -59,7 +59,7 @@ public class TsFileWriter implements AutoCloseable {
protected static final TSFileConfig config =
TSFileDescriptor.getInstance().getConfig();
private static final Logger LOG =
LoggerFactory.getLogger(TsFileWriter.class);
- /** schemaengine of this TsFile. */
+ /** schema of this TsFile. */
protected final Schema schema;
/** IO writer of this TsFile. */
private final TsFileIOWriter fileWriter;
@@ -111,7 +111,7 @@ public class TsFileWriter implements AutoCloseable {
* init this TsFileWriter.
*
* @param file the File to be written by this TsFileWriter
- * @param schema the schemaengine of this TsFile
+ * @param schema the schema of this TsFile
*/
public TsFileWriter(File file, Schema schema) throws IOException {
this(new TsFileIOWriter(file), schema,
TSFileDescriptor.getInstance().getConfig());
@@ -121,7 +121,7 @@ public class TsFileWriter implements AutoCloseable {
* init this TsFileWriter.
*
* @param output the TsFileOutput of the file to be written by this
TsFileWriter
- * @param schema the schemaengine of this TsFile
+ * @param schema the schema of this TsFile
* @throws IOException
*/
public TsFileWriter(TsFileOutput output, Schema schema) throws IOException {
@@ -132,7 +132,7 @@ public class TsFileWriter implements AutoCloseable {
* init this TsFileWriter.
*
* @param file the File to be written by this TsFileWriter
- * @param schema the schemaengine of this TsFile
+ * @param schema the schema of this TsFile
* @param conf the configuration of this TsFile
*/
public TsFileWriter(File file, Schema schema, TSFileConfig conf) throws
IOException {
@@ -143,7 +143,7 @@ public class TsFileWriter implements AutoCloseable {
* init this TsFileWriter.
*
* @param fileWriter the io writer of this TsFile
- * @param schema the schemaengine of this TsFile
+ * @param schema the schema of this TsFile
* @param conf the configuration of this TsFile
*/
protected TsFileWriter(TsFileIOWriter fileWriter, Schema schema,
TSFileConfig conf)
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/AlignedChunkWriterImpl.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/AlignedChunkWriterImpl.java
index a391c5df641..fbf124eba5a 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/AlignedChunkWriterImpl.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/AlignedChunkWriterImpl.java
@@ -49,7 +49,7 @@ public class AlignedChunkWriterImpl implements IChunkWriter {
// Used for batch writing
private long remainingPointsNumber;
- /** @param schema schemaengine of this measurement */
+ /** @param schema schema of this measurement */
public AlignedChunkWriterImpl(VectorMeasurementSchema schema) {
timeChunkWriter =
new TimeChunkWriter(
@@ -82,8 +82,8 @@ public class AlignedChunkWriterImpl implements IChunkWriter {
* This is used to rewrite file. The encoding and compression of the time
column should be the
* same as the source file.
*
- * @param timeSchema time schemaengine
- * @param valueSchemaList value schemaengine list
+ * @param timeSchema time schema
+ * @param valueSchemaList value schema list
*/
public AlignedChunkWriterImpl(
IMeasurementSchema timeSchema, List<IMeasurementSchema> valueSchemaList)
{
@@ -114,7 +114,7 @@ public class AlignedChunkWriterImpl implements IChunkWriter
{
* configuration by default. The encoding of the time column is 'TS_2DIFF'
in the configuration by
* default.
*
- * @param schemaList value schemaengine list
+ * @param schemaList value schema list
*/
public AlignedChunkWriterImpl(List<IMeasurementSchema> schemaList) {
TSEncoding timeEncoding =
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/ChunkWriterImpl.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/ChunkWriterImpl.java
index d5e4b3d4b20..585620eefd7 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/ChunkWriterImpl.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/ChunkWriterImpl.java
@@ -92,7 +92,7 @@ public class ChunkWriterImpl implements IChunkWriter {
private Statistics<?> firstPageStatistics;
- /** @param schema schemaengine of this measurement */
+ /** @param schema schema of this measurement */
public ChunkWriterImpl(IMeasurementSchema schema) {
this.measurementSchema = schema;
this.compressor = ICompressor.getCompressor(schema.getCompressor());
@@ -112,7 +112,7 @@ public class ChunkWriterImpl implements IChunkWriter {
this.pageWriter.setTimeEncoder(measurementSchema.getTimeEncoder());
this.pageWriter.setValueEncoder(measurementSchema.getValueEncoder());
- // check if the measurement schemaengine uses SDT
+ // check if the measurement schema uses SDT
checkSdtEncoding();
}
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/schema/Schema.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/schema/Schema.java
index 5940124427e..d75fa409891 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/schema/Schema.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/schema/Schema.java
@@ -27,8 +27,8 @@ import java.util.LinkedHashMap;
import java.util.Map;
/**
- * The schemaengine of timeseries that exist in this file. The schemaTemplates
is a simplified
- * manner to batch create schemaengine of timeseries.
+ * The schema of timeseries that exist in this file. The schemaTemplates is a
simplified manner to
+ * batch create schema of timeseries.
*/
public class Schema implements Serializable {
@@ -98,7 +98,7 @@ public class Schema implements Serializable {
return schemaTemplates;
}
- /** check if this schemaengine contains a measurement named measurementId. */
+ /** check if this schema contains a measurement named measurementId. */
public boolean containsDevice(Path devicePath) {
return registeredTimeseries.containsKey(devicePath);
}
diff --git
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/RestorableTsFileIOWriter.java
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/RestorableTsFileIOWriter.java
index 31dfac50c70..391426cc34f 100644
---
a/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/RestorableTsFileIOWriter.java
+++
b/iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/RestorableTsFileIOWriter.java
@@ -51,7 +51,7 @@ import java.util.Map;
* <p>(2) Otherwise, the writer generates metadata for already flushed Chunks
and truncate crashed
* data. The hasCrashed()=true and canWrite()=true
*
- * <p>Notice!!! If you want to read this file through the generated metadata,
remember to call the
+ * <p>Notice!!! If you want to query this file through the generated metadata,
remember to call the
* makeMetadataVisible()
*/
public class RestorableTsFileIOWriter extends TsFileIOWriter {
@@ -171,7 +171,7 @@ public class RestorableTsFileIOWriter extends
TsFileIOWriter {
}
/**
- * For read.
+ * For query.
*
* <p>get chunks' metadata from memory.
*
diff --git
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/read/ReadInPartitionTest.java
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/read/ReadInPartitionTest.java
index 966c02c0016..788378015b6 100644
---
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/read/ReadInPartitionTest.java
+++
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/read/ReadInPartitionTest.java
@@ -48,7 +48,7 @@ import java.util.List;
/*
This test is designed for the TsFileExecutor's execute(queryExpression,
params) function.
- The test target here is the logic of converting the read partition constraint
to an additional time filter.
+ The test target here is the logic of converting the query partition
constraint to an additional time filter.
Note that the correctness of the constructed additional time filter, which is
guaranteed and tested in
IMetadataQuerierByFileImplTest and TimeRangeTest, is not the test focus here.
@@ -114,7 +114,7 @@ public class ReadInPartitionTest {
// test the transformed expression
Assert.assertNull(queryExpression.getExpression());
- // test the equivalence of the read result
+ // test the equivalence of the query result
Assert.assertFalse(queryDataSet.hasNext());
}
@@ -143,7 +143,7 @@ public class ReadInPartitionTest {
.toString();
Assert.assertEquals(expected, transformedExpression.toString());
- // test the equivalence of the read result:
+ // test the equivalence of the query result:
QueryDataSet queryDataSet_eq = roTsFile.query(queryExpression);
while (queryDataSet.hasNext() && queryDataSet_eq.hasNext()) {
RowRecord r = queryDataSet.next();
@@ -180,7 +180,7 @@ public class ReadInPartitionTest {
.toString();
Assert.assertEquals(expected, transformedExpression.toString());
- // test the equivalence of the read result:
+ // test the equivalence of the query result:
QueryDataSet queryDataSet_eq = roTsFile.query(queryExpression);
while (queryDataSet.hasNext() && queryDataSet_eq.hasNext()) {
RowRecord r = queryDataSet.next();
@@ -218,7 +218,7 @@ public class ReadInPartitionTest {
.toString();
Assert.assertEquals(expected, transformedExpression.toString());
- // test the equivalence of the read result:
+ // test the equivalence of the query result:
QueryDataSet queryDataSet_eq = roTsFile.query(queryExpression);
while (queryDataSet.hasNext() && queryDataSet_eq.hasNext()) {
RowRecord r = queryDataSet.next();
diff --git
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/read/TsFileReaderTest.java
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/read/TsFileReaderTest.java
index 0ea1cb08976..0a2cb22c158 100644
---
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/read/TsFileReaderTest.java
+++
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/read/TsFileReaderTest.java
@@ -86,7 +86,7 @@ public class TsFileReaderTest {
TSRecord t = new TSRecord(i, "t");
if (i % 100 == 0) {
// Add a large max_value to the page statistics,
- // and get a very large number of invalid pages when the read is
executed
+ // and get a very large number of invalid pages when the query is
executed
t.addTuple(new IntDataPoint("id", 9000001));
} else {
t.addTuple(new IntDataPoint("id", i));
@@ -247,7 +247,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(10, 100, 30);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s2", true));
@@ -265,7 +265,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(100000, 1024, 100);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s2", true));
@@ -287,7 +287,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(100000, 1024, 100);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s2", true));
@@ -307,7 +307,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(100000, 1024, 100);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s2", true));
@@ -330,7 +330,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(100000, 1024, 100);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s2", true));
@@ -357,7 +357,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(100000, 1024, 100);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s2", true));
@@ -385,7 +385,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(100000, 1024, 100);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s3", true));
@@ -413,7 +413,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(100000, 1024, 100);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s9", true));
@@ -442,7 +442,7 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(100000, 1024, 100);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileReader tsFileReader = new TsFileReader(new
TsFileSequenceReader(filePath)); ) {
- // timeseries path for read
+ // timeseries path for query
ArrayList<Path> paths = new ArrayList<>();
paths.add(new Path("d1", "s1", true));
paths.add(new Path("d1", "s9", true));
@@ -485,14 +485,14 @@ public class TsFileReaderTest {
TsFileGeneratorForTest.generateAlignedTsFile(10, 100, 30);
String filePath = TsFileGeneratorForTest.alignedOutputDataFile;
try (TsFileSequenceReader reader = new TsFileSequenceReader(filePath)) {
- // read for non-exist device
+ // query for non-exist device
try {
reader.getAlignedChunkMetadata("d3");
} catch (IOException e) {
Assert.assertEquals("Device {d3} is not in tsFileMetaData",
e.getMessage());
}
- // read for non-aligned device
+ // query for non-aligned device
try {
reader.getAlignedChunkMetadata("d2");
} catch (IOException e) {
diff --git
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/utils/RecordUtils.java
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/utils/RecordUtils.java
index 4ccdcc94702..461bcd6c312 100644
---
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/utils/RecordUtils.java
+++
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/utils/RecordUtils.java
@@ -44,7 +44,7 @@ public class RecordUtils {
* separated by ","
*
* @param str - input string
- * @param schema - constructed file schemaengine
+ * @param schema - constructed file schema
* @return TSRecord constructed from str
*/
public static TSRecord parseSimpleTupleRecord(String str, Schema schema) {
diff --git
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/MetadataIndexConstructorTest.java
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/MetadataIndexConstructorTest.java
index 9a89a98dc3f..f24eb7afe26 100644
---
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/MetadataIndexConstructorTest.java
+++
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/MetadataIndexConstructorTest.java
@@ -451,7 +451,7 @@ public class MetadataIndexConstructorTest {
String measurementName =
measurementPrefix + generateIndexString(measurementIndex,
measurementNum);
logger.info("generating vector measurement {}...",
measurementName);
- // add measurements into file schemaengine (all with INT64 data
type)
+ // add measurements into file schema (all with INT64 data type)
MeasurementSchema schema1 =
new MeasurementSchema(measurementName, TSDataType.INT64,
TSEncoding.RLE);
schemas.add(schema1);
diff --git
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileIOWriterTest.java
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileIOWriterTest.java
index 821c08cd426..ba955912f14 100644
---
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileIOWriterTest.java
+++
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileIOWriterTest.java
@@ -65,7 +65,7 @@ public class TsFileIOWriterTest {
public void before() throws IOException {
TsFileIOWriter writer = new TsFileIOWriter(new File(FILE_PATH));
- // file schemaengine
+ // file schema
MeasurementSchema measurementSchema =
TestHelper.createSimpleMeasurementSchema(SENSOR_1);
VectorMeasurementSchema vectorMeasurementSchema =
new VectorMeasurementSchema(
diff --git
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileIntegrityCheckingTool.java
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileIntegrityCheckingTool.java
index e111e9768b6..be55f4a0449 100644
---
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileIntegrityCheckingTool.java
+++
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileIntegrityCheckingTool.java
@@ -148,7 +148,7 @@ public class TsFileIntegrityCheckingTool {
}
/**
- * This method checks the integrity of the file by mimicking the process of
the read, which reads
+ * This method checks the integrity of the file by mimicking the process of
the query, which reads
* the metadata index tree first, and get the timeseries metadata list and
chunk metadata list.
* After that, this method acquires single chunk according to chunk
metadata, then it deserializes
* the chunk, and verifies the correctness of the data.
diff --git
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileReadWriteTest.java
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileReadWriteTest.java
index 339546d2ed8..04a6c67e35c 100644
---
a/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileReadWriteTest.java
+++
b/iotdb-core/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileReadWriteTest.java
@@ -157,11 +157,11 @@ public class TsFileReadWriteTest {
// If no dataPoint in "device_1.sensor_2", it will throws a nomeasurement
// exception,
- // cause no schemaengine in tsfilemetadata anymore.
+ // cause no schema in tsfilemetadata anymore.
@Test
public void readEmptyMeasurementTest() throws IOException,
WriteProcessException {
try (TsFileWriter tsFileWriter = new TsFileWriter(f)) {
- // add measurements into file schemaengine
+ // add measurements into file schema
tsFileWriter.registerTimeseries(
new Path("device_1"),
new MeasurementSchema("sensor_1", TSDataType.FLOAT, TSEncoding.RLE));
@@ -206,7 +206,7 @@ public class TsFileReadWriteTest {
TSDataType dataType, DataPointProxy proxy, TSEncoding encodingType)
throws IOException, WriteProcessException {
int floatCount = 1024 * 1024 * 13 + 1023;
- // add measurements into file schemaengine
+ // add measurements into file schema
try (TsFileWriter tsFileWriter = new TsFileWriter(f)) {
tsFileWriter.registerTimeseries(
new Path("device_1"), new MeasurementSchema("sensor_1", dataType,
encodingType));