This is an automated email from the ASF dual-hosted git repository.
qiaojialin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/master by this push:
new 399340e6fd [IOTDB-3845] Add annotations in confignode.thrift (#6899)
399340e6fd is described below
commit 399340e6fd72d928e42064f8ebb52141ec5220ed
Author: YongzaoDan <[email protected]>
AuthorDate: Sat Aug 6 10:29:05 2022 +0800
[IOTDB-3845] Add annotations in confignode.thrift (#6899)
---
.../confignode/manager/ClusterSchemaManager.java | 37 +---
.../iotdb/confignode/manager/ConfigManager.java | 1 -
.../iotdb/confignode/manager/PartitionManager.java | 10 +-
.../iotdb/confignode/manager/ProcedureManager.java | 21 +--
.../iotdb/confignode/persistence/NodeInfo.java | 14 +-
.../persistence/executor/ConfigPlanExecutor.java | 2 +-
.../procedure/env/DataNodeRemoveHandler.java | 11 +-
.../procedure/impl/RegionMigrateProcedure.java | 6 +-
.../thrift/ConfigNodeRPCServiceProcessor.java | 11 +-
.../src/main/thrift/confignode.thrift | 204 +++++++++++++++++----
10 files changed, 206 insertions(+), 111 deletions(-)
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java
b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java
index 608b9b735b..638f6b5332 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java
@@ -185,9 +185,9 @@ public class ClusterSchemaManager {
storageGroups.add(storageGroup);
dnlToSgMap.put(dataNodeLocation.getDataNodeId(), storageGroups);
} else {
- List<String> storagegroups =
dnlToSgMap.get(dataNodeLocation.getDataNodeId());
- storagegroups.add(storageGroup);
- dnlToSgMap.put(dataNodeLocation.getDataNodeId(), storagegroups);
+ List<String> storageGroups =
dnlToSgMap.get(dataNodeLocation.getDataNodeId());
+ storageGroups.add(storageGroup);
+ dnlToSgMap.put(dataNodeLocation.getDataNodeId(), storageGroups);
}
}
}
@@ -354,25 +354,15 @@ public class ClusterSchemaManager {
resp.setStatus(templateResp.getStatus());
if (resp.getStatus().getCode() ==
TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
if (templateResp.getTemplateList() != null) {
- List<ByteBuffer> list = new ArrayList<ByteBuffer>();
- templateResp
- .getTemplateList()
- .forEach(
- template -> {
- list.add(template.serialize());
- });
+ List<ByteBuffer> list = new ArrayList<>();
+ templateResp.getTemplateList().forEach(template ->
list.add(template.serialize()));
resp.setTemplateList(list);
}
}
return resp;
}
- /**
- * show nodes in schema template
- *
- * @param req
- * @return
- */
+ /** show nodes in schema template */
public TGetTemplateResp getTemplate(String req) {
GetSchemaTemplatePlan getSchemaTemplatePlan = new
GetSchemaTemplatePlan(req);
TemplateInfoResp templateResp =
@@ -388,13 +378,7 @@ public class ClusterSchemaManager {
return resp;
}
- /**
- * mount template
- *
- * @param templateName
- * @param path
- * @return
- */
+ /** mount template */
public synchronized TSStatus setSchemaTemplate(String templateName, String
path) {
// check whether the template can be set on given path
CheckTemplateSettablePlan checkTemplateSettablePlan =
@@ -482,12 +466,7 @@ public class ClusterSchemaManager {
return failedRollbackStatusList;
}
- /**
- * show path set template xx
- *
- * @param templateName
- * @return
- */
+ /** show path set template xx */
public TGetPathsSetTemplatesResp getPathsSetTemplate(String templateName) {
GetPathsSetTemplatePlan getPathsSetTemplatePlan = new
GetPathsSetTemplatePlan(templateName);
PathInfoResp pathInfoResp =
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
index 28747e1a82..9cefbb6163 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
@@ -200,7 +200,6 @@ public class ConfigManager implements IManager {
@Override
public DataSet removeDataNode(RemoveDataNodePlan removeDataNodePlan) {
- // TODO replace with Porcedure later.
TSStatus status = confirmLeader();
if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
return nodeManager.removeDataNode(removeDataNodePlan);
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/PartitionManager.java
b/confignode/src/main/java/org/apache/iotdb/confignode/manager/PartitionManager.java
index 0e4adebb84..85702318fc 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/PartitionManager.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/manager/PartitionManager.java
@@ -131,9 +131,8 @@ public class PartitionManager {
*
* @param req SchemaPartitionPlan with partitionSlotsMap
* @return SchemaPartitionResp with DataPartition and TSStatus.
SUCCESS_STATUS if all process
- * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create
new Regions. TIME_OUT
- * if waiting other threads to create Regions for too long.
STORAGE_GROUP_NOT_EXIST if some
- * StorageGroup doesn't exist.
+ * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create
new Regions.
+ * STORAGE_GROUP_NOT_EXIST if some StorageGroups don't exist.
*/
public DataSet getOrCreateSchemaPartition(GetOrCreateSchemaPartitionPlan
req) {
// After all the SchemaPartitions are allocated,
@@ -189,9 +188,8 @@ public class PartitionManager {
* @param req DataPartitionPlan with Map<StorageGroupName,
Map<SeriesPartitionSlot,
* List<TimePartitionSlot>>>
* @return DataPartitionResp with DataPartition and TSStatus. SUCCESS_STATUS
if all process
- * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create
new Regions. TIME_OUT
- * if waiting other threads to create Regions for too long.
STORAGE_GROUP_NOT_EXIST if some
- * StorageGroup doesn't exist.
+ * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create
new Regions.
+ * STORAGE_GROUP_NOT_EXIST if some StorageGroups don't exist.
*/
public DataSet getOrCreateDataPartition(GetOrCreateDataPartitionPlan req) {
// After all the DataPartitions are allocated,
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
index 4d4a754929..addbeb65bc 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
@@ -117,11 +117,7 @@ public class ProcedureManager {
}
}
- /**
- * generate a procedure, and execute by one by one
- *
- * @param req new config node
- */
+ /** Generate an AddConfigNodeProcedure, and serially execute all the
AddConfigNodeProcedure */
public void addConfigNode(TConfigNodeRegisterReq req) {
AddConfigNodeProcedure addConfigNodeProcedure =
new AddConfigNodeProcedure(req.getConfigNodeLocation());
@@ -129,9 +125,7 @@ public class ProcedureManager {
}
/**
- * generate a procedure, and execute remove confignode one by one
- *
- * @param removeConfigNodePlan remove config node plan
+ * Generate a RemoveConfigNodeProcedure, and serially execute all the
RemoveConfigNodeProcedure
*/
public void removeConfigNode(RemoveConfigNodePlan removeConfigNodePlan) {
RemoveConfigNodeProcedure removeConfigNodeProcedure =
@@ -140,12 +134,7 @@ public class ProcedureManager {
LOGGER.info("Submit to remove ConfigNode, {}", removeConfigNodePlan);
}
- /**
- * generate a procedure, and execute remove datanode one by one
- *
- * @param removeDataNodePlan
- * @return
- */
+ /** Generate RemoveDataNodeProcedures, and serially execute all the
RemoveDataNodeProcedure */
public boolean removeDataNode(RemoveDataNodePlan removeDataNodePlan) {
removeDataNodePlan
.getDataNodeLocations()
@@ -241,7 +230,9 @@ public class ProcedureManager {
public void reportRegionMigrateResult(TRegionMigrateResultReportReq req) {
LOGGER.info("receive DataNode region:{} migrate result:{}",
req.getRegionId(), req);
- this.executor.getProcedures().values().stream()
+ this.executor
+ .getProcedures()
+ .values()
.forEach(
procedure -> {
if (procedure instanceof RegionMigrateProcedure) {
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/NodeInfo.java
b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/NodeInfo.java
index 4858afdfaa..108ee8b6b2 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/NodeInfo.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/NodeInfo.java
@@ -216,18 +216,18 @@ public class NodeInfo implements SnapshotProcessor {
}
/**
- * Get DataNode info
+ * Get DataNodeConfiguration
*
- * @param getDataNodeInfoPlan QueryDataNodeInfoPlan
- * @return The specific DataNode's info or all DataNode info if dataNodeId in
- * QueryDataNodeInfoPlan is -1
+ * @param getDataNodeConfigurationPlan GetDataNodeConfigurationPlan
+ * @return The specific DataNode's configuration or all DataNodes'
configuration if dataNodeId in
+ * GetDataNodeConfigurationPlan is -1
*/
- public DataNodeConfigurationResp getDataNodeInfo(
- GetDataNodeConfigurationPlan getDataNodeInfoPlan) {
+ public DataNodeConfigurationResp getDataNodeConfiguration(
+ GetDataNodeConfigurationPlan getDataNodeConfigurationPlan) {
DataNodeConfigurationResp result = new DataNodeConfigurationResp();
result.setStatus(new
TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()));
- int dataNodeId = getDataNodeInfoPlan.getDataNodeId();
+ int dataNodeId = getDataNodeConfigurationPlan.getDataNodeId();
dataNodeInfoReadWriteLock.readLock().lock();
try {
if (dataNodeId == -1) {
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
index 62a44e1d21..2b8a7c58f9 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
@@ -119,7 +119,7 @@ public class ConfigPlanExecutor {
throws UnknownPhysicalPlanTypeException, AuthException {
switch (req.getType()) {
case GetDataNodeConfiguration:
- return nodeInfo.getDataNodeInfo((GetDataNodeConfigurationPlan) req);
+ return
nodeInfo.getDataNodeConfiguration((GetDataNodeConfigurationPlan) req);
case CountStorageGroup:
return
clusterSchemaInfo.countMatchedStorageGroups((CountStorageGroupPlan) req);
case GetStorageGroup:
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/DataNodeRemoveHandler.java
b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/DataNodeRemoveHandler.java
index 67cee64c4f..ced771f189 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/DataNodeRemoveHandler.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/DataNodeRemoveHandler.java
@@ -51,7 +51,7 @@ import java.util.stream.Collectors;
public class DataNodeRemoveHandler {
private static final Logger LOGGER =
LoggerFactory.getLogger(DataNodeRemoveHandler.class);
- private ConfigManager configManager;
+ private final ConfigManager configManager;
/** region migrate lock */
private final LockQueue regionMigrateLock = new LockQueue();
@@ -327,10 +327,11 @@ public class DataNodeRemoveHandler {
}
/**
- * check if has removed Data Node but not exist in cluster
+ * Check whether all DataNodes to be deleted exist in the cluster
*
* @param removeDataNodePlan RemoveDataNodeReq
- * @return SUCCEED_STATUS if not has
+ * @return SUCCEED_STATUS if all DataNodes to be deleted exist in the
cluster, DATANODE_NOT_EXIST
+ * otherwise
*/
private TSStatus checkDataNodeExist(RemoveDataNodePlan removeDataNodePlan) {
TSStatus status = new
TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
@@ -350,10 +351,10 @@ public class DataNodeRemoveHandler {
}
/**
- * check if has enought replication in cluster
+ * Check whether the cluster has enough DataNodes to maintain RegionReplicas
*
* @param removeDataNodePlan RemoveDataNodeReq
- * @return SUCCEED_STATUS if not has
+ * @return SUCCEED_STATUS if the number of DataNodes is enough,
LACK_REPLICATION otherwise
*/
private TSStatus checkRegionReplication(RemoveDataNodePlan
removeDataNodePlan) {
TSStatus status = new
TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/RegionMigrateProcedure.java
b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/RegionMigrateProcedure.java
index dac3c1328a..b16967dc75 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/RegionMigrateProcedure.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/RegionMigrateProcedure.java
@@ -219,11 +219,7 @@ public class RegionMigrateProcedure
return status;
}
- /**
- * DN report region migrate result to CN, and continue
- *
- * @param req
- */
+ /** DataNode report region migrate result to ConfigNode, and continue */
public void notifyTheRegionMigrateFinished(TRegionMigrateResultReportReq
req) {
// TODO the req is used in roll back
synchronized (regionMigrateLock) {
diff --git
a/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java
b/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java
index 4b13c383e7..35266ce4b2 100644
---
a/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java
+++
b/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java
@@ -156,7 +156,6 @@ public class ConfigNodeRPCServiceProcessor implements
IConfigNodeRPCService.Ifac
@Override
public TDataNodeRemoveResp removeDataNode(TDataNodeRemoveReq req) throws
TException {
- // TODO without reqId and respId, how to trace a request exec state?
LOGGER.info("ConfigNode RPC Service start to remove DataNode, req: {}",
req);
RemoveDataNodePlan removeDataNodePlan = new
RemoveDataNodePlan(req.getDataNodeLocations());
DataNodeToStatusResp removeResp =
@@ -472,18 +471,14 @@ public class ConfigNodeRPCServiceProcessor implements
IConfigNodeRPCService.Ifac
configManager.getConsensusManager().getConsensusImpl().removeConsensusGroup(groupId);
if (!resp.isSuccess()) {
return new
TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode())
- .setMessage("remove ConsensusGroup failed because remove
ConsensusGroup failed.");
+ .setMessage(
+ "remove ConsensusGroup failed because internal failure. See
other logs for more details");
}
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())
.setMessage("remove ConsensusGroup success.");
}
- /**
- * stop config node
- *
- * @param configNodeLocation
- * @return
- */
+ /** stop config node */
@Override
public TSStatus stopConfigNode(TConfigNodeLocation configNodeLocation) {
new Thread(
diff --git a/thrift-confignode/src/main/thrift/confignode.thrift
b/thrift-confignode/src/main/thrift/confignode.thrift
index 7c19b090cf..9967add3bc 100644
--- a/thrift-confignode/src/main/thrift/confignode.thrift
+++ b/thrift-confignode/src/main/thrift/confignode.thrift
@@ -29,14 +29,12 @@ struct TDataNodeRegisterReq {
2: optional map<string, TStorageGroupSchema> statusMap
}
-struct TDataNodeRemoveReq {
- 1: required list<common.TDataNodeLocation> dataNodeLocations
-}
-
-struct TRegionMigrateResultReportReq {
- 1: required common.TConsensusGroupId regionId
- 2: required common.TSStatus migrateResult
- 3: optional map<common.TDataNodeLocation, common.TRegionMigrateFailedType>
failedNodeAndReason
+struct TDataNodeRegisterResp {
+ 1: required common.TSStatus status
+ 2: required list<common.TConfigNodeLocation> configNodeList
+ 3: optional i32 dataNodeId
+ 4: optional TGlobalConfig globalConfig
+ 5: optional binary templateInfo
}
struct TGlobalConfig {
@@ -48,21 +46,24 @@ struct TGlobalConfig {
6: required string readConsistencyLevel
}
-struct TDataNodeRegisterResp {
- 1: required common.TSStatus status
- 2: required list<common.TConfigNodeLocation> configNodeList
- 3: optional i32 dataNodeId
- 4: optional TGlobalConfig globalConfig
- 5: optional binary templateInfo
+struct TDataNodeRemoveReq {
+ 1: required list<common.TDataNodeLocation> dataNodeLocations
}
struct TDataNodeRemoveResp {
1: required common.TSStatus status
2: optional map<common.TDataNodeLocation, common.TSStatus> nodeToStatus
}
+
+struct TRegionMigrateResultReportReq {
+ 1: required common.TConsensusGroupId regionId
+ 2: required common.TSStatus migrateResult
+ 3: optional map<common.TDataNodeLocation, common.TRegionMigrateFailedType>
failedNodeAndReason
+}
+
struct TDataNodeConfigurationResp {
1: required common.TSStatus status
- // map<DataNodeId, DataNodeLocation>
+ // map<DataNodeId, DataNodeConfiguration>
2: optional map<i32, common.TDataNodeConfiguration> dataNodeConfigurationMap
}
@@ -134,7 +135,6 @@ struct TSchemaPartitionTableResp {
}
// Node Management
-
struct TSchemaNodeManagementReq {
1: required binary pathPatternTree
2: optional i32 level
@@ -214,6 +214,8 @@ struct TCheckUserPrivilegesReq {
// ConfigNode
struct TConfigNodeRegisterReq {
1: required common.TConfigNodeLocation configNodeLocation
+ // The Non-Seed-ConfigNode must ensure that the following
+ // fields are consistent with the Seed-ConfigNode
2: required string dataRegionConsensusProtocolClass
3: required string schemaRegionConsensusProtocolClass
4: required i32 seriesPartitionSlotNum
@@ -346,65 +348,155 @@ struct TClearCacheReq {
service IConfigNodeRPCService {
- /* DataNode */
+ // ======================================================
+ // DataNode
+ // ======================================================
+ /**
+ * Register a new DataNode into the cluster
+ *
+ * @return SUCCESS_STATUS if the new DataNode registered successfully
+ * DATANODE_ALREADY_REGISTERED if the DataNode already registered
+ */
TDataNodeRegisterResp registerDataNode(TDataNodeRegisterReq req)
+ /**
+ * Generate a set of DataNodeRemoveProcedure to remove some specific
DataNodes from the cluster
+ *
+ * @return SUCCESS_STATUS if the DataNodeRemoveProcedure submitted
successfully
+ * LACK_REPLICATION if the number of DataNodes will be too small to
maintain
+ * RegionReplicas after remove these DataNodes
+ * DATANODE_NOT_EXIST if one of the DataNodes in the
TDataNodeRemoveReq doesn't exist in the cluster
+ * NODE_DELETE_FAILED_ERROR if failed to submit the
DataNodeRemoveProcedure
+ */
TDataNodeRemoveResp removeDataNode(TDataNodeRemoveReq req)
+ /**
+ * Get one or more DataNodes' configuration
+ *
+ * @param dataNodeId, the specific DataNode's index
+ * @return The specific DataNode's configuration if the DataNode exists,
+ * or all DataNodes' configuration if dataNodeId is -1
+ */
TDataNodeConfigurationResp getDataNodeConfiguration(i32 dataNodeId)
+ /** Report region migration complete */
common.TSStatus reportRegionMigrateResult(TRegionMigrateResultReportReq req)
- /* StorageGroup */
-
+ // ======================================================
+ // StorageGroup
+ // ======================================================
+
+ /**
+ * Set a new StorageGroup, all fields in TStorageGroupSchema can be
customized
+ * while the undefined fields will automatically use default values
+ *
+ * @return SUCCESS_STATUS if the new StorageGroup set successfully
+ * PATH_ILLEGAL if the new StorageGroup's name is illegal
+ * STORAGE_GROUP_ALREADY_EXISTS if the StorageGroup already exists
common.TSStatus setStorageGroup(TSetStorageGroupReq req)
+ /**
+ * Generate a DeleteStorageGroupProcedure to delete a specific StorageGroup
+ *
+ * @return SUCCESS_STATUS if the DeleteStorageGroupProcedure submitted
successfully
+ * TIMESERIES_NOT_EXIST if the specific StorageGroup doesn't exist
+ * EXECUTE_STATEMENT_ERROR if failed to submit the
DeleteStorageGroupProcedure
+ */
common.TSStatus deleteStorageGroup(TDeleteStorageGroupReq req)
+ /**
+ * Generate a set of DeleteStorageGroupProcedure to delete some specific
StorageGroups
+ *
+ * @return SUCCESS_STATUS if the DeleteStorageGroupProcedure submitted
successfully
+ * TIMESERIES_NOT_EXIST if the specific StorageGroup doesn't exist
+ * EXECUTE_STATEMENT_ERROR if failed to submit the
DeleteStorageGroupProcedure
+ */
common.TSStatus deleteStorageGroups(TDeleteStorageGroupsReq req)
+ /** Update the specific StorageGroup's TTL */
common.TSStatus setTTL(common.TSetTTLReq req)
+ /** Update the specific StorageGroup's SchemaReplicationFactor */
common.TSStatus setSchemaReplicationFactor(TSetSchemaReplicationFactorReq
req)
+ /** Update the specific StorageGroup's DataReplicationFactor */
common.TSStatus setDataReplicationFactor(TSetDataReplicationFactorReq req)
+ /** Update the specific StorageGroup's PartitionInterval */
common.TSStatus setTimePartitionInterval(TSetTimePartitionIntervalReq req)
+ /** Count the matched StorageGroups */
TCountStorageGroupResp countMatchedStorageGroups(list<string>
storageGroupPathPattern)
+ /** Get the matched StorageGroups' TStorageGroupSchema */
TStorageGroupSchemaResp getMatchedStorageGroupSchemas(list<string>
storageGroupPathPattern)
- /* Schema */
+ // ======================================================
+ // SchemaPartition
+ // ======================================================
// TODO: Replace this by getSchemaPartitionTable
TSchemaPartitionResp getSchemaPartition(TSchemaPartitionReq req)
+ /**
+ * Get SchemaPartitionTable by specific PathPatternTree,
+ * the returned SchemaPartitionTable will not contain the unallocated
SeriesPartitionSlots
+ * See https://apache-iotdb.feishu.cn/docs/doccnqe3PLPEKwsCX1xadXQ2JOg for
detailed matching rules
+ */
TSchemaPartitionTableResp getSchemaPartitionTable(TSchemaPartitionReq req)
// TODO: Replace this by getOrCreateSchemaPartitionTable
TSchemaPartitionResp getOrCreateSchemaPartition(TSchemaPartitionReq req)
+ /**
+ * Get or create SchemaPartitionTable by specific PathPatternTree,
+ * the returned SchemaPartitionTable always contains all the
SeriesPartitionSlots
+ * since the unallocated SeriesPartitionSlots will be allocated along the way
+ *
+ * @return SUCCESS_STATUS if the SchemaPartitionTable got or created
successfully
+ * NOT_ENOUGH_DATA_NODE if the number of cluster DataNodes is not
enough for creating new SchemaRegions
+ * STORAGE_GROUP_NOT_EXIST if some StorageGroups don't exist
+ */
TSchemaPartitionTableResp
getOrCreateSchemaPartitionTable(TSchemaPartitionReq req)
- /* Node Management */
+ // ======================================================
+ // Node Management TODO: @MarcosZyk add interface annotation
+ // ======================================================
TSchemaNodeManagementResp
getSchemaNodeManagementPartition(TSchemaNodeManagementReq req)
- /* Data */
+ // ======================================================
+ // DataPartition
+ // ======================================================
// TODO: Replace this by getDataPartitionTable
TDataPartitionResp getDataPartition(TDataPartitionReq req)
+ /**
+ * Get DataPartitionTable by specific PartitionSlotsMap,
+ * the returned DataPartitionTable will not contain the unallocated
SeriesPartitionSlots and TimePartitionSlots
+ */
TDataPartitionTableResp getDataPartitionTable(TDataPartitionReq req)
// TODO: Replace this by getOrCreateDataPartitionTable
TDataPartitionResp getOrCreateDataPartition(TDataPartitionReq req)
+ /**
+ * Get or create DataPartitionTable by specific PartitionSlotsMap,
+ * the returned SchemaPartitionTable always contains all the
SeriesPartitionSlots and TimePartitionSlots
+ * since the unallocated SeriesPartitionSlots and TimePartitionSlots will be
allocated along the way
+ *
+ * @return SUCCESS_STATUS if the DataPartitionTable got or created
successfully
+ * NOT_ENOUGH_DATA_NODE if the number of cluster DataNodes is not
enough for creating new DataRegions
+ * STORAGE_GROUP_NOT_EXIST if some StorageGroups don't exist
+ */
TDataPartitionTableResp getOrCreateDataPartitionTable(TDataPartitionReq req)
- /* Authorize */
+ // ======================================================
+ // Authorize TODO: @RYH61 add interface annotation
+ // ======================================================
common.TSStatus operatePermission(TAuthorizerReq req)
@@ -414,27 +506,61 @@ service IConfigNodeRPCService {
TPermissionInfoResp checkUserPrivileges(TCheckUserPrivilegesReq req)
- /* ConfigNode */
-
+ // ======================================================
+ // ConfigNode
+ // ======================================================
+
+ /**
+ * The Non-Seed-ConfigNode submit a registration request to the
ConfigNode-leader when first startup
+ *
+ * @return SUCCESS_STATUS if the AddConfigNodeProcedure submitted
successfully
+ * ERROR_GLOBAL_CONFIG if some global configurations in the
Non-Seed-ConfigNode
+ * are inconsistent with the ConfigNode-leader
+ */
common.TSStatus registerConfigNode(TConfigNodeRegisterReq req)
+ /** The ConfigNode-leader will guide the Non-Seed-ConfigNode to join the
ConsensusGroup when first startup */
common.TSStatus addConsensusGroup(TAddConsensusGroupReq req)
+ /** The ConfigNode-leader will notify the Non-Seed-ConfigNode that the
registration success */
common.TSStatus notifyRegisterSuccess()
+ /**
+ * Remove the specific ConfigNode from the cluster
+ *
+ * @return SUCCESS_STATUS if the RemoveConfigNodeProcedure submitted
successfully
+ * REMOVE_CONFIGNODE_FAILED if the number of ConfigNodes is less than
1
+ * or the specific ConfigNode doesn't exist
+ * or the specific ConfigNode is leader
+ */
common.TSStatus removeConfigNode(common.TConfigNodeLocation
configNodeLocation)
+ /**
+ * Let the specific ConfigNode remove the ConsensusGroup
+ *
+ * @return SUCCESS_STATUS if remove ConsensusGroup successfully
+ * REMOVE_CONFIGNODE_FAILED if the specific ConfigNode doesn't exist
in the current cluster
+ * or Ratis internal failure
+ */
common.TSStatus removeConsensusGroup(common.TConfigNodeLocation
configNodeLocation)
+ /** Stop the specific ConfigNode */
common.TSStatus stopConfigNode(common.TConfigNodeLocation configNodeLocation)
- /* UDF */
+ /** The ConfigNode-leader will ping other ConfigNodes periodically */
+ i64 getConfigNodeHeartBeat(i64 timestamp)
+
+ // ======================================================
+ // UDF TODO: @SteveYurongSu add interface annotation
+ // ======================================================
common.TSStatus createFunction(TCreateFunctionReq req)
common.TSStatus dropFunction(TDropFunctionReq req)
- /* Maintenance Tools */
+ // ======================================================
+ // Maintenance Tools TODO: @RYH61 add interface annotation
+ // ======================================================
common.TSStatus merge(TMergeReq req)
@@ -442,25 +568,35 @@ service IConfigNodeRPCService {
common.TSStatus clearCache(TClearCacheReq req)
- /* Cluster Tools */
+ // ======================================================
+ // Cluster Tools
+ // ======================================================
+ /** Show cluster ConfigNodes' and DataNodes' information */
TShowClusterResp showCluster()
+ /** Show cluster DataNodes' information */
TShowDataNodesResp showDataNodes()
+ /** Show cluster ConfigNodes' information */
TShowConfigNodesResp showConfigNodes()
+ /**
+ * Show the matched cluster Regions' information
+ * See https://apache-iotdb.feishu.cn/docx/doxcnOzmIlaE2MX5tKjmYWuMSRg for
detailed matching rules
+ */
TShowRegionResp showRegion(TShowRegionReq req)
- /* Routing */
+ // ======================================================
+ // Routing
+ // ======================================================
+ /** The ConfigNode-leader will generate and return a latest RegionRouteMap */
TRegionRouteMapResp getLatestRegionRouteMap()
- /* Get confignode heartbeat */
-
- i64 getConfigNodeHeartBeat(i64 timestamp)
-
- /* Template */
+ // ======================================================
+ // Template TODO: @MarcosZyk add interface annotation
+ // ======================================================
common.TSStatus createSchemaTemplate(TCreateSchemaTemplateReq req)