This is an automated email from the ASF dual-hosted git repository. qiaojialin pushed a commit to branch fix_statuscode in repository https://gitbox.apache.org/repos/asf/iotdb.git
commit 0182cc275916dbc9d2a0ed0585427d3cfd502f98 Author: qiaojialin <[email protected]> AuthorDate: Mon Nov 14 23:42:20 2022 +0800 update status code 80% --- .../confignode/manager/ClusterSchemaManager.java | 2 +- .../iotdb/confignode/manager/SyncManager.java | 6 ++- .../iotdb/confignode/persistence/AuthorInfo.java | 5 +-- docs/UserGuide/API/Status-Codes.md | 43 +++++++++++++++++++- docs/zh/UserGuide/API/Status-Codes.md | 46 +++++++++++++++++++++- .../iotdb/confignode/it/IoTDBStorageGroupIT.java | 3 +- .../apache/iotdb/commons/utils/StatusUtils.java | 4 -- .../apache/iotdb/db/engine/StorageEngineV2.java | 2 +- .../metadata/MeasurementAlreadyExistException.java | 2 +- .../metadata/MeasurementInBlackListException.java | 2 +- .../metadata/StorageGroupAlreadySetException.java | 4 +- .../schemafile/ColossalRecordException.java | 6 +-- .../SchemaFileLogCorruptedException.java | 2 +- .../execution/executor/RegionWriteExecutor.java | 6 +-- .../iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java | 2 +- .../db/mpp/plan/analyze/ClusterSchemaFetcher.java | 2 +- .../config/executor/ClusterConfigTaskExecutor.java | 2 +- .../scheduler/load/LoadTsFileDispatcherImpl.java | 2 +- .../influxdb/meta/NewInfluxDBMetaManager.java | 2 +- .../impl/DataNodeInternalRPCServiceImpl.java | 7 ++-- .../thrift/impl/NewInfluxDBServiceImpl.java | 2 +- .../java/org/apache/iotdb/rpc/TSStatusCode.java | 26 ++++++------ 22 files changed, 127 insertions(+), 51 deletions(-) diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java index dffe4df3ec..a4a0f0e6d6 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java @@ -125,7 +125,7 @@ public class ClusterSchemaManager { if (metadataException instanceof IllegalPathException) { result = new 
TSStatus(TSStatusCode.PATH_ILLEGAL.getStatusCode()); } else { - result = new TSStatus(TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode()); + result = new TSStatus(TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()); } result.setMessage(metadataException.getMessage()); return result; diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/SyncManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/SyncManager.java index e57e1ef089..288326f936 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/SyncManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/SyncManager.java @@ -95,7 +95,8 @@ public class SyncManager { return getConsensusManager().write(plan).getStatus(); } catch (PipeSinkException e) { LOGGER.error(e.getMessage()); - return new TSStatus(TSStatusCode.CREATE_PIPE_SINK_ERROR.getStatusCode()).setMessage(e.getMessage()); + return new TSStatus(TSStatusCode.CREATE_PIPE_SINK_ERROR.getStatusCode()) + .setMessage(e.getMessage()); } } @@ -105,7 +106,8 @@ public class SyncManager { return getConsensusManager().write(plan).getStatus(); } catch (PipeSinkException e) { LOGGER.error(e.getMessage()); - return new TSStatus(TSStatusCode.CREATE_PIPE_SINK_ERROR.getStatusCode()).setMessage(e.getMessage()); + return new TSStatus(TSStatusCode.CREATE_PIPE_SINK_ERROR.getStatusCode()) + .setMessage(e.getMessage()); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java index 69a225f293..2ced5707cb 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java @@ -118,8 +118,7 @@ public class AuthorInfo implements SnapshotProcessor { result = getUserPermissionInfo(username); result.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); } catch 
(AuthException e) { - result.setStatus( - RpcUtils.getStatus(TSStatusCode.EXECUTE_PERMISSION_EXCEPTION_ERROR, e.getMessage())); + result.setStatus(RpcUtils.getStatus(TSStatusCode.EXECUTE_PERMISSION_ERROR, e.getMessage())); } } else { result = AuthUtils.generateEmptyPermissionInfoResp(); @@ -203,7 +202,7 @@ public class AuthorInfo implements SnapshotProcessor { throw new AuthException("unknown type: " + authorPlan.getAuthorType()); } } catch (AuthException e) { - return RpcUtils.getStatus(TSStatusCode.EXECUTE_PERMISSION_EXCEPTION_ERROR, e.getMessage()); + return RpcUtils.getStatus(TSStatusCode.EXECUTE_PERMISSION_ERROR, e.getMessage()); } return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); } diff --git a/docs/UserGuide/API/Status-Codes.md b/docs/UserGuide/API/Status-Codes.md index 58f19d029b..e354faa9d6 100644 --- a/docs/UserGuide/API/Status-Codes.md +++ b/docs/UserGuide/API/Status-Codes.md @@ -84,6 +84,26 @@ Here is a list of Status Code and related message: |334|CREATE_PIPE_SINK_ERROR| Error in creating PIPE sink | |335|PIPE_ERROR| PIPE error | |336|PIPESERVER_ERROR| PIPE server error | +|337|SERIES_OVERFLOW| Series number exceeds the threshold | +|339|TEMPLATE_NOT_EXIST| Schema template does not exist | +|340|CREATE_TEMPLATE_ERROR| Create schema template error | +|341|SYNC_FILE_REBASE| Sync TsFile error | +|342|SYNC_FILE_ERROR| Sync TsFile error | +|343|VERIFY_METADATA_ERROR| Meet error in validate timeseries schema | +|344|TIMESERIES_IN_BLACK_LIST| Timeseries is being deleted | +|349|OVERSIZE_RECORD| Size of record exceeds the threshold of page of SchemaFile | +|350|SCHEMA_FILE_REDO_LOG_BROKEN| SchemaFile redo log has broken | +|355|TRIGGER_FIRE_ERROR| Error when firing trigger | +|360|TRIGGER_LOAD_CLASS_ERROR| Error when load class of trigger | +|361|TRIGGER_DOWNLOAD_ERROR| Error when download trigger from ConfigNode | +|362|CREATE_TRIGGER_INSTANCE_ERROR| Error when create trigger instance | +|363|ACTIVE_TRIGGER_INSTANCE_ERROR| Error when activate trigger 
instance | +|364|DROP_TRIGGER_INSTANCE_ERROR| Error when drop trigger instance | +|365|UPDATE_TRIGGER_LOCATION_ERROR| Error when move stateful trigger to new datanode | +|370|UDF_LOAD_CLASS_ERROR| Error when loading UDF class | +|371|UDF_DOWNLOAD_ERROR| Error when download UDF class from ConfigNode | +|372|CREATE_FUNCTION_ON_DATANODE_ERROR| Error when create UDF on DataNode | +|373|DROP_FUNCTION_ON_DATANODE_ERROR| Error when drop a UDF on DataNode | |400|EXECUTE_STATEMENT_ERROR|Execute statement error| |401|SQL_PARSE_ERROR|Meet error while parsing SQL| |402|GENERATE_TIME_ZONE_ERROR|Meet error while generating time zone| @@ -99,6 +119,10 @@ Here is a list of Status Code and related message: |412|WRITE_PROCESS_ERROR|Writing data related error| |413|WRITE_PROCESS_REJECT|Writing data rejected error| |414|QUERY_ID_NOT_EXIST|Kill query with non existent queryId| +|415|SNAPSHOT_DIR_NOT_LEGAL|Snapshot dir name is illegal | +|416|SEMANTIC_ERROR|SQL semantic error| +|417|LOAD_PIECE_OF_TSFILE_ERROR|Error when load a piece of TsFile when loading| +|423|MEMORY_NOT_ENOUGH|Not enough memory for task execution in MPP| |500|INTERNAL_SERVER_ERROR|Internal server error| |501|CLOSE_OPERATION_ERROR|Meet error in close operation| |502|READ_ONLY_SYSTEM_ERROR|Operating system is read only| @@ -107,19 +131,34 @@ Here is a list of Status Code and related message: |505|SHUT_DOWN_ERROR|Meet error while shutdown| |506|MULTIPLE_ERROR|Meet error when executing multiple statements| |507|SESSION_EXPIRED|Session expired| +|508|TSBLOCK_SERIALIZE_ERROR|TsBlock serialization error| |600|WRONG_LOGIN_PASSWORD_ERROR|Username or password is wrong| |601|NOT_LOGIN_ERROR|Has not logged in| |602|NO_PERMISSION_ERROR|No permissions for this operation, please add privilege| |603|UNINITIALIZED_AUTH_ERROR|Uninitialized authorizer| +|605|USER_NOT_EXIST_ERROR|User does not exist| +|606|ROLE_NOT_EXIST_ERROR|Role does not exist| +|607|AUTHENTICATION_ERROR|Error in authentication| 
+|608|CLEAR_PERMISSION_CACHE_ERROR|Error when clearing the permission cache| |700|PARTITION_NOT_READY|Partition table not ready| |701|TIME_OUT|Operation timeout| |702|NO_LEADER|No leader| |703|UNSUPPORTED_OPERATION|Unsupported operation| -|704|NODE_READ_ONLY|Node read only| -|705|CONSISTENCY_FAILURE|Consistency check failure| |706|NO_CONNECTION|Can not get connection error| |707|NEED_REDIRECTION|Need redirection| +|709|ALL_RETRY_FAILED|All retry failed| +|710|MIGRATE_REGION_ERROR|Error when migrating region| +|711|CREATE_REGION_ERROR|Create region error| +|712|DELETE_REGION_ERROR|Delete region error| +|713|PARTITION_CACHE_UPDATE_FAIL|Update partition cache failed| +|714|DESERIALIZE_PIECE_OF_TSFILE_ERROR|Error when deserializing a piece of TsFile| +|715|CONSENSUS_NOT_INITIALIZED|Consensus is not initialized and cannot provide service| |800|CONFIG_ERROR|Configuration error| +|901|DATANODE_ALREADY_REGISTERED|DataNode already registered in cluster | +|902|CREATE_DATABASE_ERROR|Create Database failed| +|903|DATABASE_ALREADY_EXISTS|Database already exists| +|904|NOT_ENOUGH_DATA_NODE|The number of DataNode is not enough| +|905|ERROR_GLOBAL_CONFIG|Global config in cluster is not consistent| > All exceptions are refactored in latest version by extracting uniform > message into exception classes. Different error codes are added to all > exceptions. When an exception is caught and a higher-level exception is > thrown, the error code will be kept and passed so that users will know the > detailed error reason. A base exception class "ProcessException" is also added to be extended by all exceptions. 
diff --git a/docs/zh/UserGuide/API/Status-Codes.md b/docs/zh/UserGuide/API/Status-Codes.md index d055908e68..6a36975cec 100644 --- a/docs/zh/UserGuide/API/Status-Codes.md +++ b/docs/zh/UserGuide/API/Status-Codes.md @@ -85,6 +85,27 @@ try { |334|CREATE_PIPE_SINK_ERROR| 创建 PIPE Sink 异常 | |335|PIPE_ERROR| PIPE 异常 | |336|PIPESERVER_ERROR| PIPE server 异常 | +|337|SERIES_OVERFLOW| 序列数量超过阈值 | +|338|TIMESERIES_ALREADY_EXIST| 时间序列已存在 | +|339|TEMPLATE_NOT_EXIST| 物理量模板不存在 | +|340|CREATE_TEMPLATE_ERROR| 创建物理量模板失败 | +|341|SYNC_FILE_REBASE| 同步文件异常 | +|342|SYNC_FILE_ERROR| 同步文件异常 | +|343|VERIFY_METADATA_ERROR| 校验元数据失败 | +|344|TIMESERIES_IN_BLACK_LIST| 时间序列正在删除 | +|349|OVERSIZE_RECORD| 记录大小超过元数据文件页面大小 | +|350|SCHEMA_FILE_REDO_LOG_BROKEN| SchemaFile 的 redo 日志损坏 | +|355|TRIGGER_FIRE_ERROR| 触发器执行错误 | +|360|TRIGGER_LOAD_CLASS_ERROR| 触发器加载类异常 | +|361|TRIGGER_DOWNLOAD_ERROR| 从 ConfigNode 下载触发器异常 | +|362|CREATE_TRIGGER_INSTANCE_ERROR| 创建触发器实例异常 | +|363|ACTIVE_TRIGGER_INSTANCE_ERROR| 激活触发器实例异常 | +|364|DROP_TRIGGER_INSTANCE_ERROR| 删除触发器实例异常 | +|365|UPDATE_TRIGGER_LOCATION_ERROR| 更新有状态的触发器所在 DataNode 异常 | +|370|UDF_LOAD_CLASS_ERROR| UDF 加载类异常 | +|371|UDF_DOWNLOAD_ERROR| 无法从 ConfigNode 下载 UDF | +|372|CREATE_FUNCTION_ON_DATANODE_ERROR| 在 DataNode 创建 UDF 失败 | +|373|DROP_FUNCTION_ON_DATANODE_ERROR| 在 DataNode 卸载 UDF 失败 | |400|EXECUTE_STATEMENT_ERROR|执行语句错误| |401|SQL_PARSE_ERROR|SQL 语句分析错误| |402|GENERATE_TIME_ZONE_ERROR|生成时区错误| @@ -100,6 +121,10 @@ try { |412|WRITE_PROCESS_ERROR|写入相关错误| |413|WRITE_PROCESS_REJECT|写入拒绝错误| |414|QUERY_ID_NOT_EXIST|Query id 不存在| +|415|SNAPSHOT_DIR_NOT_LEGAL|快照目录名不合法| +|416|SEMANTIC_ERROR|SQL 语义错误| +|417|LOAD_PIECE_OF_TSFILE_ERROR|加载 TsFile 片段异常| +|423|MEMORY_NOT_ENOUGH|MPP 框架中任务执行内存不足| |500|INTERNAL_SERVER_ERROR|服务器内部错误| |501|CLOSE_OPERATION_ERROR|关闭操作错误| |502|READ_ONLY_SYSTEM_ERROR|系统只读| @@ -108,19 +133,36 @@ try { |505|SHUT_DOWN_ERROR|关机错误| |506|MULTIPLE_ERROR|多行语句执行错误| |507|SESSION_EXPIRED|会话过期| +|508|TSBLOCK_SERIALIZE_ERROR|TsBlock 序列化错误| 
|600|WRONG_LOGIN_PASSWORD_ERROR|用户名或密码错误| |601|NOT_LOGIN_ERROR|没有登录| |602|NO_PERMISSION_ERROR|没有操作权限| |603|UNINITIALIZED_AUTH_ERROR|授权人未初始化| +|604|EXECUTE_PERMISSION_ERROR|执行权限异常| +|605|USER_NOT_EXIST_ERROR|用户不存在| +|606|ROLE_NOT_EXIST_ERROR|角色不存在| +|607|AUTHENTICATION_ERROR|权限认证失败| +|608|CLEAR_PERMISSION_CACHE_ERROR|清空权限缓存失败| + |700|PARTITION_NOT_READY|分区表未准备好| |701|TIME_OUT|操作超时| |702|NO_LEADER|Leader 找不到| |703|UNSUPPORTED_OPERATION|不支持的操作| -|704|NODE_READ_ONLY|节点只读| -|705|CONSISTENCY_FAILURE|一致性检查失败| |706|NO_CONNECTION|连接获取失败| |707|NEED_REDIRECTION|需要重定向| +|709|ALL_RETRY_FAILED|所有重试失败| +|710|MIGRATE_REGION_ERROR|Region 迁移失败| +|711|CREATE_REGION_ERROR|创建 region 失败| +|712|DELETE_REGION_ERROR|删除 region 失败| +|713|PARTITION_CACHE_UPDATE_FAIL|更新分区缓存失败| +|714|DESERIALIZE_PIECE_OF_TSFILE_ERROR|反序列化 TsFile 片段异常| +|715|CONSENSUS_NOT_INITIALIZED|共识层未初始化,不能提供服务| |800|CONFIG_ERROR|配置文件有错误项| +|901|DATANODE_ALREADY_REGISTERED|DataNode 在集群中已经注册| +|902|CREATE_DATABASE_ERROR|创建 Database 失败| +|903|DATABASE_ALREADY_EXISTS|Database 已存在| +|904|NOT_ENOUGH_DATA_NODE|没有足够的 DataNode| +|905|ERROR_GLOBAL_CONFIG|全局配置参数异常| > 在最新版本中,我们重构了 IoTDB > 的异常类。通过将错误信息统一提取到异常类中,并为所有异常添加不同的错误代码,从而当捕获到异常并引发更高级别的异常时,错误代码将保留并传递,以便用户了解详细的错误原因。 除此之外,我们添加了一个基础异常类“ProcessException”,由所有异常扩展。 diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBStorageGroupIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBStorageGroupIT.java index 6867a42d44..0010a56420 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBStorageGroupIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBStorageGroupIT.java @@ -126,8 +126,7 @@ public class IoTDBStorageGroupIT { // test fail by re-register status = client.setStorageGroup(setReq0); - Assert.assertEquals( - TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode(), status.getCode()); + Assert.assertEquals(TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode(), 
status.getCode()); // test StorageGroup setter interfaces PartialPath patternPath = new PartialPath(sg1); diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java b/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java index d2e3ed2035..06d6ed3246 100644 --- a/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java +++ b/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java @@ -78,7 +78,6 @@ public class StatusUtils { case PATH_ALREADY_EXIST_ERROR: status.setMessage("Path already exist."); break; - case PATH_NOT_EXIST_ERROR: case PATH_NOT_EXIST_ERROR: status.setMessage("Path does not exist."); break; @@ -190,9 +189,6 @@ public class StatusUtils { case NO_CONNECTION: status.setMessage("Node cannot be reached."); break; - case PARSE_LOG_ERROR: - status.setMessage("Parse log error."); - break; default: status.setMessage(""); break; diff --git a/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java b/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java index 30c6f2500b..4ee0c4437a 100644 --- a/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java +++ b/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java @@ -715,7 +715,7 @@ public class StorageEngineV2 implements IService { "Parse Page error when writing piece node of TsFile %s to DataRegion %s.", pieceNode.getTsFile(), dataRegionId), e); - status.setCode(TSStatusCode.TSFILE_RUNTIME_ERROR.getStatusCode()); + status.setCode(TSStatusCode.LOAD_PIECE_OF_TSFILE_ERROR.getStatusCode()); status.setMessage(e.getMessage()); return status; } catch (IOException e) { diff --git a/server/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementAlreadyExistException.java b/server/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementAlreadyExistException.java index c5a9afbc68..9ff7116d71 100644 --- 
a/server/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementAlreadyExistException.java +++ b/server/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementAlreadyExistException.java @@ -31,7 +31,7 @@ public class MeasurementAlreadyExistException extends MetadataException { public MeasurementAlreadyExistException(String path, MeasurementPath measurementPath) { super( String.format("Path [%s] already exist", path), - TSStatusCode.MEASUREMENT_ALREADY_EXIST.getStatusCode()); + TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()); this.isUserException = true; this.measurementPath = measurementPath; } diff --git a/server/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementInBlackListException.java b/server/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementInBlackListException.java index 3aef42cec0..d6dd5c3cab 100644 --- a/server/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementInBlackListException.java +++ b/server/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementInBlackListException.java @@ -31,7 +31,7 @@ public class MeasurementInBlackListException extends MetadataException { public MeasurementInBlackListException(PartialPath path) { super( String.format("Some task is deleting timeseries [%s]", path), - TSStatusCode.MEASUREMENT_IN_BLACK_LIST.getStatusCode()); + TSStatusCode.TIMESERIES_IN_BLACK_LIST.getStatusCode()); this.path = path; } diff --git a/server/src/main/java/org/apache/iotdb/db/exception/metadata/StorageGroupAlreadySetException.java b/server/src/main/java/org/apache/iotdb/db/exception/metadata/StorageGroupAlreadySetException.java index 9529bbf904..89aacde85d 100644 --- a/server/src/main/java/org/apache/iotdb/db/exception/metadata/StorageGroupAlreadySetException.java +++ b/server/src/main/java/org/apache/iotdb/db/exception/metadata/StorageGroupAlreadySetException.java @@ -31,13 +31,13 @@ public class StorageGroupAlreadySetException extends MetadataException { private final String 
storageGroupPath; public StorageGroupAlreadySetException(String path) { - super(getMessage(path, false), TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode()); + super(getMessage(path, false), TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()); storageGroupPath = path; hasChild = false; } public StorageGroupAlreadySetException(String path, boolean hasChild) { - super(getMessage(path, hasChild), TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode()); + super(getMessage(path, hasChild), TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()); this.hasChild = hasChild; storageGroupPath = path; } diff --git a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/ColossalRecordException.java b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/ColossalRecordException.java index 39f1968ab7..96777d38ef 100644 --- a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/ColossalRecordException.java +++ b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/ColossalRecordException.java @@ -33,21 +33,21 @@ public class ColossalRecordException extends MetadataException { super( String.format( "Record of key [%s] is too large for SchemaFile to store, content size:%d", key, size), - TSStatusCode.COLOSSAL_RECORD.getStatusCode(), + TSStatusCode.OVERSIZE_RECORD.getStatusCode(), true); } public ColossalRecordException(String key) { super( String.format("Key [%s] is too large to store in a InternalPage as index entry.", key), - TSStatusCode.COLOSSAL_RECORD.getStatusCode(), + TSStatusCode.OVERSIZE_RECORD.getStatusCode(), true); } public ColossalRecordException(String key, String alias) { super( String.format("Key-Alias pair (%s, %s) is too large for SchemaFile to store.", key, alias), - TSStatusCode.COLOSSAL_RECORD.getStatusCode(), + TSStatusCode.OVERSIZE_RECORD.getStatusCode(), true); } } diff --git 
a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileLogCorruptedException.java b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileLogCorruptedException.java index 47b67f34d5..fab2200350 100644 --- a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileLogCorruptedException.java +++ b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileLogCorruptedException.java @@ -28,7 +28,7 @@ public class SchemaFileLogCorruptedException extends MetadataException { public SchemaFileLogCorruptedException(String fileName, String reason) { super( String.format("SchemaFileLog [%s] corrupted for [%s].", fileName, reason), - TSStatusCode.SCHEMA_FILE_LOG_CORR.getStatusCode(), + TSStatusCode.SCHEMA_FILE_REDO_LOG_BROKEN.getStatusCode(), true); } } diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/executor/RegionWriteExecutor.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/executor/RegionWriteExecutor.java index 72811cc815..d017491141 100644 --- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/executor/RegionWriteExecutor.java +++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/executor/RegionWriteExecutor.java @@ -422,7 +422,7 @@ public class RegionWriteExecutor { failingMeasurementMap.entrySet()) { metadataException = failingMeasurement.getValue(); if (metadataException.getErrorCode() - == TSStatusCode.MEASUREMENT_ALREADY_EXIST.getStatusCode()) { + == TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()) { LOGGER.info( "There's no need to internal create timeseries. 
{}", failingMeasurement.getValue().getMessage()); @@ -452,7 +452,7 @@ public class RegionWriteExecutor { if (failingStatus.isEmpty()) { if (executionStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { if (executionStatus.getSubStatus().get(0).getCode() - == TSStatusCode.MEASUREMENT_ALREADY_EXIST.getStatusCode()) { + == TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()) { // there's only measurement_already_exist exception alreadyExistingStatus.addAll(executionStatus.getSubStatus()); } else { @@ -464,7 +464,7 @@ public class RegionWriteExecutor { } else { if (executionStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { if (executionStatus.getSubStatus().get(0).getCode() - != TSStatusCode.MEASUREMENT_ALREADY_EXIST.getStatusCode()) { + != TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()) { failingStatus.addAll(executionStatus.getSubStatus()); } } else if (executionStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java index 66520cc651..3d409e42ea 100644 --- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java +++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java @@ -1734,7 +1734,7 @@ public class AnalyzeVisitor extends StatementVisitor<Analysis, MPPQueryContext> schemaFetcher, IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold()); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && result.status.code != TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode()) { + && result.status.code != TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()) { logger.error(String.format("Create Database error, statement: %s.", statement)); logger.error(String.format("Create database result status : %s.", result.status)); throw new LoadFileException( diff --git 
a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/ClusterSchemaFetcher.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/ClusterSchemaFetcher.java index 22964f5ad4..b11ed72317 100644 --- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/ClusterSchemaFetcher.java +++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/ClusterSchemaFetcher.java @@ -488,7 +488,7 @@ public class ClusterSchemaFetcher implements ISchemaFetcher { List<String> failedCreationList = new ArrayList<>(); List<MeasurementPath> alreadyExistingMeasurements = new ArrayList<>(); for (TSStatus subStatus : executionResult.status.subStatus) { - if (subStatus.code == TSStatusCode.MEASUREMENT_ALREADY_EXIST.getStatusCode()) { + if (subStatus.code == TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()) { alreadyExistingMeasurements.add( MeasurementPath.parseDataFromString(subStatus.getMessage())); } else { diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/config/executor/ClusterConfigTaskExecutor.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/config/executor/ClusterConfigTaskExecutor.java index 43981a2848..706c54935e 100644 --- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/config/executor/ClusterConfigTaskExecutor.java +++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/config/executor/ClusterConfigTaskExecutor.java @@ -521,7 +521,7 @@ public class ClusterConfigTaskExecutor implements IConfigTaskExecutor { + createTriggerStatement.getClassName() + "', because it's not found in jar file: " + createTriggerStatement.getUriString(), - TSStatusCode.TRIGGER_LOAD_CLASS.getStatusCode())); + TSStatusCode.TRIGGER_LOAD_CLASS_ERROR.getStatusCode())); return future; } diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/load/LoadTsFileDispatcherImpl.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/load/LoadTsFileDispatcherImpl.java index 3471aa3563..e684564094 
100644 --- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/load/LoadTsFileDispatcherImpl.java +++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/load/LoadTsFileDispatcherImpl.java @@ -157,7 +157,7 @@ public class LoadTsFileDispatcherImpl implements IFragInstanceDispatcher { (LoadTsFilePieceNode) PlanNodeType.deserialize(planNode.serializeToByteBuffer()); if (pieceNode == null) { throw new FragmentInstanceDispatchException( - new TSStatus(TSStatusCode.NODE_DESERIALIZE_ERROR.getStatusCode())); + new TSStatus(TSStatusCode.DESERIALIZE_PIECE_OF_TSFILE_ERROR.getStatusCode())); } TSStatus resultStatus = StorageEngineV2.getInstance() diff --git a/server/src/main/java/org/apache/iotdb/db/protocol/influxdb/meta/NewInfluxDBMetaManager.java b/server/src/main/java/org/apache/iotdb/db/protocol/influxdb/meta/NewInfluxDBMetaManager.java index 2f5422b034..7992a70000 100644 --- a/server/src/main/java/org/apache/iotdb/db/protocol/influxdb/meta/NewInfluxDBMetaManager.java +++ b/server/src/main/java/org/apache/iotdb/db/protocol/influxdb/meta/NewInfluxDBMetaManager.java @@ -118,7 +118,7 @@ public class NewInfluxDBMetaManager extends AbstractInfluxDBMetaManager { public void setStorageGroup(String database, long sessionID) { TSStatus status = clientRPCService.setStorageGroup(sessionID, "root." 
+ database); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() - || status.getCode() == TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode()) { + || status.getCode() == TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()) { return; } throw new InfluxDBException(status.getMessage()); diff --git a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java index f691488eb6..566cc15caa 100644 --- a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java +++ b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java @@ -347,7 +347,8 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface ConsensusGroupId.Factory.createFromTConsensusGroupId(req.consensusGroupId); LoadTsFilePieceNode pieceNode = (LoadTsFilePieceNode) PlanNodeType.deserialize(req.body); if (pieceNode == null) { - return createTLoadResp(new TSStatus(TSStatusCode.NODE_DESERIALIZE_ERROR.getStatusCode())); + return createTLoadResp( + new TSStatus(TSStatusCode.DESERIALIZE_PIECE_OF_TSFILE_ERROR.getStatusCode())); } TSStatus resultStatus = @@ -971,7 +972,7 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface if (result) { return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); } else { - return RpcUtils.getStatus(TSStatusCode.CACHE_UPDATE_FAIL); + return RpcUtils.getStatus(TSStatusCode.PARTITION_CACHE_UPDATE_FAIL); } } @@ -1063,7 +1064,7 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface if (AuthorizerManager.getInstance().invalidateCache(req.getUsername(), req.getRoleName())) { return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); } - return RpcUtils.getStatus(TSStatusCode.INVALIDATE_PERMISSION_CACHE_ERROR); + return RpcUtils.getStatus(TSStatusCode.CLEAR_PERMISSION_CACHE_ERROR); } 
@Override diff --git a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/NewInfluxDBServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/NewInfluxDBServiceImpl.java index 38a3a4d157..a033c00af9 100644 --- a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/NewInfluxDBServiceImpl.java +++ b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/NewInfluxDBServiceImpl.java @@ -137,7 +137,7 @@ public class NewInfluxDBServiceImpl implements IInfluxDBServiceWithHandler { public InfluxTSStatus createDatabase(InfluxCreateDatabaseReq req) { TSStatus tsStatus = clientRPCService.setStorageGroup(req.sessionId, "root." + req.getDatabase()); - if (tsStatus.getCode() == TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode()) { + if (tsStatus.getCode() == TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()) { tsStatus.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); tsStatus.setMessage("Execute successfully"); } diff --git a/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java b/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java index 2abc593f8d..0230e6a804 100644 --- a/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java +++ b/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java @@ -65,17 +65,17 @@ public enum TSStatusCode { PIPE_ERROR(335), PIPESERVER_ERROR(336), SERIES_OVERFLOW(337), - MEASUREMENT_ALREADY_EXIST(338), + TIMESERIES_ALREADY_EXIST(338), TEMPLATE_NOT_EXIST(339), CREATE_TEMPLATE_ERROR(340), SYNC_FILE_REBASE(341), SYNC_FILE_ERROR(342), VERIFY_METADATA_ERROR(343), - MEASUREMENT_IN_BLACK_LIST(344), - COLOSSAL_RECORD(349), - SCHEMA_FILE_LOG_CORR(350), + TIMESERIES_IN_BLACK_LIST(344), + OVERSIZE_RECORD(349), + SCHEMA_FILE_REDO_LOG_BROKEN(350), TRIGGER_FIRE_ERROR(355), - TRIGGER_LOAD_CLASS(360), + TRIGGER_LOAD_CLASS_ERROR(360), TRIGGER_DOWNLOAD_ERROR(361), CREATE_TRIGGER_INSTANCE_ERROR(362), ACTIVE_TRIGGER_INSTANCE_ERROR(363), @@ -104,9 +104,8 @@ public enum 
TSStatusCode { QUERY_ID_NOT_EXIST(414), SNAPSHOT_DIR_NOT_LEGAL(415), SEMANTIC_ERROR(416), - TSFILE_RUNTIME_ERROR(417), + LOAD_PIECE_OF_TSFILE_ERROR(417), - UNSUPPORTED_INDEX_FUNC_ERROR(421), UNSUPPORTED_INDEX_TYPE_ERROR(422), MEMORY_NOT_ENOUGH(423), @@ -125,11 +124,11 @@ public enum TSStatusCode { NOT_LOGIN_ERROR(601), NO_PERMISSION_ERROR(602), UNINITIALIZED_AUTH_ERROR(603), - EXECUTE_PERMISSION_EXCEPTION_ERROR(604), + EXECUTE_PERMISSION_ERROR(604), USER_NOT_EXIST_ERROR(605), ROLE_NOT_EXIST_ERROR(606), AUTHENTICATION_ERROR(607), - INVALIDATE_PERMISSION_CACHE_ERROR(608), + CLEAR_PERMISSION_CACHE_ERROR(608), // cluster-related errors PARTITION_NOT_READY(700), @@ -138,13 +137,12 @@ public enum TSStatusCode { UNSUPPORTED_OPERATION(703), NO_CONNECTION(706), NEED_REDIRECTION(707), - PARSE_LOG_ERROR(708), ALL_RETRY_FAILED(709), MIGRATE_REGION_ERROR(710), CREATE_REGION_ERROR(711), DELETE_REGION_ERROR(712), - CACHE_UPDATE_FAIL(713), - NODE_DESERIALIZE_ERROR(714), + PARTITION_CACHE_UPDATE_FAIL(713), + DESERIALIZE_PIECE_OF_TSFILE_ERROR(714), CONSENSUS_NOT_INITIALIZED(715), // configuration @@ -152,8 +150,8 @@ public enum TSStatusCode { // ConfigNode response DATANODE_ALREADY_REGISTERED(901), - SET_STORAGE_GROUP_FAILED(902), - STORAGE_GROUP_ALREADY_EXISTS(903), + CREATE_DATABASE_ERROR(902), + DATABASE_ALREADY_EXISTS(903), NOT_ENOUGH_DATA_NODE(904), ERROR_GLOBAL_CONFIG(905), APPLY_CONFIGNODE_FAILED(906),
