This is an automated email from the ASF dual-hosted git repository.
vgalaxies pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git
The following commit(s) were added to refs/heads/master by this push:
new 0b24aca3c chore(store): translate CJK comments to English for
store-dist, store-grpc, store-node, store-rocksdb, store-test (#2645)
0b24aca3c is described below
commit 0b24aca3cd255287131545fb2e56c92f4f922106
Author: V_Galaxy <[email protected]>
AuthorDate: Fri Aug 23 14:35:16 2024 +0800
chore(store): translate CJK comments to English for store-dist, store-grpc,
store-node, store-rocksdb, store-test (#2645)
Co-authored-by: Peng Junzhi <[email protected]>
---
.../assembly/static/bin/start-hugegraph-store.sh | 4 +--
.../src/assembly/static/conf/application-pd.yml | 6 ++--
.../src/assembly/static/conf/application.yml | 12 +++----
hugegraph-store/hg-store-grpc/pom.xml | 14 ++++----
.../hg-store-grpc/src/main/proto/graphpb.proto | 26 +++++++-------
.../src/main/proto/store_common.proto | 8 ++---
.../src/main/proto/store_session.proto | 6 ++--
.../org/apache/hugegraph/store/node/AppConfig.java | 8 ++---
.../hugegraph/store/node/AppShutdownHook.java | 2 +-
.../hugegraph/store/node/StoreNodeApplication.java | 2 +-
.../store/node/controller/HgTestController.java | 4 +--
.../store/node/controller/PartitionAPI.java | 8 ++---
.../store/node/grpc/BatchGrpcClosure.java | 10 +++---
.../hugegraph/store/node/grpc/GrpcClosure.java | 2 +-
.../store/node/grpc/HgStoreNodeService.java | 4 +--
.../store/node/grpc/HgStoreSessionImpl.java | 40 ++++++++++-----------
.../store/node/grpc/HgStoreWrapperEx.java | 2 +-
.../store/node/grpc/ParallelScanIterator.java | 22 ++++++------
.../store/node/grpc/ScanBatchResponse.java | 42 +++++++++++-----------
.../store/node/grpc/ScanBatchResponseFactory.java | 2 +-
.../apache/hugegraph/store/node/grpc/ScanUtil.java | 2 +-
.../store/node/grpc/scan/GraphStoreImpl.java | 8 ++---
.../store/node/grpc/scan/ScanResponseObserver.java | 16 ++++-----
.../store/node/listener/PdConfigureListener.java | 2 +-
.../apache/hugegraph/store/node/util/HgGrpc.java | 2 +-
.../src/main/resources/application-pd.yml | 4 +--
.../src/main/resources/application.yml | 10 +++---
.../store/node/HgStoreNodeServiceTest.java | 12 +++----
.../src/test/resources/application-pd.yml | 2 +-
.../src/test/resources/application-server00.yml | 18 +++++-----
.../src/test/resources/application-server01.yml | 18 +++++-----
.../src/test/resources/application-server02.yml | 18 +++++-----
.../src/test/resources/application-server03.yml | 18 +++++-----
.../src/test/resources/application-server04.yml | 18 +++++-----
.../src/test/resources/application-server05.yml | 18 +++++-----
.../src/test/resources/application-server06.yml | 18 +++++-----
.../hugegraph/rocksdb/access/RocksDBFactory.java | 8 ++---
.../hugegraph/rocksdb/access/RocksDBSession.java | 4 +--
.../hugegraph/rocksdb/access/SessionOperator.java | 4 +--
.../rocksdb/access/SessionOperatorImpl.java | 8 ++---
.../store/HgSessionManagerOneRaftFakePDTest.java | 10 +++---
.../store/HgSessionManagerRaftFakePDTest.java | 12 +++----
.../store/HgSessionManagerRaftPDTest.java | 6 ++--
.../hugegraph/store/HgSessionManagerTest.java | 8 ++---
.../hugegraph/store/PartitionEngineTest.java | 6 ++--
.../hugegraph/store/client/ChangeShardNumTest.java | 2 +-
.../store/client/HgSessionManagerRaftPDTest.java | 4 +--
.../apache/hugegraph/store/core/CoreSuiteTest.java | 2 +-
.../hugegraph/store/core/HgCmdClientTest.java | 10 +++---
.../hugegraph/store/core/StoreEngineTestBase.java | 4 +--
.../hg-store-test/src/main/resources/pd-server.yml | 20 +++++------
51 files changed, 258 insertions(+), 258 deletions(-)
diff --git
a/hugegraph-store/hg-store-dist/src/assembly/static/bin/start-hugegraph-store.sh
b/hugegraph-store/hg-store-dist/src/assembly/static/bin/start-hugegraph-store.sh
index 1704aded1..d70f2fbd1 100644
---
a/hugegraph-store/hg-store-dist/src/assembly/static/bin/start-hugegraph-store.sh
+++
b/hugegraph-store/hg-store-dist/src/assembly/static/bin/start-hugegraph-store.sh
@@ -75,12 +75,12 @@ export FILE_LIMITN=1024
function check_evn_limit() {
local limit_check=$(ulimit -n)
if [[ ${limit_check} != "unlimited" && ${limit_check} -lt ${FILE_LIMITN}
]]; then
- echo -e "${BASH_SOURCE[0]##*/}:${LINENO}:\E[1;32m ulimit -n
可以打开的最大文件描述符数太少,需要(${FILE_LIMITN})!! \E[0m"
+ echo -e "${BASH_SOURCE[0]##*/}:${LINENO}:\E[1;32m ulimit -n can open
too few maximum file descriptors, need (${FILE_LIMITN})!! \E[0m"
return 1
fi
limit_check=$(ulimit -u)
if [[ ${limit_check} != "unlimited" && ${limit_check} -lt ${PROC_LIMITN}
]]; then
- echo -e "${BASH_SOURCE[0]##*/}:${LINENO}:\E[1;32m ulimit -u
用户最大可用的进程数太少,需要(${PROC_LIMITN})!! \E[0m"
+ echo -e "${BASH_SOURCE[0]##*/}:${LINENO}:\E[1;32m ulimit -u too few
available processes for the user, need (${PROC_LIMITN})!! \E[0m"
return 2
fi
return 0
diff --git
a/hugegraph-store/hg-store-dist/src/assembly/static/conf/application-pd.yml
b/hugegraph-store/hg-store-dist/src/assembly/static/conf/application-pd.yml
index df535953f..0315c4b4f 100644
--- a/hugegraph-store/hg-store-dist/src/assembly/static/conf/application-pd.yml
+++ b/hugegraph-store/hg-store-dist/src/assembly/static/conf/application-pd.yml
@@ -26,9 +26,9 @@ management:
include: "*"
rocksdb:
- # rocksdb 使用的总内存大小,达到该值强制写盘
+ # Total memory used by rocksdb; force a flush to disk when this value is reached
total_memory_size: 32000000000
- # rocksdb 使用的 memtable 大小
+ # memtable size used by rocksdb
write_buffer_size: 32000000
- # 对于每个 rocksdb 来说,memtable 个数达到该值进行写盘
+ # For each rocksdb instance, flush to disk once the number of memtables reaches this value
min_write_buffer_number_to_merge: 16
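
For context, these settings map onto RocksDB's write-buffer options. A minimal sketch of the assumed mapping via the org.rocksdb Java API (the path and sizes here are illustrative only, not the store's actual wiring):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class RocksDBWriteBufferExample {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options()
                .setCreateIfMissing(true)
                // memtable size (write_buffer_size)
                .setWriteBufferSize(32_000_000L)
                // flush once this many memtables have accumulated
                // (min_write_buffer_number_to_merge)
                .setMinWriteBufferNumberToMerge(16);
             RocksDB db = RocksDB.open(options, "/tmp/rocksdb-demo")) {
            db.put("k".getBytes(), "v".getBytes());
        }
    }
}
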
diff --git
a/hugegraph-store/hg-store-dist/src/assembly/static/conf/application.yml
b/hugegraph-store/hg-store-dist/src/assembly/static/conf/application.yml
index 4ca3d34dd..8e2b8d4f7 100644
--- a/hugegraph-store/hg-store-dist/src/assembly/static/conf/application.yml
+++ b/hugegraph-store/hg-store-dist/src/assembly/static/conf/application.yml
@@ -16,7 +16,7 @@
#
pdserver:
- # pd 服务地址,多个 pd 地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
management:
@@ -30,24 +30,24 @@ management:
include: "*"
grpc:
- # grpc 的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8500
netty-server:
max-inbound-message-size: 1000MB
raft:
- # raft 缓存队列大小
+ # raft cache queue size
disruptorBufferSize: 1024
address: 127.0.0.1:8510
max-log-file-size: 600000000000
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, in seconds
snapshotInterval: 1800
server:
- # rest 服务地址
+ # rest service address
port: 8520
app:
- # 存储路径,支持多个路径,逗号分割
+ # Storage path; multiple paths supported, separated by commas
data-path: ./storage
#raft-path: ./storage
diff --git a/hugegraph-store/hg-store-grpc/pom.xml
b/hugegraph-store/hg-store-grpc/pom.xml
index 3e7cc6add..75f4594a0 100644
--- a/hugegraph-store/hg-store-grpc/pom.xml
+++ b/hugegraph-store/hg-store-grpc/pom.xml
@@ -91,23 +91,23 @@
<pluginArtifact>
io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
</pluginArtifact>
- <!--默认值-->
+ <!-- Default value -->
<protoSourceRoot>${project.basedir}/src/main/proto</protoSourceRoot>
- <!--默认值-->
+ <!-- Default value -->
<!--<outputDirectory>${project.build.directory}/generated-sources/protobuf/java</outputDirectory>-->
<outputDirectory>${project.basedir}/src/main/java</outputDirectory>
-
<!--设置是否在生成java文件之前清空outputDirectory的文件,默认值为true,设置为false时也会覆盖同名文件-->
+ <!-- Set whether to clear the files in outputDirectory
before generating java files, the default value is true, and it will also
override files with the same name when set to false -->
<clearOutputDirectory>false</clearOutputDirectory>
-
<!--更多配置信息可以查看https://www.xolstice.org/protobuf-maven-plugin/compile-mojo.html-->
+ <!-- More configuration information can be found at
https://www.xolstice.org/protobuf-maven-plugin/compile-mojo.html -->
</configuration>
<executions>
<execution>
- <!--在执行mvn compile的时候会执行以下操作-->
+ <!-- When executing mvn compile, the following
operations will be performed -->
<phase>generate-sources</phase>
<goals>
- <!--生成OuterClass类-->
+ <!-- Generate OuterClass class -->
<goal>compile</goal>
- <!--生成Grpc类-->
+ <!-- Generate Grpc classes -->
<goal>compile-custom</goal>
</goals>
</execution>
diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/graphpb.proto
b/hugegraph-store/hg-store-grpc/src/main/proto/graphpb.proto
index a245002f8..6e9d16d2e 100644
--- a/hugegraph-store/hg-store-grpc/src/main/proto/graphpb.proto
+++ b/hugegraph-store/hg-store-grpc/src/main/proto/graphpb.proto
@@ -30,20 +30,20 @@ message ScanPartitionRequest{
SCAN_VERTEX = 1;
SCAN_EDGE = 2;
}
- // 请求参数
+ // Request parameters
message Request{
ScanType scan_type = 1;
string graph_name = 2;
uint32 partition_id = 3;
uint32 start_code = 4;
uint32 end_code = 5;
- // 过滤条件
+ // Filter conditions
string condition = 6;
string table = 7;
int64 limit = 8;
int32 boundary = 9;
bytes position = 10;
- // 返回条件
+ // Return condition
repeated int64 properties = 11;
}
@@ -54,14 +54,14 @@ message ScanPartitionRequest{
RequestHeader header = 1;
oneof request {
Request scan_request = 2;
- // 每消费一个数据包,通知服务端一次,返回消息序号
+ // Notify the server each time a packet is consumed, returning the message sequence number
Reply reply_request = 4;
}
}
message ScanResponse{
ResponseHeader header = 1;
- // 消息序号
+ // Message sequence number
int32 seq_no = 2;
repeated Vertex vertex = 3;
repeated Edge edge = 4;
@@ -74,19 +74,19 @@ message Property{
}
message Vertex{
- int64 label = 1; // 点类型
- Variant id = 2; // 点ID
- repeated Property properties = 3; //点属性
+ int64 label = 1; // Vertex type
+ Variant id = 2; // Vertex ID
+ repeated Property properties = 3; // Vertex properties
}
message Edge{
- int64 label = 1; // 边类型
+ int64 label = 1; // Edge type
int64 sourceLabel = 2;
int64 targetLabel = 3;
- Variant source_id = 4; // 源点ID
- Variant target_id = 5; // 目标点ID
+ Variant source_id = 4; // Source vertex ID
+ Variant target_id = 5; // Target vertex ID
- repeated Property properties = 6; //边属性
+ repeated Property properties = 6; // Edge properties
}
message Variant {
@@ -116,7 +116,7 @@ enum VariantType {
message RequestHeader {
- // 发送者 ID.
+ // Sender ID.
uint64 sender_id = 2;
}
diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto
b/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto
index fc9934dec..bc4567019 100644
--- a/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto
+++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto
@@ -82,10 +82,10 @@ enum ScanMethod {
}
enum ScanOrderType{
- // 批量接口下,返回顺序的要求
- ORDER_NONE = 0; // 允许无序
- ORDER_WITHIN_VERTEX = 1; // 一个点内的边不会被打断,单不同点之间为无序
- ORDER_STRICT = 2; // 保证原始的输入点顺序
+ // Ordering requirement on results of the batch interface
+ ORDER_NONE = 0; // Allow unordered
+ ORDER_WITHIN_VERTEX = 1; // Edges of one vertex are not interleaved, but there is no ordering between different vertices
+ ORDER_STRICT = 2; // Preserve the original input vertex order
}
enum OpType {
diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto
b/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto
index b659645a6..e9cb94088 100644
--- a/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto
+++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto
@@ -122,11 +122,11 @@ message PartitionLeader {
enum PartitionFaultType{
PARTITION_FAULT_TYPE_UNKNOWN = 0;
- // 当前不是Leader,返回Leader所在store
+ // Currently not the Leader, return the store where the Leader is located.
PARTITION_FAULT_TYPE_NOT_LEADER = 1;
- // 等待Leader超时,可能raft group创建失败
+ // Timed out waiting for the Leader; the raft group may have failed to be created
PARTITION_FAULT_TYPE_WAIT_LEADER_TIMEOUT = 2;
- // 分区不属于本机
+ // Partition does not belong to this machine
PARTITION_FAULT_TYPE_NOT_LOCAL = 3;
}
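
The NOT_LEADER fault above implies the usual client pattern: re-route to the store reported as Leader and retry. A minimal sketch under assumed, simplified types (send() is a hypothetical stand-in for the real gRPC call, not the store's actual API):

public class LeaderRetryExample {
    enum Fault { NONE, NOT_LEADER, WAIT_LEADER_TIMEOUT, NOT_LOCAL }
    record Reply(Fault fault, String leaderStore, String value) {}

    // Hypothetical stand-in for the real gRPC call.
    static Reply send(String store, String key) {
        return "store-2".equals(store) ? new Reply(Fault.NONE, null, "v")
                                       : new Reply(Fault.NOT_LEADER, "store-2", null);
    }

    static String getWithRetry(String store, String key, int maxRetry) {
        for (int i = 0; i < maxRetry; i++) {
            Reply r = send(store, key);
            if (r.fault() == Fault.NONE) return r.value();
            if (r.fault() == Fault.NOT_LEADER && r.leaderStore() != null) {
                store = r.leaderStore(); // re-route to the reported Leader
                continue;
            }
            throw new IllegalStateException("partition fault: " + r.fault());
        }
        throw new IllegalStateException("retries exhausted");
    }

    public static void main(String[] args) {
        System.out.println(getWithRetry("store-1", "k", 3)); // prints "v"
    }
}
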
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java
index 9920d9238..674a7fe41 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java
@@ -49,14 +49,14 @@ public class AppConfig {
@Value("${server.port}")
private int restPort;
- //内置pd模式,用于单机部署
+ // Built-in pd mode, for standalone deployment
@Value("${app.data-path: store}")
private String dataPath;
@Value("${app.raft-path:}")
private String raftPath;
- //内置pd模式,用于单机部署
+ // Built-in pd mode, for standalone deployment
@Value("${app.fake-pd: false}")
private boolean fakePd;
@Autowired
@@ -97,7 +97,7 @@ public class AppConfig {
if (raft.getDisruptorBufferSize() == 0) {
int size = (int) (totalMemory / 1000 / 1000 / 1000);
size = (int) Math.pow(2, Math.round(Math.log(size) / Math.log(2)))
* 32;
- raft.setDisruptorBufferSize(size); // 每32M增加一个buffer
+ raft.setDisruptorBufferSize(size); // One buffer per 32M of memory
}
if (!rocksdb.containsKey("write_buffer_size") ||
@@ -213,7 +213,7 @@ public class AppConfig {
@Value("${fake-pd.store-list:''}")
private String storeList;
@Value("${fake-pd.peers-list:''}")
- private String peersList; //fakePd模式下,raft集群初始配置
+ private String peersList; // Initial raft cluster configuration in fakePd mode
@Value("${fake-pd.partition-count:3}")
private int partitionCount;
@Value("${fake-pd.shard-count:3}")
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppShutdownHook.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppShutdownHook.java
index 4b02e4e49..b239a327a 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppShutdownHook.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppShutdownHook.java
@@ -43,7 +43,7 @@ public class AppShutdownHook extends Thread {
doSomethingForShutdown();
try {
- mainThread.join(); //当收到停止信号时,等待mainThread的执行完成
+ mainThread.join(); // Wait for mainThread to finish when a stop
signal is received.
} catch (InterruptedException ignored) {
}
System.out.println("Shut down complete.");
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/StoreNodeApplication.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/StoreNodeApplication.java
index c74ccc329..c793ed96f 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/StoreNodeApplication.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/StoreNodeApplication.java
@@ -39,7 +39,7 @@ public class StoreNodeApplication {
}
public static void start() {
- // 设置solt用到的日志位置
+ // Set the log location used by the slot
String logPath = System.getProperty("logging.path");
if (StringUtils.isBlank(logPath)) {
System.setProperty("logging.path", "logs");
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java
index 8c23621b5..157c7dfda 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java
@@ -34,7 +34,7 @@ import org.springframework.web.bind.annotation.RestController;
import lombok.extern.slf4j.Slf4j;
/**
- * 仅用于测试
+ * For testing only
*/
@RestController
@Slf4j
@@ -75,7 +75,7 @@ public class HgTestController {
nodeService.getStoreEngine().destroyPartitionEngine(groupId,
graphs);
return "OK";
} else {
- return "未找到分区";
+ return "Partition not found";
}
}
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java
index 9247f35c7..d55bcbf28 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java
@@ -98,7 +98,7 @@ public class PartitionAPI {
String graphName = partitionEntry.getKey();
Partition pt = partitionEntry.getValue();
PartitionInfo partition = new PartitionInfo(pt);
- // 此处为了打开所有的图,metric只返回已打开的图
+ // Open all graphs here, since the metric only returns opened graphs
businessHandler.getLatestSequenceNumber(graphName, pt.getId());
partition.setMetric(
businessHandler.getPartitionMetric(graphName,
pt.getId(), accurate));
@@ -142,7 +142,7 @@ public class PartitionAPI {
}
/**
- * 打印分区的所有key
+ * Print all keys in the partition
*/
@GetMapping(value = "/partition/dump/{id}", produces =
MediaType.APPLICATION_JSON_VALUE)
public Map<String, Object> dumpPartition(@PathVariable(value = "id") int
id) throws
@@ -171,7 +171,7 @@ public class PartitionAPI {
}
/**
- * 打印分区的所有key
+ * Print all keys in the partition
*/
@GetMapping(value = "/partition/clean/{id}", produces =
MediaType.APPLICATION_JSON_VALUE)
public Map<String, Object> cleanPartition(@PathVariable(value = "id") int
id) throws
@@ -196,7 +196,7 @@ public class PartitionAPI {
ArthasAgent.attach(configMap);
// DashResponse retPose = new DashResponse();
List<String> ret = new ArrayList<>();
- ret.add("Arthas 启动成功");
+ ret.add("Arthas started successfully");
return okMap("arthasstart", ret);
}
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java
index d4ee59cfa..14c092678 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java
@@ -44,7 +44,7 @@ import io.grpc.stub.StreamObserver;
import lombok.extern.slf4j.Slf4j;
/**
- * 批量处理的grpc回调封装类
+ * Batch processing grpc callback wrapper class
*
* @param <V>
*/
@@ -95,7 +95,7 @@ class BatchGrpcClosure<V> {
}
/**
- * 不使用计数器latch
+ * Does not use the counter latch
*
* @return
*/
@@ -158,13 +158,13 @@ class BatchGrpcClosure<V> {
}
/**
- * 等待raft执行结束,返回结果给grpc
+ * Wait for the raft execution to complete, return the result to grpc
*/
public void waitFinish(StreamObserver<V> observer, Function<List<V>, V>
ok, long timeout) {
try {
countDownLatch.await(timeout, TimeUnit.MILLISECONDS);
- if (errorStatus.isEmpty()) { // 没有错误时,合并结果
+ if (errorStatus.isEmpty()) { // No error, merge results
observer.onNext(ok.apply(results));
} else {
observer.onNext((V) FeedbackRes.newBuilder()
@@ -186,7 +186,7 @@ class BatchGrpcClosure<V> {
}
/**
- * 从多个结果中选择一个错误的结果返回,如果没有错误,返回第一个
+ * Select one error result from the results; if there are no errors, return the first one
*/
public FeedbackRes selectError(List<FeedbackRes> results) {
if (!CollectionUtils.isEmpty(results)) {
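
BatchGrpcClosure's contract above — N raft tasks counted down on a latch, results merged unless any task errored — can be sketched as follows (simplified stand-in types, not the actual class):

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

public class BatchClosureSketch<V> {
    private final CountDownLatch latch;
    private final List<V> results = new CopyOnWriteArrayList<>();
    private final List<String> errors = new CopyOnWriteArrayList<>();

    BatchClosureSketch(int taskCount) { this.latch = new CountDownLatch(taskCount); }

    void onResult(V result) { results.add(result); latch.countDown(); }
    void onError(String error) { errors.add(error); latch.countDown(); }

    // Wait for all raft tasks, then merge the results if no task failed.
    V waitFinish(Function<List<V>, V> merge, long timeoutMs) throws InterruptedException {
        latch.await(timeoutMs, TimeUnit.MILLISECONDS);
        if (errors.isEmpty()) {
            return merge.apply(results);
        }
        throw new IllegalStateException(errors.get(0));
    }

    public static void main(String[] args) throws InterruptedException {
        BatchClosureSketch<Integer> c = new BatchClosureSketch<>(2);
        c.onResult(1);
        c.onResult(2);
        System.out.println(c.waitFinish(
                rs -> rs.stream().mapToInt(Integer::intValue).sum(), 1000)); // 3
    }
}
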
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java
index 785739edd..0d65066e9 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java
@@ -36,7 +36,7 @@ abstract class GrpcClosure<V> implements RaftClosure {
private V result;
/**
- * 设置输出结果给raftClosure,对于Follower来说,raftClosure为空
+ * Set the output result on raftClosure; for a Follower, raftClosure is null
*/
public static <V> void setResult(RaftClosure raftClosure, V result) {
GrpcClosure closure = (GrpcClosure) raftClosure;
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
index e99d7d24a..0305bd03c 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
@@ -183,7 +183,7 @@ public class HgStoreNodeService implements RaftTaskHandler {
invoke(partId, methodId, CleanReq.parseFrom(input),
response);
break;
default:
- return false; // 未处理
+ return false; // Unhandled
}
} catch (IOException e) {
throw new HgStoreException(e.getMessage(), e);
@@ -214,7 +214,7 @@ public class HgStoreNodeService implements RaftTaskHandler {
hgStoreSession.doClean(partId, (CleanReq) req, response);
break;
default:
- return false; // 未处理
+ return false; // Unhandled
}
return true;
}
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java
index b7766ea23..2bc0c27b8 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java
@@ -130,12 +130,12 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
String graph = request.getHeader().getGraph();
int partition = request.getPartition();
- // 发给不同的raft执行
+ // Send to different raft groups for execution
BatchGrpcClosure<FeedbackRes> closure = new BatchGrpcClosure<>(1);
storeService.addRaftTask(HgStoreNodeService.CLEAN_OP, graph, partition,
request,
closure.newRaftClosure());
- // 等待返回结果
+ // Wait for the return result
closure.waitFinish(responseObserver, r -> closure.selectError(r),
appConfig.getRaft().getRpcTimeOut());
}
@@ -228,7 +228,7 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
GraphMode graphMode = graphState.getMode();
if (graphMode != null &&
graphMode.getNumber() == GraphMode.ReadOnly_VALUE)
{
- // 状态为只读时从pd获取最新的图状态,图只读状态会在pd的通知中更新
+ // When the state is read-only, fetch the latest graph state from pd; the graph's read-only state is updated via pd's notification
Metapb.Graph pdGraph =
pd.getPDClient().getGraph(graph);
Metapb.GraphState pdGraphState =
@@ -237,13 +237,13 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
pdGraphState.getMode() != null &&
pdGraphState.getMode().getNumber() ==
GraphMode.ReadOnly_VALUE) {
- // 确认pd中存储的当前状态也是只读,则不允许插入数据
+ // If the state stored in pd is confirmed to be read-only as well, inserting data is not allowed
throw new PDException(-1,
"the graph space size " +
"has " +
"reached the threshold");
}
- // pd状态与本地缓存不一致,本地缓存更新为pd中的状态
+ // The pd state is inconsistent with the local cache; update the local cache to the state in pd
managerGraph.setProtoObj(pdGraph);
}
}
@@ -262,12 +262,12 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
return;
}
- // 按分区拆分数据
+ // Split data by partition
Map<Integer, List<BatchEntry>> groups = new HashMap<>();
list.forEach((entry) -> {
Key startKey = entry.getStartKey();
if (startKey.getCode() == HgStoreConst.SCAN_ALL_PARTITIONS_ID) {
- // 所有Leader分区
+ // All Leader partitions
List<Integer> ids =
storeService.getGraphLeaderPartitionIds(graph);
ids.forEach(id -> {
@@ -277,7 +277,7 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
groups.get(id).add(entry);
});
} else {
- // 根据keyCode查询所属分区ID,按分区ID分组
+ // Look up the owning partition ID by keyCode and group by partition ID
Integer partitionId =
pd.getPartitionByCode(graph, startKey.getCode())
.getId();
@@ -288,7 +288,7 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
}
});
- // 发给不同的raft执行
+ // Send to different raft groups for execution
BatchGrpcClosure<FeedbackRes> closure =
new BatchGrpcClosure<>(groups.size());
groups.forEach((partition, entries) -> {
@@ -306,7 +306,7 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
if (!graph.isEmpty()) {
log.debug(" batch: waiting raft...");
- // 等待返回结果
+ // Wait for the return result
closure.waitFinish(observer, r -> closure.selectError(r),
appConfig.getRaft().getRpcTimeOut());
log.debug(" batch: ended waiting");
@@ -382,16 +382,16 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
}
String graph = request.getHeader().getGraph();
- // 所有Leader分区
+ // All Leader partitions
List<Integer> ids = storeService.getGraphLeaderPartitionIds(graph);
- // 按分区拆分数据
+ // Split data by partition
Map<Integer, TableReq> groups = new HashMap<>();
- // 按分区拆分数据
+ // Split data by partition
ids.forEach(id -> {
groups.put(id, request);
});
- // 发给不同的raft执行
+ // Send to different raft groups for execution
BatchGrpcClosure<FeedbackRes> closure = new
BatchGrpcClosure<>(groups.size());
groups.forEach((partition, entries) -> {
storeService.addRaftTask(HgStoreNodeService.TABLE_OP, graph,
partition,
@@ -401,7 +401,7 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
if (!groups.isEmpty()) {
// log.info(" table waiting raft...");
- // 等待返回结果
+ // Wait for the return result
closure.waitFinish(observer, r -> closure.selectError(r),
appConfig.getRaft().getRpcTimeOut());
// log.info(" table ended waiting raft");
@@ -470,16 +470,16 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
}
String graph = request.getHeader().getGraph();
- // 所有Leader分区
+ // All Leader partitions
List<Integer> ids = storeService.getGraphLeaderPartitionIds(graph);
- // 按分区拆分数据
+ // Split data by partition
Map<Integer, GraphReq> groups = new HashMap<>();
- // 按分区拆分数据
+ // Split data by partition
ids.forEach(id -> {
groups.put(id, request);
});
- // 发给不同的raft执行
+ // Send to different raft groups for execution
BatchGrpcClosure<FeedbackRes> closure = new
BatchGrpcClosure<>(groups.size());
groups.forEach((partition, entries) -> {
storeService.addRaftTask(HgStoreNodeService.GRAPH_OP, graph,
partition,
@@ -488,7 +488,7 @@ public class HgStoreSessionImpl extends
HgStoreSessionGrpc.HgStoreSessionImplBas
});
if (!groups.isEmpty()) {
- // 等待返回结果
+ // Wait for the return result
closure.waitFinish(observer, r -> closure.selectError(r),
appConfig.getRaft().getRpcTimeOut());
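
The split-by-partition step above groups entries by the partition that owns each key code before submitting one raft task per group; a minimal sketch (partitionOf is a hypothetical stand-in for pd.getPartitionByCode):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PartitionGroupingExample {
    static int partitionOf(int keyCode, int partitionCount) {
        return keyCode % partitionCount; // stand-in for pd.getPartitionByCode
    }

    public static void main(String[] args) {
        int partitionCount = 3;
        int[] keyCodes = {7, 12, 8, 3};
        Map<Integer, List<Integer>> groups = new HashMap<>();
        for (int code : keyCodes) {
            groups.computeIfAbsent(partitionOf(code, partitionCount),
                                   k -> new ArrayList<>()).add(code);
        }
        // one raft task would be submitted per entry of `groups`
        groups.forEach((p, codes) -> System.out.println("partition " + p + " <- " + codes));
    }
}
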
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java
index 78355e178..26e7f2357 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java
@@ -113,7 +113,7 @@ public class HgStoreWrapperEx {
public boolean doGraph(int partId, GraphMethod method, String graph) {
boolean flag = true;
- if (method == GRAPH_METHOD_DELETE) {// 交给 raft 执行,此处不处理
+ if (method == GRAPH_METHOD_DELETE) {// Hand over to raft for
execution, no processing here
flag = true;
} else {
throw new UnsupportedOperationException("GraphMethod: " +
method.name());
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java
index 430d466c0..1f34b043f 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java
@@ -43,7 +43,7 @@ import com.alipay.sofa.jraft.util.Utils;
import lombok.extern.slf4j.Slf4j;
/**
- * 支持平行读取的批量查询迭代器
+ * Batch query iterator that supports parallel reads
*/
@Slf4j
public class ParallelScanIterator implements ScanIterator {
@@ -86,7 +86,7 @@ public class ParallelScanIterator implements ScanIterator {
Math.max(1, Math.min(query.getConditionCount() / 16,
maxWorkThreads));
}
this.maxInQueue = maxWorkThreads * 2;
- // 边有序需要更大的队列
+ // Ordered edges require a larger queue
queue = new LinkedBlockingQueue<>(maxInQueue * 2);
createScanner();
}
@@ -107,7 +107,7 @@ public class ParallelScanIterator implements ScanIterator {
while (current == null && tryTimes < waitDataMaxTryTimes) {
try {
if (queue.size() != 0 || !finished) {
- current = queue.poll(100, TimeUnit.MILLISECONDS);
//定期检查client是否被关闭了
+ current = queue.poll(100, TimeUnit.MILLISECONDS); // Periodically check whether the client has been closed
if (current == null && !finished) {
wakeUpScanner();
}
@@ -159,7 +159,7 @@ public class ParallelScanIterator implements ScanIterator {
}
/**
- * 创建扫描器
+ * Create the scanner
*/
private void createScanner() {
synchronized (scanners) {
@@ -173,7 +173,7 @@ public class ParallelScanIterator implements ScanIterator {
}
/**
- * 唤醒扫描器
+ * Wake up scanner
*/
private void wakeUpScanner() {
synchronized (pauseScanners) {
@@ -187,7 +187,7 @@ public class ParallelScanIterator implements ScanIterator {
}
/**
- * 休眠扫描器
+ * Put the scanner to sleep
*
* @param scanner
*/
@@ -209,10 +209,10 @@ public class ParallelScanIterator implements ScanIterator
{
}
/**
- * 添加到队列,返回队列是否已满
+ * Add to queue, return whether the queue is full
*
* @param data
- * @return false: 队列已满
+ * @return false: Queue is full
*/
private boolean putData(List<KV> data) {
try {
@@ -238,7 +238,7 @@ public class ParallelScanIterator implements ScanIterator {
queueLock.unlock();
}
}
- // 数据未结束,线程继续执行
+ // Data not finished; the thread continues running
return hasNext || this.queue.size() < maxInQueue;
}
@@ -305,7 +305,7 @@ public class ParallelScanIterator implements ScanIterator {
private volatile boolean closed = false;
private ScanIterator getIterator() {
- // 迭代器没有数据,或该点以达到limit,切换新的迭代器
+ // The iterator has no data, or this vertex has reached the limit; switch to a new iterator
if (iterator == null || !iterator.hasNext() || counter >= limit) {
if (iterator != null) {
iterator.close();
@@ -343,7 +343,7 @@ public class ParallelScanIterator implements ScanIterator {
if ((entriesSize >= batchSize || bodySize >= maxBodySize)
||
(orderEdge && bodySize >= maxBodySize / 2)) {
if (orderEdge) {
- //边排序,保证一个点的所有边连续,阻止其他点插入
+ // Order the edges so that all edges of one vertex are consecutive, preventing edges of other vertices from being interleaved
canNext = putData(dataList, iterator != null &&
iterator.hasNext());
} else {
canNext = putData(dataList);
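
putData's contract above — offer a batch into a bounded queue and report whether the scanner may continue — reduces to a bounded-queue producer; a simplified sketch (strings stand in for the real KV batches):

import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class BoundedScanQueueExample {
    private final LinkedBlockingQueue<List<String>> queue = new LinkedBlockingQueue<>(4);

    // Returns false when the queue is full, signalling the scanner to pause.
    boolean putData(List<String> batch) throws InterruptedException {
        return queue.offer(batch, 100, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        BoundedScanQueueExample q = new BoundedScanQueueExample();
        for (int i = 0; i < 6; i++) {
            // batches 0-3 are accepted, 4 and 5 time out against the full queue
            System.out.println("batch " + i + " accepted: " + q.putData(List.of("kv" + i)));
        }
    }
}
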
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java
index 3712fbd7c..99ce662fe 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java
@@ -37,10 +37,10 @@ import io.grpc.stub.StreamObserver;
import lombok.extern.slf4j.Slf4j;
/**
- * 批量查询处理器,批量查询数据,流式返回数据。
- * 1、服务端流式发送数据给客户端
- * 2、客户端每消费一批次数据,返回批次号给服务端
- * 3、服务端根据批次号决定发送多少数据,保证传送数据的不间断,
+ * Batch query processor: queries data in batches and streams the results back.
+ * 1. The server streams data to the client
+ * 2. The client returns the batch number to the server after consuming each batch of data
+ * 3. The server decides how much data to send based on the batch number, keeping the data transfer uninterrupted
@Slf4j
public class ScanBatchResponse implements StreamObserver<ScanStreamBatchReq> {
@@ -50,24 +50,24 @@ public class ScanBatchResponse implements
StreamObserver<ScanStreamBatchReq> {
static ByteBufferAllocator alloc =
new ByteBufferAllocator(ParallelScanIterator.maxBodySize * 3 / 2,
1000);
private final int maxInFlightCount =
PropertyUtil.getInt("app.scan.stream.inflight", 16);
- private final int activeTimeout =
PropertyUtil.getInt("app.scan.stream.timeout", 60); //单位秒
+ private final int activeTimeout = PropertyUtil.getInt("app.scan.stream.timeout", 60); // unit: seconds
private final StreamObserver<KvStream> sender;
private final HgStoreWrapperEx wrapper;
private final ThreadPoolExecutor executor;
private final Object stateLock = new Object();
private final Lock iteratorLock = new ReentrantLock();
- // 当前正在遍历的迭代器
+ // The iterator currently being traversed
private ScanIterator iterator;
- // 下一次发送的序号
+ // Sequence number to send next
private volatile int seqNo;
- // Client已消费的序号
+ // Sequence number already consumed by the client
private volatile int clientSeqNo;
- // 已经发送的条目数
+ // Number of entries sent
private volatile long count;
- // 客户端要求返回的最大条目数
+ // Maximum number of entries the client asked to return
private volatile long limit;
private ScanQueryRequest query;
- // 上次读取数据时间
+ // Time of the last data read
private long activeTime;
private volatile State state;
@@ -83,20 +83,20 @@ public class ScanBatchResponse implements
StreamObserver<ScanStreamBatchReq> {
}
/**
- * 接收客户端发送的消息
- * 服务端另起线程处理消息,不阻塞网络
+ * Receive messages sent by the client
+ * The server processes messages on a separate thread so the network is not blocked
*
* @param request
*/
@Override
public void onNext(ScanStreamBatchReq request) {
switch (request.getQueryCase()) {
- case QUERY_REQUEST: // 查询条件
+ case QUERY_REQUEST: // query conditions
executor.execute(() -> {
startQuery(request.getHeader().getGraph(),
request.getQueryRequest());
});
break;
- case RECEIPT_REQUEST: // 消息异步应答
+ case RECEIPT_REQUEST: // Asynchronous message acknowledgement
this.clientSeqNo = request.getReceiptRequest().getTimes();
if (seqNo - clientSeqNo < maxInFlightCount) {
synchronized (stateLock) {
@@ -111,7 +111,7 @@ public class ScanBatchResponse implements
StreamObserver<ScanStreamBatchReq> {
}
}
break;
- case CANCEL_REQUEST: // 关闭流
+ case CANCEL_REQUEST: // close stream
closeQuery();
break;
default:
@@ -132,7 +132,7 @@ public class ScanBatchResponse implements
StreamObserver<ScanStreamBatchReq> {
}
/**
- * 生成迭代器
+ * Generate iterator
*
* @param request
*/
@@ -152,7 +152,7 @@ public class ScanBatchResponse implements
StreamObserver<ScanStreamBatchReq> {
}
/**
- * 生成迭代器
+ * Generate iterator
*/
private void closeQuery() {
setStateDone();
@@ -178,7 +178,7 @@ public class ScanBatchResponse implements
StreamObserver<ScanStreamBatchReq> {
}
/**
- * 发送数据
+ * Send data
*/
private void sendEntries() {
if (state == State.DONE || iterator == null) {
@@ -255,7 +255,7 @@ public class ScanBatchResponse implements
StreamObserver<ScanStreamBatchReq> {
}
/**
- * 检查是否活跃,超过一定时间客户端没有请求数据,认为已经不活跃,关闭连接释放资源
+ * Check liveness: if the client has not requested data for a certain period, it is considered inactive and the connection is closed to release resources
*/
public void checkActiveTimeout() {
if ((System.currentTimeMillis() - activeTime) > activeTimeout * 1000L)
{
@@ -265,7 +265,7 @@ public class ScanBatchResponse implements
StreamObserver<ScanStreamBatchReq> {
}
/**
- * 任务状态
+ * Task state
*/
private enum State {
IDLE,
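
The seqNo bookkeeping in this file implements a simple flow-control window: the server keeps sending while seqNo - clientSeqNo stays below maxInFlightCount. A minimal sketch of that check (simplified, without the gRPC stream types):

public class SeqNoWindowExample {
    private final int maxInFlightCount = 16;
    private volatile int seqNo = 0;        // next sequence number to send
    private volatile int clientSeqNo = 0;  // last sequence number acked by the client

    boolean canSend() {
        return seqNo - clientSeqNo < maxInFlightCount;
    }

    void onSend() { seqNo++; }
    void onAck(int acked) { clientSeqNo = acked; }

    public static void main(String[] args) {
        SeqNoWindowExample w = new SeqNoWindowExample();
        while (w.canSend()) w.onSend();      // fills the 16-packet window
        System.out.println("window full, waiting for acks: " + !w.canSend());
        w.onAck(8);                          // client consumed 8 packets
        System.out.println("can send again: " + w.canSend());
    }
}
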
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java
index 9c6dafc77..43abfd97c 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java
@@ -54,7 +54,7 @@ public class ScanBatchResponseFactory {
}
/**
- * 检查是否Stream是否活跃,超时的Stream及时关闭
+ * Check whether each Stream is active, and promptly close timed-out Streams
*/
public void checkStreamActive() {
streamObservers.forEach(streamObserver -> {
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java
index 0148fa0b2..e2fcff42e 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java
@@ -149,7 +149,7 @@ class ScanUtil {
}
/**
- * 支持并行读取的多迭代器
+ * Multi-iterator that supports parallel reads
*/
static ScanIterator getParallelIterator(String graph, ScanQueryRequest
request,
HgStoreWrapperEx wrapper,
ThreadPoolExecutor executor) {
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java
index dcfc0549a..6583e5974 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java
@@ -34,7 +34,7 @@ import io.grpc.stub.StreamObserver;
import lombok.extern.slf4j.Slf4j;
/**
- * graphpb.proto 实现类
+ * graphpb.proto implementation class
*/
@Slf4j
@GRpcService
@@ -67,9 +67,9 @@ public class GraphStoreImpl extends GraphStoreImplBase {
}
/**
- * 流式回复消息,每个消息带有seqNo
- * 客户端每消费一个消息,应答一个seqNo
- * 服务端根据客户端的seqNo决定发送几个数据包
+ * Stream reply messages, each carrying a seqNo
+ * The client responds with a seqNo for each message consumed
+ * The server decides how many packets to send based on the client's seqNo
*
* @param ro
* @return
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java
index b5b49d039..dc57dae36 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java
@@ -71,17 +71,17 @@ public class ScanResponseObserver<T> implements
private volatile Future<?> readTask;
/*
- * 2022年11月1日
- * 1.onNext 需要进行异步处理,以防止grpc的调用阻塞
- * 2.不要读取迭代器或者发送数据不要产生线程等待
- * 3.在发送前,尽量准备好要发送的数据
+ * November 1, 2022
+ * 1. onNext needs to be processed asynchronously to prevent the grpc call from blocking
+ * 2. Reading the iterator or sending data must not cause threads to wait
+ * 3. Prepare the data to be sent ahead of time as much as possible
* */
/*
- * 2022年11月2日
- * 1.读取rocksdb迭代器的线程read
- * 2.进行数据转换并发送到阻塞队列的线程offer
- * 3.从阻塞队列读取数据,并发送的线程,包括在没有读取到数据的情况下唤醒读取和发送的线程send
+ * November 2, 2022
+ * 1. read: the thread that reads the rocksdb iterator
+ * 2. offer: the thread that converts data and puts it into the blocking queue
+ * 3. send: the thread that takes data from the blocking queue and sends it, and wakes up the reading and sending threads when no data has been read
* */
public ScanResponseObserver(StreamObserver<ScanResponse> sender,
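
The three threads described above (read, offer, send) form a blocking-queue pipeline; a self-contained sketch of the same shape (simplified, using strings in place of the real scan types):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class ScanPipelineExample {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> raw = new ArrayBlockingQueue<>(8);
        BlockingQueue<String> converted = new ArrayBlockingQueue<>(8);

        Thread read = new Thread(() -> {            // reads the iterator
            try {
                for (int i = 0; i < 5; i++) raw.put("kv-" + i);
                raw.put("EOF");
            } catch (InterruptedException ignored) { }
        });
        Thread offer = new Thread(() -> {           // converts and enqueues
            try {
                for (String s = raw.take(); !s.equals("EOF"); s = raw.take()) {
                    converted.put("resp(" + s + ")");
                }
                converted.put("EOF");
            } catch (InterruptedException ignored) { }
        });
        Thread send = new Thread(() -> {            // drains the queue and sends
            try {
                for (String s = converted.take(); !s.equals("EOF"); s = converted.take()) {
                    System.out.println("send " + s);
                }
            } catch (InterruptedException ignored) { }
        });
        read.start(); offer.start(); send.start();
        read.join(); offer.join(); send.join();
    }
}
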
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java
index 709e7fdb9..d818e5627 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java
@@ -103,7 +103,7 @@ public class PdConfigureListener implements
client.listen(TIMESTAMP_KEY, (Consumer<WatchResponse>) o -> {
log.info("receive message to restart :" + o);
try {
- // 优先更新最新配置文件,以免修改像端口之类的参数导致旧文件被优先加载
+ // Update the latest configuration file first, so that changing parameters such as ports does not cause the old file to be loaded first
ScanPrefixResponse responseNew =
client.scanPrefix(CONFIG_PREFIX);
Map<String, String> kvsMapNew = responseNew.getKvsMap();
String config = kvsMapNew.get(CONFIG_FIX_PREFIX);
diff --git
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java
index fb824c56a..0bb68895f 100644
---
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java
+++
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java
@@ -109,7 +109,7 @@ public abstract class HgGrpc {
String des,
Throwable t) {
if (t != null) {
- // 为给client返回完整异常信息
+ // To return complete exception information to the client
des = (des == null ? "" : des + ",") +
Throwables.getStackTraceAsString(t);
}
diff --git
a/hugegraph-store/hg-store-node/src/main/resources/application-pd.yml
b/hugegraph-store/hg-store-node/src/main/resources/application-pd.yml
index dc198c3c8..7985e67da 100644
--- a/hugegraph-store/hg-store-node/src/main/resources/application-pd.yml
+++ b/hugegraph-store/hg-store-node/src/main/resources/application-pd.yml
@@ -16,7 +16,7 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
management:
@@ -33,6 +33,6 @@ logging:
level:
root: info
rocksdb:
- # rocksdb 使用的总内存大小
+ # total memory size used by rocksdb
total_memory_size: 32000000000
write_buffer_size: 32000000
diff --git a/hugegraph-store/hg-store-node/src/main/resources/application.yml
b/hugegraph-store/hg-store-node/src/main/resources/application.yml
index 962101aac..0b6527060 100644
--- a/hugegraph-store/hg-store-node/src/main/resources/application.yml
+++ b/hugegraph-store/hg-store-node/src/main/resources/application.yml
@@ -16,11 +16,11 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
grpc:
- # grpc的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8500
netty-server:
@@ -28,14 +28,14 @@ grpc:
raft:
address: 127.0.0.1:8510
max-log-file-size: 600000000000
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, unit: seconds
snapshotInterval: 1800
server:
- # rest 服务地址
+ # rest service address
port: 8520
app:
- # 存储路径,支持多个路径,逗号分割
+ # Storage path; multiple paths supported, separated by commas
data-path: ./storage
spring:
diff --git
a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/HgStoreNodeServiceTest.java
b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/HgStoreNodeServiceTest.java
index 0ff80ff2e..336428f99 100644
---
a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/HgStoreNodeServiceTest.java
+++
b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/HgStoreNodeServiceTest.java
@@ -27,16 +27,16 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
- * HgStore单元测试
- * 1、测试raft多副本入库
- * 2、测试快照同步
- * 3、测试副本增减
- * 4、测试单幅本关闭日志入库
+ * HgStore unit tests
+ * 1. Test raft multi-replica writes
+ * 2. Test snapshot synchronization
+ * 3. Test replica addition and removal
+ * 4. Test single-replica writes with logging disabled
*/
public class HgStoreNodeServiceTest {
String yml =
- "rocksdb:\n" + " # rocksdb 使用的总内存大小\n" + " total_memory_size:
32000000000\n" +
+ "rocksdb:\n" + " # Total memory size used by rocksdb\n" + "
total_memory_size: 32000000000\n" +
" max_background_jobs: 8\n" + " max_subcompactions: 4\n" +
" target_file_size_multiplier: 4\n" + "
min_write_buffer_number_to_merge: 8\n" +
" target_file_size_base: 512000000";
diff --git
a/hugegraph-store/hg-store-node/src/test/resources/application-pd.yml
b/hugegraph-store/hg-store-node/src/test/resources/application-pd.yml
index 58673f5c2..4047e27af 100644
--- a/hugegraph-store/hg-store-node/src/test/resources/application-pd.yml
+++ b/hugegraph-store/hg-store-node/src/test/resources/application-pd.yml
@@ -16,7 +16,7 @@
#
rocksdb:
- # rocksdb 使用的总内存大小
+ # total memory size used by RocksDB
total_memory_size: 32000000000
max_background_jobs: 8
max_subcompactions: 4
diff --git
a/hugegraph-store/hg-store-node/src/test/resources/application-server00.yml
b/hugegraph-store/hg-store-node/src/test/resources/application-server00.yml
index 49b33b2ea..c4e2c5ed1 100644
--- a/hugegraph-store/hg-store-node/src/test/resources/application-server00.yml
+++ b/hugegraph-store/hg-store-node/src/test/resources/application-server00.yml
@@ -16,10 +16,10 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
grpc:
- # grpc的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8500
netty-server:
@@ -28,11 +28,11 @@ raft:
# enable: false
address: 127.0.0.1:8510
data-path: ${app.data-path}/raft
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, unit: seconds
snapshotInterval: 30
max-log-file-size: 60000000
server:
- # rest 服务地址
+ # rest service address
port: 8520
app:
@@ -60,14 +60,14 @@ rocksdb:
snapshot_path: ${app.data-path}/snapshot
bloom_filter_bits_per_key: 10
compression_per_level: "[none, zstd, zstd, zstd, zstd, zstd, zstd]"
-#fakePd配置参数
+# fakePd configuration parameters
fake-pd:
- # fake-pd模式下,store grpc集群列表
+ # fake-pd mode, store grpc cluster list
store-list: 127.0.0.1:8500
- # fake-pd模式下,设置raft集群列表
+ # fake-pd mode, set raft cluster list
peers-list: 127.0.0.1:8510
- # 分区数量
+ # Number of partitions
partition-count: 10
- # 每个分区副本数量
+ # Number of replicas per partition
shard-count: 3
diff --git
a/hugegraph-store/hg-store-node/src/test/resources/application-server01.yml
b/hugegraph-store/hg-store-node/src/test/resources/application-server01.yml
index 72482bc28..0a9c7972f 100644
--- a/hugegraph-store/hg-store-node/src/test/resources/application-server01.yml
+++ b/hugegraph-store/hg-store-node/src/test/resources/application-server01.yml
@@ -16,10 +16,10 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
grpc:
- # grpc的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8501
netty-server:
@@ -28,11 +28,11 @@ raft:
# enable: false
address: 127.0.0.1:8511
useRocksDBSegmentLogStorage: false
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, in seconds
snapshotInterval: 300
disruptorBufferSize: 128
server:
- # rest 服务地址
+ # rest service address
port: 8521
app:
@@ -58,13 +58,13 @@ rocksdb:
write_buffer_size: 2000000
level0_file_num_compaction_trigger: 2
bloom_filter_bits_per_key: 10
-#fakePd配置参数
+# fakePd configuration parameters
fake-pd:
- # fake-pd模式下,store grpc集群列表
+ # fake-pd mode, store grpc cluster list
store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
- # fake-pd模式下,设置raft集群列表
+ # fake-pd mode, set raft cluster list
peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513
- # 分区数量
+ # Number of partitions
partition-count: 10
- # 每个分区副本数量
+ # Number of replicas per partition
shard-count: 3
diff --git
a/hugegraph-store/hg-store-node/src/test/resources/application-server02.yml
b/hugegraph-store/hg-store-node/src/test/resources/application-server02.yml
index b69e0535e..b419a4f05 100644
--- a/hugegraph-store/hg-store-node/src/test/resources/application-server02.yml
+++ b/hugegraph-store/hg-store-node/src/test/resources/application-server02.yml
@@ -16,10 +16,10 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
grpc:
- # grpc的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8502
netty-server:
@@ -28,11 +28,11 @@ raft:
# enable: false
address: 127.0.0.1:8512
useRocksDBSegmentLogStorage: false
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, in seconds
snapshotInterval: 300
disruptorBufferSize: 128
server:
- # rest 服务地址
+ # rest service address
port: 8522
app:
@@ -57,13 +57,13 @@ management:
rocksdb:
db_max_alive_time: 120
-#fakePd配置参数
+# fakePd configuration parameters
fake-pd:
- # fake-pd模式下,store grpc集群列表
+ # fake-pd mode, store grpc cluster list
store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
- # fake-pd模式下,设置raft集群列表
+ # fake-pd mode, set raft cluster list
peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513
- # 分区数量
+ # Number of partitions
partition-count: 10
- # 每个分区副本数量
+ # Number of replicas per partition
shard-count: 3
diff --git
a/hugegraph-store/hg-store-node/src/test/resources/application-server03.yml
b/hugegraph-store/hg-store-node/src/test/resources/application-server03.yml
index 8028dffd9..709370c3c 100644
--- a/hugegraph-store/hg-store-node/src/test/resources/application-server03.yml
+++ b/hugegraph-store/hg-store-node/src/test/resources/application-server03.yml
@@ -16,10 +16,10 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
grpc:
- # grpc的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8503
netty-server:
@@ -29,11 +29,11 @@ raft:
address: 127.0.0.1:8513
snapshotLogIndexMargin: 1024
useRocksDBSegmentLogStorage: false
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, in seconds
snapshotInterval: 300
disruptorBufferSize: 128
server:
- # rest 服务地址
+ # rest service address
port: 8523
app:
@@ -59,13 +59,13 @@ rocksdb:
db_max_alive_time: 120
-#fakePd配置参数
+# fakePd configuration parameters
fake-pd:
- # fake-pd模式下,store grpc集群列表
+ # fake-pd mode, store grpc cluster list
store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
- # fake-pd模式下,设置raft集群列表
+ # fake-pd mode, set raft cluster list
peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513
- # 分区数量
+ # Number of partitions
partition-count: 10
- # 每个分区副本数量
+ # Number of replicas per partition
shard-count: 3
diff --git
a/hugegraph-store/hg-store-node/src/test/resources/application-server04.yml
b/hugegraph-store/hg-store-node/src/test/resources/application-server04.yml
index b9d35f443..b06a92c8d 100644
--- a/hugegraph-store/hg-store-node/src/test/resources/application-server04.yml
+++ b/hugegraph-store/hg-store-node/src/test/resources/application-server04.yml
@@ -16,10 +16,10 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
grpc:
- # grpc的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8504
netty-server:
@@ -28,10 +28,10 @@ raft:
# enable: false
address: 127.0.0.1:8514
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, unit: seconds
snapshotInterval: 300
server:
- # rest 服务地址
+ # rest service address
port: 8524
app:
@@ -56,13 +56,13 @@ management:
rocksdb:
-#fakePd配置参数
+# fakePd configuration parameters
fake-pd:
- # fake-pd模式下,store grpc集群列表
+ # fake-pd mode, store grpc cluster list
store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
- # fake-pd模式下,设置raft集群列表
+ # fake-pd mode, set raft cluster list
peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513
- # 分区数量
+ # Number of partitions
partition-count: 3
- # 每个分区副本数量
+ # Number of replicas per partition
shard-count: 3
diff --git
a/hugegraph-store/hg-store-node/src/test/resources/application-server05.yml
b/hugegraph-store/hg-store-node/src/test/resources/application-server05.yml
index 02b83f9a5..e3637051c 100644
--- a/hugegraph-store/hg-store-node/src/test/resources/application-server05.yml
+++ b/hugegraph-store/hg-store-node/src/test/resources/application-server05.yml
@@ -16,10 +16,10 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
grpc:
- # grpc的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8505
netty-server:
@@ -28,10 +28,10 @@ raft:
# enable: false
address: 127.0.0.1:8515
data-path: ${app.data-path}/raft
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, unit: seconds
snapshotInterval: 300
server:
- # rest 服务地址
+ # rest service address
port: 8525
app:
@@ -58,13 +58,13 @@ rocksdb:
wal_path: ${app.data-path}/db
snapshot_path: ${app.data-path}/snapshot
-#fakePd配置参数
+# fakePd configuration parameters
fake-pd:
- # fake-pd模式下,store grpc集群列表
+ # fake-pd mode, store grpc cluster list
store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
- # fake-pd模式下,设置raft集群列表
+ # fake-pd mode, set raft cluster list
peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513
- # 分区数量
+ # Number of partitions
partition-count: 3
- # 每个分区副本数量
+ # Number of replicas per partition
shard-count: 3
diff --git
a/hugegraph-store/hg-store-node/src/test/resources/application-server06.yml
b/hugegraph-store/hg-store-node/src/test/resources/application-server06.yml
index eeef451c1..8574bc370 100644
--- a/hugegraph-store/hg-store-node/src/test/resources/application-server06.yml
+++ b/hugegraph-store/hg-store-node/src/test/resources/application-server06.yml
@@ -16,10 +16,10 @@
#
pdserver:
- # pd服务地址,多个pd地址用逗号分割
+ # PD service address, multiple PD addresses separated by commas
address: localhost:8686
grpc:
- # grpc的服务地址
+ # grpc service address
host: 127.0.0.1
port: 8506
netty-server:
@@ -28,10 +28,10 @@ raft:
# enable: false
address: 127.0.0.1:8516
data-path: ${app.data-path}/raft
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, in seconds
snapshotInterval: 300
server:
- # rest 服务地址
+ # rest service address
port: 8526
app:
@@ -58,13 +58,13 @@ rocksdb:
wal_path: ${app.data-path}/db
snapshot_path: ${app.data-path}/snapshot
-#fakePd配置参数
+# fakePd configuration parameters
fake-pd:
- # fake-pd模式下,store grpc集群列表
+ # fake-pd mode, store grpc cluster list
store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
- # fake-pd模式下,设置raft集群列表
+ # fake-pd mode, set raft cluster list
peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513
- # 分区数量
+ # Number of partitions
partition-count: 3
- # 每个分区副本数量
+ # Number of replicas per partition
shard-count: 3
diff --git
a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBFactory.java
b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBFactory.java
index 2ec56fa2b..ce5dc665a 100644
---
a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBFactory.java
+++
b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBFactory.java
@@ -87,7 +87,7 @@ public final class RocksDBFactory {
watcher.dbSession.getRefCount(),
(System.currentTimeMillis() -
watcher.timestamp) / 1000);
} else {
- // 超时强制删除 (30min)
+ // Force delete after timeout (30min)
watcher.dbSession.forceResetRefCount();
}
}
@@ -188,7 +188,7 @@ public final class RocksDBFactory {
}
/**
- * 释放rocksdb对象
+ * Release rocksdb object
*
* @param dbName
* @return
@@ -213,7 +213,7 @@ public final class RocksDBFactory {
}
/**
- * 销毁图,并删除数据文件
+ * Destroy the graph and delete its data files
*
* @param dbName
*/
@@ -221,7 +221,7 @@ public final class RocksDBFactory {
log.info("destroy {} 's rocksdb.", dbName);
RocksDBSession dbSession = dbSessionMap.get(dbName);
releaseGraphDB(dbName);
- //增加删除标记
+ // Add delete mark
if (dbSession != null) {
destroyGraphDBs.add(new DBSessionWatcher(dbSession));
rocksdbChangedListeners.forEach(listener -> {
diff --git
a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBSession.java
b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBSession.java
index d1f89262a..c3356de24 100644
---
a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBSession.java
+++
b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBSession.java
@@ -358,7 +358,7 @@ public class RocksDBSession implements AutoCloseable,
Cloneable {
if (i == dbCount - 1) {
latestDBPath = curDBPath;
} else {
- // delete old db,在删除队列的文件不要删除
+ // delete old db, do not delete files in the deletion queue
if (!factory.findPathInRemovedList(curDBPath)) {
try {
FileUtils.deleteDirectory(new File(curDBPath));
@@ -373,7 +373,7 @@ public class RocksDBSession implements AutoCloseable,
Cloneable {
latestDBPath = Paths.get(parentFile.getPath(),
defaultName).toString();
}
if (factory.findPathInRemovedList(latestDBPath)) {
- // 已经被删除,创建新的目录
+ // Already deleted; create a new directory
latestDBPath =
Paths.get(parentFile.getPath(), String.format("%s_%d",
defaultName, version))
.toString();
diff --git
a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperator.java
b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperator.java
index 12c0d3759..e6de91a53 100644
---
a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperator.java
+++
b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperator.java
@@ -38,7 +38,7 @@ public interface SessionOperator {
ScanIterator scan(String tableName, byte[] keyFrom, byte[] keyTo, int
scanType);
/**
- * 扫描所有cf指定范围的数据
+ * Scan data in the specified range across all cfs
*/
ScanIterator scanRaw(byte[] keyFrom, byte[] keyTo, long startSeqNum);
@@ -62,7 +62,7 @@ public interface SessionOperator {
void deleteRange(String table, byte[] keyFrom, byte[] keyTo) throws
DBStoreException;
/**
- * 删除所有cf指定范围的数据
+ * Delete the specified key range across all column families
*/
void deleteRange(byte[] keyFrom, byte[] keyTo) throws DBStoreException;
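Both methods above operate on every column family rather than one named
table. A hedged sketch of the scan side against the plain RocksDB Java API
(the real implementation lives in SessionOperatorImpl; cfHandles, keyFrom,
keyTo, and consume are illustrative assumptions):

    for (ColumnFamilyHandle cf : cfHandles) {
        try (RocksIterator it = rocksdb.newIterator(cf)) {
            // Visit [keyFrom, keyTo) within this column family.
            for (it.seek(keyFrom);
                 it.isValid() && Arrays.compareUnsigned(it.key(), keyTo) < 0;
                 it.next()) {
                consume(cf, it.key(), it.value());
            }
        }
    }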
diff --git
a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperatorImpl.java
b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperatorImpl.java
index f4f898715..eca6a83a2 100644
---
a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperatorImpl.java
+++
b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperatorImpl.java
@@ -219,7 +219,7 @@ public class SessionOperatorImpl implements SessionOperator
{
}
/**
- * commit抛出异常后一定要调用rollback,否则会造成cfHandleReadLock未释放
+ * If commit throws an exception, rollback must be called; otherwise
cfHandleReadLock is never released.
*/
@Override
public Integer commit() throws DBStoreException {
@@ -302,13 +302,13 @@ public class SessionOperatorImpl implements
SessionOperator {
}
/**
- * 遍历所有cf指定范围的数据
- * TODO: rocksdb7.x 不支持 setStartSeqNum,改为使用 Timestamp
+ * Traverse the specified key range across all column families.
+ * TODO: rocksdb 7.x does not support setStartSeqNum; switch to
user-defined Timestamp instead.
* refer: https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp
*/
@Override
public ScanIterator scanRaw(byte[] keyFrom, byte[] keyTo, long
startSeqNum) {
- int kNumInternalBytes = 8; //internal key 增加的8个字节后缀
+ int kNumInternalBytes = 8; // the 8-byte suffix appended to the internal key
Snapshot snapshot = rocksdb().getSnapshot();
Iterator<String> cfIterator = session.getTables().keySet().iterator();
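The commit/rollback contract in the first hunk is easy to violate from the
caller's side. A minimal usage sketch (commit, rollback, and DBStoreException
appear in this diff; the put call is an assumed write operation):

    Integer commitOrRollback(SessionOperator op, String table,
                             byte[] key, byte[] value) throws DBStoreException {
        try {
            op.put(table, key, value);  // assumed write method
            return op.commit();
        } catch (DBStoreException e) {
            op.rollback();              // mandatory, or cfHandleReadLock leaks
            throw e;
        }
    }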
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerOneRaftFakePDTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerOneRaftFakePDTest.java
index b4e19f104..84584efc0 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerOneRaftFakePDTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerOneRaftFakePDTest.java
@@ -35,14 +35,14 @@ import org.apache.hugegraph.store.util.HgStoreTestUtil;
import org.junit.Assert;
/**
- * 使用fake-pd,支持raft的单元测试
+ * Unit test using fake-pd, with raft support.
*/
public class HgSessionManagerOneRaftFakePDTest {
private static final Map<Integer, Long> leaderMap = new
ConcurrentHashMap<>();
private static final Map<Long, String> storeMap = new
ConcurrentHashMap<>();
private static final int partitionCount = 3;
- // 需要与store的application.yml的fake-pd.partition-count保持一致
+ // Must match fake-pd.partition-count in the store's application.yml
private static final String[] storeAddress = {
"127.0.0.1:8500"
};
@@ -68,7 +68,7 @@ public class HgSessionManagerOneRaftFakePDTest {
Arrays.equals(startKey, endKey)) {
builder.add(leaderMap.get(startCode % partitionCount),
startCode);
} else {
- Assert.fail("OwnerKey转成HashCode后已经无序了, 按照OwnerKey范围查询没意义");
+ Assert.fail("OwnerKey converted to HashCode is no longer
ordered, querying by OwnerKey range is meaningless");
builder.add(leaderMap.get(startCode % partitionCount),
startCode);
builder.add(leaderMap.get(endCode % partitionCount), endCode);
}
@@ -174,8 +174,8 @@ public class HgSessionManagerOneRaftFakePDTest {
}
// @Test
- //CAUTION: ONLY FOR LONG!
- //注意:目前只可以对long类型value进行Merge操作。
+ // CAUTION: ONLY FOR LONG!
+ // Note: Merge currently works only on long-typed values.
public void merge() {
System.out.println("--- test merge (1+1=2)---");
HgStoreSession session = getStoreSession();
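Why the Assert.fail above fires: hashing destroys key order, so a
[startKey, endKey] pair no longer maps to a contiguous code range. A toy
illustration (the hash and modulo scheme here are assumptions, not the
client's actual code):

    byte[] k1 = "aaa".getBytes(StandardCharsets.UTF_8);
    byte[] k2 = "aab".getBytes(StandardCharsets.UTF_8);
    int c1 = Math.floorMod(Arrays.hashCode(k1), partitionCount);
    int c2 = Math.floorMod(Arrays.hashCode(k2), partitionCount);
    // k1 < k2 lexicographically, yet c1 and c2 carry no order information
    // and may land on unrelated partitions, so no single partition
    // interval can answer a range scan over hashed owner keys.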
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerRaftFakePDTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerRaftFakePDTest.java
index d01486487..cb3eae8a7 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerRaftFakePDTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerRaftFakePDTest.java
@@ -35,16 +35,16 @@ import org.apache.hugegraph.store.util.HgStoreTestUtil;
import org.junit.Assert;
/**
- * 使用fake-pd,支持raft的单元测试
+ * Unit test using fake-pd, with raft support.
*/
public class HgSessionManagerRaftFakePDTest {
private static final Map<Integer, Long> leaderMap = new
ConcurrentHashMap<>();
private static final Map<Long, String> storeMap = new
ConcurrentHashMap<>();
private static final int partitionCount = 3;
- // 需要与store的application.yml的fake-pd.partition-count保持一致
+ // Must match fake-pd.partition-count in the store's application.yml
private static final String[] storeAddress =
- { // 需要与store的application.yml的fake-pd.store-list保持一致
+ { // Must match fake-pd.store-list in the store's application.yml
"127.0.0.1:8501", "127.0.0.1:8502", "127.0.0.1:8503"
};
@@ -72,7 +72,7 @@ public class HgSessionManagerRaftFakePDTest {
Arrays.equals(startKey, endKey)) {
builder.add(leaderMap.get(startCode % partitionCount),
startCode);
} else {
- Assert.fail("OwnerKey转成HashCode后已经无序了, 按照OwnerKey范围查询没意义");
+ Assert.fail("OwnerKey converted to HashCode is no longer
ordered, querying by OwnerKey range is meaningless");
builder.add(leaderMap.get(startCode % partitionCount),
startCode);
builder.add(leaderMap.get(endCode % partitionCount), endCode);
}
@@ -216,8 +216,8 @@ public class HgSessionManagerRaftFakePDTest {
}
// @Test
- //CAUTION: ONLY FOR LONG!
- //注意:目前只可以对long类型value进行Merge操作。
+ // CAUTION: ONLY FOR LONG!
+ // Note: Merge currently works only on long-typed values.
public void merge() {
System.out.println("--- test merge (1+1=2)---");
HgStoreSession session = getStoreSession();
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerRaftPDTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerRaftPDTest.java
index 9820457d7..3b7dc64fd 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerRaftPDTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerRaftPDTest.java
@@ -52,7 +52,7 @@ import lombok.extern.slf4j.Slf4j;
/**
- * 使用pd,支持raft的单元测试
+ * Unit test using pd, with raft support.
*/
@Slf4j
public class HgSessionManagerRaftPDTest {
@@ -262,8 +262,8 @@ public class HgSessionManagerRaftPDTest {
}
// @Test
- //CAUTION: ONLY FOR LONG!
- //注意:目前只可以对long类型value进行Merge操作。
+ // CAUTION: ONLY FOR LONG!
+ // Note: Merge currently works only on long-typed values.
public void merge() {
System.out.println("--- test merge (1+1=2)---");
HgStoreSession session = getStoreSession();
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerTest.java
index 9a63ce74f..1ebaa9462 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/HgSessionManagerTest.java
@@ -56,7 +56,7 @@ public class HgSessionManagerTest {
"unit-test"));
private static final int partitionCount = 10;
- // 需要与 store 的 application.yml 的 fake-pd.partition-count 保持一致
+ // Must match fake-pd.partition-count in the store's application.yml
//private static String[] storeAddress = {"127.0.0.1:8500"};
private static final String[] storeAddress =
@@ -91,7 +91,7 @@ public class HgSessionManagerTest {
//log.info("leader-> {}",leaderMap.get(startCode /
PARTITION_LENGTH));
builder.add(leaderMap.get(startCode / PARTITION_LENGTH),
startCode);
} else {
- Assert.fail("OwnerKey 转成 HashCode 后已经无序了,按照 OwnerKey 范围查询没意义");
+ Assert.fail("OwnerKey converted to HashCode is already
unordered, querying by OwnerKey range is meaningless");
builder.add(leaderMap.get(startCode / PARTITION_LENGTH),
startCode);
builder.add(leaderMap.get(endCode / PARTITION_LENGTH),
endCode);
}
@@ -172,8 +172,8 @@ public class HgSessionManagerTest {
}
@Test
- //CAUTION: ONLY FOR LONG!
- //注意:目前只可以对 long 类型 value 进行 Merge 操作。
+ // CAUTION: ONLY FOR LONG!
+ // Note: Merge currently works only on long-typed values.
public void merge() {
System.out.println("--- test merge (1+1=2)---");
HgStoreSession session = getStoreSession();
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/PartitionEngineTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/PartitionEngineTest.java
index 7115aaa65..1bfe03b54 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/PartitionEngineTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/PartitionEngineTest.java
@@ -78,7 +78,7 @@ public class PartitionEngineTest {
.contains(peer))
.collect(Collectors.toList());
- // 新增 6、7
+ // Add 6, 7
Assert.assertEquals(2, addedNodes.size());
addedNodes.clear();
addedNodes.addAll(Arrays.asList(peers));
@@ -88,7 +88,7 @@ public class PartitionEngineTest {
addedNodes.forEach(s -> System.out.print(s + " "));
System.out.println();
- // 删除 4,5
+ // Delete 4, 5
Assert.assertEquals(2, removedNodes.size());
removedNodes.clear();
@@ -98,7 +98,7 @@ public class PartitionEngineTest {
Assert.assertEquals(2, removedNodes.size());
removedNodes.forEach(s -> System.out.print(s + " "));
System.out.println();
- // 交集 5
+ // Intersection 5
Assert.assertEquals(1, mixedPeer.size());
oldPeers1.removeAll(Arrays.asList(learners));
Assert.assertEquals(1, oldPeers1.size());
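The three assertions above exercise plain set difference and intersection
over peer lists. A self-contained sketch of that logic (peer IDs are
illustrative):

    List<String> oldPeers = new ArrayList<>(List.of("4", "5"));
    List<String> newPeers = new ArrayList<>(List.of("5", "6", "7"));
    List<String> added = new ArrayList<>(newPeers);
    added.removeAll(oldPeers);    // only in the new list -> [6, 7]
    List<String> removed = new ArrayList<>(oldPeers);
    removed.removeAll(newPeers);  // only in the old list -> [4]
    List<String> common = new ArrayList<>(oldPeers);
    common.retainAll(newPeers);   // the intersection -> [5]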
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/client/ChangeShardNumTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/client/ChangeShardNumTest.java
index be0c86996..f3c94e669 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/client/ChangeShardNumTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/client/ChangeShardNumTest.java
@@ -27,7 +27,7 @@ import org.junit.Assert;
import org.junit.Test;
/**
- * 测试修改副本数
+ * Test changing the number of shards (replicas)
*/
public class ChangeShardNumTest extends HgStoreClientBase {
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/client/HgSessionManagerRaftPDTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/client/HgSessionManagerRaftPDTest.java
index e52ae8d2d..f6b5e7c35 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/client/HgSessionManagerRaftPDTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/client/HgSessionManagerRaftPDTest.java
@@ -240,8 +240,8 @@ public class HgSessionManagerRaftPDTest extends
HgStoreClientBase {
}
// @Test
- // CAUTION: ONLY FOR LONG!
- // 注意:目前只可以对 long 类型 value 进行 Merge 操作。
+ // CAUTION: ONLY FOR LONG!
+ // Note: Merge currently works only on long-typed values.
public void merge() {
System.out.println("--- test merge (1+1=2)---");
HgStoreSession session = getStoreSession();
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/CoreSuiteTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/CoreSuiteTest.java
index 9781b06a7..68530367a 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/CoreSuiteTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/CoreSuiteTest.java
@@ -39,7 +39,7 @@ import lombok.extern.slf4j.Slf4j;
// ZipUtilsTest.class,
// MiscUtilClassTest.class,
// PartitionInstructionProcessorTest.class,
-// // 尽量放到最后
+// // Keep this one last if possible
// HgBusinessImplTest.class
//})
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/HgCmdClientTest.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/HgCmdClientTest.java
index d31e01724..8468f1b50 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/HgCmdClientTest.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/HgCmdClientTest.java
@@ -93,8 +93,8 @@ public class HgCmdClientTest {
}
public static Long getId() {
- // 如果需要更长 或者更大冗余空间,只需要 time * 10^n 即可
- // 当前可保证 1 毫秒 生成 10000 条不重复
+ // For a longer ID or more headroom, just multiply time by 10^n
+ // Currently guarantees 10000 unique IDs per millisecond
Long time = Long.valueOf(new SimpleDateFormat("HHmmssSSS").format(new
Date())) * 10000 +
(long) (Math.random() * 100);
// Long time = Long.valueOf(new
SimpleDateFormat("MMddhhmmssSSS").format(new Date())
@@ -144,7 +144,7 @@ public class HgCmdClientTest {
session.createTable(tableName);
String createGraph = "create_graph";
HgOwnerKey hgOwnerKey = toOwnerKey(createGraph);
- // 需要写数据,才会创建图
+ // The graph is created only after data is written.
session.put(tableName,
hgOwnerKey, createGraph.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(createGraph, toStr(session.get(tableName,
hgOwnerKey)));
@@ -215,7 +215,7 @@ public class HgCmdClientTest {
session.createTable(tableName);
String createGraph = "create_graph";
HgOwnerKey hgOwnerKey = toOwnerKey(createGraph);
- // 需要写数据,才会创建图
+ // The graph is created only after data is written.
session.put(tableName,
hgOwnerKey, createGraph.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(createGraph, toStr(session.get(tableName,
hgOwnerKey)));
@@ -264,7 +264,7 @@ public class HgCmdClientTest {
session.createTable(tableName);
String createGraph = "create_graph";
HgOwnerKey hgOwnerKey = toOwnerKey(createGraph);
- // 需要写数据,才会创建图
+ // The graph is created only after data is written.
session.put(tableName,
hgOwnerKey, createGraph.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(createGraph, toStr(session.get(tableName,
hgOwnerKey)));
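The getId() arithmetic in the first hunk works out as follows (restating
the code shown above; java.text and java.util imports assumed):

    // HHmmssSSS encodes the time of day down to the millisecond (at most
    // 9 digits); multiplying by 10^4 reserves four decimal digits per
    // millisecond, i.e. headroom for 10000 distinct suffixes, of which
    // this test fills only a random value in [0, 100).
    long time = Long.parseLong(
            new SimpleDateFormat("HHmmssSSS").format(new Date()));
    long id = time * 10000 + (long) (Math.random() * 100);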
diff --git
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/StoreEngineTestBase.java
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/StoreEngineTestBase.java
index 267b5a566..67596dc10 100644
---
a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/StoreEngineTestBase.java
+++
b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/core/StoreEngineTestBase.java
@@ -38,7 +38,7 @@ import com.alipay.sofa.jraft.util.StorageOptionsFactory;
import lombok.extern.slf4j.Slf4j;
/**
- * 使用 FakePd 和 FakePdOptions,初始化 HgStoreEngine,该类的各项 get 函数可用
+ * Initializes HgStoreEngine with FakePd and FakePdOptions; this class's
getter methods are then usable.
*/
@Slf4j
public class StoreEngineTestBase {
@@ -92,7 +92,7 @@ public class StoreEngineTestBase {
}
/**
- * 创建 分区为 0 的 partition engine. 该分区 1 个 shard,为 leader, graph name: graph0
+ * Creates the partition engine for partition 0. The partition has one
shard, which is the leader; graph name: graph0.
*
* @return
*/
diff --git a/hugegraph-store/hg-store-test/src/main/resources/pd-server.yml
b/hugegraph-store/hg-store-test/src/main/resources/pd-server.yml
index 5608dc9dd..578278202 100644
--- a/hugegraph-store/hg-store-test/src/main/resources/pd-server.yml
+++ b/hugegraph-store/hg-store-test/src/main/resources/pd-server.yml
@@ -41,31 +41,31 @@ pd:
patrol-interval: 3000000
data-path: tmp/8686
- # 最少节点数,少于该数字,集群停止入库
+ # Minimum number of nodes; below this, the cluster stops ingesting data.
initial-store-count: 1
- # 初始store列表,在列表内的store自动激活
+ # Initial store list; stores in this list are activated automatically.
initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
#initial-store-list: 127.0.0.1:8501
raft:
address: 127.0.0.1:8610
- # raft集群
+ # raft cluster
peers-list: 127.0.0.1:8610
- # raft rpc读写超时时间,单位毫秒
+ # raft rpc read/write timeout, in milliseconds
rpc-timeout: 10000
- # 快照生成时间间隔,单位秒
+ # Snapshot generation interval, unit: seconds
snapshotInterval: 30000
metrics: true
store:
- # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒
+ # store heartbeat timeout, in seconds; beyond this the store is considered
temporarily unavailable and the Leader is transferred to another replica
keepAlive-timeout: 300
- # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒
+ # store offline time, in seconds; beyond this the store is considered
permanently unavailable and its replicas are reassigned to other machines.
max-down-time: 180000
partition:
- # 默认每个分区副本数
+ # Default number of replicas per partition
default-shard-count: 3
- # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number /
default-shard-count
+ # Default maximum replicas per machine; initial partition count =
store-max-shard-count * store-number / default-shard-count
store-max-shard-count: 1
discovery:
- #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除
+ # Maximum number of missed heartbeats after client registration; once
exceeded, the previous registration information is deleted.
heartbeat-try-count: 3
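Worked example of the partition formula above, using this file's own values:
with store-max-shard-count = 1, the three stores in initial-store-list, and
default-shard-count = 3, the initial partition count is 1 * 3 / 3 = 1.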