This is an automated email from the ASF dual-hosted git repository.
vgalaxies pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git
The following commit(s) were added to refs/heads/master by this push:
new a038d2341 fix(hstore): JRaft maxEntriesSize configuration parameters do not take effect (#2630)
a038d2341 is described below
commit a038d2341cc58cb66af34e270d6134b56e54389f
Author: YangJiaqi <[email protected]>
AuthorDate: Sun Aug 11 13:04:34 2024 +0800
fix(hstore): JRaft maxEntriesSize configuration parameters do not take effect (#2630)
Co-authored-by: imbajin <[email protected]>
---
.asf.yaml | 3 ++-
.../hugegraph/store/options/HgStoreEngineOptions.java | 2 +-
.../java/org/apache/hugegraph/store/node/AppConfig.java | 2 ++
.../hugegraph/store/node/grpc/HgStoreNodeService.java | 13 +++++++------
4 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/.asf.yaml b/.asf.yaml
index 93063dc7d..581a611dc 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -28,7 +28,8 @@ github:
del_branch_on_merge: true
#labels:
enabled_merge_buttons:
- merge: false
+ # TODO: disable it after common merged
+ merge: true
rebase: true
squash: true
protected_branches:
diff --git a/hugegraph-store/hg-store-core/src/main/java/org/apache/hugegraph/store/options/HgStoreEngineOptions.java b/hugegraph-store/hg-store-core/src/main/java/org/apache/hugegraph/store/options/HgStoreEngineOptions.java
index 3b3ff9bc7..18d145fb4 100644
--- a/hugegraph-store/hg-store-core/src/main/java/org/apache/hugegraph/store/options/HgStoreEngineOptions.java
+++ b/hugegraph-store/hg-store-core/src/main/java/org/apache/hugegraph/store/options/HgStoreEngineOptions.java
@@ -98,7 +98,7 @@ public class HgStoreEngineOptions {
/**
* The maximum number of entries in AppendEntriesRequest
*/
- private final int maxEntriesSize = 256;
+ private int maxEntriesSize = 256;
/**
* Raft cluster data backlog occurs, rate limiting wait time in milliseconds.
**/
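The one-line change above is the core of the fix: a final field with an initializer cannot be reassigned, so no setter (hand-written or generated, e.g. by Lombok, which the options class appears to rely on given the set* calls wired in HgStoreNodeService below) could overwrite the default of 256, and any configured value was silently ignored. A minimal sketch of the corrected behaviour, using hypothetical stand-in classes rather than the real HgStoreEngineOptions:

    // Sketch only: stand-in for an options class whose field used to be final.
    public class MaxEntriesSizeSketch {

        static class Options {
            // Previously the field was `private final int maxEntriesSize = 256;`:
            // a final field gets no setter, so the configured value could never
            // replace the default.
            private int maxEntriesSize = 256;

            void setMaxEntriesSize(int maxEntriesSize) {
                this.maxEntriesSize = maxEntriesSize;
            }

            int getMaxEntriesSize() {
                return maxEntriesSize;
            }
        }

        public static void main(String[] args) {
            Options options = new Options();
            options.setMaxEntriesSize(512); // e.g. a value read from raft.maxEntriesSize
            System.out.println(options.getMaxEntriesSize()); // prints 512
        }
    }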
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java
index 6c561f4c0..c65691223 100644
--- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java
@@ -182,6 +182,8 @@ public class AppConfig {
private int maxSegmentFileSize;
@Value("${raft.maxReplicatorInflightMsgs:256}")
private int maxReplicatorInflightMsgs;
+ @Value("${raft.maxEntriesSize:256}")
+ private int maxEntriesSize;
}
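With the @Value binding added above, the batch limit can now be tuned from the store node's Spring configuration and falls back to 256 when the key is absent. A hypothetical excerpt (the exact config file depends on the deployment; application.yml is assumed here):

    # Hypothetical excerpt: raise JRaft's AppendEntriesRequest batch limit
    raft:
      maxEntriesSize: 512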
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
index 4492f37b2..6c8855516 100644
--- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
@@ -100,6 +100,7 @@ public class HgStoreNodeService implements RaftTaskHandler {
.isUseRocksDBSegmentLogStorage());
setMaxSegmentFileSize(appConfig.getRaft().getMaxSegmentFileSize());
setMaxReplicatorInflightMsgs(appConfig.getRaft().getMaxReplicatorInflightMsgs());
+ setMaxEntriesSize(appConfig.getRaft().getMaxEntriesSize());
}});
setFakePdOptions(new FakePdOptions() {{
setStoreList(appConfig.getFakePdConfig().getStoreList());
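Together with the two changes above, the setting now flows end to end: Spring binds raft.maxEntriesSize into AppConfig, the double-brace initializer in this hunk copies it into the raft options, and the store engine passes it on to JRaft. A rough sketch of that chain with simplified, hypothetical classes (not the real AppConfig or HgStoreEngineOptions):

    // Sketch only: threading a config value through layered options objects.
    public class WiringSketch {

        static class RaftConfig {
            int maxEntriesSize = 256;   // bound from raft.maxEntriesSize in the real code
        }

        static class RaftOptionsSketch {
            private int maxEntriesSize;
            void setMaxEntriesSize(int v) { this.maxEntriesSize = v; }
            int getMaxEntriesSize()       { return maxEntriesSize; }
        }

        public static void main(String[] args) {
            RaftConfig appConfig = new RaftConfig();
            // Double-brace initialization, mirroring the style used in HgStoreNodeService.
            RaftOptionsSketch options = new RaftOptionsSketch() {{
                setMaxEntriesSize(appConfig.maxEntriesSize);
            }};
            System.out.println(options.getMaxEntriesSize()); // 256 unless overridden
        }
    }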
@@ -125,9 +126,9 @@ public class HgStoreNodeService implements RaftTaskHandler {
}
/**
- * 添加raft 任务,转发数据给raft
+ * Add raft task, forward data to raft
*
- * @return true 表示数据已被提交,false表示未提交,用于单副本入库减少批次拆分
+ * @return true means the data has been submitted, false means not submitted, used to reduce batch splitting for single-replica storage
*/
public <Req extends com.google.protobuf.GeneratedMessageV3>
void addRaftTask(byte methodId, String graphName, Integer partitionId, Req req,
@@ -140,14 +141,14 @@ public class HgStoreNodeService implements RaftTaskHandler {
}
//
try {
- // 序列化,
+ // Serialization
final byte[] buffer = new byte[req.getSerializedSize() + 1];
final CodedOutputStream output = CodedOutputStream.newInstance(buffer);
output.write(methodId);
req.writeTo(output);
output.checkNoSpaceLeft();
output.flush();
- // 传送给raft
+ // Add raft task
storeEngine.addRaftTask(graphName, partitionId, RaftOperation.create(methodId, buffer, req), closure);
@@ -159,7 +160,7 @@ public class HgStoreNodeService implements RaftTaskHandler {
}
/**
- * 来自日志的任务,一般是follower 或者 日志回滚的任务
+ * Tasks from logs, generally tasks from followers or log rollbacks
*/
@Override
public boolean invoke(int partId, byte[] request, RaftClosure response) throws
@@ -190,7 +191,7 @@ public class HgStoreNodeService implements RaftTaskHandler {
}
/**
- * 处理raft传送过来的数据
+ * Process the data sent by raft
*/
@Override
public boolean invoke(int partId, byte methodId, Object req, RaftClosure response) throws