This is an automated email from the ASF dual-hosted git repository.

vgalaxies pushed a commit to branch trans-pd
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git
commit 44387491fd095c2339d9d5b51f3bd29a798e4723
Author: VGalaxies <[email protected]>
AuthorDate: Sun May 12 12:56:51 2024 +0800

    translate pd service (except for PDService)
---
 .../org/apache/hugegraph/pd/boot/HugePDServer.java |  2 +-
 .../hugegraph/pd/pulse/PdInstructionSubject.java   |  2 +-
 .../java/org/apache/hugegraph/pd/rest/API.java     |  2 +-
 .../org/apache/hugegraph/pd/rest/GraphAPI.java     | 12 +++---
 .../org/apache/hugegraph/pd/rest/IndexAPI.java     | 41 +++++++++---------
 .../org/apache/hugegraph/pd/rest/MemberAPI.java    |  6 +--
 .../org/apache/hugegraph/pd/rest/PartitionAPI.java | 46 ++++++++++-----------
 .../org/apache/hugegraph/pd/rest/ShardAPI.java     |  2 -
 .../org/apache/hugegraph/pd/rest/StoreAPI.java     | 48 ++++++++++++----------
 .../hugegraph/pd/service/KvServiceGrpcImpl.java    | 30 +++++++-------
 .../apache/hugegraph/pd/service/PDRestService.java |  2 +-
 .../hugegraph/pd/service/UpgradeService.java       |  4 +-
 .../hugegraph/pd/upgrade/VersionUpgradeScript.java |  4 +-
 .../apache/hugegraph/pd/watch/KvWatchSubject.java  | 31 +++++++-------
 .../src/main/resources/application.yml             | 29 ++++++-------
 .../src/test/resources/application-server0.yml     | 10 -----
 .../src/test/resources/application-server1.yml     | 11 -----
 .../src/test/resources/application-server2.yml     | 10 -----
 .../src/test/resources/application-server3.yml     | 10 -----
 19 files changed, 135 insertions(+), 167 deletions(-)

diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java
index 815005259..cf105680d 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java
@@ -24,7 +24,7 @@ import org.springframework.context.annotation.ComponentScan;
 import com.alipay.remoting.util.StringUtils;
 
 /**
- * PD 服务启动类
+ * PD service startup class
  */
 @ComponentScan(basePackages = {"org.apache.hugegraph.pd"})
 @SpringBootApplication
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java
index e123384b2..cf1b61553 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java
@@ -38,7 +38,7 @@ public class PdInstructionSubject extends AbstractObserverSubject {
     }
 
     /**
-     * pd单纯的向pulse发送的指令,不接收对应的notice
+     * A command that PD simply sends to pulse; no corresponding notice is received
      *
      * @return null
      */
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java
index d748d23a7..a2287cb83 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java
@@ -107,7 +107,7 @@ public class API {
                 }
                 builder.append(",");
             });
-            builder.deleteCharAt(builder.length() - 1); //删除最后一个逗号
+            builder.deleteCharAt(builder.length() - 1);
         }
         builder.append("]").append(COMMA);
     }
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java
index 68d80beb4..0c25d78c3 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java
@@ -143,7 +143,7 @@ public class GraphAPI extends API {
                 statistics = new GraphStatistics(graph);
                 response.setData(statistics);
             } else {
-                response.setData(new HashMap<String, Object>()); //没有该图
+                response.setData(new HashMap<String, Object>());
             }
             response.setStatus(Pdpb.ErrorType.OK.getNumber());
             response.setMessage(Pdpb.ErrorType.OK.name());
@@ -176,7 +176,7 @@ public class GraphAPI extends API {
         public Shard(Metapb.Shard shard, long partitionId) {
             this.role = String.valueOf(shard.getRole());
             this.storeId = shard.getStoreId();
-            this.state = Metapb.ShardState.SState_Normal.name(); //gshard的状态默认为normal
+            this.state = Metapb.ShardState.SState_Normal.name();
             this.progress = 0;
             this.partitionId = partitionId;
         }
@@ -236,7 +236,7 @@ public class GraphAPI extends API {
     @Data
     class GraphStatistics {
-        //图统计信息
+        // Graph statistics
         String graphName;
         long partitionCount;
         String state;
@@ -255,7 +255,7 @@ public class GraphAPI extends API {
             graphName = graph.getGraphName();
             partitionCount = graph.getPartitionCount();
             state = String.valueOf(graph.getState());
-            // 数据量及key的数量
+            // Data size and number of keys
             List<Metapb.Store> stores = pdRestService.getStores(graphName);
             for (Metapb.Store store : stores) {
                 List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
@@ -272,7 +272,7 @@ public class GraphAPI extends API {
             List<Partition> resultPartitionList = new ArrayList<>();
             List<Metapb.Partition> tmpPartitions = pdRestService.getPartitions(graphName);
             if ((tmpPartitions != null) && (!tmpPartitions.isEmpty())) {
-                // 需要返回的分区信息
+                // Partition information to be returned
                 for (Metapb.Partition partition : tmpPartitions) {
                     Metapb.PartitionStats partitionStats = pdRestService
                             .getPartitionStats(graphName, partition.getId());
@@ -282,7 +282,7 @@ public class GraphAPI extends API {
                 }
             }
             partitions = resultPartitionList;
-            // 隐去图名后面的 /g /m /s
+            // Strip the trailing /g /m /s from the graph name
            final int postfixLength = 2;
            graphName = graphName.substring(0, graphName.length() - postfixLength);
         }
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java
index 89f6e8624..61f3c5a2c 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java
@@ -98,7 +98,6 @@ public class IndexAPI extends API {
             statistics.onlineStoreSize = pdService.getStoreNodeService().getActiveStores().size();
             statistics.offlineStoreSize = statistics.storeSize - statistics.onlineStoreSize;
             List<Metapb.Graph> graphs = pdRestService.getGraphs();
-            // 图的数量,只统计/g
             statistics.graphSize = graphs.stream().filter((g) -> (g.getGraphName() != null) &&
                                                                  (g.getGraphName().endsWith("/g")))
@@ -112,11 +111,15 @@
                     statistics.dataSize += graphStats.getApproximateSize();
                 }
             }
-            // 数据状态:根据图的状态推出数据状态,枚举值越大,问题越严重, 默认为正常状态
+            // Data state: derived from the states of the graphs. The larger the
+            // enum value, the more serious the problem; the default is the
+            // normal state.
             Metapb.PartitionState dataState = Metapb.PartitionState.PState_Normal;
             for (Metapb.Graph graph : pdRestService.getGraphs()) {
                 if (graph.getState() == Metapb.PartitionState.UNRECOGNIZED) {
-                    continue; // 未识别不参与对比,不然会抛出异常
+                    // UNRECOGNIZED graphs are skipped in the comparison,
+                    // otherwise an exception would be thrown
+                    continue;
                 }
                 if ((graph.getState() != null) &&
                     (graph.getState().getNumber() > dataState.getNumber())) {
@@ -174,9 +177,9 @@ public class IndexAPI extends API {
        String state;
        String dataPath;
        String role;
-        String serviceName; //服务名称,自定义属性
-        String serviceVersion; //静态定义
-        long startTimeStamp; //进程启动时间
+        String serviceName; // service name, a custom attribute
+        String serviceVersion; // statically defined
+        long startTimeStamp; // process start time
 
         public Member(Metapb.Member member) {
             if (member != null) {
@@ -200,31 +203,31 @@
     class Statistics {
 
        /**
-         * 集群状态
+         * Cluster state
         */
        String state;
        /**
-         * 数据状态
+         * Data state
         */
        String dataState;
        /**
-         * pd集群成员
+         * PD cluster members
         */
        List<Member> pdList;
        /**
-         * pd集群的leader
+         * Leader of the PD cluster
         */
        Member pdLeader;
        /**
-         * pd集群的大小
+         * Size of the PD cluster
         */
        int memberSize;
        /**
-         * stores列表
+         * List of stores
         */
        List<Store> stores;
        /**
-         * store的数量
+         * Number of stores
         */
        int storeSize;
        /**
@@ -232,27 +235,27 @@
         */
        int onlineStoreSize;
        /**
-         * 离线的store的数量
+         * Number of offline stores
         */
        int offlineStoreSize;
        /**
-         * 图的数量
+         * Number of graphs
         */
        long graphSize;
        /**
-         * 分区的数量
+         * Number of partitions
         */
        int partitionSize;
        /**
-         * 分区副本数
+         * Number of partition replicas
         */
        int shardCount;
        /**
-         * key的数量
+         * Number of keys
         */
        long keyCount;
        /**
-         * 数据量
+         * Data size
         */
        long dataSize;
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java
index d0078b5db..392cf2aab 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java
@@ -216,9 +216,9 @@
        String dataPath;
        String role;
        String replicateState;
-        String serviceName; //服务名称,自定义属性
-        String serviceVersion; //静态定义
-        long startTimeStamp; //启动时间,暂时取进程的启动时间
+        String serviceName; // service name, a custom attribute
+        String serviceVersion; // statically defined
+        long startTimeStamp; // startup time; for now the process start time is used
 
         public Member(Metapb.Member member) {
             if (member != null) {
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java
index bdbdec39d..eeaef4df3 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java
@@ -58,12 +58,12 @@ public class PartitionAPI extends API {
     @GetMapping(value = "/highLevelPartitions", produces = MediaType.APPLICATION_JSON_VALUE)
     public RestApiResponse getHighLevelPartitions() {
-        // 分区下多个图的信息
+        // Information about the graphs under each partition
         Map<Integer, Map<String, GraphStats>> partitions2GraphsMap = new HashMap<>();
         Map<Integer, HighLevelPartition> resultPartitionsMap = new HashMap<>();
-        // 每一个分区的keyCount, 只从leader处取出
+        // keyCount of each partition, taken from the leader only
         Map<Integer, Long> partition2KeyCount = new HashMap<>();
-        // 每一个分区的dataSize, 只从leader处取出
+        // dataSize of each partition, taken from the leader only
         Map<Integer, Long> partition2DataSize = new HashMap<>();
         List<Metapb.Store> stores;
         Map<Long, Metapb.Store> storesMap = new HashMap<>();
@@ -77,20 +77,19 @@
                 storesMap.put(store.getId(), store);
                 List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
                 for (Metapb.GraphStats graphStats : graphStatsList) {
-                    // 获取分区保存的图信息(只从leader处取出来)
+                    // Get the graph information stored in the partition (from the leader only)
                     if (Metapb.ShardRole.Leader != graphStats.getRole()) {
                         continue;
                     }
-                    // 计算分区的keyCount(不区分图)
+                    // Compute the keyCount of the partition (across all graphs)
                     partition2KeyCount.put(graphStats.getPartitionId(),
                                            partition2KeyCount.getOrDefault(graphStats.getPartitionId(),
                                                                            graphStats.getApproximateKeys()));
-                    // 计算分区的dataSize, 通过累加图的大小实现
+                    // Compute the dataSize of the partition by summing up the sizes of its graphs
                     partition2DataSize.put(graphStats.getPartitionId(),
                                            partition2DataSize.getOrDefault(graphStats.getPartitionId(), 0L)
                                            + graphStats.getApproximateSize());
-                    // 构造分区下的图信息
                     if (partitions2GraphsMap.get(graphStats.getPartitionId()) == null) {
                         partitions2GraphsMap.put(graphStats.getPartitionId(),
                                                  new HashMap<String, GraphStats>());
                     }
@@ -100,10 +99,10 @@
                     partitionGraphsMap.put(graphStats.getGraphName(), new GraphStats(graphStats));
                 }
             }
-            // 构造分区的所有需返回的信息
+            // Build all the partition information to be returned
             List<Metapb.Partition> partitionList = pdRestService.getPartitions("");
             for (Metapb.Partition partition : partitionList) {
-                // 补充分区内图信息的startKey, endKey
+                // Fill in the startKey and endKey of the graphs in the partition
                 if (partitions2GraphsMap.get(partition.getId()) != null) {
                     GraphStats graphStats =
                             partitions2GraphsMap.get(partition.getId()).get(partition.getGraphName());
@@ -112,7 +111,7 @@
                     graphStats.endKey = partition.getEndKey();
                 }
             }
-            // 构造分区整体信息(不区分图)
+            // Build the overall partition information (regardless of graph)
             if ((resultPartitionsMap.get(partition.getId()) == null) &&
                 (!partition.getGraphName().endsWith("/s"))
             ) {
@@ -124,7 +123,7 @@
                     log.error("getPartitionStats error", e);
                     partitionStats = null;
                 }
-                // 初始化分区信息
+                // Initialize the partition information
                 HighLevelPartition resultPartition =
                         new HighLevelPartition(partition, partitionStats);
                 resultPartition.keyCount =
@@ -132,28 +131,29 @@
                 resultPartition.dataSize =
                         partition2DataSize.getOrDefault(resultPartition.partitionId, 0L);
                 for (ShardStats shard : resultPartition.shards) {
-                    // 对副本的地址,分区信息赋值
+                    // Set the address and partition information of each replica
                     shard.address = storesMap.get(shard.storeId).getAddress();
                     shard.partitionId = partition.getId();
                 }
                 if ((partitionStats != null) && (partitionStats.getLeader() != null)) {
-                    long storeId = partitionStats.getLeader().getStoreId(); // 获取leader的storeId
+                    long storeId = partitionStats.getLeader().getStoreId();
                     resultPartition.leaderAddress =
-                            storesMap.get(storeId).getAddress(); // 获取leader的address
+                            storesMap.get(storeId).getAddress();
                 resultPartitionsMap.put(partition.getId(), resultPartition);
             }
         }
-        // 构造需返回的分区下的图列表,只返回/g, 且按名称排序
+        // Build the list of graphs under each partition to return: only /g graphs,
+        // sorted by name
         for (Map.Entry<Integer, HighLevelPartition> entry : resultPartitionsMap.entrySet()) {
             Integer partitionId = entry.getKey();
             HighLevelPartition currentPartition = resultPartitionsMap.get(partitionId);
             Map<String, GraphStats> graphsMap = partitions2GraphsMap
-                    .getOrDefault(partitionId, new HashMap<>()); // 避免后面出现空指针异常
+                    .getOrDefault(partitionId, new HashMap<>()); // avoid a NullPointerException later
             ArrayList<GraphStats> graphsList = new ArrayList<>();
             for (Map.Entry<String, GraphStats> entry1 : graphsMap.entrySet()) {
                 if (!entry1.getKey().endsWith("/g")) {
-                    continue; // 只保留/g的图
+                    continue; // keep only /g graphs
                 }
                 String graphName = entry1.getKey();
                 GraphStats tmpGraph = graphsMap.get(graphName);
@@ -181,10 +181,10 @@
     @GetMapping(value = "/partitions", produces = MediaType.APPLICATION_JSON_VALUE)
     public RestApiResponse getPartitions() {
         try {
-            List<Partition> partitions = new ArrayList<>();//需返回的分区对象
+            List<Partition> partitions = new ArrayList<>();
             List<Metapb.Partition> partitionList = pdRestService.getPartitions("");
             List<Metapb.Store> stores = pdRestService.getStoreStats(false);
-            //分区的raftNode的状态
+            // State of each partition's raft node
             HashMap<Long, HashMap<Integer, Metapb.RaftStats>> raftMap = new HashMap<>();
             HashMap<Long, HashMap<String, Metapb.GraphStats>> shardIndexMap = new HashMap<>();
@@ -392,7 +392,7 @@
        long dataSize;
        String shardState;
        int progress;
-        long raftTerm; //任期
+        long raftTerm;
        List<GraphStats> graphs;
        List<ShardStats> shards;
        String failureCause = "";
@@ -424,7 +424,7 @@
                     log.error("get shard list failed, {}", e.getMessage());
                 }
             }
-            // 综合所有副本的状态,给shardState赋值
+            // Derive shardState from the states of all replicas
             shardState = tmpShardState.name();
         }
     }
@@ -456,7 +456,7 @@
        String role;
        String state;
        int progress;
-        //额外属性
+        // Extra attributes
        long partitionId;
        String address;
@@ -468,7 +468,7 @@
        }
 
        ShardStats(Metapb.Shard shard) {
-            //当没有shardStats的初始化方法
+            // Constructor used when no shardStats are available
            storeId = shard.getStoreId();
            role = String.valueOf(shard.getRole());
            state = Metapb.ShardState.SState_Normal.name();
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java
index 6cb5b09da..53637806d 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java
@@ -50,8 +50,6 @@ public class ShardAPI extends API {
     @GetMapping(value = "/shards", produces = MediaType.APPLICATION_JSON_VALUE)
     @ResponseBody
     public RestApiResponse getShards() {
-
-        //对shards信息的统计
         try {
             List<Shard> resultShardList = new ArrayList<>();
             List<Metapb.Graph> graphs = pdRestService.getGraphs();
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java
index 030d5de46..10c783f7d 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java
@@ -81,7 +81,7 @@ public class StoreAPI extends API {
         }
     }
 
-    // 仅支持通过该接口修改 storeState
+    // storeState can only be modified through this API
     @PostMapping(value = "/store/{storeId}", consumes = MediaType.APPLICATION_JSON_VALUE,
                  produces = MediaType.APPLICATION_JSON_VALUE)
     @ResponseBody
@@ -113,7 +113,7 @@
     }
 
     /**
-     * 返回每个store上的leader
+     * Returns the leader on each store
      *
      * @return
      */
@@ -184,7 +184,7 @@
     @GetMapping(value = "store/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE)
     @ResponseBody
     public RestApiResponse getStore(@PathVariable long storeId) {
-        //获取store的统计信息
+        // Get the statistics of the store
        Metapb.Store store = null;
        try {
            store = pdRestService.getStore(storeId);
@@ -238,12 +238,11 @@
     @Data
     class Partition {
-        //分区信息
        int partitionId;
        String graphName;
        String role; // shard role
        String workState;
-        long dataSize; // 占用的存储空间
+        long dataSize; // storage space occupied
 
        Partition() {
        }
@@ -262,26 +261,27 @@
     @Data
     class StoreStatistics {
-        //store的统计信息
+        // Store statistics
        long storeId;
        String address;
        String raftAddress;
        String version;
        String state;
        String deployPath;
-        String dataPath; // 数据存储路径
+        String dataPath; // data storage path
        long startTimeStamp;
-        long registedTimeStamp; // 暂时取第一次心跳时间作为注册时间
-        long lastHeartBeat; // 上一次心跳时间
+        // for now, the first heartbeat time is used as the registration time
+        long registedTimeStamp;
+        long lastHeartBeat; // last heartbeat time
        long capacity;
        long available;
        int partitionCount;
        int graphSize;
        long keyCount;
-        long leaderCount; // shard role = 'Leader'的分区数量
+        long leaderCount; // number of partitions with shard role = 'Leader'
        String serviceName;
        String serviceVersion;
-        long serviceCreatedTimeStamp; // 服务创建时间
+        long serviceCreatedTimeStamp; // service creation time
        List<Partition> partitions;
 
        StoreStatistics(Metapb.Store store) {
@@ -294,25 +294,26 @@
            deployPath = store.getDeployPath();
            final String prefix = "file:";
            if ((deployPath != null) && (deployPath.startsWith(prefix))) {
-                // 去掉前缀
+                // Remove the prefix
                deployPath = deployPath.substring(prefix.length());
            }
            if ((deployPath != null) && (deployPath.contains(".jar"))) {
-                // 去掉jar包之后的信息
+                // Drop everything after the .jar
                deployPath = deployPath.substring(0, deployPath.indexOf(".jar") + 4);
            }
            dataPath = store.getDataPath();
            startTimeStamp = store.getStartTimestamp();
            try {
                serviceCreatedTimeStamp = pdRestService.getStore(store.getId())
-                                                       .getStats().getStartTime(); // 实例时间
+                                                       .getStats()
+                                                       .getStartTime(); // instance time
                final int base = 1000;
-                serviceCreatedTimeStamp *= base; // 转化为毫秒
+                serviceCreatedTimeStamp *= base; // convert to milliseconds
            } catch (PDException e) {
                e.printStackTrace();
                serviceCreatedTimeStamp = store.getStartTimestamp();
            }
-            registedTimeStamp = store.getStartTimestamp(); // 注册时间
+            registedTimeStamp = store.getStartTimestamp(); // registration time
            lastHeartBeat = store.getLastHeartbeat();
            capacity = store.getStats().getCapacity();
            available = store.getStats().getAvailable();
@@ -320,14 +321,17 @@
            serviceName = address + "-store";
            serviceVersion = store.getVersion();
            List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
-            List<Partition> partitionStatsList = new ArrayList<>(); // 保存分区信息
-            HashSet<String> graphNameSet = new HashSet<>(); // 用于统计图的数量
-            HashSet<Integer> leaderPartitionIds = new HashSet<Integer>(); // 统计leader的分区数量
-            // 构造分区信息(store中存储的图信息)
+            // Holds the partition information
+            List<Partition> partitionStatsList = new ArrayList<>();
+            // Used to count the number of graphs
+            HashSet<String> graphNameSet = new HashSet<>();
+            // Used to count the number of leader partitions
+            HashSet<Integer> leaderPartitionIds = new HashSet<Integer>();
+            // Build the partition information (graph information stored in this store)
            Map<Integer, Long> partition2KeyCount = new HashMap<>();
            for (Metapb.GraphStats graphStats : graphStatsList) {
                String graphName = graphStats.getGraphName();
-                // 图名只保留/g /m /s前面的部分
+                // Keep only the part of the graph name before /g /m /s
                final int postfixLength = 2;
                graphNameSet.add(graphName.substring(0, graphName.length() - postfixLength));
                if ((graphStats.getGraphName() != null) &&
@@ -335,7 +339,7 @@
                    Partition pt = new Partition(graphStats);
                    partitionStatsList.add(pt);
                }
-                // 统计每个分区的keyCount
+                // Count the keyCount of each partition
                partition2KeyCount.put(graphStats.getPartitionId(),
                                       graphStats.getApproximateKeys());
                if (graphStats.getRole() == Metapb.ShardRole.Leader) {
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java
index ffa8cdaab..088403fb5 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java
@@ -56,7 +56,7 @@ import io.grpc.stub.StreamObserver;
 import lombok.extern.slf4j.Slf4j;
 
 /**
- * kv 存储的核心实现类
+ * Core implementation class of the KV store
  */
 @Slf4j
 @GRpcService
@@ -87,7 +87,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 普通的 put
+     * Ordinary put
      *
      * @param request
      * @param responseObserver
@@ -119,7 +119,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 普通的 get
+     * Ordinary get
      *
      * @param request
      * @param responseObserver
@@ -151,7 +151,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 普通的 delete
+     * Ordinary delete
      *
      * @param request
     * @param responseObserver
@@ -185,7 +185,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 按前缀删除
+     * Delete by prefix
      *
      * @param request
     * @param responseObserver
@@ -223,7 +223,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 按前缀查询
+     * Query by prefix
      *
     * @param request
     * @param responseObserver
@@ -253,7 +253,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 获取随机非 0 字符串做 Id
+     * Obtain a random non-zero string to use as the Id
      *
     * @return
     */
@@ -268,7 +268,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 普通的 watch
+     * Ordinary watch
      *
     * @param request
     * @param responseObserver
@@ -295,7 +295,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 普通的前缀 watch
+     * Ordinary prefix watch
      *
     * @param request
     * @param responseObserver
@@ -322,7 +322,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 上面两个方法的通用方式
+     * Common logic shared by the two methods above
      *
     * @param request
     * @param responseObserver
@@ -358,7 +358,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 加锁
+     * Acquire a lock
      *
     * @param request
     * @param responseObserver
@@ -450,7 +450,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 解锁
+     * Release a lock
      *
     * @param request
     * @param responseObserver
@@ -484,7 +484,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 锁续活
+     * Renew a lock
      *
     * @param request
     * @param responseObserver
@@ -520,7 +520,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 带超时时间的 put
+     * Put with a timeout
      *
     * @param request
     * @param responseObserver
@@ -549,7 +549,7 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement
     }
 
     /**
-     * 续活带有超时时间的 key
+     * Renew a key that has a timeout
      *
     * @param request
     * @param responseObserver
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java
index ed902208c..9df838111 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java
@@ -64,7 +64,7 @@ public class PDRestService implements InitializingBean {
     private StoreMonitorDataService storeMonitorDataService;
 
     /**
-     * 初始化
+     * Initialize
      *
      * @throws Exception
      */
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java
index f99efe5e0..40f3d2ef8 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java
@@ -51,13 +51,13 @@ public class UpgradeService {
         var dataVersion = getDataVersion();
         log.info("now db data version : {}", dataVersion);
         for (VersionUpgradeScript script : factory.getScripts()) {
-            // 执行过,run once的跳过
+            // Skip scripts that have already been executed and are marked run-once
             if (isExecuted(script.getClass().getName()) && script.isRunOnce()) {
                 log.info("Script {} is Executed and is run once",
                          script.getClass().getName());
                 continue;
             }
-            // 判断跳过的条件
+            // Check the conditions for skipping
             if (dataVersion == null && !script.isRunWithoutDataVersion() ||
                 dataVersion != null && !versionCompare(
                         dataVersion,
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java
index d90079043..d3cad42bb 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java
@@ -40,8 +40,8 @@
     String getLowVersion();
 
     /**
-     * pd中没有data version的时候,是否执行. 一般是对应3。6。2之前的版本
-     *
+     * Whether to run when there is no data version in PD. Generally this corresponds
+     * to versions earlier than 3.6.2
      *
     * @return run when pd has no data version
     */
    boolean isRunWithoutDataVersion();
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java
index f0109a623..a1a297014 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java
@@ -41,7 +41,7 @@ import io.grpc.stub.StreamObserver;
 import lombok.extern.slf4j.Slf4j;
 
 /**
- * watch订阅、响应处理类
+ * Watch subscription and response handling class
 **/
 @Slf4j
 public class KvWatchSubject {
@@ -57,7 +57,7 @@ public class KvWatchSubject {
     BiPredicate<String, String> startWith = String::startsWith;
 
     /**
-     * 会使用以下三组key:
+     * The following three sets of keys are used:
      * clients -> W@KW@key@clientId
      * rocksdb key1 ->W@KW@key@clientId
      * rocksdb key2 ->W@clientId@KW@key@clientId
@@ -86,12 +86,13 @@
     }
 
     /**
-     * 增加观察者
+     * Add an observer
      *
-     * @param key       观察的key
-     * @param clientId  客户端标识
+     * @param key       the key to watch
+     * @param clientId  client identifier
      * @param observer
-     * @param delimiter 观察类型标识符,对前缀监听或者对key的监听可以通过此参数区分
+     * @param delimiter watch type identifier; a prefix watch and a key watch are
+     *                  distinguished by this parameter
      * @throws PDException
     */
    public void addObserver(String key, long clientId, StreamObserver<WatchResponse> observer,
@@ -111,11 +112,13 @@
     }
 
     /**
-     * 通知观察者方法,key和prefix都使用此方法,predicate不同
+     * Notify the observers; both key watches and prefix watches use this method, only
+     * the predicate differs
      *
      * @param key
-     * @param watchType 观察类型,一般是增加和删除
-     * @param predicate 判断等于或者是前匹配,用来适配key或prefix观察
+     * @param watchType watch type, usually add or delete
+     * @param predicate equality or prefix match, used to adapt to a key watch or a
+     *                  prefix watch
      * @param kvs
      * @throws PDException
     */
@@ -177,10 +180,10 @@
     }
 
     /**
-     * 续活客户端
-     * 1.往客户端发一个alive的消息,带重试哈
-     * 2.如果有响应,则续活之前保存的那两组key
-     * 3.如果多次都失败,则删除内存和rocksdb的数据
+     * Keep the client alive:
+     * 1. Send an alive message to the client, with retries
+     * 2. If there is a response, renew the two sets of keys saved earlier
+     * 3. If it fails repeatedly, delete the data from memory and RocksDB
     */
    public void keepClientAlive() {
        WatchResponse testAlive = WatchResponse.newBuilder().setState(WatchState.Alive).build();
@@ -256,7 +259,7 @@
     }
 
     /**
-     * 通知客户端leader切换了,重连
+     * Notify clients that the leader has changed so that they reconnect
     */
    public void notifyClientChangeLeader() {
        WatchResponse response =
diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/application.yml b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml
index 25471b6cc..87a33273b 100644
--- a/hugegraph-pd/hg-pd-service/src/main/resources/application.yml
+++ b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml
@@ -31,7 +31,7 @@ management:
 
 grpc:
   port: 8686
-  # grpc的服务地址
+  # gRPC service address
   host: 127.0.0.1
   netty-server:
     max-inbound-message-size: 100MB
@@ -44,37 +44,38 @@ server:
   port: 8620
 
 pd:
-  # 定期检查集群是否健康的时间间隔,单位秒
+  # Interval, in seconds, of the periodic cluster health check
   patrol-interval: 300
-  # 存储路径
+  # Storage path
   data-path: tmp/pd/8610
-  # 最少节点数,少于该数字,集群停止入库
+  # Minimum number of store nodes; below this number the cluster stops storing data
   initial-store-count: 1
-  # 初始store列表,在列表内的store自动激活
+  # Initial store list; stores in this list are activated automatically
   initial-store-list: 127.0.0.1:8502

 raft:
-  # 本机raft服务地址
+  # Local raft service address
   address: 127.0.0.1:8610
-  # PD集群服务地址
+  # Service addresses of the PD cluster
   peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
-  # raft rpc读写超时时间,单位毫秒
+  # Read/write timeout of raft RPC, in milliseconds
   rpc-timeout: 10000
-  # 快照生成时间间隔,单位秒
+  # Snapshot generation interval, in seconds
   snapshotInterval: 300
   metrics: true

 store:
-  # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒
+  # Store heartbeat timeout, in seconds; after this the store is considered temporarily unavailable and its leaders are transferred to other replicas
   keepAlive-timeout: 300
-  # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒
+  # Store offline time, in seconds; after this the store is considered permanently unavailable and its replicas are allocated to other machines
   max-down-time: 86400

 partition:
-  # 默认每个分区副本数
+  # Default number of replicas per partition
   default-shard-count: 3
-  # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count
+  # Default maximum number of replicas per machine; initial partition count = store-max-shard-count * store-number / default-shard-count
   store-max-shard-count: 12

 discovery:
-  #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除
+  # Maximum number of heartbeats a client may miss after registering; once exceeded,
+  # the previous registration information is deleted
   heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml
index 5e1d63e94..5ee5108cf 100644
--- a/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml
@@ -41,31 +41,21 @@ pd:
   patrol-interval: 3000000
   data-path: tmp/8686
-  # 最少节点数,少于该数字,集群停止入库
   initial-store-count: 1
-  # 初始store列表,在列表内的store自动激活
   initial-store-list: 127.0.0.1:8500

 raft:
   address: 127.0.0.1:8610
-  # raft集群
   peers-list: 127.0.0.1:8610
-  # raft rpc读写超时时间,单位毫秒
   rpc-timeout: 10000
-  # 快照生成时间间隔,单位秒
   snapshotInterval: 30000
   metrics: true

 store:
-  # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒
   keepAlive-timeout: 300
-  # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒
   max-down-time: 180000

 partition:
-  # 默认每个分区副本数
   default-shard-count: 3
-  # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count
   store-max-shard-count: 12

 discovery:
-  #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除
   heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
index 7cb53fe1c..11b6a722c 100644
--- a/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
@@ -41,31 +41,20 @@ pd:
   patrol-interval: 3000000
   data-path: tmp/8686
-  # 最少节点数,少于该数字,集群停止入库
   initial-store-count: 1
-  # 初始store列表,在列表内的store自动激活
   initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
-  #initial-store-list: 127.0.0.1:8501

 raft:
   address: 127.0.0.1:8610
-  # raft集群
   peers-list: 127.0.0.1:8610
-  # raft rpc读写超时时间,单位毫秒
   rpc-timeout: 10000
-  # 快照生成时间间隔,单位秒
   snapshotInterval: 30000
   metrics: true

 store:
-  # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒
   keepAlive-timeout: 300
-  # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒
   max-down-time: 180000

 partition:
-  # 默认每个分区副本数
   default-shard-count: 3
-  # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count
   store-max-shard-count: 6

 discovery:
-  #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除
   heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml
index 5e1dd50a9..7fb2e362b 100644
--- a/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml
@@ -39,7 +39,6 @@ server:
   port: 8621

 pd:
-  # 集群ID,区分不同的PD集群
   cluster_id: 1
   patrol-interval: 300000
   data-path: tmp/8687
@@ -48,26 +47,17 @@ pd:
 raft:
   enable: true
   address: 127.0.0.1:8611
-  # raft集群
   peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
-  # raft rpc读写超时时间,单位毫秒
   rpc-timeout: 10000
-  # 快照生成时间间隔,单位秒
   snapshotInterval: 300
   metrics: true
-  # 初始store列表,在列表内的store自动激活
   initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503

 store:
-  # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒
   keepAlive-timeout: 300
-  # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒
   max-down-time: 1800

 partition:
-  # 默认每个分区副本数
   default-shard-count: 3
-  # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count
   store-max-shard-count: 3

 discovery:
-  #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除
   heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml
index d2b88950a..b2470a315 100644
--- a/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml
@@ -39,7 +39,6 @@ server:
   port: 8622

 pd:
-  # 集群ID,区分不同的PD集群
   cluster_id: 1
   patrol-interval: 300000
   data-path: tmp/8688
@@ -48,26 +47,17 @@ pd:
 raft:
   enable: true
   address: 127.0.0.1:8612
-  # raft集群
   peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
-  # raft rpc读写超时时间,单位毫秒
   rpc-timeout: 10000
-  # 快照生成时间间隔,单位秒
   snapshotInterval: 300
   metrics: true
-  # 初始store列表,在列表内的store自动激活
   initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503

 store:
-  # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒
   keepAlive-timeout: 300
-  # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒
   max-down-time: 1800

 partition:
-  # 默认每个分区副本数
   default-shard-count: 3
-  # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count
   store-max-shard-count: 3

 discovery:
-  #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除
   heartbeat-try-count: 3
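
As a reading aid for the partition settings translated above, here is a minimal sketch of the relevant keys from the main application.yml, with the initial-partition-count formula worked out in a comment. The values are the ones shown in this commit; the three-store figure is only an assumption for the arithmetic.

partition:
  # Default number of replicas per partition
  default-shard-count: 3
  # Default maximum number of replicas per machine
  store-max-shard-count: 12
  # initial partition count = store-max-shard-count * store-number / default-shard-count
  # e.g. with an assumed 3 stores: 12 * 3 / 3 = 12 partitions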
