This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git

commit a560a6efeef0182b132164d2cea970cd8c9c6f66
Author: VGalaxies <[email protected]>
AuthorDate: Wed Apr 3 23:57:50 2024 +0800

    feat(pd): integrate `pd-grpc` submodule
---
 hugegraph-pd/hg-pd-grpc/pom.xml                    | 138 +++++
 .../hg-pd-grpc/src/main/proto/discovery.proto      |  71 +++
 hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto    | 143 +++++
 .../hg-pd-grpc/src/main/proto/metaTask.proto       |  64 +++
 .../hg-pd-grpc/src/main/proto/metapb.proto         | 394 +++++++++++++
 .../hg-pd-grpc/src/main/proto/pd_common.proto      |  53 ++
 .../hg-pd-grpc/src/main/proto/pd_pulse.proto       | 172 ++++++
 .../hg-pd-grpc/src/main/proto/pd_watch.proto       | 103 ++++
 hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto  | 607 +++++++++++++++++++++
 9 files changed, 1745 insertions(+)

diff --git a/hugegraph-pd/hg-pd-grpc/pom.xml b/hugegraph-pd/hg-pd-grpc/pom.xml
new file mode 100644
index 000000000..cef49e957
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/pom.xml
@@ -0,0 +1,138 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <artifactId>hg-pd-grpc</artifactId>
+
+
+    <properties>
+        <os.plugin.version>1.6.0</os.plugin.version>
+        <grpc.version>1.39.0</grpc.version>
+        <protoc.version>3.17.2</protoc.version>
+        <protobuf.plugin.version>0.6.1</protobuf.plugin.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-netty-shaded</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>javax.annotation</groupId>
+            <artifactId>javax.annotation-api</artifactId>
+            <version>1.3.2</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
+        <resources>
+            <resource>
+                <directory>src/main/resources</directory>
+            </resource>
+            <resource>
+                <directory>src/main/proto</directory>
+            </resource>
+        </resources>
+        <extensions>
+            <extension>
+                <groupId>kr.motd.maven</groupId>
+                <artifactId>os-maven-plugin</artifactId>
+                <version>${os.plugin.version}</version>
+            </extension>
+        </extensions>
+        <plugins>
+            <plugin>
+                <groupId>org.xolstice.maven.plugins</groupId>
+                <artifactId>protobuf-maven-plugin</artifactId>
+                <version>${protobuf.plugin.version}</version>
+                <extensions>true</extensions>
+                <configuration>
+                    <protocArtifact>
+                        com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier}
+                    </protocArtifact>
+                    <pluginId>grpc-java</pluginId>
+                    <pluginArtifact>
+                        io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
+                    </pluginArtifact>
+                    <!--默认值-->
+                    <protoSourceRoot>${project.basedir}/src/main/proto</protoSourceRoot>
+                    <!--默认值-->
+                    <!--<outputDirectory>${project.build.directory}/generated-sources/protobuf/java</outputDirectory>-->
+                    <outputDirectory>${project.basedir}/src/main/java</outputDirectory>
+                    <!--设置是否在生成java文件之前清空outputDirectory的文件,默认值为true,设置为false时也会覆盖同名文件-->
+                    <clearOutputDirectory>false</clearOutputDirectory>
+                    <!--更多配置信息可以查看https://www.xolstice.org/protobuf-maven-plugin/compile-mojo.html-->
+                </configuration>
+                <executions>
+                    <execution>
+                        <!--在执行mvn compile的时候会执行以下操作-->
+                        <phase>generate-sources</phase>
+                        <goals>
+                            <!--生成OuterClass类-->
+                            <goal>compile</goal>
+                            <!--生成Grpc类-->
+                            <goal>compile-custom</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-clean-plugin</artifactId>
+                <configuration>
+                    <filesets>
+                        <fileset>
+                            <directory>src/main/java</directory>
+                        </fileset>
+                    </filesets>
+                </configuration>
+                <executions>
+                    <execution>
+                        <!-- remove all java files before compile -->
+                        <phase>initialize</phase>
+                        <goals>
+                            <goal>clean</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto
new file mode 100644
index 000000000..b434ab0e8
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package discovery;
+import "pdpb.proto";
+
+option java_package = "org.apache.hugegraph.pd.grpc.discovery";
+option java_multiple_files = true;
+
+
+service DiscoveryService {
+  rpc register(NodeInfo) returns (RegisterInfo);
+  rpc getNodes(Query) returns (NodeInfos);
+  //  rpc getNodesByLabel(Conditions) returns (NodeInfos);
+}
+
+/* requests */
+message NodeInfo {
+  string id = 1;
+  string appName = 2;
+  string version = 3;
+  string address = 4;
+  int64 interval = 5;
+  map<string, string> labels = 6;
+}
+message Query {
+  string appName = 1;
+  string version = 2;
+  map<string, string> labels = 3;
+}
+message LeaseInfo {
+  int64 registrationTs = 1;
+  int64 lastHeartbeatTs = 2;
+  int64 serverUpTs = 3;
+}
+message RegisterInfo {
+  NodeInfo nodeInfo = 1;
+  LeaseInfo leaseInfo = 2 ;
+  RegisterType type = 3 ;
+  pdpb.ResponseHeader header = 4;
+}
+enum RegisterType {
+  Register = 0;
+  Heartbeat = 1;
+  Dislodge = 2;
+}
+//message Condition{
+//  string label = 1;
+//}
+//message Conditions{
+//  string label = 1;
+//  string value = 2;
+//}
+message NodeInfos{
+  repeated NodeInfo info = 1;
+}
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto
new file mode 100644
index 000000000..22007cda3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package kv;
+import "pdpb.proto";
+import "metapb.proto";
+
+option java_package = "org.apache.hugegraph.pd.grpc.kv";
+option java_multiple_files = true;
+
+
+service KvService {
+  rpc put(Kv) returns (KvResponse);
+  rpc get(K) returns (KResponse);
+  rpc delete(K) returns (KvResponse);
+  rpc deletePrefix(K) returns (KvResponse);
+  rpc scanPrefix(K) returns (ScanPrefixResponse);
+  rpc watch(WatchRequest) returns (stream WatchResponse);
+  rpc watchPrefix(WatchRequest) returns (stream WatchResponse);
+  rpc lock(LockRequest) returns (LockResponse);
+  rpc lockWithoutReentrant(LockRequest) returns (LockResponse);
+  rpc unlock(LockRequest) returns (LockResponse);
+  rpc keepAlive(LockRequest) returns (LockResponse);
+  rpc isLocked(LockRequest) returns (LockResponse);
+  rpc putTTL(TTLRequest) returns (TTLResponse);
+  rpc keepTTLAlive(TTLRequest) returns (TTLResponse);
+}
+
+/* requests */
+message Kv {
+  pdpb.RequestHeader header = 1;
+  string key = 2;
+  string value = 3;
+}
+message KvResponse {
+  pdpb.ResponseHeader header = 1;
+}
+
+message K{
+  pdpb.RequestHeader header = 1;
+  string key = 2;
+}
+
+message KResponse{
+  pdpb.ResponseHeader header = 1;
+  string value = 2;
+}
+
+message ScanPrefixResponse {
+  pdpb.ResponseHeader header = 1;
+  map<string, string> kvs = 2;
+}
+
+message LockRequest{
+  pdpb.RequestHeader header = 1;
+  string key = 2;
+  int64 ttl = 3;
+  int64 clientId = 4;
+}
+message LockResponse{
+  pdpb.ResponseHeader header = 1;
+  string key = 2;
+  int64 ttl = 3;
+  int64 clientId = 4;
+  bool succeed = 5;
+}
+
+message LockAliveResponse{
+  pdpb.ResponseHeader header = 1;
+  int64 clientId = 2;
+}
+
+
+message WatchKv {
+  string key = 1;
+  string value = 2;
+}
+
+enum WatchType {
+  Put = 0;
+  Delete = 1;
+  Unrecognized = 2;
+}
+
+message WatchEvent {
+  WatchKv current = 1;
+  WatchKv prev = 2;
+  WatchType type = 3;
+}
+
+message WatchResponse {
+  pdpb.ResponseHeader header = 1;
+  repeated WatchEvent events = 2;
+  int64 clientId = 3;
+  WatchState state = 4;
+}
+
+enum WatchState {
+  Starting = 0;
+  Started = 1;
+  Leader_Changed = 2;
+  Alive = 3;
+}
+
+message WatchRequest {
+  pdpb.RequestHeader header = 1;
+  WatchState state = 2;
+  string key = 3;
+  int64 clientId = 4;
+}
+
+message V{
+  string value = 1;
+  int64  ttl = 2;
+  int64 st = 3;
+}
+
+message TTLRequest{
+  pdpb.RequestHeader header = 1;
+  string key = 2;
+  string value = 3;
+  int64 ttl = 4;
+}
+
+message TTLResponse{
+  pdpb.ResponseHeader header = 1;
+  bool succeed = 2;
+}
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
new file mode 100644
index 000000000..c4bb8bde1
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package metaTask;
+import "metapb.proto";
+import "pd_pulse.proto";
+option java_package = "org.apache.hugegraph.pd.grpc";
+
+enum TaskType {
+  Unknown = 0;
+  Split_Partition = 1;
+  Change_Shard = 2;
+  Move_Partition = 3;
+  Clean_Partition = 4;
+  Change_KeyRange = 5;
+}
+
+// 一条任务信息
+message Task {
+  uint64 id = 1;
+  TaskType type = 2;
+  TaskState state = 3;
+  int64 start_timestamp = 4;
+  metapb.Partition partition = 5;
+  string message = 6;
+  //每个shard执行的任务状态
+  repeated ShardTaskState shardState = 7;
+  ChangeShard changeShard = 9;
+  SplitPartition splitPartition = 10;
+  MovePartition movePartition = 11;
+  CleanPartition cleanPartition = 12;
+  PartitionKeyRange partitionKeyRange = 13;
+}
+
+enum TaskState{
+  Task_Unknown = 0;
+  Task_Ready = 1;   //任务就绪
+  Task_Doing = 2;   //执行中
+  Task_Done = 3;    //完成
+  Task_Exit = 4;    //退出
+  Task_Stop = 10;
+  Task_Success = 11;
+  Task_Failure = 12;
+}
+
+message ShardTaskState{
+  uint64 store_id = 1;
+  TaskState state = 2;
+}
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
new file mode 100644
index 000000000..a8a695be0
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
@@ -0,0 +1,394 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package metapb;
+option java_package = "org.apache.hugegraph.pd.grpc";
+import "google/protobuf/any.proto";
+
+enum ClusterState{
+  // 集群健康
+  Cluster_OK = 0;
+  // 分区警告,存在部分故障节点,短时间不影响读写
+  Cluster_Warn = 2;
+  // 分区下线,可以读,无法写
+  Cluster_Offline = 10;
+  // 分区故障,无法读写,需要尽快修复故障节点。
+  Cluster_Fault = 11;
+  Cluster_Not_Ready = -1;
+}
+// 集群状态
+message ClusterStats{
+  ClusterState state = 1;
+  string message = 2;
+  uint64 timestamp = 16;
+}
+
+enum StoreState {
+  Unknown = 0;
+  // 未激活
+  Pending = 4;
+  // 在线
+  Up = 1;
+  // 离线
+  Offline = 2;
+  // 下线中
+  Exiting = 5;
+  // 已下线
+  Tombstone = 3;
+}
+
+// Store label for Storage grouping.
+message StoreLabel {
+  string key = 1;
+  string value = 2;
+}
+
+message Store {
+  uint64 id = 1;
+  // Address to handle client requests
+  string address = 2;
+  string raft_address = 3;
+  repeated StoreLabel labels = 4;
+  // Store软件版本号
+  string version = 5;
+  StoreState state = 6;
+  // The start timestamp of the current store
+  int64 start_timestamp = 7;
+  string deploy_path = 8;
+  // The last heartbeat timestamp of the store.
+  int64 last_heartbeat = 9;
+  StoreStats stats = 10;
+  // 数据格式版本号
+  int32 data_version = 11;
+  int32 cores = 12;
+  string data_path = 13;
+}
+
+enum ShardRole {
+  None = 0;
+  Leader = 1;
+  Follower = 2;
+  // Learner/None -> Learner
+  Learner = 3;
+}
+
+message Shard {
+  uint64 store_id = 2;
+  ShardRole role = 3;
+}
+
+message ShardGroup{
+  uint32 id = 1;
+  uint64 version = 2;
+  uint64 conf_ver = 3;
+  repeated Shard shards = 6;
+  PartitionState state = 10;
+  string message = 11;
+}
+
+message Graph {
+  string graph_name = 2;
+  // 分区数量,0表示无效,不能大于raft分组总数
+  int32 partition_count = 3;
+  // 当前工作状态
+  PartitionState state = 10;
+  string message = 11;
+  GraphState graph_state = 12;
+}
+// 分区工作状态
+enum PartitionState{
+  PState_None = 0;
+  //
+  PState_Normal = 1;
+  // 分区警告,存在部分故障节点,短时间不影响读写
+  PState_Warn = 2;
+  // 分区下线,可以读,无法写
+  PState_Offline = 10;
+  // 分区故障,无法读写,需要尽快修复故障节点。
+  PState_Fault = 11;
+}
+
+message PartitionV36 {
+  uint32 id = 1;
+  string graph_name = 3;
+  // 分区范围 [start_key, end_key).
+  uint64 start_key = 4;
+  uint64 end_key = 5;
+  repeated Shard shards = 6;
+  // Leader任期,leader切换后递增
+  uint64 version = 7;
+  // shards版本号,每次改变后递增
+  uint64 conf_ver = 8;
+  // 当前工作状态
+  PartitionState state = 10;
+  string message = 11;
+}
+
+message Partition {
+  uint32 id = 1;
+  string graph_name = 3;
+  // 分区范围 [start_key, end_key).
+  uint64 start_key = 4;
+  uint64 end_key = 5;
+  // Partition 对象不在保存 shard list(根据对应的shard group 去查询), version 和 conf version不再有实际的意义
+  // repeated Shard shards = 6;
+  // key range 每次改变后递增
+  uint64 version = 7;
+  // shards版本号,每次改变后递增
+  // uint64 conf_ver = 8;
+  // 当前工作状态
+  PartitionState state = 10;
+  string message = 11;
+}
+
+message PartitionShard {
+  metapb.Partition partition = 1;
+  metapb.Shard leader = 2;
+  // 离线的Shard
+  repeated metapb.Shard offline_shards = 3;
+}
+// 记录分区所在的存储位置
+message PartitionStore {
+  uint32 partition_id = 1;
+  string graph_name = 3;
+  // 存储位置
+  string store_location = 4;
+}
+
+message PartitionRaft {
+  uint32 partition_id = 1;
+  string graph_name = 3;
+  // 存储位置
+  string raft_location = 4;
+}
+
+message ShardStats{
+  uint64 store_id = 2;
+  ShardRole role = 3;
+  ShardState state = 4;
+  // 安装快照的进度
+  uint32 progress = 5;
+}
+message PartitionStats{
+  uint32 id = 1;
+  // raft分组的任期.
+  uint64 leader_term = 2;
+  repeated string graph_name = 3;
+  metapb.Shard leader = 4;
+  // 离线 shards
+  repeated metapb.Shard shard = 5;
+  repeated metapb.Shard learner = 6;
+  uint64 conf_ver = 7;
+  // 分区状态
+  PartitionState state = 8;
+  repeated ShardStats shardStats = 9;
+  // 分区近似大小
+  uint64 approximate_size = 10;
+  // 分区key的近似数量
+  uint64 approximate_keys = 13;
+  // heartbeat timestamp
+  int64 timestamp = 16;
+}
+
+message GraphStats{
+  // 图名
+  string graph_name = 1;
+  // 分区近似大小
+  uint64 approximate_size = 2;
+  // 分区key的近似数量
+  uint64 approximate_keys = 3;
+  //  // committed index
+  //  uint64 committed_index = 4;
+  uint32 partition_id = 5;
+  ShardRole role = 6;
+  // 当前工作状态
+  PartitionState work_state = 8;
+}
+
+message RaftStats {
+  // partition id
+  uint32 partition_id = 1;
+  // committed index
+  uint64 committed_index = 2;
+}
+
+message TimeInterval {
+  // The unix timestamp in seconds of the start of this period.
+  uint64 start_timestamp = 1;
+  // The unix timestamp in seconds of the end of this period.
+  uint64 end_timestamp = 2;
+}
+
+message RecordPair {
+  string key = 1;
+  uint64 value = 2;
+}
+
+
+message QueryStats {
+  uint64 GC = 1;
+  uint64 Get = 2;
+  uint64 Scan = 3;
+  uint64 Coprocessor = 4;
+  uint64 Delete = 5;
+  uint64 DeleteRange = 6;
+  uint64 Put = 7;
+}
+
+enum ShardState{
+  SState_None = 0;
+  // 正常
+  SState_Normal = 1;
+  // 安装快照
+  SState_Snapshot = 2;
+  // 离线
+  SState_Offline = 10;
+}
+
+
+message StoreStats {
+  uint64 store_id = 1;
+  // Capacity for the store.
+  uint64 capacity = 2;
+  // Available size for the store.
+  uint64 available = 3;
+  // Total partition count in this store.
+  uint32 partition_count = 4;
+  // Current sending snapshot count.
+  uint32 sending_snap_count = 5;
+  // Current receiving snapshot count.
+  uint32 receiving_snap_count = 6;
+  // When the store is started (unix timestamp in seconds).
+  uint32 start_time = 7;
+  // How many partition is applying snapshot.
+  uint32 applying_snap_count = 8;
+  // If the store is busy
+  bool is_busy = 9;
+  // Actually used space by db
+  uint64 used_size = 10;
+  // Bytes written for the store during this period.
+  uint64 bytes_written = 11;
+  // Keys written for the store during this period.
+  uint64 keys_written = 12;
+  // Bytes read for the store during this period.
+  uint64 bytes_read = 13;
+  // Keys read for the store during this period.
+  uint64 keys_read = 14;
+  // Actually reported time interval
+  TimeInterval interval = 15;
+  // Threads' CPU usages in the store
+  repeated RecordPair cpu_usages = 16;
+  // Threads' read disk I/O rates in the store
+  repeated RecordPair read_io_rates = 17;
+  // Threads' write disk I/O rates in the store
+  repeated RecordPair write_io_rates = 18;
+  // Operations' latencies in the store
+  repeated RecordPair op_latencies = 19;
+  // Store query stats
+  QueryStats query_stats = 21;
+  // graph stats
+  repeated GraphStats graph_stats = 22;
+  // raft stats
+  repeated RaftStats raft_stats = 23;
+  int32 cores = 24;
+  // system metrics
+  repeated RecordPair system_metrics = 25;
+}
+
+// 分区查询条件
+message PartitionQuery{
+  optional uint64 store_id = 1;      // 0 表示查询条件不包含store_id
+  optional string graph_name = 2;
+  optional uint32 partition_id = 4;
+}
+
+//PD 节点信息
+message Member {
+  uint64 cluster_id = 1;
+  string raft_url = 3;
+  string grpc_url = 4;
+  string rest_url = 5;
+  string data_path = 6;
+  StoreState state = 7;
+  ShardRole role = 8;
+  string replicator_state = 9;
+}
+
+// 图空间配置
+message GraphSpace{
+  string name = 1;
+  // 最大占用存储
+  uint64 storage_limit = 2;
+  // 已使用空间
+  uint64 used_size = 3;
+  // 修改时间
+  uint64 timestamp = 10;
+}
+
+// PD 配置
+message PDConfig{
+  uint64 version = 1;
+  // 分区数量, 初始化根据Store数量动态计算,分裂后进行修改
+  int32 partition_count = 2;
+  // 每分区副本数量
+  int32 shard_count = 3;
+  // pd集群列表
+  string peers_list = 4;
+  // 集群中最少store数量
+  int32 min_store_count = 6;
+  // 每个store最大副本数
+  int32 max_Shards_Per_Store = 7;
+  // 修改时间
+  uint64 timestamp = 10;
+}
+
+
+
+//消息持久化
+message QueueItem{
+  string item_id = 1;
+  string item_class = 2;
+  bytes item_content = 3;
+  int64 timestamp = 10;
+}
+
+message LogRecord{
+  string action = 1;
+  int64 timestamp = 2;
+  map<string, string> labels = 3;
+  google.protobuf.Any object = 4;
+  string message = 5;
+}
+
+message GraphState{
+  GraphMode mode = 1;
+  GraphModeReason reason = 2;
+}
+
+enum GraphMode{
+  ReadWrite = 0;
+  ReadOnly = 1;
+  WriteOnly = 2;
+}
+
+enum GraphModeReason{
+  Empty = 0; // 空
+  Initiative = 1; // 主动的状态设置
+  Quota = 2; // 达到限额条件
+
+}
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
new file mode 100644
index 000000000..c9eec8149
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.common";
+option java_outer_classname = "HgPdCommonProto";
+
+message RequestHeader {
+  // 集群 ID.
+  uint64 cluster_id = 1;
+  // 发送者 ID.
+  uint64 sender_id = 2;
+}
+
+message ResponseHeader {
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  Error error = 2;
+}
+
+enum ErrorType {
+  OK = 0;
+  UNKNOWN = 1;
+  STORE_NON_EXIST = 101;
+  STORE_TOMBSTONE = 103;
+  ALREADY_BOOTSTRAPPED = 4;
+  INCOMPATIBLE_VERSION = 5;
+  PARTITION_NOT_FOUND = 6;
+
+  ETCD_READ_ERROR = 1000;
+  ETCD_WRITE_ERROR = 1001;
+}
+
+message Error {
+  ErrorType type = 1;
+  string message = 2;
+}
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
new file mode 100644
index 000000000..fb8940df6
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+import "metapb.proto";
+import "pd_common.proto";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.pulse";
+option java_outer_classname = "HgPdPulseProto";
+
+service HgPdPulse {
+  rpc Pulse(stream PulseRequest) returns (stream PulseResponse);
+}
+
+/* requests */
+message PulseRequest {
+  PulseCreateRequest create_request = 1;
+  PulseCancelRequest cancel_request = 2;
+  PulseNoticeRequest notice_request = 3;
+  PulseAckRequest ack_request = 4;
+}
+
+message PulseCreateRequest {
+  PulseType  pulse_type = 1;
+}
+
+message PulseCancelRequest {
+  int64 observer_id = 1;
+}
+
+message PulseNoticeRequest {
+  int64 observer_id = 1;
+  oneof request_union {
+    PartitionHeartbeatRequest partition_heartbeat_request = 10;
+  }
+}
+
+message PulseAckRequest {
+  int64 observer_id = 1;
+  int64 notice_id = 2;
+}
+
+// 分区心跳,分区的peer增减、leader改变等事件发生时,由leader发送心跳。
+// 同时pd对分区进行shard增减通过Response发送给leader
+message PartitionHeartbeatRequest {
+  RequestHeader header = 1;
+  // Leader Peer sending the heartbeat
+  metapb.PartitionStats states = 4;
+}
+
+/* responses */
+message PulseResponse {
+  PulseType pulse_type = 1;
+  int64 observer_id = 2;
+  int32 status = 3;   //0=ok,1=fail
+  int64 notice_id = 4;
+  oneof response_union {
+    PartitionHeartbeatResponse partition_heartbeat_response = 10;
+    PdInstructionResponse instruction_response = 11;
+  }
+}
+
+message PartitionHeartbeatResponse {
+  ResponseHeader header = 1;
+  uint64 id = 3;
+  metapb.Partition partition = 2;
+  ChangeShard change_shard = 4;
+
+  TransferLeader transfer_leader = 5;
+  // 拆分成多个分区,第一个SplitPartition是原分区,从第二开始是新分区
+  SplitPartition split_partition = 6;
+  // rocksdb compaction 指定的表,null是针对所有
+  DbCompaction db_compaction = 7;
+  // 将partition的数据,迁移到 target
+  MovePartition move_partition = 8;
+  // 清理partition的graph的数据
+  CleanPartition clean_partition = 9;
+  // partition key range 变化
+  PartitionKeyRange key_range = 10;
+}
+
+/* Date model */
+message ChangeShard {
+  repeated metapb.Shard shard = 1;
+  ConfChangeType change_type = 2;
+}
+
+message TransferLeader {
+  metapb.Shard shard = 1;
+}
+
+message SplitPartition {
+  repeated metapb.Partition new_partition = 1;
+}
+
+message DbCompaction {
+  string table_name = 3;
+}
+
+message MovePartition{
+  // target partition的key range为,迁移后的新range
+  metapb.Partition target_partition = 1;
+  // partition 的 key start 和 key end的所有数据,
+  // 会迁移到 target partition 上
+  uint64 key_start = 2;
+  uint64 key_end = 3;
+}
+
+message CleanPartition {
+  uint64 key_start = 1;
+  uint64 key_end = 2;
+  CleanType clean_type = 3;
+  bool delete_partition = 4; //是否删除分区
+}
+
+message PartitionKeyRange{
+  uint32 partition_id = 1;
+  uint64 key_start = 2;
+  uint64 key_end = 3;
+}
+
+message PdInstructionResponse {
+  PdInstructionType instruction_type = 1;
+  string leader_ip = 2;
+}
+
+/* enums */
+enum PulseType {
+  PULSE_TYPE_UNKNOWN = 0;
+  PULSE_TYPE_PARTITION_HEARTBEAT = 1;
+  PULSE_TYPE_PD_INSTRUCTION = 2;
+}
+
+enum PulseChangeType {
+  PULSE_CHANGE_TYPE_UNKNOWN = 0;
+  PULSE_CHANGE_TYPE_ADD = 1;
+  PULSE_CHANGE_TYPE_ALTER = 2;
+  PULSE_CHANGE_TYPE_DEL = 3;
+}
+
+enum ConfChangeType {
+  CONF_CHANGE_TYPE_UNKNOWN = 0;
+  CONF_CHANGE_TYPE_ADD_NODE = 1;
+  CONF_CHANGE_TYPE_REMOVE_NODE = 2;
+  CONF_CHANGE_TYPE_ADD_LEARNER_NODE = 3;
+  CONF_CHANGE_TYPE_ADJUST = 4;    // 调整shard,leader根据新的配置动态增减。
+}
+
+enum CleanType {
+  CLEAN_TYPE_KEEP_RANGE = 0; // 仅保留这个range
+  CLEAN_TYPE_EXCLUDE_RANGE = 1; // 删除这个range
+}
+
+enum PdInstructionType {
+  CHANGE_TO_FOLLOWER = 0;
+}
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
new file mode 100644
index 000000000..febc41f52
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+import "metapb.proto";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.watch";
+option java_outer_classname = "HgPdWatchProto";
+
+// Watch service: clients subscribe to cluster change events over a
+// bidirectional stream.
+service HgPdWatch {
+  rpc Watch(stream WatchRequest) returns (stream WatchResponse);
+}
+
+// Client -> server: carries either a create or a cancel request.
+message WatchRequest {
+  WatchCreateRequest create_request = 1;
+  WatchCancelRequest cancel_request = 2;
+}
+
+// Create a watch of the given type.
+message WatchCreateRequest {
+  WatchType  watch_type = 1;
+}
+
+// Cancel the watch identified by watcher_id.
+message WatchCancelRequest {
+  int64 watcher_id = 1;
+}
+
+// Server -> client event notification.
+message WatchResponse {
+  WatchType watch_type = 1;
+  int64 watcher_id = 2;
+  int32 status = 3;   // 0 = ok, 1 = fail
+  int64 notice_id = 4;
+  string msg = 5;
+  // Payload; which member is set depends on watch_type.
+  oneof response_union {
+    WatchPartitionResponse partition_response = 10;
+    WatchNodeResponse node_response = 11;
+    WatchGraphResponse graph_response = 12;
+    WatchShardGroupResponse shard_group_response = 13;
+  }
+}
+
+// Partition change event.
+message WatchPartitionResponse {
+  string graph = 1;
+  int32 partition_id = 2;
+  WatchChangeType change_type = 3;
+}
+
+// Store-node change event.
+message WatchNodeResponse {
+  string graph = 1;
+  uint64 node_id = 2;
+  NodeEventType node_event_type = 3;
+}
+
+// Graph change event.
+message WatchGraphResponse {
+  metapb.Graph graph = 1;
+  WatchType type = 2;
+}
+
+// Shard-group change event.
+message WatchShardGroupResponse {
+  metapb.ShardGroup shard_group = 1;
+  WatchChangeType type = 2;
+  int32 shard_group_id = 3;
+}
+
+// What kind of cluster events a watch subscribes to.
+enum WatchType {
+  WATCH_TYPE_UNKNOWN = 0;
+  WATCH_TYPE_PARTITION_CHANGE = 1;
+  WATCH_TYPE_STORE_NODE_CHANGE = 2;
+  WATCH_TYPE_GRAPH_CHANGE = 3;
+  WATCH_TYPE_SHARD_GROUP_CHANGE = 4;
+}
+
+// Kind of change carried by a watch event.
+enum WatchChangeType {
+  WATCH_CHANGE_TYPE_UNKNOWN = 0;
+  WATCH_CHANGE_TYPE_ADD = 1;
+  WATCH_CHANGE_TYPE_ALTER = 2;
+  WATCH_CHANGE_TYPE_DEL = 3;
+  WATCH_CHANGE_TYPE_SPECIAL1 = 4;
+}
+
+// Store-node lifecycle events.
+enum NodeEventType {
+  NODE_EVENT_TYPE_UNKNOWN = 0;
+  NODE_EVENT_TYPE_NODE_ONLINE = 1;
+  NODE_EVENT_TYPE_NODE_OFFLINE = 2;
+  NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3;
+  // PD leader changed.
+  NODE_EVENT_TYPE_PD_LEADER_CHANGE = 4;
+}
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
new file mode 100644
index 000000000..4e293ca08
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
@@ -0,0 +1,607 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package pdpb;
+
+import "metapb.proto";
+import "metaTask.proto";
+
+option java_package = "org.apache.hugegraph.pd.grpc";
+
+// PD (Placement Driver) service: store/partition/graph metadata
+// management plus cluster maintenance operations.
+service PD {
+  // Register a store. The first registration generates a new store_id;
+  // store_id is the store's unique identifier.
+  rpc RegisterStore(RegisterStoreRequest) returns (RegisterStoreResponse) {}
+  rpc GetStore(GetStoreRequest) returns (GetStoreResponse) {}
+  // Modify store state and other information.
+  rpc SetStore(SetStoreRequest) returns (SetStoreResponse) {}
+  // Delete a store.
+  // NOTE(review): "Det" in the message names is likely a typo for "Del";
+  // renaming now would break generated code, so it is only flagged here.
+  rpc DelStore(DetStoreRequest) returns (DetStoreResponse) {}
+  rpc GetAllStores(GetAllStoresRequest) returns (GetAllStoresResponse) {}
+  rpc StoreHeartbeat(StoreHeartbeatRequest) returns (StoreHeartbeatResponse) {}
+
+  // Look up the partition that owns the given key.
+  rpc GetPartition(GetPartitionRequest) returns (GetPartitionResponse) {}
+
+  // Look up the partition by hash code.
+  rpc GetPartitionByCode(GetPartitionByCodeRequest) returns (GetPartitionResponse) {}
+  // Return the partition with the given partition ID.
+  rpc GetPartitionByID(GetPartitionByIDRequest) returns (GetPartitionResponse) {}
+  rpc ScanPartitions(ScanPartitionsRequest) returns (ScanPartitionsResponse) {}
+  // Update partition info, mainly the partition key range. Call with
+  // caution: misuse can cause data loss.
+  rpc UpdatePartition(UpdatePartitionRequest) returns (UpdatePartitionResponse) {}
+  // Delete the given partition of a graph.
+  rpc DelPartition(DelPartitionRequest) returns (DelPartitionResponse) {}
+  // Query partitions by conditions such as store, graph, etc.
+  rpc QueryPartitions(QueryPartitionsRequest) returns (QueryPartitionsResponse){}
+  // Read graph info.
+  rpc GetGraph(GetGraphRequest) returns (GetGraphResponse){}
+  // Modify graph info.
+  rpc SetGraph(SetGraphRequest) returns (SetGraphResponse){}
+  rpc DelGraph(DelGraphRequest) returns (DelGraphResponse){}
+  // Globally unique auto-increment ID.
+  rpc GetId(GetIdRequest) returns (GetIdResponse){}
+  rpc ResetId(ResetIdRequest) returns (ResetIdResponse){}
+  // Member list of the PD cluster.
+  rpc GetMembers(GetMembersRequest) returns (GetMembersResponse) {}
+  rpc GetStoreStatus(GetAllStoresRequest) returns (GetAllStoresResponse) {}
+  rpc GetPDConfig(GetPDConfigRequest) returns (GetPDConfigResponse){}
+  rpc SetPDConfig(SetPDConfigRequest) returns (SetPDConfigResponse){}
+  rpc GetGraphSpace(GetGraphSpaceRequest) returns (GetGraphSpaceResponse){}
+  rpc SetGraphSpace(SetGraphSpaceRequest) returns (SetGraphSpaceResponse){}
+  // Get cluster health status.
+  rpc GetClusterStats(GetClusterStatsRequest) returns (GetClusterStatsResponse){}
+  // Replace the peer list of the PD cluster.
+  rpc ChangePeerList(ChangePeerListRequest) returns (getChangePeerListResponse) {}
+  // Data split.
+  rpc SplitData(SplitDataRequest) returns (SplitDataResponse){}
+
+  rpc SplitGraphData(SplitGraphDataRequest) returns (SplitDataResponse) {}
+  // Data migration.
+  rpc MovePartition(MovePartitionRequest) returns (MovePartitionResponse){}
+  // Report the results of tasks such as partition splits.
+  rpc ReportTask(ReportTaskRequest) returns (ReportTaskResponse){}
+
+  rpc GetPartitionStats(GetPartitionStatsRequest) returns (GetPartitionStatsResponse){}
+  // Balance the number of partition leaders across stores.
+  rpc BalanceLeaders(BalanceLeadersRequest) returns (BalanceLeadersResponse){}
+
+  // Replace the license file.
+  rpc PutLicense(PutLicenseRequest) returns (PutLicenseResponse){}
+
+  // Ask RocksDB to run a compaction.
+  rpc DbCompaction(DbCompactionRequest) returns (DbCompactionResponse){}
+
+  // Merge partitions cluster-wide (shrink).
+  rpc CombineCluster(CombineClusterRequest) returns (CombineClusterResponse){}
+  // Shrink a single graph.
+  rpc CombineGraph(CombineGraphRequest) returns (CombineGraphResponse) {}
+
+  // Shard-group operations.
+  rpc GetShardGroup(GetShardGroupRequest) returns (GetShardGroupResponse){}
+  rpc UpdateShardGroup(UpdateShardGroupRequest) returns (UpdateShardGroupResponse){}
+  // Delete a shard group.
+  rpc DeleteShardGroup(DeleteShardGroupRequest) returns (DeleteShardGroupResponse) {}
+  // Shard-group maintenance operations.
+  rpc UpdateShardGroupOp(ChangeShardRequest) returns (ChangeShardResponse){}
+  // Change the shards of a group.
+  rpc ChangeShard(ChangeShardRequest) returns (ChangeShardResponse) {}
+  // Update the PD raft configuration.
+  // NOTE(review): rpc names should be PascalCase (UpdatePdRaft, GetCache,
+  // GetPartitions); renaming would change generated stubs.
+  rpc updatePdRaft(UpdatePdRaftRequest) returns (UpdatePdRaftResponse)  {}
+
+  rpc getCache(GetGraphRequest) returns (CacheResponse)  {}
+  rpc getPartitions(GetGraphRequest) returns (CachePartitionResponse)  {}
+}
+
+// Common header carried by every request.
+message RequestHeader {
+  // Cluster ID.
+  uint64 cluster_id = 1;
+  // Sender ID.
+  uint64 sender_id = 2;
+}
+
+// Common header carried by every response.
+message ResponseHeader {
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  Error error = 2;
+}
+
+// Error codes returned in ResponseHeader.
+// NOTE(review): value naming is inconsistent (mixed case, no enum-name
+// prefix) and codes are non-contiguous; renaming would break generated
+// constants, so this is only flagged.
+enum ErrorType {
+  OK = 0;
+  UNKNOWN = 1;
+
+  NOT_LEADER = 100;
+  STORE_ID_NOT_EXIST = 101;
+  NO_ACTIVE_STORE = 102;
+  NOT_FOUND = 103;
+  PD_UNREACHABLE = 104;
+  LESS_ACTIVE_STORE = 105;
+  STORE_HAS_BEEN_REMOVED = 106;
+  STORE_PROHIBIT_DELETION = 111;
+  SET_CONFIG_SHARD_COUNT_ERROR = 112;
+  UPDATE_STORE_STATE_ERROR = 113;
+  STORE_PROHIBIT_DUPLICATE = 114;
+  ROCKSDB_READ_ERROR = 1002;
+  ROCKSDB_WRITE_ERROR = 1003;
+  ROCKSDB_DEL_ERROR = 1004;
+  ROCKSDB_SAVE_SNAPSHOT_ERROR = 1005;
+  ROCKSDB_LOAD_SNAPSHOT_ERROR = 1006;
+
+  // The current cluster state forbids splitting.
+  Cluster_State_Forbid_Splitting = 1007;
+  // A split is already in progress.
+  Split_Partition_Doing = 1008;
+  // The number of partitions on a store exceeds the upper limit.
+  Too_Many_Partitions_Per_Store = 1009;
+  // License error.
+  LICENSE_ERROR = 107;
+  // License verification error.
+  LICENSE_VERIFY_ERROR = 108;
+
+  // Store decommission (tombstone) is in progress.
+  Store_Tombstone_Doing = 1010;
+
+  // Invalid split partition count.
+  Invalid_Split_Partition_Count = 1011;
+}
+
+// Error detail: machine-readable type plus human-readable message.
+message Error {
+  ErrorType type = 1;
+  string message = 2;
+}
+// Fetch a store by its ID.
+message GetStoreRequest {
+  RequestHeader header = 1;
+  uint64 store_id = 2;
+}
+
+message GetStoreResponse {
+  ResponseHeader header = 1;
+
+  metapb.Store store = 2;
+  metapb.StoreStats stats = 3;
+}
+
+// Delete a store by its ID.
+// NOTE(review): "Det" is likely a typo for "Del"; renaming now would
+// break generated code, so it is only flagged here.
+message DetStoreRequest {
+  RequestHeader header = 1;
+  uint64 store_id = 2;
+}
+
+message DetStoreResponse {
+  ResponseHeader header = 1;
+  metapb.Store store = 2;
+}
+
+// Register a store with PD.
+message RegisterStoreRequest {
+  RequestHeader header = 1;
+  metapb.Store store = 2;
+}
+
+
+message RegisterStoreResponse {
+  ResponseHeader header = 1;
+  // On first registration, the newly assigned store_id is returned.
+  uint64 store_id = 2;
+}
+
+// Modify a store's state and other information.
+message SetStoreRequest {
+  RequestHeader header = 1;
+  metapb.Store store = 2;
+}
+
+message SetStoreResponse {
+  ResponseHeader header = 1;
+  // The store after modification.
+  metapb.Store store = 2;
+}
+
+
+// Return all stores hosting graph_name; if graph_name is empty, return
+// every store in the system.
+message GetAllStoresRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  // Whether to exclude offline stores from the result.
+  bool exclude_offline_stores = 3;
+}
+
+message GetAllStoresResponse {
+  ResponseHeader header = 1;
+
+  repeated metapb.Store stores = 2;
+}
+
+
+// Periodic heartbeat from a store carrying its statistics.
+message StoreHeartbeatRequest {
+  RequestHeader header = 1;
+
+  metapb.StoreStats stats = 2;
+}
+
+message StoreHeartbeatResponse {
+  ResponseHeader header = 1;
+  // NOTE(review): field numbers start at 3 — if fields were removed,
+  // declare them `reserved` to prevent reuse.
+  string cluster_version = 3;
+  metapb.ClusterStats clusterStats = 4;
+}
+
+// Look up the partition that owns the given key.
+message GetPartitionRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  bytes key = 3;
+}
+
+
+// Look up the partition by precomputed hash code.
+message GetPartitionByCodeRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  uint64 code = 3;
+}
+
+
+message GetPartitionResponse {
+  ResponseHeader header = 1;
+  metapb.Partition partition = 2;
+  metapb.Shard leader = 3;
+  // Shards that are currently offline.
+  repeated metapb.Shard offline_shards = 4;
+}
+
+message GetPartitionByIDRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  uint32 partition_id = 3;
+}
+
+// Delete one partition of a graph.
+message DelPartitionRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  uint32 partition_id = 3;
+}
+message DelPartitionResponse {
+  ResponseHeader header = 1;
+  metapb.Partition partition = 2;
+}
+
+// Update partition metadata (mainly key ranges); see the caution on the
+// UpdatePartition rpc.
+message UpdatePartitionRequest{
+  RequestHeader header = 1;
+  repeated metapb.Partition partition = 2;
+}
+
+message UpdatePartitionResponse{
+  ResponseHeader header = 1;
+  repeated metapb.Partition partition = 2;
+}
+// Use GetPartitionResponse as the response of GetPartitionByIDRequest.
+
+// Scan partitions of a graph within a key range.
+message ScanPartitionsRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  bytes start_key = 3;
+  bytes end_key = 4; // end_key is +inf when it is empty.
+}
+
+
+
+message ScanPartitionsResponse {
+  ResponseHeader header = 1;
+  repeated metapb.PartitionShard partitions = 4;
+}
+
+
+
+// Query partitions by arbitrary conditions (store, graph, ...).
+message QueryPartitionsRequest{
+  RequestHeader header = 1;
+  metapb.PartitionQuery query = 2;
+}
+
+message QueryPartitionsResponse {
+  ResponseHeader header = 1;
+  repeated metapb.Partition partitions = 4;
+}
+
+
+
+// Read graph metadata by name.
+message GetGraphRequest{
+  RequestHeader header = 1;
+  string graph_name = 2;
+}
+
+message GetGraphResponse{
+  ResponseHeader header = 1;
+  metapb.Graph graph = 2;
+}
+
+// Create or modify graph metadata.
+message SetGraphRequest{
+  RequestHeader header = 1;
+  metapb.Graph graph = 2;
+}
+
+message SetGraphResponse{
+  ResponseHeader header = 1;
+  metapb.Graph graph = 2;
+}
+
+// Delete a graph by name.
+message DelGraphRequest{
+  RequestHeader header = 1;
+  string graph_name = 2;
+}
+
+message DelGraphResponse{
+  ResponseHeader header = 1;
+  metapb.Graph graph = 2;
+}
+
+// Allocate `delta` values from the global auto-increment sequence `key`.
+message GetIdRequest{
+  RequestHeader header = 1;
+  string key = 2;
+  int32 delta = 3;
+}
+
+message GetIdResponse{
+  ResponseHeader header = 1;
+  // First allocated value of the requested range.
+  int64 id = 2;
+  int32 delta = 3;
+}
+
+// Reset the auto-increment sequence identified by `key`.
+message ResetIdRequest{
+  RequestHeader header = 1;
+  string key = 2;
+}
+
+message ResetIdResponse{
+  ResponseHeader header = 1;
+  int32 result = 2;
+}
+
+// List the members of the PD cluster.
+message GetMembersRequest{
+  RequestHeader header = 1;
+}
+
+message GetMembersResponse{
+  ResponseHeader header = 1;
+  repeated metapb.Member members = 2;
+  metapb.Member leader = 3;
+}
+
+// Fetch the PD configuration (optionally at a specific version).
+message GetPDConfigRequest{
+  RequestHeader header = 1;
+  uint64 version = 2 ;
+}
+
+message GetPDConfigResponse{
+  ResponseHeader header = 1;
+  metapb.PDConfig pd_config = 2;
+}
+
+message SetPDConfigRequest{
+  RequestHeader header = 1;
+  metapb.PDConfig pd_config = 2;
+}
+
+message SetPDConfigResponse{
+  ResponseHeader header = 1;
+}
+
+
+// Fetch graph-space metadata; empty name presumably means all spaces —
+// TODO confirm against the server implementation.
+message GetGraphSpaceRequest{
+  RequestHeader header = 1;
+  // NOTE(review): field name should be lower_snake_case (graph_space_name).
+  string graph_Space_Name = 2;
+}
+
+message GetGraphSpaceResponse{
+  ResponseHeader header = 1;
+  repeated metapb.GraphSpace graph_space = 2;
+}
+
+message SetGraphSpaceRequest{
+  RequestHeader header = 1;
+  metapb.GraphSpace graph_space = 2;
+}
+
+message SetGraphSpaceResponse{
+  ResponseHeader header = 1;
+}
+
+// Fetch cluster health statistics.
+message GetClusterStatsRequest{
+  RequestHeader header = 1;
+}
+
+message GetClusterStatsResponse{
+  ResponseHeader header = 1;
+  metapb.ClusterStats cluster = 2;
+}
+// Replace the PD cluster peer list.
+message ChangePeerListRequest{
+  RequestHeader header = 1;
+  // NOTE(review): field name should be lower_snake_case (peer_list).
+  string peer_List = 2;
+}
+// NOTE(review): message names should be PascalCase (ChangePeerListResponse).
+message getChangePeerListResponse{
+  ResponseHeader header = 1;
+}
+
+// Operation mode for split/move requests; see the per-request comments.
+// NOTE(review): values lack an OPERATION_MODE_ prefix and UPPER_SNAKE case.
+enum OperationMode {
+  Auto = 0;
+  Expert = 1;
+}
+
+// One split instruction for expert mode.
+message SplitDataParam{
+  // ID of the source partition to split.
+  uint32 partition_id = 1;
+  // Number of target partitions.
+  uint32 count = 2;
+}
+
+message SplitDataRequest{
+  RequestHeader header = 1;
+  // Operation mode:
+  //   Auto:   split automatically until each store reaches the maximum
+  //           partition count.
+  //   Expert: expert mode; `param` must be provided.
+  OperationMode mode = 2;
+  repeated SplitDataParam param = 3;
+}
+
+// Split the data of a single graph into `to_count` partitions.
+message SplitGraphDataRequest{
+  RequestHeader header = 1;
+  string graph_name = 2;
+  uint32 to_count = 3;
+}
+
+message SplitDataResponse{
+  ResponseHeader header = 1;
+}
+
+// One partition-move instruction for expert mode.
+message MovePartitionParam{
+  uint32 partition_id = 1;
+  uint64 src_store_id = 2;
+  uint64 dst_store_id = 3;
+}
+
+message MovePartitionRequest{
+  RequestHeader header = 1;
+  // Operation mode:
+  //   Auto:   move automatically until every store holds the same
+  //           number of partitions.
+  //   Expert: expert mode; `param` must be provided.
+  OperationMode mode = 2;
+  repeated MovePartitionParam param = 3;
+}
+
+message MovePartitionResponse{
+  ResponseHeader header = 1;
+}
+
+// Report the result of an async task (e.g. a partition split).
+message ReportTaskRequest{
+  RequestHeader header = 1;
+  metaTask.Task task = 2;
+}
+
+message ReportTaskResponse{
+  ResponseHeader header = 1;
+}
+
+// Fetch statistics for a partition.
+message GetPartitionStatsRequest{
+  RequestHeader header = 1;
+  uint32 partition_id = 2;
+  // If empty, return this partition ID across all graphs.
+  string graph_name = 4;
+}
+
+message GetPartitionStatsResponse{
+  ResponseHeader header = 1;
+  metapb.PartitionStats partition_stats = 2;
+}
+
+// Balance the number of partition leaders across stores.
+message BalanceLeadersRequest{
+  RequestHeader header = 1;
+}
+
+message BalanceLeadersResponse{
+  ResponseHeader header = 1;
+}
+
+// Replace the license file with the given content.
+message PutLicenseRequest{
+  RequestHeader header = 1;
+  bytes content = 2;
+}
+
+message PutLicenseResponse{
+  ResponseHeader header = 1;
+}
+
+// Ask RocksDB to compact the named table (all tables if empty —
+// TODO confirm against the server implementation).
+message DbCompactionRequest{
+  RequestHeader header = 1;
+  // NOTE(review): field name should be lower_snake_case (table_name).
+  string tableName = 2;
+}
+
+message DbCompactionResponse{
+  ResponseHeader header = 1;
+}
+
+// Shrink the cluster to `toCount` partitions.
+message CombineClusterRequest {
+  RequestHeader header = 1;
+  uint32 toCount = 2;
+}
+
+message CombineClusterResponse {
+  ResponseHeader header = 1;
+}
+
+// Shrink a single graph to `toCount` partitions.
+message CombineGraphRequest {
+  RequestHeader header = 1;
+  string graphName = 2;
+  uint32 toCount = 3;
+}
+
+message CombineGraphResponse {
+  ResponseHeader header = 1;
+}
+
+// Delete a shard group by ID.
+// NOTE(review): camelCase field names (groupId, shardGroup) below should
+// be lower_snake_case; renaming changes generated accessors and JSON keys.
+message DeleteShardGroupRequest {
+  RequestHeader header = 1;
+  uint32 groupId = 2;
+}
+
+message DeleteShardGroupResponse {
+  ResponseHeader header = 1;
+}
+
+message GetShardGroupRequest{
+  RequestHeader header = 1;
+  uint32 group_id = 2 ;
+}
+
+message GetShardGroupResponse{
+  ResponseHeader header = 1;
+  metapb.ShardGroup shardGroup = 2;
+}
+
+message UpdateShardGroupRequest{
+  RequestHeader header = 1;
+  metapb.ShardGroup shardGroup = 2;
+}
+
+message UpdateShardGroupResponse{
+  ResponseHeader header = 1;
+}
+
+// Replace the shard list of a group.
+message ChangeShardRequest{
+  RequestHeader header = 1;
+  uint32 groupId = 2;
+  repeated metapb.Shard shards = 3;
+}
+
+message ChangeShardResponse {
+  ResponseHeader header = 1;
+}
+
+// Update the PD raft configuration.
+message UpdatePdRaftRequest{
+  RequestHeader header = 1;
+  // Raft configuration string.
+  // NOTE(review): field number 2 is skipped; if a field was removed it
+  // should be declared `reserved 2;`.
+  string config = 3;
+}
+
+message UpdatePdRaftResponse{
+  ResponseHeader header = 1;
+  string message = 2;
+}
+// Snapshot of PD's metadata cache: stores, shard groups, and graphs.
+message CacheResponse {
+  ResponseHeader header = 1;
+  repeated metapb.Store stores = 2;
+  repeated metapb.ShardGroup shards = 3;
+  repeated metapb.Graph graphs = 4;
+}
+// Cached partition list for a graph.
+message CachePartitionResponse {
+  ResponseHeader header = 1;
+  repeated metapb.Partition partitions = 2;
+}

Reply via email to