This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git


The following commit(s) were added to refs/heads/master by this push:
     new 126885d86 docs(store): update guidance for store module (#2894)
126885d86 is described below

commit 126885d86f84869e45daae23535b2ead1290911e
Author: Soyan <[email protected]>
AuthorDate: Fri Oct 31 19:03:59 2025 +0800

    docs(store): update guidance for store module (#2894)
---
 hugegraph-store/README.md                        |  25 +-
 hugegraph-store/docs/best-practices.md           |  13 +-
 hugegraph-store/docs/deployment-guide.md         | 115 +++--
 hugegraph-store/docs/development-guide.md        |   4 +-
 hugegraph-store/docs/distributed-architecture.md |   4 +-
 hugegraph-store/docs/integration-guide.md        | 547 +++++++++++++----------
 hugegraph-store/docs/operations-guide.md         | 140 ++----
 7 files changed, 425 insertions(+), 423 deletions(-)

diff --git a/hugegraph-store/README.md b/hugegraph-store/README.md
index 5d7821b25..23935b3af 100644
--- a/hugegraph-store/README.md
+++ b/hugegraph-store/README.md
@@ -104,19 +104,12 @@ From the project root:
 mvn install -pl hugegraph-struct -am -DskipTests
 
 # Build Store and all dependencies
-mvn clean package -pl hugegraph-store -am -DskipTests
-```
-
-Or build from the `hugegraph-store` directory:
-
-```bash
-cd hugegraph-store
-mvn clean install -DskipTests
+mvn clean package -pl hugegraph-store/hugegraph-store-dist -am -DskipTests
 ```
 
 The assembled distribution will be available at:
 ```
-hugegraph-store/hg-store-dist/target/apache-hugegraph-store-incubating-<version>.tar.gz
+hugegraph-store/apache-hugegraph-store-incubating-1.7.0/lib/hg-store-node-1.7.0.jar
 ```
 
 ### Configuration
@@ -220,8 +213,8 @@ For detailed configuration options, RocksDB tuning, and deployment topologies, s
 Start the Store server:
 
 ```bash
-tar -xzf apache-hugegraph-store-incubating-<version>.tar.gz
-cd apache-hugegraph-store-incubating-<version>
+# Replace {version} with your HugeGraph version
+cd apache-hugegraph-store-incubating-{version}
 
 # Start Store node
 bin/start-hugegraph-store.sh
@@ -258,13 +251,13 @@ ps aux | grep hugegraph-store
 grpcurl -plaintext localhost:8500 list
 
 # Check REST API health
-curl http://localhost:8520/actuator/health
+curl http://localhost:8520/v1/health
 
 # Check logs
 tail -f logs/hugegraph-store.log
 
 # Verify registration with PD (from PD node)
-curl http://localhost:8620/pd/v1/stores
+curl http://localhost:8620/v1/stores
 ```
 
 For production deployment, see [Deployment Guide](docs/deployment-guide.md) and [Best Practices](docs/best-practices.md).
@@ -307,14 +300,12 @@ bin/start-hugegraph.sh
 
 ```bash
 # Check backend via REST API
-curl http://localhost:8080/graphs/<graph-name>/backend
-
+curl --location --request GET 'http://localhost:8080/metrics/backend' \
+--header 'Authorization: Bearer <YOUR_ACCESS_TOKEN>'
 # Response should show:
 # {"backend": "hstore", "nodes": [...]}
 ```
 
-For detailed integration steps, client API usage, and migration from other backends, see [Integration Guide](docs/integration-guide.md).
-
 ---
 
 ## Testing
diff --git a/hugegraph-store/docs/best-practices.md b/hugegraph-store/docs/best-practices.md
index 9a214f4e2..47d02521c 100644
--- a/hugegraph-store/docs/best-practices.md
+++ b/hugegraph-store/docs/best-practices.md
@@ -345,13 +345,12 @@ grpc:
 bin/enable-auth.sh
 
 # Configure users and roles via REST API
-curl -X POST http://localhost:8080/graphs/hugegraph/auth/users \
-  -H "Content-Type: application/json" \
-  -d '{
-    "user_name": "admin",
-    "user_password": "password123",
-    "user_role": "admin"
-  }'
+curl -X POST "http://localhost:8080/graphspaces/{graphspace_name}/graphs/{graph}/auth/users" \
+     -H "Content-Type: application/json" \
+     -d '{
+         "user_name": "admin",
+         "user_password": "password123"
+     }'
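+
+# Hypothetical follow-up (endpoint assumed): verify the new user can authenticate
+curl -u admin:password123 "http://localhost:8080/apis/version"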
 ```
 
 ### Data Encryption
diff --git a/hugegraph-store/docs/deployment-guide.md b/hugegraph-store/docs/deployment-guide.md
index b6aa59568..d45b713c4 100644
--- a/hugegraph-store/docs/deployment-guide.md
+++ b/hugegraph-store/docs/deployment-guide.md
@@ -471,17 +471,34 @@ curl http://localhost:8620/actuator/health
 
 ```bash
 # Check cluster members
-curl http://192.168.1.10:8620/pd/v1/members
+curl http://192.168.1.10:8620/v1/members
 
 # Expected output:
-# {
-#   "members": [
-#     {"id": "1", "name": "pd-1", "address": "192.168.1.10:8686"},
-#     {"id": "2", "name": "pd-2", "address": "192.168.1.11:8686"},
-#     {"id": "3", "name": "pd-3", "address": "192.168.1.12:8686"}
-#   ],
-#   "leader": "1"
-# }
+{
+  "message":"OK",
+  "data":{
+    "pdLeader":null,
+    "pdList":[{
+      "raftUrl":"127.0.0.1:8610",
+      "grpcUrl":"",
+      "restUrl":"",
+      "state":"Offline",
+      "dataPath":"",
+      "role":"Leader",
+      "replicateState":"",
+      "serviceName":"-PD",
+      "serviceVersion":"1.7.0",
+      "startTimeStamp":1761818483830
+    }],
+    "stateCountMap":{
+      "Offline":1
+    },
+    "numOfService":1,
+    "state":"Cluster_OK",
+    "numOfNormalService":0
+  },
+  "status":0
+}
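+
+# Optional (requires jq): summarize member roles and states from the response above
+curl -s http://192.168.1.10:8620/v1/members | jq '.data.pdList[] | {raftUrl, role, state}'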
 ```
 
 ---
@@ -560,23 +577,45 @@ bin/start-hugegraph-store.sh
 tail -f logs/hugegraph-store.log
 
 # Verify Store is running
-curl http://localhost:8520/actuator/health
+curl http://localhost:8520/v1/health
 ```
 
 **Verify Store registration with PD**:
 
 ```bash
 # Query PD for registered stores
-curl http://192.168.1.10:8620/pd/v1/stores
+curl http://192.168.1.10:8620/v1/stores
 
 # Expected output:
-# {
-#   "stores": [
-#     {"id": "1", "address": "192.168.1.20:8500", "state": "Online"},
-#     {"id": "2", "address": "192.168.1.21:8500", "state": "Online"},
-#     {"id": "3", "address": "192.168.1.22:8500", "state": "Online"}
-#   ]
-# }
+{
+  "message":"OK",
+  "data":{
+    "stores":[{
+      "storeId":"1783423547167821026",
+      "address":"192.168.1.10:8500",
+      "raftAddress":"192.168.1.10:8510",
+      "version":"",
+      "state":"Up",
+      "deployPath":"/Users/user/incubator-hugegraph/hugegraph-store/hg-store-node/target/classes/",
+      "dataPath":"./storage",
+      "startTimeStamp":1761818547335,
+      "registedTimeStamp":1761818547335,
+      "lastHeartBeat":1761818727631,
+      "capacity":245107195904,
+      "available":118497292288,
+      "partitionCount":0,
+      "graphSize":0,
+      "keyCount":0,
+      "leaderCount":0,
+      "serviceName":"192.168.1.10:8500-store",
+      "serviceVersion":"",
+      "serviceCreatedTimeStamp":1761818547000,
+      "partitions":[]
+    }],
+    "stateCountMap":{"Up":1},
+    "numOfService":1,
+    "numOfNormalService":1
+  },
+  "status":0
+}
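+
+# Optional (requires jq): check each store's address, state, and free space
+curl -s http://192.168.1.10:8620/v1/stores | jq '.data.stores[] | {address, state, available}'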
 ```
 
 ---
@@ -904,47 +943,45 @@ kubectl port-forward svc/hugegraph-store 8500:8500 -n hugegraph
 
 ```bash
 # PD health
-curl http://192.168.1.10:8620/actuator/health
+curl http://192.168.1.10:8620/v1/health
 
 # Store health
-curl http://192.168.1.20:8520/actuator/health
-
-# Server health
-curl http://192.168.1.30:8080/actuator/health
+curl http://192.168.1.20:8520/v1/health
 ```
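+
+To sweep every node in one pass (a sketch; substitute your own host list):
+
+```bash
+for host in 192.168.1.10:8620 192.168.1.20:8520; do
+  printf '%s: ' "$host"
+  curl -s "http://$host/v1/health"
+  echo
+done
+```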
 
 ### Cluster Status
 
 ```bash
 # PD cluster members
-curl http://192.168.1.10:8620/pd/v1/members
+curl http://192.168.1.10:8620/v1/members
 
 # Registered stores
-curl http://192.168.1.10:8620/pd/v1/stores
+curl http://192.168.1.10:8620/v1/stores
 
 # Partitions
-curl http://192.168.1.10:8620/pd/v1/partitions
+curl http://192.168.1.10:8620/v1/partitions
 
 # Graph list
-curl http://192.168.1.30:8080/graphs
+curl http://192.168.1.10:8620/v1/graphs
 ```
 
 ### Basic Operations Test
 
 ```bash
 # Create vertex via Server
-curl -X POST http://192.168.1.30:8080/graphs/hugegraph/graph/vertices \
-  -H "Content-Type: application/json" \
-  -d '{
-    "label": "person",
-    "properties": {
-      "name": "Alice",
-      "age": 30
-    }
-  }'
-
-# Query vertex
-curl http://192.168.1.30:8080/graphs/hugegraph/graph/vertices
+curl -X POST "http://192.168.1.30:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/vertices" \
+     -H "Content-Type: application/json" \
+     -d '{
+         "label": "person",
+         "properties": {
+             "name": "marko",
+             "age": 29
+         }
+     }'
+
+# Query vertex (use -u if auth is enabled)
+curl -u admin:admin \
+     -X GET "http://localhost:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/vertices/{vertex_id}"
 ```
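+
+A quick Gremlin round-trip can also confirm end-to-end reads (a sketch; the `/gremlin` endpoint and disabled auth are assumed, otherwise add `-u` as above):
+
+```bash
+# POST a Gremlin query to the Server
+curl -X POST "http://192.168.1.30:8080/gremlin" \
+     -H "Content-Type: application/json" \
+     -d '{"gremlin": "g.V().limit(1)"}'
+```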
 
 ### Performance Baseline Test
diff --git a/hugegraph-store/docs/development-guide.md b/hugegraph-store/docs/development-guide.md
index 9a6a5e4c3..3338ed19a 100644
--- a/hugegraph-store/docs/development-guide.md
+++ b/hugegraph-store/docs/development-guide.md
@@ -220,6 +220,8 @@ cd hugegraph-store/hg-store-dist/target/apache-hugegraph-store-incubating-1.7.0
 bin/start-hugegraph-store.sh
 ```
 
+If you want to run the Store module in debug mode,
+run `HgStoreNodeService` directly in your IDE (make sure PD is running).
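+
+Alternatively, attach a remote debugger to a packaged Store node (a sketch; the exact variable the start script honors may differ):
+
+```bash
+# Hypothetical: pass a JDWP agent to the JVM before starting the node
+export JAVA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005"
+bin/start-hugegraph-store.sh
+```
+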
 ---
 
 ## Build and Test
@@ -823,7 +825,7 @@ cd install-dist/scripts/dependency
 - Slack: (link in project README)
 
 **Related Projects**:
-- Apache JRaft: https://github.com/sofastack/sofa-jraft
+- SOFA-JRaft: https://github.com/sofastack/sofa-jraft
 - RocksDB: https://rocksdb.org/
 - gRPC: https://grpc.io/docs/languages/java/
 
diff --git a/hugegraph-store/docs/distributed-architecture.md b/hugegraph-store/docs/distributed-architecture.md
index a97215848..982de223c 100644
--- a/hugegraph-store/docs/distributed-architecture.md
+++ b/hugegraph-store/docs/distributed-architecture.md
@@ -200,11 +200,11 @@ HugeGraph Store follows a layered architecture with clear separation of responsi
 
 ## Raft Consensus Mechanism
 
-HugeGraph Store uses **Apache JRaft** (Ant Financial's Raft implementation) to achieve strong consistency and high availability.
+HugeGraph Store uses **SOFA-JRaft** (Ant Financial's Raft implementation) to achieve strong consistency and high availability.
 
 ### Raft per Partition Design
 
-Unlike some distributed systems that use a single Raft group for the entire cluster, HugeGraph Store uses **one Raft group per partition**:
+Unlike some distributed systems that use a single Raft group for the entire cluster, HugeGraph Store uses **Multi-Raft** (one Raft group per partition):
 
 ```
 Store Cluster (3 nodes: S1, S2, S3)
diff --git a/hugegraph-store/docs/integration-guide.md b/hugegraph-store/docs/integration-guide.md
index a3fad5cae..f35669c69 100644
--- a/hugegraph-store/docs/integration-guide.md
+++ b/hugegraph-store/docs/integration-guide.md
@@ -99,19 +99,10 @@ tail -f logs/hugegraph-server.log
 
 ```bash
 # Check backend via REST API
-curl http://localhost:8080/graphs/hugegraph/backend
-
-# Expected response:
-{
-  "backend": "hstore",
-  "version": "1.7.0",
-  "nodes": [
-    {"id": "1", "address": "192.168.1.20:8500"},
-    {"id": "2", "address": "192.168.1.21:8500"},
-    {"id": "3", "address": "192.168.1.22:8500"}
-  ],
-  "partitions": 12
-}
+curl --location --request GET 'http://localhost:8080/metrics/backend' \
+--header 'Authorization: Bearer <YOUR_ACCESS_TOKEN>'
+# Response should show:
+# {"backend": "hstore", "nodes": [...]}
 ```
 
 ---
@@ -125,184 +116,304 @@ The `hg-store-client` module provides a Java client for directly interacting wit
 ```xml
 <dependency>
     <groupId>org.apache.hugegraph</groupId>
-    <artifactId>hg-store-client</artifactId>
+    <artifactId>hugegraph-client</artifactId>
     <version>1.7.0</version>
 </dependency>
 ```
 
 ### Basic Usage
 
-#### 1. Creating a Client
-
-```java
-import org.apache.hugegraph.store.client.HgStoreClient;
-import org.apache.hugegraph.store.client.HgStoreSession;
-
-// PD addresses
-String pdPeers = "192.168.1.10:8686,192.168.1.11:8686,192.168.1.12:8686";
-
-// Create client
-HgStoreClient client = HgStoreClient.create(pdPeers);
-
-// Create session for a graph
-String graphName = "hugegraph";
-HgStoreSession session = client.openSession(graphName);
-```
-
-#### 2. Basic Operations
-
-**Put (Write)**:
-```java
-import org.apache.hugegraph.store.client.HgStoreSession;
-
-// Put a key-value pair
-byte[] key = "vertex:person:1001".getBytes();
-byte[] value = serializeVertex(vertex);  // Your serialization logic
-
-session.put(tableName, key, value);
-```
-
-**Get (Read)**:
-```java
-// Get value by key
-byte[] key = "vertex:person:1001".getBytes();
-byte[] value = session.get(tableName, key);
-
-if (value != null) {
-    Vertex vertex = deserializeVertex(value);
-}
-```
-
-**Delete**:
-```java
-// Delete a key
-byte[] key = "vertex:person:1001".getBytes();
-session.delete(tableName, key);
-```
+#### 1. Single Example
 
-**Scan (Range Query)**:
 ```java
-import org.apache.hugegraph.store.client.HgStoreResultSet;
-
-// Scan all keys with prefix "vertex:person:"
-byte[] startKey = "vertex:person:".getBytes();
-byte[] endKey = "vertex:person:~".getBytes();
-
-HgStoreResultSet resultSet = session.scan(tableName, startKey, endKey);
-
-while (resultSet.hasNext()) {
-    HgStoreResultSet.Entry entry = resultSet.next();
-    byte[] key = entry.key();
-    byte[] value = entry.value();
-
-    // Process entry
-}
-
-resultSet.close();
-```
-
-#### 3. Batch Operations
-
-```java
-import org.apache.hugegraph.store.client.HgStoreBatch;
-
-// Create batch
-HgStoreBatch batch = session.beginBatch();
-
-// Add operations to batch
-for (Vertex vertex : vertices) {
-    byte[] key = vertexKey(vertex.id());
-    byte[] value = serializeVertex(vertex);
-    batch.put(tableName, key, value);
-}
-
-// Commit batch (atomic write via Raft)
-batch.commit();
-
-// Or rollback
-// batch.rollback();
-```
-
-#### 4. Session Management
-
-```java
-// Close session
-session.close();
-
-// Close client (releases all resources)
-client.close();
-```
-
-### Advanced Usage
-
-#### Query with Filters
-
-```java
-import org.apache.hugegraph.store.client.HgStoreQuery;
-import org.apache.hugegraph.store.client.HgStoreQuery.Filter;
-
-// Build query with filter
-HgStoreQuery query = HgStoreQuery.builder()
-    .table(tableName)
-    .prefix("vertex:person:")
-    .filter(Filter.eq("age", 30))  // Filter: age == 30
-    .limit(100)
-    .build();
-
-// Execute query
-HgStoreResultSet resultSet = session.query(query);
-
-while (resultSet.hasNext()) {
-    // Process results
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hugegraph.driver.GraphManager;
+import org.apache.hugegraph.driver.GremlinManager;
+import org.apache.hugegraph.driver.HugeClient;
+import org.apache.hugegraph.driver.SchemaManager;
+import org.apache.hugegraph.structure.constant.T;
+import org.apache.hugegraph.structure.graph.Edge;
+import org.apache.hugegraph.structure.graph.Path;
+import org.apache.hugegraph.structure.graph.Vertex;
+import org.apache.hugegraph.structure.gremlin.Result;
+import org.apache.hugegraph.structure.gremlin.ResultSet;
+
+public class SingleExample {
+
+    public static void main(String[] args) throws IOException {
+        // If the connection fails, an exception will be thrown.
+        HugeClient hugeClient = HugeClient.builder("http://localhost:8080",
+                        "hugegraph")
+                .build();
+
+        SchemaManager schema = hugeClient.schema();
+
+        schema.propertyKey("name").asText().ifNotExist().create();
+        schema.propertyKey("age").asInt().ifNotExist().create();
+        schema.propertyKey("city").asText().ifNotExist().create();
+        schema.propertyKey("weight").asDouble().ifNotExist().create();
+        schema.propertyKey("lang").asText().ifNotExist().create();
+        schema.propertyKey("date").asDate().ifNotExist().create();
+        schema.propertyKey("price").asInt().ifNotExist().create();
+
+        schema.vertexLabel("person")
+                .properties("name", "age", "city")
+                .primaryKeys("name")
+                .ifNotExist()
+                .create();
+
+        schema.vertexLabel("software")
+                .properties("name", "lang", "price")
+                .primaryKeys("name")
+                .ifNotExist()
+                .create();
+
+        schema.indexLabel("personByCity")
+                .onV("person")
+                .by("city")
+                .secondary()
+                .ifNotExist()
+                .create();
+
+        schema.indexLabel("personByAgeAndCity")
+                .onV("person")
+                .by("age", "city")
+                .secondary()
+                .ifNotExist()
+                .create();
+
+        schema.indexLabel("softwareByPrice")
+                .onV("software")
+                .by("price")
+                .range()
+                .ifNotExist()
+                .create();
+
+        schema.edgeLabel("knows")
+                .sourceLabel("person")
+                .targetLabel("person")
+                .properties("date", "weight")
+                .ifNotExist()
+                .create();
+
+        schema.edgeLabel("created")
+                .sourceLabel("person").targetLabel("software")
+                .properties("date", "weight")
+                .ifNotExist()
+                .create();
+
+        schema.indexLabel("createdByDate")
+                .onE("created")
+                .by("date")
+                .secondary()
+                .ifNotExist()
+                .create();
+
+        schema.indexLabel("createdByWeight")
+                .onE("created")
+                .by("weight")
+                .range()
+                .ifNotExist()
+                .create();
+
+        schema.indexLabel("knowsByWeight")
+                .onE("knows")
+                .by("weight")
+                .range()
+                .ifNotExist()
+                .create();
+
+        GraphManager graph = hugeClient.graph();
+        Vertex marko = graph.addVertex(T.LABEL, "person", "name", "marko",
+                "age", 29, "city", "Beijing");
+        Vertex vadas = graph.addVertex(T.LABEL, "person", "name", "vadas",
+                "age", 27, "city", "Hongkong");
+        Vertex lop = graph.addVertex(T.LABEL, "software", "name", "lop",
+                "lang", "java", "price", 328);
+        Vertex josh = graph.addVertex(T.LABEL, "person", "name", "josh",
+                "age", 32, "city", "Beijing");
+        Vertex ripple = graph.addVertex(T.LABEL, "software", "name", "ripple",
+                "lang", "java", "price", 199);
+        Vertex peter = graph.addVertex(T.LABEL, "person", "name", "peter",
+                "age", 35, "city", "Shanghai");
+
+        marko.addEdge("knows", vadas, "date", "2016-01-10", "weight", 0.5);
+        marko.addEdge("knows", josh, "date", "2013-02-20", "weight", 1.0);
+        marko.addEdge("created", lop, "date", "2017-12-10", "weight", 0.4);
+        josh.addEdge("created", lop, "date", "2009-11-11", "weight", 0.4);
+        josh.addEdge("created", ripple, "date", "2017-12-10", "weight", 1.0);
+        peter.addEdge("created", lop, "date", "2017-03-24", "weight", 0.2);
+
+        GremlinManager gremlin = hugeClient.gremlin();
+        System.out.println("==== Path ====");
+        ResultSet resultSet = gremlin.gremlin("g.V().outE().path()").execute();
+        Iterator<Result> results = resultSet.iterator();
+        results.forEachRemaining(result -> {
+            System.out.println(result.getObject().getClass());
+            Object object = result.getObject();
+            if (object instanceof Vertex) {
+                System.out.println(((Vertex) object).id());
+            } else if (object instanceof Edge) {
+                System.out.println(((Edge) object).id());
+            } else if (object instanceof Path) {
+                List<Object> elements = ((Path) object).objects();
+                elements.forEach(element -> {
+                    System.out.println(element.getClass());
+                    System.out.println(element);
+                });
+            } else {
+                System.out.println(object);
+            }
+        });
+
+        hugeClient.close();
+    }
 }
-```
 
-#### Aggregation Queries
-
-```java
-import org.apache.hugegraph.store.client.HgStoreQuery.Aggregation;
-
-// Count vertices with label "person"
-HgStoreQuery query = HgStoreQuery.builder()
-    .table(tableName)
-    .prefix("vertex:person:")
-    .aggregation(Aggregation.COUNT)
-    .build();
-
-long count = session.aggregate(query);
-System.out.println("Person count: " + count);
 ```
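+
+To compile and run the example (a sketch; assumes a Maven project that declares the `hugegraph-client` dependency above):
+
+```bash
+# exec-maven-plugin is resolved by prefix; adjust the main class/package to your project
+mvn -q compile exec:java -Dexec.mainClass=SingleExample
+```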
 
-#### Multi-Partition Iteration
+#### 2. Batch Example
 
 ```java
-// Scan across all partitions (Store handles partition routing)
-HgStoreResultSet resultSet = session.scanAll(tableName);
-
-while (resultSet.hasNext()) {
-    HgStoreResultSet.Entry entry = resultSet.next();
-    // Process entry from any partition
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hugegraph.driver.GraphManager;
+import org.apache.hugegraph.driver.HugeClient;
+import org.apache.hugegraph.driver.SchemaManager;
+import org.apache.hugegraph.structure.graph.Edge;
+import org.apache.hugegraph.structure.graph.Vertex;
+
+public class BatchExample {
+
+    public static void main(String[] args) {
+        // If the connection fails, an exception will be thrown.
+        HugeClient hugeClient = HugeClient.builder("http://localhost:8080",
+                                                   "hugegraph")
+                                          .build();
+
+        SchemaManager schema = hugeClient.schema();
+
+        schema.propertyKey("name").asText().ifNotExist().create();
+        schema.propertyKey("age").asInt().ifNotExist().create();
+        schema.propertyKey("lang").asText().ifNotExist().create();
+        schema.propertyKey("date").asDate().ifNotExist().create();
+        schema.propertyKey("price").asInt().ifNotExist().create();
+
+        schema.vertexLabel("person")
+              .properties("name", "age")
+              .primaryKeys("name")
+              .ifNotExist()
+              .create();
+
+        schema.vertexLabel("person")
+              .properties("price")
+              .nullableKeys("price")
+              .append();
+
+        schema.vertexLabel("software")
+              .properties("name", "lang", "price")
+              .primaryKeys("name")
+              .ifNotExist()
+              .create();
+
+        schema.indexLabel("softwareByPrice")
+              .onV("software").by("price")
+              .range()
+              .ifNotExist()
+              .create();
+
+        schema.edgeLabel("knows")
+              .link("person", "person")
+              .properties("date")
+              .ifNotExist()
+              .create();
+
+        schema.edgeLabel("created")
+              .link("person", "software")
+              .properties("date")
+              .ifNotExist()
+              .create();
+
+        schema.indexLabel("createdByDate")
+              .onE("created").by("date")
+              .secondary()
+              .ifNotExist()
+              .create();
+
+        // get schema object by name
+        System.out.println(schema.getPropertyKey("name"));
+        System.out.println(schema.getVertexLabel("person"));
+        System.out.println(schema.getEdgeLabel("knows"));
+        System.out.println(schema.getIndexLabel("createdByDate"));
+
+        // list all schema objects
+        System.out.println(schema.getPropertyKeys());
+        System.out.println(schema.getVertexLabels());
+        System.out.println(schema.getEdgeLabels());
+        System.out.println(schema.getIndexLabels());
+
+        GraphManager graph = hugeClient.graph();
+
+        Vertex marko = new Vertex("person").property("name", "marko")
+                                           .property("age", 29);
+        Vertex vadas = new Vertex("person").property("name", "vadas")
+                                           .property("age", 27);
+        Vertex lop = new Vertex("software").property("name", "lop")
+                                           .property("lang", "java")
+                                           .property("price", 328);
+        Vertex josh = new Vertex("person").property("name", "josh")
+                                          .property("age", 32);
+        Vertex ripple = new Vertex("software").property("name", "ripple")
+                                              .property("lang", "java")
+                                              .property("price", 199);
+        Vertex peter = new Vertex("person").property("name", "peter")
+                                           .property("age", 35);
+
+        Edge markoKnowsVadas = new Edge("knows").source(marko).target(vadas)
+                                                .property("date", "2016-01-10");
+        Edge markoKnowsJosh = new Edge("knows").source(marko).target(josh)
+                                               .property("date", "2013-02-20");
+        Edge markoCreateLop = new Edge("created").source(marko).target(lop)
+                                                 .property("date",
+                                                           "2017-12-10");
+        Edge joshCreateRipple = new Edge("created").source(josh).target(ripple)
+                                                   .property("date",
+                                                             "2017-12-10");
+        Edge joshCreateLop = new Edge("created").source(josh).target(lop)
+                                                .property("date", "2009-11-11");
+        Edge peterCreateLop = new Edge("created").source(peter).target(lop)
+                                                 .property("date",
+                                                           "2017-03-24");
+
+        List<Vertex> vertices = new ArrayList<>();
+        vertices.add(marko);
+        vertices.add(vadas);
+        vertices.add(lop);
+        vertices.add(josh);
+        vertices.add(ripple);
+        vertices.add(peter);
+
+        List<Edge> edges = new ArrayList<>();
+        edges.add(markoKnowsVadas);
+        edges.add(markoKnowsJosh);
+        edges.add(markoCreateLop);
+        edges.add(joshCreateRipple);
+        edges.add(joshCreateLop);
+        edges.add(peterCreateLop);
+
+        vertices = graph.addVertices(vertices);
+        vertices.forEach(vertex -> System.out.println(vertex));
+
+        edges = graph.addEdges(edges, false);
+        edges.forEach(edge -> System.out.println(edge));
+
+        hugeClient.close();
+    }
 }
-
-resultSet.close();
-```
-
-### Connection Pool Configuration
-
-```java
-import org.apache.hugegraph.store.client.HgStoreClientConfig;
-
-// Configure client
-HgStoreClientConfig config = HgStoreClientConfig.builder()
-    .pdPeers(pdPeers)
-    .maxSessions(10)               // Max sessions per Store node
-    .sessionTimeout(30000)          // Session timeout (ms)
-    .rpcTimeout(10000)              // RPC timeout (ms)
-    .maxRetries(3)                  // Max retry attempts
-    .retryInterval(1000)            // Retry interval (ms)
-    .build();
-
-HgStoreClient client = HgStoreClient.create(config);
 ```
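+
+Note: `graph.addEdges(edges, false)` disables the per-edge vertex-existence check, which speeds up bulk loads; pass `true` if the server should verify that both endpoints exist.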
 
 ---
@@ -332,55 +443,6 @@ HgStoreClient client = HgStoreClient.create(config);
    - Send request to leader Store
 ```
 
-### Partition Routing
-
-**Example**: Write vertex with ID `"person:1001"`
-
-```java
-// 1. Client hashes the key
-String key = "vertex:person:1001";
-int hash = MurmurHash3.hash32(key);  // e.g., 0x12345678
-
-// 2. Client queries PD: which partition owns this hash?
-Partition partition = pdClient.getPartitionByHash(graphName, hash);
-// PD responds: Partition 5
-
-// 3. Client queries PD: who is the leader of Partition 5?
-Shard leader = partition.getLeader();
-// PD responds: Store 2 (192.168.1.21:8500)
-
-// 4. Client sends write request to Store 2
-storeClient.put(leader.getStoreAddress(), tableName, key, value);
-```
-
-**Caching**:
-- Client caches partition metadata (refreshed every 60 seconds)
-- On leader change, client receives redirect response and updates cache
-
-### Handling PD Failures
-
-**Scenario**: PD cluster is temporarily unavailable
-
-**Client Behavior**:
-1. **Short outage** (<60 seconds):
-   - Client uses cached partition metadata
-   - Operations continue normally
-   - Client retries PD connection in background
-
-2. **Long outage** (>60 seconds):
-   - Cached metadata may become stale (e.g., leader changed)
-   - Client may send requests to wrong Store node
-   - Store node redirects client to current leader
-   - Client updates cache and retries
-
-3. **Complete PD failure**:
-   - Client cannot discover new Store nodes or partitions
-   - Existing operations work, but cluster cannot scale or rebalance
-
-**Recommendation**: Always run PD in a 3-node or 5-node cluster for high availability
-
----
-
 ## Migration from Other Backends
 
 ### RocksDB Embedded to Store
@@ -453,13 +515,13 @@ bin/hugegraph-restore.sh \
 
 ```bash
 # Check vertex count
-curl http://localhost:8080/graphs/hugegraph/graph/vertices?limit=0
+curl http://localhost:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/vertices
 
 # Check edge count
-curl http://localhost:8080/graphs/hugegraph/graph/edges?limit=0
+curl http://localhost:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/edges
 
 # Run sample queries
-curl http://localhost:8080/graphs/hugegraph/graph/vertices?label=person&limit=10
+curl http://localhost:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/vertices/{id}
 ```
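+
+Optionally, count what comes back with jq (a sketch; list responses are paginated, so this counts one page):
+
+```bash
+curl -s "http://localhost:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/vertices" | jq '.vertices | length'
+```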
 
 ---
@@ -577,10 +639,10 @@ graph.name=analytics
 **Access**:
 ```bash
 # Production graph
-curl http://localhost:8080/graphs/production/graph/vertices
+curl "http://192.168.1.30:8080/graphspaces/{graphspace_name}/graphs/production/graph/vertices"
 
 
 # Analytics graph
-curl http://localhost:8080/graphs/analytics/graph/vertices
+curl "http://192.168.1.30:8080/graphspaces/{graphspace_name}/graphs/analytics/graph/vertices"
 
 ```
 
 ### Mixed Backend Configuration
@@ -615,7 +677,7 @@ ERROR o.a.h.b.s.h.HstoreProvider - Failed to connect to PD cluster
 **Diagnosis**:
 ```bash
 # Check PD is running
-curl http://192.168.1.10:8620/actuator/health
+curl http://192.168.1.10:8620/v1/health
 
 # Check network connectivity
 telnet 192.168.1.10 8686
@@ -641,10 +703,10 @@ tail -f logs/hugegraph-server.log | grep PD
 **Diagnosis**:
 ```bash
 # Check Store node health
-curl http://192.168.1.20:8520/actuator/metrics
+curl http://192.168.1.20:8520/v1/health
 
 # Check partition distribution
-curl http://192.168.1.10:8620/pd/v1/partitions
+curl http://192.168.1.10:8620/v1/partitions
 
 # Check if queries are using indexes
 # (Enable query logging in Server)
@@ -652,11 +714,6 @@ curl http://192.168.1.10:8620/pd/v1/partitions
 
 **Solutions**:
 1. **Create indexes**: Ensure label and property indexes exist
-   ```groovy
-   // In Gremlin console
-   schema.indexLabel("personByName").onV("person").by("name").secondary().create()
-   ```
-
 2. **Increase Store nodes**: If data exceeds capacity of 3 nodes
 3. **Tune RocksDB**: See [Best Practices](best-practices.md)
 4. **Enable query pushdown**: Ensure Server is using Store's query API
@@ -676,10 +733,10 @@ ERROR o.a.h.b.s.h.HstoreSession - Write operation failed: Raft leader not found
 tail -f logs/hugegraph-store.log | grep Raft
 
 # Check partition leaders
-curl http://192.168.1.10:8620/pd/v1/partitions | grep leader
+curl http://192.168.1.10:8620/v1/partitions | grep leader
 
 # Check Store node states
-curl http://192.168.1.10:8620/pd/v1/stores
+curl http://192.168.1.10:8620/v1/stores
 ```
 
 **Solutions**:
@@ -699,7 +756,7 @@ curl http://192.168.1.10:8620/pd/v1/stores
 **Diagnosis**:
 ```bash
 # Compare counts
-curl http://localhost:8080/graphs/hugegraph/graph/vertices?limit=0
+curl http://localhost:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/vertices
 # vs expected count from backup
 
 # Check for restore errors
@@ -710,7 +767,7 @@ tail -f logs/hugegraph-tools.log | grep ERROR
 1. **Re-run restore**: Delete graph and restore again
    ```bash
    # Clear graph
-   curl -X DELETE http://localhost:8080/graphs/hugegraph/graph/vertices
+   curl -X DELETE http://localhost:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/vertices/{id}
 
    # Restore
    bin/hugegraph-restore.sh --graph hugegraph --directory /backup/data
@@ -738,12 +795,6 @@ jmap -dump:format=b,file=heap.bin <server-pid>
 
 **Solutions**:
 1. **Close sessions**: Ensure `HgStoreSession.close()` is called
-   ```java
-   try (HgStoreSession session = client.openSession(graphName)) {
-       // Use session
-   }  // Auto-closed
-   ```
-
 2. **Tune connection pool**: Reduce `store.max_sessions` if too high
 3. **Increase heap**: Increase Server JVM heap size
    ```bash
diff --git a/hugegraph-store/docs/operations-guide.md b/hugegraph-store/docs/operations-guide.md
index 47023d3c9..a937d52bf 100644
--- a/hugegraph-store/docs/operations-guide.md
+++ b/hugegraph-store/docs/operations-guide.md
@@ -52,15 +52,6 @@ curl http://<pd-host>:8620/actuator/metrics
 - **Normal**: <30,000ms (30 seconds)
 - **Warning**: >60,000ms (large partition or slow disk)
 
-**Queries**:
-```bash
-# Check leader election count
-curl http://192.168.1.20:8520/actuator/metrics/raft.leader.election.count
-
-# Check log apply latency
-curl http://192.168.1.20:8520/actuator/metrics/raft.log.apply.latency
-```
-
 #### 2. RocksDB Metrics
 
 **Metric**: `rocksdb.read.latency`
@@ -83,12 +74,6 @@ curl http://192.168.1.20:8520/actuator/metrics/raft.log.apply.latency
 - **Normal**: >90%
 - **Warning**: <70% (increase cache size)
 
-**Queries**:
-```bash
-curl http://192.168.1.20:8520/actuator/metrics/rocksdb.read.latency
-curl http://192.168.1.20:8520/actuator/metrics/rocksdb.compaction.pending
-```
-
 #### 3. Partition Metrics
 
 **Metric**: `partition.count`
@@ -103,14 +88,15 @@ curl http://192.168.1.20:8520/actuator/metrics/rocksdb.compaction.pending
 
 **Queries**:
 ```bash
-# Check partition distribution (via PD)
-curl http://192.168.1.10:8620/pd/v1/stats/partition-distribution
+# Check partition distribution
+curl http://localhost:8620/v1/partitionsAndStats
 
-# Expected output:
+# Example output:
 # {
-#   "store_1": {"total": 12, "leaders": 4},
-#   "store_2": {"total": 12, "leaders": 4},
-#   "store_3": {"total": 12, "leaders": 4}
+#   "partitions": {},
+#   "partitionStats": {}
 # }
 ```
 
@@ -272,13 +258,14 @@ curl http://192.168.1.10:8620/pd/v1/partitions | jq '.[] | select(.leader == nul
 **Diagnosis**:
 ```bash
 # Check partition distribution
-curl http://192.168.1.10:8620/pd/v1/stats/partition-distribution
+curl http://localhost:8620/v1/partitionsAndStats
 
 # Example output (imbalanced):
 # {
-#   "store_1": {"total": 20, "leaders": 15},
-#   "store_2": {"total": 8, "leaders": 2},
-#   "store_3": {"total": 8, "leaders": 1}
+#   "partitions": {},
+#   "partitionStats": {}
 # }
 ```
 
@@ -290,7 +277,7 @@ curl http://192.168.1.10:8620/pd/v1/stats/partition-distribution
 **Solutions**:
 1. **Trigger Manual Rebalance** (via PD API):
    ```bash
-   curl -X POST http://192.168.1.10:8620/pd/v1/balance/trigger
+   curl http://192.168.1.10:8620/v1/balanceLeaders
    ```
 
 2. **Reduce Patrol Interval** (in PD `application.yml`):
@@ -347,7 +334,7 @@ iostat -x 1
 4. **Monitor Progress**:
    ```bash
    # Check partition state transitions
-   curl http://192.168.1.10:8620/pd/v1/partitions | grep -i migrating
+   curl http://192.168.1.10:8620/v1/partitions | grep -i migrating
    ```
 
 ---
@@ -361,10 +348,6 @@ iostat -x 1
 
 **Diagnosis**:
 ```bash
-# Check RocksDB stats
-curl http://192.168.1.20:8520/actuator/metrics/rocksdb.compaction.pending
-curl http://192.168.1.20:8520/actuator/metrics/rocksdb.block.cache.hit.rate
-
 # Check Store logs for compaction
 tail -f logs/hugegraph-store.log | grep compaction
 ```
@@ -388,13 +371,7 @@ tail -f logs/hugegraph-store.log | grep compaction
      max_write_buffer_number: 8    # More memtables
    ```
 
-3. **Manual Compaction** (if safe):
-   ```bash
-   # Trigger compaction via Store admin API
-   curl -X POST http://192.168.1.20:8520/admin/rocksdb/compact
-   ```
-
-4. **Restart Store Node** (last resort, triggers compaction on startup):
+3. **Restart Store Node** (last resort, triggers compaction on startup):
    ```bash
    bin/stop-hugegraph-store.sh
    bin/start-hugegraph-store.sh
@@ -496,58 +473,6 @@ scp backup-store1-*.tar.gz backup-server:/backups/
 - Requires all Store nodes to be backed up
 - May miss recent writes (since last snapshot)
 
-#### Strategy 2: RocksDB Checkpoint
-
-**Frequency**: Before major operations (upgrades, schema changes)
-
-**Process**:
-```bash
-# Trigger checkpoint via Store API
-curl -X POST http://192.168.1.20:8520/admin/rocksdb/checkpoint
-
-# Checkpoint created in storage/rocksdb-checkpoint/
-tar -czf backup-checkpoint-$(date +%Y%m%d).tar.gz storage/rocksdb-checkpoint/
-
-# Upload to backup server
-scp backup-checkpoint-*.tar.gz backup-server:/backups/
-```
-
-**Pros**:
-- Consistent checkpoint
-- Can be restored to a single node (for testing)
-
-**Cons**:
-- Larger backup size
-- Slower than snapshot
-
-#### Strategy 3: Logical Backup (via HugeGraph API)
-
-**Frequency**: Weekly or monthly
-
-**Process**:
-```bash
-# Use HugeGraph-Tools
-cd hugegraph-tools
-
-bin/hugegraph-backup.sh \
-  --graph hugegraph \
-  --directory /backups/logical-$(date +%Y%m%d) \
-  --format json
-
-# Backup includes:
-# - schema.json
-# - vertices.json
-# - edges.json
-```
-
-**Pros**:
-- Backend-agnostic (can restore to different backend)
-- Human-readable format
-
-**Cons**:
-- Slower (especially for large graphs)
-- Requires Server to be running
-
 ### Disaster Recovery Procedures
 
 #### Scenario 1: Single Store Node Failure
@@ -558,7 +483,7 @@ bin/hugegraph-backup.sh \
 1. **No immediate action needed**: Remaining replicas continue serving
 2. **Monitor**: Check if Raft leaders re-elected
    ```bash
-   curl http://192.168.1.10:8620/pd/v1/partitions | grep leader
+   curl http://192.168.1.10:8620/v1/partitions | grep leader
    ```
 
 3. **Replace Failed Node**:
@@ -568,7 +493,7 @@ bin/hugegraph-backup.sh \
 
 4. **Verify**: Check partition distribution
    ```bash
-   curl http://192.168.1.10:8620/pd/v1/stats/partition-distribution
+   curl http://localhost:8620/v1/partitionsAndStats
    ```
 
 #### Scenario 2: Complete Store Cluster Failure
@@ -597,7 +522,7 @@ bin/hugegraph-backup.sh \
 4. **Verify Data**:
    ```bash
    # Check via Server
-   curl http://192.168.1.30:8080/graphs/hugegraph/graph/vertices?limit=10
+   curl http://192.168.1.30:8080/graphspaces/{graphspace_name}/graphs/{graph_name}/graph/vertices?limit=10
    ```
 
 #### Scenario 3: Data Corruption
@@ -651,7 +576,7 @@ du -sh storage/
 **Partition Count**:
 ```bash
 # Current partition count
-curl http://192.168.1.10:8620/pd/v1/stats/partition-count
+curl http://192.168.1.10:8620/v1/partitionsAndStats
 
 # Recommendation: 3-5x Store node count
 # Example: 6 Store nodes → 18-30 partitions
@@ -678,19 +603,19 @@ curl http://192.168.1.10:8620/pd/v1/stats/partition-count
 
 2. **Verify Registration**:
    ```bash
-   curl http://192.168.1.10:8620/pd/v1/stores
+   curl http://192.168.1.10:8620/v1/stores
    # New Store should appear
    ```
 
 3. **Trigger Rebalancing** (optional):
    ```bash
-   curl -X POST http://192.168.1.10:8620/pd/v1/balance/trigger
+   curl -X POST http://192.168.1.10:8620/v1/balanceLeaders
    ```
 
 4. **Monitor Rebalancing**:
    ```bash
    # Watch partition distribution
-   watch -n 10 'curl -s http://192.168.1.10:8620/pd/v1/stats/partition-distribution'
+   watch -n 10 'curl http://192.168.1.10:8620/v1/partitionsAndStats'
    ```
 
 5. **Verify**: Wait for even distribution (may take hours)
@@ -703,17 +628,17 @@ curl http://192.168.1.10:8620/pd/v1/stats/partition-count
 
 **Process**:
 1. **Mark Store for Removal** (via PD API):
-   ```bash
-   curl -X POST http://192.168.1.10:8620/pd/v1/stores/3/decommission
-   ```
+   ```bash
+   curl --location --request POST 'http://localhost:8080/store/123' \
+        --header 'Content-Type: application/json' \
+        --data-raw '{
+            "storeState": "Off"
+        }'
+   ```
+   Refer to the API definition in `StoreAPI::setStore`.
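+   After the request is accepted, the store's state should change in the stores listing (requires jq):
+   ```bash
+   curl -s http://192.168.1.10:8620/v1/stores | jq '.data.stores[] | {storeId, state}'
+   ```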
 
 2. **Wait for Migration**:
    - PD migrates all partitions off this Store
-   - Monitor:
-     ```bash
-     curl http://192.168.1.10:8620/pd/v1/stores/3
-     # Check partition count → should reach 0
-     ```
 
 3. **Stop Store Node**:
    ```bash
@@ -721,9 +646,6 @@ curl http://192.168.1.10:8620/pd/v1/stats/partition-count
    ```
 
 4. **Remove from PD** (optional):
-   ```bash
-   curl -X DELETE http://192.168.1.10:8620/pd/v1/stores/3
-   ```
 
 ---
 
@@ -761,7 +683,7 @@ cp ../apache-hugegraph-store-incubating-1.7.0-backup/conf/application.yml conf/
 bin/start-hugegraph-store.sh
 
 # Verify
-curl http://192.168.1.20:8520/actuator/health
+curl http://192.168.1.20:8520/v1/health
 tail -f logs/hugegraph-store.log
 ```
 

