This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph-ai.git


The following commit(s) were added to refs/heads/main by this push:
     new 26b9e45  fix(llm): mark wrong usage in embedding api (#250)
26b9e45 is described below

commit 26b9e45392d5c2abd648f52dd5aa7f2e91331c61
Author: SoJGooo <[email protected]>
AuthorDate: Fri May 23 18:25:13 2025 +0800

    fix(llm): mark wrong usage in embedding api (#250)
    
    Tell users to update the dependencies.
    
    ---------
    
    Co-authored-by: imbajin <[email protected]>
---
 .../src/hugegraph_llm/models/embeddings/base.py    |  4 ++
 .../src/hugegraph_llm/models/embeddings/ollama.py  | 48 ++++++++--------------
 .../operators/index_op/build_semantic_index.py     | 10 +++--
 .../operators/index_op/build_vector_index.py       |  2 +
 .../src/tests/indices/test_vector_index.py         |  3 +-
 5 files changed, 31 insertions(+), 36 deletions(-)

diff --git a/hugegraph-llm/src/hugegraph_llm/models/embeddings/base.py b/hugegraph-llm/src/hugegraph_llm/models/embeddings/base.py
index 73e973e..d6b6629 100644
--- a/hugegraph-llm/src/hugegraph_llm/models/embeddings/base.py
+++ b/hugegraph-llm/src/hugegraph_llm/models/embeddings/base.py
@@ -20,6 +20,7 @@ from enum import Enum
 from typing import List, Union
 
 import numpy as np
+from typing_extensions import deprecated
 
 
 class SimilarityMode(str, Enum):
@@ -53,6 +54,8 @@ def similarity(
 class BaseEmbedding(ABC):
     """Embedding wrapper should take in a text and return a vector."""
 
+    # TODO: replace all usages with get_texts_embeddings() & remove this in the future
+    @deprecated("Use get_texts_embeddings() instead in the future.")
     @abstractmethod
     def get_text_embedding(
             self,
@@ -83,6 +86,7 @@ class BaseEmbedding(ABC):
             The order of embeddings should match the order of input texts.
         """
 
+    # TODO: [PR-238] Add & implement batch processing for async_get_texts_embeddings (refactor here)
     @abstractmethod
     async def async_get_text_embedding(
             self,
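
For callers migrating off the deprecated API above, a minimal sketch of the intended switch (the model name below is illustrative, not taken from this commit):

    from hugegraph_llm.models.embeddings.ollama import OllamaEmbedding

    embedder = OllamaEmbedding(model="nomic-embed-text")  # example model name

    # Deprecated single-text call:
    vector = embedder.get_text_embedding("hello graph")

    # Preferred batch call; index [0] recovers the single vector:
    vector = embedder.get_texts_embeddings(["hello graph"])[0]
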
diff --git a/hugegraph-llm/src/hugegraph_llm/models/embeddings/ollama.py b/hugegraph-llm/src/hugegraph_llm/models/embeddings/ollama.py
index 062e098..e54750f 100644
--- a/hugegraph-llm/src/hugegraph_llm/models/embeddings/ollama.py
+++ b/hugegraph-llm/src/hugegraph_llm/models/embeddings/ollama.py
@@ -19,57 +19,41 @@
 from typing import List
 
 import ollama
+
 from .base import BaseEmbedding
 
 
 class OllamaEmbedding(BaseEmbedding):
-    def __init__(
-            self,
-            model: str,
-            host: str = "127.0.0.1",
-            port: int = 11434,
-            **kwargs
-    ):
+    def __init__(self, model: str, host: str = "127.0.0.1", port: int = 11434, **kwargs):
         self.model = model
         self.client = ollama.Client(host=f"http://{host}:{port}", **kwargs)
         self.async_client = ollama.AsyncClient(host=f"http://{host}:{port}", **kwargs)
         self.embedding_dimension = None
 
-    def get_text_embedding(
-            self,
-            text: str
-    ) -> List[float]:
-        """Comment"""
-        return list(self.client.embed(model=self.model, input=text)["embeddings"][0])
+    def get_text_embedding(self, text: str) -> List[float]:
+        """Get embedding for a single text."""
+        return self.get_texts_embeddings([text])[0]
 
-    def get_texts_embeddings(
-            self,
-            texts: List[str]
-    ) -> List[List[float]]:
+    def get_texts_embeddings(self, texts: List[str]) -> List[List[float]]:
         """Get embeddings for multiple texts in a single batch.
-        
-        This method efficiently processes multiple texts at once by leveraging
-        Ollama's batching capabilities, which is more efficient than processing
-        texts individually.
-        
-        Parameters
-        ----------
-        texts : List[str]
-            A list of text strings to be embedded.
-            
+
         Returns
         -------
         List[List[float]]
             A list of embedding vectors, where each vector is a list of floats.
             The order of embeddings matches the order of input texts.
         """
+        if not hasattr(self.client, "embed"):
+            error_message = (
+                "The required 'embed' method was not found on the Ollama 
client. "
+                "Please ensure your ollama library is up-to-date and supports 
batch embedding. "
+            )
+            raise AttributeError(error_message)
+
         response = self.client.embed(model=self.model, input=texts)["embeddings"]
         return [list(inner_sequence) for inner_sequence in response]
 
-    async def async_get_text_embedding(
-            self,
-            text: str
-    ) -> List[float]:
-        """Comment"""
+    # TODO: Add & implement batch processing for async_get_texts_embeddings 
(refactor here)
+    async def async_get_text_embedding(self, text: str) -> List[float]:
         response = await self.async_client.embeddings(model=self.model, prompt=text)
         return list(response["embedding"])
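
The hasattr guard above exists because older ollama clients only expose embeddings() for single prompts, not the batch embed() API. A hedged sketch of what an outdated installation will hit, and the fix (model name is an example):

    from hugegraph_llm.models.embeddings.ollama import OllamaEmbedding

    embedder = OllamaEmbedding(model="nomic-embed-text")  # example model name
    try:
        embedder.get_texts_embeddings(["graph", "vector"])
    except AttributeError as err:
        # Raised by the guard when the client lacks 'embed';
        # upgrading resolves it: pip install -U ollama
        print(err)
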
diff --git a/hugegraph-llm/src/hugegraph_llm/operators/index_op/build_semantic_index.py b/hugegraph-llm/src/hugegraph_llm/operators/index_op/build_semantic_index.py
index ce64442..e6b4080 100644
--- a/hugegraph-llm/src/hugegraph_llm/operators/index_op/build_semantic_index.py
+++ b/hugegraph-llm/src/hugegraph_llm/operators/index_op/build_semantic_index.py
@@ -25,8 +25,9 @@ from tqdm import tqdm
 from hugegraph_llm.config import resource_path, huge_settings
 from hugegraph_llm.indices.vector_index import VectorIndex
 from hugegraph_llm.models.embeddings.base import BaseEmbedding
-from hugegraph_llm.utils.log import log
 from hugegraph_llm.operators.hugegraph_op.schema_manager import SchemaManager
+from hugegraph_llm.utils.log import log
+
 
 class BuildSemanticIndex:
     def __init__(self, embedding: BaseEmbedding):
@@ -41,12 +42,15 @@ class BuildSemanticIndex:
     async def _get_embeddings_parallel(self, vids: list[str]) -> list[Any]:
         sem = asyncio.Semaphore(10)
         batch_size = 1000
+
+        # TODO: refactor the logic here (call async method)
         async def get_embeddings_with_semaphore(vid_list: list[str]) -> Any:
             # Executes sync embedding method in a thread pool via loop.run_in_executor, combining async programming
             # with multi-threading capabilities.
             # This pattern avoids blocking the event loop and prepares for a future fully async pipeline.
             async with sem:
                 loop = asyncio.get_running_loop()
+                # FIXME: [PR-238] add & use async_get_texts_embedding instead of sync method
                 return await loop.run_in_executor(None, self.embedding.get_texts_embeddings, vid_list)
 
         # Split vids into batches of size batch_size
@@ -59,7 +63,7 @@ class BuildSemanticIndex:
         with tqdm(total=len(tasks)) as pbar:
             for future in asyncio.as_completed(tasks):
                 batch_embeddings = await future
-                embeddings.extend(batch_embeddings) # Extend the list with batch results
+                embeddings.extend(batch_embeddings)  # Extend the list with batch results
                 pbar.update(1)
         return embeddings
 
@@ -69,7 +73,7 @@ class BuildSemanticIndex:
 
         past_vids = self.vid_index.properties
         # TODO: We should build vid vector index separately, especially when the vertices may be very large
-        present_vids = context["vertices"] # Warning: data truncated by fetch_graph_data.py
+        present_vids = context["vertices"]  # Warning: data truncated by fetch_graph_data.py
         removed_vids = set(past_vids) - set(present_vids)
         removed_num = self.vid_index.remove(removed_vids)
         added_vids = list(set(present_vids) - set(past_vids))
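
The concurrency pattern in _get_embeddings_parallel can be summarized in a standalone sketch: a semaphore caps in-flight batches while run_in_executor keeps the synchronous embedding call off the event loop. Function and parameter names here are illustrative, not part of the commit:

    import asyncio

    async def embed_batches(batches, embed_fn, max_concurrency=10):
        # Limit concurrent batches so the embedding backend is not overwhelmed.
        sem = asyncio.Semaphore(max_concurrency)

        async def run_one(batch):
            async with sem:
                loop = asyncio.get_running_loop()
                # Run the sync call in the default thread pool so the event
                # loop stays responsive while embeddings are computed.
                return await loop.run_in_executor(None, embed_fn, batch)

        results = []
        # as_completed yields in completion order, matching the original code.
        for fut in asyncio.as_completed([run_one(b) for b in batches]):
            results.extend(await fut)
        return results
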
diff --git a/hugegraph-llm/src/hugegraph_llm/operators/index_op/build_vector_index.py b/hugegraph-llm/src/hugegraph_llm/operators/index_op/build_vector_index.py
index ef87379..01499f5 100644
--- a/hugegraph-llm/src/hugegraph_llm/operators/index_op/build_vector_index.py
+++ b/hugegraph-llm/src/hugegraph_llm/operators/index_op/build_vector_index.py
@@ -20,6 +20,7 @@ import os
 from typing import Dict, Any
 
 from tqdm import tqdm
+
 from hugegraph_llm.config import huge_settings, resource_path
 from hugegraph_llm.indices.vector_index import VectorIndex
 from hugegraph_llm.models.embeddings.base import BaseEmbedding
@@ -38,6 +39,7 @@ class BuildVectorIndex:
         chunks = context["chunks"]
         chunks_embedding = []
         log.debug("Building vector index for %s chunks...", 
len(context["chunks"]))
+        # TODO: use async_get_texts_embedding instead of single sync method
         for chunk in tqdm(chunks):
             chunks_embedding.append(self.embedding.get_text_embedding(chunk))
         if len(chunks_embedding) > 0:
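
One plausible resolution of the TODO above, assuming the backend's batch API is available, is to replace the per-chunk loop with a single call:

    # Sketch only: one batch request instead of len(chunks) sequential ones.
    chunks_embedding = self.embedding.get_texts_embeddings(chunks)
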
diff --git a/hugegraph-llm/src/tests/indices/test_vector_index.py b/hugegraph-llm/src/tests/indices/test_vector_index.py
index 9fd7361..0f8fd5f 100644
--- a/hugegraph-llm/src/tests/indices/test_vector_index.py
+++ b/hugegraph-llm/src/tests/indices/test_vector_index.py
@@ -18,8 +18,9 @@
 
 import unittest
 from pprint import pprint
-from hugegraph_llm.models.embeddings.ollama import OllamaEmbedding
+
 from hugegraph_llm.indices.vector_index import VectorIndex
+from hugegraph_llm.models.embeddings.ollama import OllamaEmbedding
 
 
 class TestVectorIndex(unittest.TestCase):
