This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph-ai.git


The following commit(s) were added to refs/heads/main by this push:
     new 212d9a5  chore(llm): use 4.1-mini and 0.01 temperature by default 
(#214)
212d9a5 is described below

commit 212d9a58f4cb0fd4439e821858fcd2d2c4980f5a
Author: SoJGooo <[email protected]>
AuthorDate: Wed May 7 15:44:29 2025 +0800

    chore(llm): use 4.1-mini and 0.01 temperature by default (#214)
    
    Co-authored-by: imbajin <[email protected]>
---
 .asf.yaml                                              |  1 -
 hugegraph-llm/src/hugegraph_llm/config/llm_config.py   | 13 +++++++------
 hugegraph-llm/src/hugegraph_llm/models/llms/litellm.py | 14 +++++++-------
 hugegraph-llm/src/hugegraph_llm/models/llms/openai.py  |  6 +++---
 hugegraph-llm/src/hugegraph_llm/models/llms/qianfan.py |  2 +-
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/.asf.yaml b/.asf.yaml
index 9fa40a0..cafdba4 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -54,7 +54,6 @@ github:
   collaborators:
     - ChenZiHong-Gavin
     - MrJs133
-    - vichayturen
     - HJ-Young
     - afterimagex
     - returnToInnocence
diff --git a/hugegraph-llm/src/hugegraph_llm/config/llm_config.py 
b/hugegraph-llm/src/hugegraph_llm/config/llm_config.py
index 5e314bd..a9b4b2d 100644
--- a/hugegraph-llm/src/hugegraph_llm/config/llm_config.py
+++ b/hugegraph-llm/src/hugegraph_llm/config/llm_config.py
@@ -33,13 +33,13 @@ class LLMConfig(BaseConfig):
     # 1. OpenAI settings
     openai_chat_api_base: Optional[str] = os.environ.get("OPENAI_BASE_URL", 
"https://api.openai.com/v1")
     openai_chat_api_key: Optional[str] = os.environ.get("OPENAI_API_KEY")
-    openai_chat_language_model: Optional[str] = "gpt-4o-mini"
+    openai_chat_language_model: Optional[str] = "gpt-4.1-mini"
     openai_extract_api_base: Optional[str] = os.environ.get("OPENAI_BASE_URL", 
"https://api.openai.com/v1")
     openai_extract_api_key: Optional[str] = os.environ.get("OPENAI_API_KEY")
-    openai_extract_language_model: Optional[str] = "gpt-4o-mini"
+    openai_extract_language_model: Optional[str] = "gpt-4.1-mini"
     openai_text2gql_api_base: Optional[str] = 
os.environ.get("OPENAI_BASE_URL", "https://api.openai.com/v1")
     openai_text2gql_api_key: Optional[str] = os.environ.get("OPENAI_API_KEY")
-    openai_text2gql_language_model: Optional[str] = "gpt-4o-mini"
+    openai_text2gql_language_model: Optional[str] = "gpt-4.1-mini"
     openai_embedding_api_base: Optional[str] = 
os.environ.get("OPENAI_EMBEDDING_BASE_URL", "https://api.openai.com/v1")
     openai_embedding_api_key: Optional[str] = 
os.environ.get("OPENAI_EMBEDDING_API_KEY")
     openai_embedding_model: Optional[str] = "text-embedding-3-small"
@@ -64,6 +64,7 @@ class LLMConfig(BaseConfig):
     ollama_embedding_port: Optional[int] = 11434
     ollama_embedding_model: Optional[str] = None
     # 4. QianFan/WenXin settings
+    # TODO: update to one token key mode
     qianfan_chat_api_key: Optional[str] = None
     qianfan_chat_secret_key: Optional[str] = None
     qianfan_chat_access_token: Optional[str] = None
@@ -87,15 +88,15 @@ class LLMConfig(BaseConfig):
     # 5. LiteLLM settings
     litellm_chat_api_key: Optional[str] = None
     litellm_chat_api_base: Optional[str] = None
-    litellm_chat_language_model: Optional[str] = "openai/gpt-4o"
+    litellm_chat_language_model: Optional[str] = "openai/gpt-4.1-mini"
     litellm_chat_tokens: int = 8192
     litellm_extract_api_key: Optional[str] = None
     litellm_extract_api_base: Optional[str] = None
-    litellm_extract_language_model: Optional[str] = "openai/gpt-4o"
+    litellm_extract_language_model: Optional[str] = "openai/gpt-4.1-mini"
     litellm_extract_tokens: int = 256
     litellm_text2gql_api_key: Optional[str] = None
     litellm_text2gql_api_base: Optional[str] = None
-    litellm_text2gql_language_model: Optional[str] = "openai/gpt-4o"
+    litellm_text2gql_language_model: Optional[str] = "openai/gpt-4.1-mini"
     litellm_text2gql_tokens: int = 4096
     litellm_embedding_api_key: Optional[str] = None
     litellm_embedding_api_base: Optional[str] = None
diff --git a/hugegraph-llm/src/hugegraph_llm/models/llms/litellm.py 
b/hugegraph-llm/src/hugegraph_llm/models/llms/litellm.py
index ca5ae60..7627979 100644
--- a/hugegraph-llm/src/hugegraph_llm/models/llms/litellm.py
+++ b/hugegraph-llm/src/hugegraph_llm/models/llms/litellm.py
@@ -38,9 +38,9 @@ class LiteLLMClient(BaseLLM):
         self,
         api_key: Optional[str] = None,
         api_base: Optional[str] = None,
-        model_name: str = "openai/gpt-4o",  # Can be any model supported by 
LiteLLM
-        max_tokens: int = 4096,
-        temperature: float = 0.0,
+        model_name: str = "openai/gpt-4.1-mini",  # Can be any model supported 
by LiteLLM
+        max_tokens: int = 8192,
+        temperature: float = 0.01,
     ) -> None:
         self.api_key = api_key
         self.api_base = api_base
@@ -49,8 +49,8 @@ class LiteLLMClient(BaseLLM):
         self.temperature = temperature
 
     @retry(
-        stop=stop_after_attempt(3),
-        wait=wait_exponential(multiplier=1, min=4, max=10),
+        stop=stop_after_attempt(2),
+        wait=wait_exponential(multiplier=1, min=2, max=5),
         retry=retry_if_exception_type((RateLimitError, BudgetExceededError, 
APIError))
     )
     def generate(
@@ -78,8 +78,8 @@ class LiteLLMClient(BaseLLM):
             return f"Error: {str(e)}"
 
     @retry(
-        stop=stop_after_attempt(3),
-        wait=wait_exponential(multiplier=1, min=4, max=10),
+        stop=stop_after_attempt(2),
+        wait=wait_exponential(multiplier=1, min=2, max=5),
         retry=retry_if_exception_type((RateLimitError, BudgetExceededError, 
APIError))
     )
     async def agenerate(
diff --git a/hugegraph-llm/src/hugegraph_llm/models/llms/openai.py 
b/hugegraph-llm/src/hugegraph_llm/models/llms/openai.py
index 45f6d7a..b185314 100644
--- a/hugegraph-llm/src/hugegraph_llm/models/llms/openai.py
+++ b/hugegraph-llm/src/hugegraph_llm/models/llms/openai.py
@@ -38,9 +38,9 @@ class OpenAIClient(BaseLLM):
         self,
         api_key: Optional[str] = None,
         api_base: Optional[str] = None,
-        model_name: str = "gpt-4o-mini",
-        max_tokens: int = 4096,
-        temperature: float = 0.0,
+        model_name: str = "gpt-4.1-mini",
+        max_tokens: int = 8092,
+        temperature: float = 0.01,
     ) -> None:
         api_key = api_key or ''
         self.client = OpenAI(api_key=api_key, base_url=api_base)
diff --git a/hugegraph-llm/src/hugegraph_llm/models/llms/qianfan.py 
b/hugegraph-llm/src/hugegraph_llm/models/llms/qianfan.py
index cbca691..2d306ac 100644
--- a/hugegraph-llm/src/hugegraph_llm/models/llms/qianfan.py
+++ b/hugegraph-llm/src/hugegraph_llm/models/llms/qianfan.py
@@ -27,7 +27,7 @@ from hugegraph_llm.utils.log import log
 
 
 class QianfanClient(BaseLLM):
-    def __init__(self, model_name: Optional[str] = "ERNIE-4.0-Turbo-8K",
+    def __init__(self, model_name: Optional[str] = "ernie-4.5-8k-preview",
                  api_key: Optional[str] = None, secret_key: Optional[str] = 
None):
         qianfan.get_config().AK = api_key or llm_settings.qianfan_chat_api_key
         qianfan.get_config().SK = secret_key or 
llm_settings.qianfan_chat_secret_key

Reply via email to