This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph-ai.git


The following commit(s) were added to refs/heads/main by this push:
     new 71b6261  fix(lint): critical bug with pylint usage (#131)
71b6261 is described below

commit 71b62614843fc0f8b04cb6178b5357f40ac792d3
Author: SoJGooo <[email protected]>
AuthorDate: Mon Dec 9 17:29:49 2024 +0800

    fix(lint): critical bug with pylint usage (#131)
    
    The previous pylint configuration had issues that prevented pylint from being triggered properly.
    
    This PR modifies the pylint configuration to fix the lingering issues and resolves the residual pylint warnings in the ML and LLM modules.
    
    Details are as follows:
    - fixed incubator-hugegraph-ai/style/code_format_and_analysis.sh
    - fixed incubator-hugegraph-ai/hugegraph-ml/src/hugegraph_ml
    - fixed incubator-hugegraph-ai/hugegraph-llm/src/hugegraph_llm
    
    
    ---------
    
    Co-authored-by: root <[email protected]>
    Co-authored-by: imbajin <[email protected]>
---
 .github/workflows/pylint.yml                       |   3 +
 hugegraph-llm/src/hugegraph_llm/api/admin_api.py   |  11 +-
 .../src/hugegraph_llm/config/config_data.py        |   2 +-
 .../src/hugegraph_llm/demo/rag_demo/admin_block.py |  45 ++++-----
 .../src/hugegraph_llm/demo/rag_demo/app.py         |   6 +-
 .../hugegraph_llm/demo/rag_demo/configs_block.py   | 111 +++++++++++----------
 .../src/hugegraph_llm/demo/rag_demo/rag_block.py   |   6 +-
 .../demo/rag_demo/vector_graph_block.py            |   1 +
 .../operators/hugegraph_op/commit_to_hugegraph.py  |   2 +
 .../operators/llm_op/keyword_extract.py            |   4 +-
 hugegraph-ml/requirements.txt                      |   4 +
 hugegraph-ml/src/hugegraph_ml/models/agnn.py       |   1 -
 hugegraph-ml/src/hugegraph_ml/models/arma.py       |   4 +-
 hugegraph-ml/src/hugegraph_ml/models/bgnn.py       |  11 +-
 hugegraph-ml/src/hugegraph_ml/models/bgrl.py       |  10 +-
 .../src/hugegraph_ml/models/cluster_gcn.py         |   2 +-
 hugegraph-ml/src/hugegraph_ml/models/dagnn.py      |   4 +-
 hugegraph-ml/src/hugegraph_ml/models/gatne.py      |   4 +-
 hugegraph-ml/src/hugegraph_ml/models/pgnn.py       |   8 +-
 hugegraph-ml/src/hugegraph_ml/models/seal.py       |   8 +-
 .../hugegraph_ml/tasks/fraud_detector_caregnn.py   |   1 -
 .../src/hugegraph_ml/tasks/link_prediction_seal.py |   4 +-
 .../tasks/node_classify_with_sample.py             |   9 +-
 .../src/hugegraph_ml/utils/dgl2hugegraph_utils.py  |   2 +-
 .../src/tests/test_examples/test_examples.py       |   7 --
 style/code_format_and_analysis.sh                  |   4 +-
 style/pylint.conf                                  |   1 +
 27 files changed, 140 insertions(+), 135 deletions(-)

diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
index 592f4bd..b864eb9 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -25,6 +25,9 @@ jobs:
         pip install -r ./hugegraph-llm/requirements.txt
         pip install -r ./hugegraph-ml/requirements.txt
         pip install -r ./hugegraph-python-client/requirements.txt
+    - name: Check DGL version
+      run: |
+        python -c "import dgl; print(dgl.__version__)"
     - name: Analysing the code with pylint
       run: |
         bash ./style/code_format_and_analysis.sh -p
diff --git a/hugegraph-llm/src/hugegraph_llm/api/admin_api.py b/hugegraph-llm/src/hugegraph_llm/api/admin_api.py
index c7a6524..21fbf5f 100644
--- a/hugegraph-llm/src/hugegraph_llm/api/admin_api.py
+++ b/hugegraph-llm/src/hugegraph_llm/api/admin_api.py
@@ -23,14 +23,13 @@ from hugegraph_llm.api.exceptions.rag_exceptions import generate_response
 from hugegraph_llm.api.models.rag_requests import LogStreamRequest
 from hugegraph_llm.api.models.rag_response import RAGResponse
 
-
+# FIXME: line 31: E0702: Raising dict while only classes or instances are allowed (raising-bad-type)
 def admin_http_api(router: APIRouter, log_stream):
     @router.post("/logs", status_code=status.HTTP_200_OK)
     async def log_stream_api(req: LogStreamRequest):
         if os.getenv('ADMIN_TOKEN') != req.admin_token:
-            raise generate_response(RAGResponse(status_code=status.HTTP_403_FORBIDDEN, message="Invalid admin_token"))
-        else:
-            log_path = os.path.join("logs", req.log_file)
+            raise generate_response(RAGResponse(status_code=status.HTTP_403_FORBIDDEN, message="Invalid admin_token")) #pylint: disable=E0702
+        log_path = os.path.join("logs", req.log_file)
 
-            # Create a StreamingResponse that reads from the log stream generator
-            return StreamingResponse(log_stream(log_path), media_type="text/plain")
+        # Create a StreamingResponse that reads from the log stream generator
+        return StreamingResponse(log_stream(log_path), media_type="text/plain")
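
For context, E0702 (raising-bad-type) fires because `raise` only accepts classes or instances derived from BaseException; raising whatever generate_response() returns would end in a TypeError at runtime, which is why the commit leaves a FIXME and silences the check inline rather than restructuring the API. A minimal sketch of the rule, with hypothetical names:

    class ApiError(Exception):
        """The only kind of object `raise` accepts is a BaseException subclass."""

    def bad(resp: dict):
        raise resp  # E0702: raising a dict -> TypeError at runtime

    def good(resp: dict):
        raise ApiError(resp.get("message", "error"))  # fine: raises an Exception

    try:
        good({"message": "Invalid admin_token"})
    except ApiError as exc:
        print(exc)  # -> Invalid admin_token
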
diff --git a/hugegraph-llm/src/hugegraph_llm/config/config_data.py b/hugegraph-llm/src/hugegraph_llm/config/config_data.py
index 78ff0cf..57ffe78 100644
--- a/hugegraph-llm/src/hugegraph_llm/config/config_data.py
+++ b/hugegraph-llm/src/hugegraph_llm/config/config_data.py
@@ -246,7 +246,7 @@ KEYWORDS:关键词1,关键词2,...,关键词n
 文本:
 {question}
 """
-
+#pylint: disable=C0301
     # keywords_extract_prompt_EN = """
 # Instruction:
 # Please perform the following tasks on the text below:
diff --git a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/admin_block.py b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/admin_block.py
index d8be1fe..02b90dd 100644
--- a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/admin_block.py
+++ b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/admin_block.py
@@ -30,7 +30,7 @@ async def log_stream(log_path: str, lines: int = 125):
     Stream the content of a log file like `tail -f`.
     """
     try:
-        with open(log_path, 'r') as file:
+        with open(log_path, 'r', encoding='utf-8') as file:
             buffer = deque(file, maxlen=lines)
             for line in buffer:
                 yield line  # Yield the initial lines
@@ -40,20 +40,20 @@ async def log_stream(log_path: str, lines: int = 125):
                     yield line
                 else:
                     await asyncio.sleep(0.1)  # Non-blocking sleep
-    except FileNotFoundError:
-        raise Exception(f"Log file not found: {log_path}")
+    except FileNotFoundError as exc:
+        raise Exception(f"Log file not found: {log_path}") from exc
     except Exception as e:
-        raise Exception(f"An error occurred while reading the log: {str(e)}")
+        raise Exception(f"An error occurred while reading the log: {str(e)}") from e
 
 
 # Functions to read each log file
 def read_llm_server_log(lines=250):
     log_path = "logs/llm-server.log"
     try:
-        with open(log_path, "r") as f:
+        with open(log_path, "r", encoding='utf-8') as f:
             return ''.join(deque(f, maxlen=lines))
     except FileNotFoundError:
-        log.critical(f"Log file not found: {log_path}")
+        log.critical("Log file not found: %s", log_path)
         return "LLM Server log file not found."
 
 
@@ -61,11 +61,11 @@ def read_llm_server_log(lines=250):
 def clear_llm_server_log():
     log_path = "logs/llm-server.log"
     try:
-        with open(log_path, "w") as f:
+        with open(log_path, "w", encoding='utf-8') as f:
             f.truncate(0)  # Clear the contents of the file
         return "LLM Server log cleared."
-    except Exception as e:
-        log.error(f"An error occurred while clearing the log: {str(e)}")
+    except Exception as e: #pylint: disable=W0718
+        log.error("An error occurred while clearing the log: %s", str(e))
         return "Failed to clear LLM Server log."
 
 
@@ -78,7 +78,7 @@ def check_password(password, request: Request = None):
         # Return logs and update visibility
         llm_log = read_llm_server_log()
         # Log the successful access with the IP address
-        log.info(f"Logs accessed successfully from IP: {client_ip}")
+        log.info("Logs accessed successfully from IP: %s", client_ip)
         return (
             llm_log,
             gr.update(visible=True),
@@ -86,16 +86,15 @@ def check_password(password, request: Request = None):
             gr.update(visible=True),
             gr.update(visible=False)
         )
-    else:
-        # Log the failed attempt with IP address
-        log.error(f"Incorrect password attempt from IP: {client_ip}")
-        return (
-            "",
-            gr.update(visible=False),
-            gr.update(visible=False),
-            gr.update(visible=False),
-            gr.update(value="Incorrect password. Access denied.", visible=True)
-        )
+    # Log the failed attempt with IP address
+    log.error("Incorrect password attempt from IP: %s", client_ip)
+    return (
+        "",
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(value="Incorrect password. Access denied.", visible=True)
+    )
 
 
 def create_admin_block():
@@ -141,7 +140,7 @@ def create_admin_block():
                                                               variant="primary")
 
         # Define what happens when the password is submitted
-        submit_button.click(
+        submit_button.click( #pylint: disable=E1101
             fn=check_password,
             inputs=[password_input],
             outputs=[llm_server_log_output, hidden_row, clear_llm_server_button,
@@ -149,14 +148,14 @@ def create_admin_block():
         )
 
         # Define what happens when the Clear LLM Server Log button is clicked
-        clear_llm_server_button.click(
+        clear_llm_server_button.click( #pylint: disable=E1101
             fn=clear_llm_server_log,
             inputs=[],
             outputs=[llm_server_log_output],
         )
 
         # Define what happens when the Refresh LLM Server Log button is clicked
-        refresh_llm_server_button.click(
+        refresh_llm_server_button.click( #pylint: disable=E1101
             fn=read_llm_server_log,
             inputs=[],
             outputs=[llm_server_log_output],
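
The admin_block changes bundle three recurring pylint fixes: explicit file encodings (W1514), exception chaining with `from` (W0707), and lazy %-style logging arguments instead of f-strings (W1203). A minimal sketch of all three, assuming a plain `logging` logger in place of the project's `log` wrapper:

    import logging

    log = logging.getLogger(__name__)

    def read_log(path: str) -> str:
        try:
            # W1514: name the encoding explicitly instead of relying on the locale
            with open(path, "r", encoding="utf-8") as f:
                return f.read()
        except FileNotFoundError as exc:
            # W0707: chain with `from exc` so the original traceback is kept
            raise RuntimeError(f"Log file not found: {path}") from exc

    # W1203: pass arguments lazily; the string is only built if the level is enabled
    log.error("Log file not found: %s", "logs/llm-server.log")
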
diff --git a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/app.py b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/app.py
index 5604dbd..da6a437 100644
--- a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/app.py
+++ b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/app.py
@@ -55,7 +55,7 @@ def authenticate(credentials: HTTPAuthorizationCredentials = Depends(sec)):
             headers={"WWW-Authenticate": "Bearer"},
         )
 
-
+# pylint: disable=C0301
 def init_rag_ui() -> gr.Interface:
     with gr.Blocks(
             theme="default",
@@ -67,7 +67,7 @@ def init_rag_ui() -> gr.Interface:
         """
         TODO: leave a general idea of the unresolved part
         graph_config_input = textbox_array_graph_config
-         = [settings.graph_ip, settings.graph_port, settings.graph_name, graph_user, settings.graph_pwd, settings.graph_space]
+         = [settings.graph_ip, settings.graph_port, settings.graph_name, graph_user, settings.graph_pwd, settings.graph_space] 
         
         llm_config_input = textbox_array_llm_config
          = if settings.llm_type == openai [settings.openai_api_key, settings.openai_api_base, settings.openai_language_model, settings.openai_max_tokens]
@@ -107,7 +107,7 @@ def init_rag_ui() -> gr.Interface:
                 prompt.default_question, prompt.answer_prompt, prompt.keywords_extract_prompt
             )
 
-        hugegraph_llm_ui.load(fn=refresh_ui_config_prompt, outputs=[
+        hugegraph_llm_ui.load(fn=refresh_ui_config_prompt, outputs=[ #pylint: disable=E1101
             textbox_array_graph_config[0],
             textbox_array_graph_config[1],
             textbox_array_graph_config[2],
diff --git a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/configs_block.py b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/configs_block.py
index c35a33f..6a65301 100644
--- a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/configs_block.py
+++ b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/configs_block.py
@@ -18,6 +18,7 @@
 import json
 import os
 from typing import Optional
+from functools import partial
 
 import gradio as gr
 import requests
@@ -25,7 +26,6 @@ from requests.auth import HTTPBasicAuth
 
 from hugegraph_llm.config import settings
 from hugegraph_llm.utils.log import log
-from functools import partial
 
 current_llm = "chat"
 
@@ -161,20 +161,20 @@ def apply_graph_config(ip, port, name, user, pwd, gs, origin_call=None) -> int:
 
 
 # Different llm models have different parameters, so no meaningful argument names are given here
-def apply_llm_config(current_llm, arg1, arg2, arg3, arg4, origin_call=None) -> int:
-    log.debug("current llm in apply_llm_config is %s", current_llm)
-    llm_option = getattr(settings, f"{current_llm}_llm_type")
+def apply_llm_config(current_llm_config, arg1, arg2, arg3, arg4, origin_call=None) -> int:
+    log.debug("current llm in apply_llm_config is %s", current_llm_config)
+    llm_option = getattr(settings, f"{current_llm_config}_llm_type")
     log.debug("llm option in apply_llm_config is %s", llm_option)
     status_code = -1
-    
+
     if llm_option == "openai":
-        setattr(settings, f"openai_{current_llm}_api_key", arg1)
-        setattr(settings, f"openai_{current_llm}_api_base", arg2)
-        setattr(settings, f"openai_{current_llm}_language_model", arg3)
-        setattr(settings, f"openai_{current_llm}_tokens", int(arg4))
-        
-        test_url = getattr(settings, f"openai_{current_llm}_api_base") + "/chat/completions"
-        log.debug(f"Type of openai {current_llm} max token is %s", type(arg4))
+        setattr(settings, f"openai_{current_llm_config}_api_key", arg1)
+        setattr(settings, f"openai_{current_llm_config}_api_base", arg2)
+        setattr(settings, f"openai_{current_llm_config}_language_model", arg3)
+        setattr(settings, f"openai_{current_llm_config}_tokens", int(arg4))
+
+        test_url = getattr(settings, f"openai_{current_llm_config}_api_base") + "/chat/completions"
+        log.debug(f"Type of openai {current_llm_config} max token is %s", type(arg4))
         data = {
             "model": arg3,
             "temperature": 0.0,
@@ -182,23 +182,24 @@ def apply_llm_config(current_llm, arg1, arg2, arg3, arg4, origin_call=None) -> i
         }
         headers = {"Authorization": f"Bearer {arg1}"}
         status_code = test_api_connection(test_url, method="POST", headers=headers, body=data, origin_call=origin_call)
-    
+
     elif llm_option == "qianfan_wenxin":
-        status_code = config_qianfan_model(arg1, arg2, arg3, settings_prefix=current_llm, origin_call=origin_call)
-    
+        status_code = config_qianfan_model(arg1, arg2, arg3, settings_prefix=current_llm_config, origin_call=origin_call) #pylint: disable=C0301
+
     elif llm_option == "ollama/local":
-        setattr(settings, f"ollama_{current_llm}_host", arg1)
-        setattr(settings, f"ollama_{current_llm}_port", int(arg2))
-        setattr(settings, f"ollama_{current_llm}_language_model", arg3)
+        setattr(settings, f"ollama_{current_llm_config}_host", arg1)
+        setattr(settings, f"ollama_{current_llm_config}_port", int(arg2))
+        setattr(settings, f"ollama_{current_llm_config}_language_model", arg3)
         status_code = test_api_connection(f"http://{arg1}:{arg2}", origin_call=origin_call)
 
     gr.Info("Configured!")
     settings.update_env()
-    
+
     return status_code
 
 
 # TODO: refactor the function to reduce the number of statements & separate the logic
+#pylint: disable=C0301
 def create_configs_block() -> list:
     # pylint: disable=R0915 (too-many-statements)
     with gr.Accordion("1. Set up the HugeGraph server.", open=False):
@@ -219,7 +220,7 @@ def create_configs_block() -> list:
         gr.Markdown("> Tips: the openai option also support openai style api from other providers.")
         with gr.Tab(label='chat'):
             chat_llm_dropdown = gr.Dropdown(choices=["openai", "qianfan_wenxin", "ollama/local"],
-                            value=getattr(settings, f"chat_llm_type"), label=f"type")
+                            value=getattr(settings, "chat_llm_type"), label="type")
             apply_llm_config_with_chat_op = partial(apply_llm_config, "chat")
             @gr.render(inputs=[chat_llm_dropdown])
             def chat_llm_settings(llm_type):
@@ -227,33 +228,33 @@ def create_configs_block() -> list:
                 llm_config_input = []
                 if llm_type == "openai":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"openai_chat_api_key"), label="api_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"openai_chat_api_base"), label="api_base"),
-                        gr.Textbox(value=getattr(settings, f"openai_chat_language_model"), label="model_name"),
-                        gr.Textbox(value=getattr(settings, f"openai_chat_tokens"), label="max_token"),
+                        gr.Textbox(value=getattr(settings, "openai_chat_api_key"), label="api_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "openai_chat_api_base"), label="api_base"),
+                        gr.Textbox(value=getattr(settings, "openai_chat_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "openai_chat_tokens"), label="max_token"),
                 ]
                 elif llm_type == "ollama/local":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"ollama_chat_host"), label="host"),
-                        gr.Textbox(value=str(getattr(settings, f"ollama_chat_port")), label="port"),
-                        gr.Textbox(value=getattr(settings, f"ollama_chat_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "ollama_chat_host"), label="host"),
+                        gr.Textbox(value=str(getattr(settings, "ollama_chat_port")), label="port"),
+                        gr.Textbox(value=getattr(settings, "ollama_chat_language_model"), label="model_name"),
                         gr.Textbox(value="", visible=False),
                     ]
                 elif llm_type == "qianfan_wenxin":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"qianfan_chat_api_key"), label="api_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"qianfan_chat_secret_key"), label="secret_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"qianfan_chat_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "qianfan_chat_api_key"), label="api_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "qianfan_chat_secret_key"), label="secret_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "qianfan_chat_language_model"), label="model_name"),
                         gr.Textbox(value="", visible=False),
                     ]
                 else:
                     llm_config_input = [gr.Textbox(value="", visible=False) for _ in range(4)]
                 llm_config_button = gr.Button("Apply configuration")
-                llm_config_button.click(apply_llm_config_with_chat_op, inputs=llm_config_input)
+                llm_config_button.click(apply_llm_config_with_chat_op, inputs=llm_config_input) #pylint: disable=E1101
 
         with gr.Tab(label='mini_tasks'):
             extract_llm_dropdown = gr.Dropdown(choices=["openai", "qianfan_wenxin", "ollama/local"],
-                        value=getattr(settings, f"extract_llm_type"), label=f"type")
+                        value=getattr(settings, "extract_llm_type"), label="type")
             apply_llm_config_with_extract_op = partial(apply_llm_config, "extract")
 
             @gr.render(inputs=[extract_llm_dropdown])
@@ -262,32 +263,32 @@ def create_configs_block() -> list:
                 llm_config_input = []
                 if llm_type == "openai":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"openai_extract_api_key"), label="api_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"openai_extract_api_base"), label="api_base"),
-                        gr.Textbox(value=getattr(settings, f"openai_extract_language_model"), label="model_name"),
-                        gr.Textbox(value=getattr(settings, f"openai_extract_tokens"), label="max_token"),
+                        gr.Textbox(value=getattr(settings, "openai_extract_api_key"), label="api_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "openai_extract_api_base"), label="api_base"),
+                        gr.Textbox(value=getattr(settings, "openai_extract_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "openai_extract_tokens"), label="max_token"),
                 ]
                 elif llm_type == "ollama/local":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"ollama_extract_host"), label="host"),
-                        gr.Textbox(value=str(getattr(settings, f"ollama_extract_port")), label="port"),
-                        gr.Textbox(value=getattr(settings, f"ollama_extract_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "ollama_extract_host"), label="host"),
+                        gr.Textbox(value=str(getattr(settings, "ollama_extract_port")), label="port"),
+                        gr.Textbox(value=getattr(settings, "ollama_extract_language_model"), label="model_name"),
                         gr.Textbox(value="", visible=False),
                     ]
                 elif llm_type == "qianfan_wenxin":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"qianfan_extract_api_key"), label="api_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"qianfan_extract_secret_key"), label="secret_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"qianfan_extract_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "qianfan_extract_api_key"), label="api_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "qianfan_extract_secret_key"), label="secret_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "qianfan_extract_language_model"), label="model_name"),
                         gr.Textbox(value="", visible=False),
                     ]
                 else:
                     llm_config_input = [gr.Textbox(value="", visible=False) for _ in range(4)]
                 llm_config_button = gr.Button("Apply configuration")
-                llm_config_button.click(apply_llm_config_with_extract_op, inputs=llm_config_input)
+                llm_config_button.click(apply_llm_config_with_extract_op, inputs=llm_config_input) #pylint: disable=E1101
         with gr.Tab(label='text2gql'):
             text2gql_llm_dropdown = gr.Dropdown(choices=["openai", "qianfan_wenxin", "ollama/local"],
-                            value=getattr(settings, f"text2gql_llm_type"), label="type")
+                            value=getattr(settings, "text2gql_llm_type"), label="type")
             apply_llm_config_with_text2gql_op = partial(apply_llm_config, "text2gql")
 
             @gr.render(inputs=[text2gql_llm_dropdown])
@@ -296,29 +297,29 @@ def create_configs_block() -> list:
                 llm_config_input = []
                 if llm_type == "openai":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"openai_text2gql_api_key"), label="api_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"openai_text2gql_api_base"), label="api_base"),
-                        gr.Textbox(value=getattr(settings, f"openai_text2gql_language_model"), label="model_name"),
-                        gr.Textbox(value=getattr(settings, f"openai_text2gql_tokens"), label="max_token"),
+                        gr.Textbox(value=getattr(settings, "openai_text2gql_api_key"), label="api_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "openai_text2gql_api_base"), label="api_base"),
+                        gr.Textbox(value=getattr(settings, "openai_text2gql_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "openai_text2gql_tokens"), label="max_token"),
                     ]
                 elif llm_type == "ollama/local":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"ollama_text2gql_host"), label="host"),
-                        gr.Textbox(value=str(getattr(settings, f"ollama_text2gql_port")), label="port"),
-                        gr.Textbox(value=getattr(settings, f"ollama_text2gql_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "ollama_text2gql_host"), label="host"),
+                        gr.Textbox(value=str(getattr(settings, "ollama_text2gql_port")), label="port"),
+                        gr.Textbox(value=getattr(settings, "ollama_text2gql_language_model"), label="model_name"),
                         gr.Textbox(value="", visible=False),
                     ]
                 elif llm_type == "qianfan_wenxin":
                     llm_config_input = [
-                        gr.Textbox(value=getattr(settings, f"qianfan_text2gql_api_key"), label="api_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"qianfan_text2gql_secret_key"), label="secret_key", type="password"),
-                        gr.Textbox(value=getattr(settings, f"qianfan_text2gql_language_model"), label="model_name"),
+                        gr.Textbox(value=getattr(settings, "qianfan_text2gql_api_key"), label="api_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "qianfan_text2gql_secret_key"), label="secret_key", type="password"),
+                        gr.Textbox(value=getattr(settings, "qianfan_text2gql_language_model"), label="model_name"),
                         gr.Textbox(value="", visible=False),
                     ]
                 else:
                     llm_config_input = [gr.Textbox(value="", visible=False) for _ in range(4)]
                 llm_config_button = gr.Button("Apply configuration")
-                llm_config_button.click(apply_llm_config_with_text2gql_op, inputs=llm_config_input)
+                llm_config_button.click(apply_llm_config_with_text2gql_op, inputs=llm_config_input) #pylint: disable=E1101
 
 
     with gr.Accordion("3. Set up the Embedding.", open=False):
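
Three patterns recur throughout the configs_block changes: the `current_llm` parameter becomes `current_llm_config` (presumably to stop shadowing the module-level `current_llm`, W0621), f-strings with no placeholders such as f"type" become plain strings (W1309), and the dynamically attached Gradio event methods like `.click()` get inline E1101 (no-member) disables because static analysis cannot see them. The relocated `partial` import simply pre-binds the tab name; a rough sketch with a stand-in function:

    from functools import partial

    # hypothetical stand-in for the real apply_llm_config
    def apply_llm_config(current_llm_config, arg1, arg2, arg3, arg4, origin_call=None):
        print(current_llm_config, arg1, arg2, arg3, arg4)

    # bind the first argument so each tab hands Gradio a 4-argument click handler
    apply_llm_config_with_chat_op = partial(apply_llm_config, "chat")
    apply_llm_config_with_chat_op("key", "base", "model-name", 4096)
    # -> chat key base model-name 4096
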
diff --git a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/rag_block.py b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/rag_block.py
index 506e19d..761773e 100644
--- a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/rag_block.py
+++ b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/rag_block.py
@@ -50,7 +50,11 @@ def rag_answer(
     4. Synthesize the final answer.
     5. Run the pipeline and return the results.
     """
-    should_update_prompt = prompt.default_question != text or prompt.answer_prompt != answer_prompt or prompt.keywords_extract_prompt != keywords_extract_prompt
+    should_update_prompt = (
+        prompt.default_question != text or
+        prompt.answer_prompt != answer_prompt or
+        prompt.keywords_extract_prompt != keywords_extract_prompt
+    )
     if should_update_prompt or prompt.custom_rerank_info != custom_related_information:
         prompt.custom_rerank_info = custom_related_information
         prompt.default_question = text
diff --git a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/vector_graph_block.py b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/vector_graph_block.py
index b8e087c..d28e60a 100644
--- a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/vector_graph_block.py
+++ b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/vector_graph_block.py
@@ -42,6 +42,7 @@ def store_prompt(schema, example_prompt):
 
 def create_vector_graph_block():
     # pylint: disable=no-member
+    # pylint: disable=C0301
     gr.Markdown(
         """## Build Vector/Graph Index & Extract Knowledge Graph
 - Docs:
diff --git a/hugegraph-llm/src/hugegraph_llm/operators/hugegraph_op/commit_to_hugegraph.py b/hugegraph-llm/src/hugegraph_llm/operators/hugegraph_op/commit_to_hugegraph.py
index 1e44786..39798f6 100644
--- a/hugegraph-llm/src/hugegraph_llm/operators/hugegraph_op/commit_to_hugegraph.py
+++ b/hugegraph-llm/src/hugegraph_llm/operators/hugegraph_op/commit_to_hugegraph.py
@@ -72,8 +72,10 @@ class Commit2Graph:
             return func(*args, **kwargs)
         except NotFoundError as e:
             log.error(e)
+            return None
         except CreateError as e:
             log.error("Error on creating: %s, %s", args, e)
+            return None
 
     def load_into_graph(self, vertices, edges, schema):  # pylint: disable=too-many-statements
         # pylint: disable=R0912 (too-many-branches)
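
The two added `return None` lines presumably address R1710 (inconsistent-return-statements): once the `try` branch returns a value, pylint wants every path to return explicitly. A minimal sketch with a hypothetical wrapper:

    def safe_call(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ValueError as e:
            print("error:", e)
            return None  # explicit None keeps all return paths consistent

    print(safe_call(int, "42"))    # -> 42
    print(safe_call(int, "oops"))  # -> error message, then None
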
diff --git a/hugegraph-llm/src/hugegraph_llm/operators/llm_op/keyword_extract.py b/hugegraph-llm/src/hugegraph_llm/operators/llm_op/keyword_extract.py
index 85fe995..b1e3c7d 100644
--- a/hugegraph-llm/src/hugegraph_llm/operators/llm_op/keyword_extract.py
+++ b/hugegraph-llm/src/hugegraph_llm/operators/llm_op/keyword_extract.py
@@ -58,9 +58,9 @@ class KeywordExtract:
         self._language = context.get("language", self._language).lower()
         self._max_keywords = context.get("max_keywords", self._max_keywords)
 
-        prompt = f"{self._extract_template.format(question=self._query, max_keywords=self._max_keywords)}"
+        prompt_run = f"{self._extract_template.format(question=self._query, max_keywords=self._max_keywords)}"
         start_time = time.perf_counter()
-        response = self._llm.generate(prompt=prompt)
+        response = self._llm.generate(prompt=prompt_run)
         end_time = time.perf_counter()
         log.debug("Keyword extraction time: %.2f seconds", end_time - start_time)
 
diff --git a/hugegraph-ml/requirements.txt b/hugegraph-ml/requirements.txt
index ab4335c..1a2aa69 100644
--- a/hugegraph-ml/requirements.txt
+++ b/hugegraph-ml/requirements.txt
@@ -1,4 +1,8 @@
 dgl~=2.1.0
+ogb~=1.3.6
+pandas~=2.2.3
+catboost~=1.2.3
+category_encoders~=2.6.3
 numpy~=1.24.4
 torch==2.2.0
 tqdm~=4.66.5
diff --git a/hugegraph-ml/src/hugegraph_ml/models/agnn.py b/hugegraph-ml/src/hugegraph_ml/models/agnn.py
index e693ddf..c83058f 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/agnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/agnn.py
@@ -31,7 +31,6 @@ from dgl.nn.pytorch.conv import AGNNConv
 from torch import nn
 import torch.nn.functional as F
 
-
 class AGNN(nn.Module):
     def __init__(self, num_layers, in_dim, hid_dim, out_dim, dropout):
         super().__init__()
diff --git a/hugegraph-ml/src/hugegraph_ml/models/arma.py b/hugegraph-ml/src/hugegraph_ml/models/arma.py
index dd8dc0f..5e4980f 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/arma.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/arma.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.right (c) 2024 by jinsong, All Rights Reserved.
 
-# pylint: disable=E1101,C0103
+# pylint: disable=C0103
 
 """
 auto-regressive moving average (ARMA)
@@ -105,7 +105,7 @@ class ARMAConv(nn.Module):
                 for t in range(self.T):
                     feats = feats * norm
                     g.ndata["h"] = feats
-                    g.update_all(fn.copy_u("h", "m"), fn.sum("m", "h"))
+                    g.update_all(fn.copy_u("h", "m"), fn.sum("m", "h")) # pylint: disable=E1101
                     feats = g.ndata.pop("h")
                     feats = feats * norm
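
Several of the ML models (arma, dagnn, gatne, pgnn) swap a file-wide E1101 disable for per-line ones on `dgl.function` calls. E1101 (no-member) is a false positive here, presumably because DGL builds its message/reduce helpers dynamically, so pylint's static analysis cannot find them even though they exist at runtime. A sketch, assuming dgl is installed:

    import dgl.function as fn

    # both helpers exist at runtime but are generated dynamically,
    # so pylint reports no-member unless silenced on the line
    msg = fn.copy_u("h", "m")  # pylint: disable=E1101
    red = fn.sum("m", "h")     # pylint: disable=E1101
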
 
diff --git a/hugegraph-ml/src/hugegraph_ml/models/bgnn.py b/hugegraph-ml/src/hugegraph_ml/models/bgnn.py
index 8015af8..51689ef 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/bgnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/bgnn.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1102,E0401,E0711,E0606,E0602,C0103,C0206,W0612,C0209,R1705,C0200,R1735,W0201
+# pylint: disable=C0103,C0206,W0612,C0209,R1705,C0200,R1735,W0201
 
 """
 Boost-GNN (BGNN)
@@ -106,7 +106,7 @@ class BGNNPredictor:
         gbdt_alpha=1,
         random_seed=0,
     ):
-        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        self.device = torch.device("cpu")
 
         self.model = gnn_model.to(self.device)
         self.task = task
@@ -239,7 +239,7 @@ class BGNNPredictor:
             elif self.task == "classification":
                 loss = F.cross_entropy(pred, y.long())
             else:
-                raise NotImplemented(
+                raise NotImplementedError(
                     "Unknown task. Supported tasks: classification, regression."
                 )
 
@@ -335,7 +335,9 @@ class BGNNPredictor:
         train_metric, val_metric, test_metric = metrics[metric_name][-1]
         if epoch and epoch % logging_epochs == 0:
             pbar.set_description(
-                f"Epoch {epoch:05d} | Loss {loss:.3f} | Loss {train_metric:.3f}/{val_metric:.3f}/{test_metric:.3f} | Time {epoch_time:.4f}"
+                f"Epoch {epoch:05d} | Loss {loss:.3f} | \\"
+                f"Loss {train_metric:.3f}/{val_metric:.3f}/{test_metric:.3f} \\"
+                f" | Time {epoch_time:.4f}"
             )
 
     def fit(
@@ -617,6 +619,7 @@ class GNNModelDGL(torch.nn.Module):
 
     def forward(self, graph, features):
         h = features
+        logits = None
         if self.use_mlp:
             if self.join_with_mlp:
                 h = torch.cat((h, self.mlp(features)), 1)
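
The `raise NotImplemented(...)` fix in bgnn is worth a note: `NotImplemented` is the sentinel constant that binary dunder methods return, not an exception, so raising it is E0711 and fails with a TypeError at runtime. `NotImplementedError` is the exception class intended here:

    def run_task(task: str) -> None:
        if task not in ("classification", "regression"):
            # NotImplementedError is raisable; NotImplemented is not (E0711)
            raise NotImplementedError(
                "Unknown task. Supported tasks: classification, regression."
            )

    run_task("classification")  # passes; any other task name raises
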
diff --git a/hugegraph-ml/src/hugegraph_ml/models/bgrl.py b/hugegraph-ml/src/hugegraph_ml/models/bgrl.py
index 60b01a0..40c682a 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/bgrl.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/bgrl.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.right (c) 2024 by jinsong, All Rights Reserved.
 
-# pylint: disable=E1102,C0103,R1705.R1734
+# pylint: disable=C0103,R1705.R1734
 
 """
 Bootstrapped Graph Latents (BGRL)
@@ -159,11 +159,11 @@ class BGRL(nn.Module):
             target_y2 = self.target_encoder(online_x, online_feats).detach()
         loss = (
             2
-            - cosine_similarity(online_q1, target_y1.detach(), dim=-1).mean()
-            - cosine_similarity(online_q2, target_y2.detach(), dim=-1).mean()
+            - cosine_similarity(online_q1, target_y1.detach(), dim=-1).mean() # pylint: disable=E1102
+            - cosine_similarity(online_q2, target_y2.detach(), dim=-1).mean() # pylint: disable=E1102
         )
         return loss
-    
+
     def get_embedding(self, graph, feats):
         """
         Get the node embeddings from the encoder without computing gradients.
@@ -252,4 +252,4 @@ def get_graph_drop_transform(drop_edge_p, feat_mask_p):
     if feat_mask_p > 0.0:
         transforms.append(FeatMask(feat_mask_p, node_feat_names=["feat"]))
 
-    return Compose(transforms)
\ No newline at end of file
+    return Compose(transforms)
diff --git a/hugegraph-ml/src/hugegraph_ml/models/cluster_gcn.py b/hugegraph-ml/src/hugegraph_ml/models/cluster_gcn.py
index 231e2bb..6bc078a 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/cluster_gcn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/cluster_gcn.py
@@ -15,7 +15,6 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1101,E0401
 
 """
 Cluster-GCN
@@ -33,6 +32,7 @@ import torch.nn.functional as F
 import dgl.nn as dglnn
 
 class SAGE(nn.Module):
+    # pylint: disable=E1101
     def __init__(self, in_feats, n_hidden, n_classes):
         super().__init__()
         self.layers = nn.ModuleList()
diff --git a/hugegraph-ml/src/hugegraph_ml/models/dagnn.py b/hugegraph-ml/src/hugegraph_ml/models/dagnn.py
index 77cb1ce..c455cc1 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/dagnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/dagnn.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1101,C0103
+# pylint: disable=C0103
 
 """
 Deep Adaptive Graph Neural Network (DAGNN)
@@ -58,7 +58,7 @@ class DAGNNConv(nn.Module):
             for _ in range(self.k):
                 feats = feats * norm
                 graph.ndata["h"] = feats
-                graph.update_all(fn.copy_u("h", "m"), fn.sum("m", "h"))
+                graph.update_all(fn.copy_u("h", "m"), fn.sum("m", "h")) # pylint: disable=E1101
                 feats = graph.ndata["h"]
                 feats = feats * norm
                 results.append(feats)
diff --git a/hugegraph-ml/src/hugegraph_ml/models/gatne.py b/hugegraph-ml/src/hugegraph_ml/models/gatne.py
index 69cac2d..03d2f3f 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/gatne.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/gatne.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.right (c) 2024 by jinsong, All Rights Reserved.
 
-# pylint: disable=E1101,R0205,C0200,R1732
+# pylint: disable=R0205,C0200,R1732
 
 """
 General Attributed Multiplex HeTerogeneous Network Embedding (GATNE)
@@ -118,7 +118,7 @@ class DGLGATNE(nn.Module):
                 block.dstdata[edge_type] = self.node_type_embeddings[output_nodes, i]
                 block.update_all(
                     fn.copy_u(edge_type, "m"),
-                    fn.sum("m", edge_type),
+                    fn.sum("m", edge_type), # pylint: disable=E1101
                     etype=edge_type,
                 )
                 node_type_embed.append(block.dstdata[edge_type])
diff --git a/hugegraph-ml/src/hugegraph_ml/models/pgnn.py b/hugegraph-ml/src/hugegraph_ml/models/pgnn.py
index 8f51d0c..3a870e2 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/pgnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/pgnn.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1101,E0401,C0103,R1732,C0200,R1705
+# pylint: disable=C0103,R1732,C0200,R1705
 
 """
 Position-aware Graph Neural Networks (P-GNN)
@@ -59,8 +59,8 @@ class PGNN_layer(nn.Module):
             graph.srcdata.update({"u_feat": u_feat})
             graph.dstdata.update({"v_feat": v_feat})
 
-            graph.apply_edges(fn.u_mul_e("u_feat", "sp_dist", "u_message"))
-            graph.apply_edges(fn.v_add_e("v_feat", "u_message", "message"))
+            graph.apply_edges(fn.u_mul_e("u_feat", "sp_dist", "u_message")) # pylint: disable=E1101
+            graph.apply_edges(fn.v_add_e("v_feat", "u_message", "message")) # pylint: disable=E1101
 
             messages = torch.index_select(
                 graph.edata["message"],
@@ -165,7 +165,7 @@ def split_edges(p, edges, data, non_train_ratio=0.2):
         {
             f"{p}_edges_train": edges[:, :split1],  # 80%
             f"{p}_edges_val": edges[:, split1:split2],  # 10%
-            f"p{}_edges_test": edges[:, split2:],  # 10%
+            f"{p}_edges_test": edges[:, split2:],  # 10%
         }
     )
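
The pgnn change is more than style: an empty `{}` inside an f-string does not parse, so f"p{}_edges_test" made the module fail to import with a SyntaxError. With the placeholder filled in:

    p = "train"
    key = f"{p}_edges_test"  # the braces must contain an expression
    print(key)               # -> train_edges_test
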
 
diff --git a/hugegraph-ml/src/hugegraph_ml/models/seal.py b/hugegraph-ml/src/hugegraph_ml/models/seal.py
index 43d9a89..59f749d 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/seal.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/seal.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E0401,R1719,C0103,R0205,R1721.R1705,R0205
+# pylint: disable=R1719,C0103,R0205,R1721.R1705,R0205,W0612
 
 """
 SEAL
@@ -28,10 +28,10 @@ DGL code: https://github.com/dmlc/dgl/tree/master/examples/pytorch/seal
 """
 
 import argparse
+import os
 import os.path as osp
 from copy import deepcopy
 import logging
-import os
 import time
 
 import torch
@@ -708,7 +708,7 @@ class SEALData(object):
         )
 
         if osp.exists(path):
-            self.print_fn("Load existing processed {} files".format(split_type))
+            self.print_fn(f"Load existing processed {split_type} files")
             graph_list, data = dgl.load_graphs(path)
             dataset = GraphDataSet(graph_list, data["labels"])
 
@@ -776,7 +776,7 @@ class LightLogging(object):
                 ],
             )
             logging.info("Start Logging")
-            logging.info("Log file path: {}".format(log_name))
+            logging.info("Log file path: %s", log_name)
 
         else:
             logging.basicConfig(
diff --git a/hugegraph-ml/src/hugegraph_ml/tasks/fraud_detector_caregnn.py b/hugegraph-ml/src/hugegraph_ml/tasks/fraud_detector_caregnn.py
index 53518a6..5258b9f 100644
--- a/hugegraph-ml/src/hugegraph_ml/tasks/fraud_detector_caregnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/tasks/fraud_detector_caregnn.py
@@ -46,7 +46,6 @@ class DetectorCaregnn:
         feat = self.graph.ndata["feature"].to(self._device)
         train_mask = self.graph.ndata["train_mask"]
         val_mask = self.graph.ndata["val_mask"]
-        test_mask = self.graph.ndata["test_mask"]
         train_idx = (
             torch.nonzero(train_mask, as_tuple=False).squeeze(1).to(self._device)
         )
diff --git a/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_seal.py b/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_seal.py
index 1294942..58f2115 100644
--- a/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_seal.py
+++ b/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_seal.py
@@ -127,7 +127,9 @@ class LinkPredictionSeal:
                 )
                 evaluate_time = time.time()
                 print(
-                    f"Epoch-{epoch}, train loss: {loss:.4f}, hits@{50}: val-{val_metric:.4f}, test-{test_metric:.4f}, cost time: train-{train_time - start_time:.1f}s, total-{evaluate_time - start_time:.1f}s"
+                    f"Epoch-{epoch}, train loss: {loss:.4f}, hits@{50}: val-{val_metric:.4f}, \\"
+                    f"test-{test_metric:.4f}, cost time: train-{train_time - start_time:.1f}s, \\"
+                    f"total-{evaluate_time - start_time:.1f}s"
                 )
                 summary_val.append(val_metric)
                 summary_test.append(test_metric)
diff --git a/hugegraph-ml/src/hugegraph_ml/tasks/node_classify_with_sample.py b/hugegraph-ml/src/hugegraph_ml/tasks/node_classify_with_sample.py
index c60d36c..5a9afff 100644
--- a/hugegraph-ml/src/hugegraph_ml/tasks/node_classify_with_sample.py
+++ b/hugegraph-ml/src/hugegraph_ml/tasks/node_classify_with_sample.py
@@ -34,11 +34,7 @@ class NodeClassifyWithSample:
         self.graph = graph
         self._model = model
         self.gpu = -1
-        self._device = (
-            f"cuda:{self.gpu}"
-            if self.gpu != -1 and torch.cuda.is_available()
-            else "cpu"
-        )
+        self._device = "cpu"
         self._early_stopping = None
         self._is_trained = False
         self.num_partitions = 100
@@ -56,7 +52,7 @@ class NodeClassifyWithSample:
             shuffle=True,
             drop_last=False,
             num_workers=0,
-            use_uva=True,
+            use_uva=False,
         )
         self._check_graph()
 
@@ -155,3 +151,4 @@ class NodeClassifyWithSample:
             _, predicted = torch.max(test_logits, dim=1)
             accuracy = (predicted == test_labels[0]).sum().item() / len(test_labels[0])
         return {"accuracy": accuracy, "total_loss": total_loss.item()}
+        
\ No newline at end of file
diff --git a/hugegraph-ml/src/hugegraph_ml/utils/dgl2hugegraph_utils.py b/hugegraph-ml/src/hugegraph_ml/utils/dgl2hugegraph_utils.py
index 1d652df..c8f00aa 100644
--- a/hugegraph-ml/src/hugegraph_ml/utils/dgl2hugegraph_utils.py
+++ b/hugegraph-ml/src/hugegraph_ml/utils/dgl2hugegraph_utils.py
@@ -17,7 +17,7 @@
 
 # pylint: disable=too-many-branches
 # pylint: disable=too-many-statements
-# pylint: disable=E0401,C0302,C0103,W1514,R1735,R1734,C0206
+# pylint: disable=C0302,C0103,W1514,R1735,R1734,C0206
 
 import os
 from typing import Optional
diff --git a/hugegraph-ml/src/tests/test_examples/test_examples.py b/hugegraph-ml/src/tests/test_examples/test_examples.py
index 4adca94..2712d9b 100644
--- a/hugegraph-ml/src/tests/test_examples/test_examples.py
+++ b/hugegraph-ml/src/tests/test_examples/test_examples.py
@@ -28,7 +28,6 @@ from hugegraph_ml.examples.appnp_example import appnp_example
 from hugegraph_ml.examples.arma_example import arma_example
 from hugegraph_ml.examples.bgnn_example import bgnn_example
 from hugegraph_ml.examples.bgrl_example import bgrl_example
-from hugegraph_ml.examples.care_gnn_example import care_gnn_example
 from hugegraph_ml.examples.cluster_gcn_example import cluster_gcn_example
 from hugegraph_ml.examples.correct_and_smooth_example import cs_example
 from hugegraph_ml.examples.dagnn_example import dagnn_example
@@ -141,9 +140,3 @@ class TestHugegraph2DGL(unittest.TestCase):
             seal_example(n_epochs=self.test_n_epochs)
         except ValueError:
             self.fail("model seal example failed")
-
-    def test_care_gnn_example(self):
-        try:
-            care_gnn_example(n_epochs=self.test_n_epochs)
-        except ValueError:
-            self.fail("model care-gnn example failed")
diff --git a/style/code_format_and_analysis.sh b/style/code_format_and_analysis.sh
index 71e58a9..2172444 100644
--- a/style/code_format_and_analysis.sh
+++ b/style/code_format_and_analysis.sh
@@ -53,7 +53,5 @@ if [ "$PYLINT" = true ] ; then
   echo "[pylint] Start code analysis and check,
   we need to manually fix all the warnings mentioned below before commit! "
   export PYTHONPATH=${ROOT_DIR}/hugegraph-llm/src:${ROOT_DIR}/hugegraph-python-client/src:${ROOT_DIR}/hugegraph-ml/src
-  pylint --rcfile=${ROOT_DIR}/style/pylint.conf ${ROOT_DIR}/hugegraph-llm
-  pylint --rcfile=${ROOT_DIR}/style/pylint.conf ${ROOT_DIR}/hugegraph-ml
-  pylint --rcfile=${ROOT_DIR}/style/pylint.conf --disable C0103 ${ROOT_DIR}/hugegraph-python-client
+  pylint --rcfile=${ROOT_DIR}/style/pylint.conf ${ROOT_DIR}/hugegraph-llm ${ROOT_DIR}/hugegraph-ml ${ROOT_DIR}/hugegraph-python-client
 fi
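
This consolidation is presumably the "critical bug" of the title: without `set -e`, a shell step's result is the exit status of its last command only, so non-zero exits from the first two pylint runs were discarded and CI stayed green. One invocation over all three source trees yields a single meaningful exit code. A rough Python illustration of the exit-status behavior (not the project's script):

    import subprocess

    # the earlier non-zero exit (pylint uses bit-coded statuses, e.g. 20)
    # is lost if a later command in the same step succeeds
    first = subprocess.run(["python", "-c", "raise SystemExit(20)"], check=False)
    last = subprocess.run(["python", "-c", "raise SystemExit(0)"], check=False)
    print(first.returncode, last.returncode)  # -> 20 0
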
diff --git a/style/pylint.conf b/style/pylint.conf
index af82317..f23b87f 100644
--- a/style/pylint.conf
+++ b/style/pylint.conf
@@ -467,6 +467,7 @@ disable=raw-checker-failed,
         R0904,  # Too many public methods (27/20) (too-many-public-methods)
         E1120,  # TODO: unbound-method-call-no-value-for-parameter
         R0917,  # Too many positional arguments (6/5) (too-many-positional-arguments)
+        C0103,
 
 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option

