Copilot commented on code in PR #911:
URL: https://github.com/apache/dubbo-go-samples/pull/911#discussion_r2321604740


##########
book-flight-ai-agent/go-server/model/ollama/ollama.go:
##########
@@ -100,11 +101,22 @@ func NewLLMOllama(model string, url string) *LLMOllama {
        }
 }
 
+func (llm *LLMOllama) Stream(ctx context.Context, input string, opts 
...model.Option) (string, error) {
+       return llm.Call(ctx, input, opts...)

Review Comment:
   The Stream method is simply calling Call, which defeats the purpose of 
having a streaming interface. Consider implementing actual streaming 
functionality or documenting why streaming is not supported.
   ```suggestion
   // Stream streams the response chunks from the LLM as they arrive.
   func (llm *LLMOllama) Stream(ctx context.Context, input string, opts 
...model.Option) (<-chan string, <-chan error) {
        out := make(chan string)
        errCh := make(chan error, 1)
   
        go func() {
                defer close(out)
                defer close(errCh)
   
                // 检查 ctx 是否已超时(提前返回)
                select {
                case <-ctx.Done():
                        errCh <- fmt.Errorf("LLM 调用超时:%w", ctx.Err())
                        return
                default:
                }
   
                client := api.NewClient(&url.URL{Scheme: llm.llmUrl.scheam, 
Host: llm.llmUrl.host}, http.DefaultClient)
                optss := model.NewOptions(opts...)
   
                req := &api.GenerateRequest{
                        Model:   llm.Model,
                        Prompt:  input,
                        Stream:  llm.stream,
                        Suffix:  llm.suffix,
                        Options: optss.Opts,
                }
   
                respFunc := func(resp api.GenerateResponse) error {
                        select {
                        case <-ctx.Done():
                                return ctx.Err()
                        case out <- resp.Response:
                        }
                        return optss.CallOpt(resp.Response)
                }
   
                err := client.Generate(ctx, req, respFunc)
                if err != nil {
                        log.Printf("LLM 调用失败(可能超时):%v", err)
                        errCh <- err
                        return
                }
        }()
   
        return out, errCh
   ```



##########
book-flight-ai-agent/go-server/model/bailian/bailian.go:
##########
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package bailian
+
+import (
+       "bytes"
+       "context"
+       "encoding/json"
+       "fmt"
+       "io"
+       "net/http"
+       "strings"
+)
+
+import (
+       
"github.com/apache/dubbo-go-samples/book-flight-ai-agent/go-server/model"
+)
+
+type Message struct {
+       Role    string `json:"role"`
+       Content string `json:"content"`
+}
+
+type BailianRequest struct {
+       Model       string    `json:"model"`
+       Messages    []Message `json:"messages"`
+       Temperature float64   `json:"temperature,omitempty"`
+       MaxTokens   int       `json:"max_tokens,omitempty"`
+       Stream      bool      `json:"stream,omitempty"`
+}
+
+type BailianResponse struct {
+       ID      string `json:"id"`
+       Object  string `json:"object"`
+       Created int64  `json:"created"`
+       Model   string `json:"model"`
+       Choices []struct {
+               Index        int     `json:"index"`
+               Message      Message `json:"message"`
+               FinishReason string  `json:"finish_reason"`
+       } `json:"choices"`
+       Usage struct {
+               PromptTokens     int `json:"prompt_tokens"`
+               CompletionTokens int `json:"completion_tokens"`
+               TotalTokens      int `json:"total_tokens"`
+       } `json:"usage"`
+}
+
+type LLMBailian struct {
+       Model     string
+       Url       string
+       ApiKey    string
+       MaxTokens int
+       options   []any
+}
+
+func NewLLMBailian(model string, url string, apiKey string) *LLMBailian {
+       return &LLMBailian{
+               Model:     model,
+               Url:       url,
+               ApiKey:    apiKey,
+               MaxTokens: 2048,
+               options:   []any{},
+       }
+}
+
+func (llm *LLMBailian) Call(ctx context.Context, input string, opts 
...model.Option) (string, error) {
+       return llm.Invoke(ctx, input, opts...)
+}
+
+func (llm *LLMBailian) Stream(ctx context.Context, input string, opts 
...model.Option) (string, error) {
+       // 百炼API的流式调用实现
+       // 简化版本,实际上应该使用流式API
+       return llm.Invoke(ctx, input, opts...)
+}
+
+func (llm *LLMBailian) Invoke(ctx context.Context, input string, opts 
...model.Option) (string, error) {
+       options := model.NewOptions(opts...)
+
+       // 解析选项
+       temperature := 0.7
+       for _, opt := range llm.options {
+               if temp, ok := opt.(WithTemperature); ok {
+                       temperature = float64(temp)
+               }
+       }

Review Comment:
   The temperature option is being read from `llm.options` but it should be 
read from the `opts` parameter passed to the function. The current code ignores 
any temperature options passed in the function call.



##########
book-flight-ai-agent/go-server/agents/cot_agent.go:
##########
@@ -74,19 +78,32 @@ func (cot *CotAgentRunner) Run(
 
        var task string
        if len(cot.memoryMsg) > 0 {
+               // 传递 timeoutCtx 到 summaryIntent
                task = cot.summaryIntent(timeNow, callopt)
        } else {
                task = input
        }
 
-       // Runner
+       // Runner 循环(使用 timeoutCtx 判断超时)
        var response string
        var action actions.Action
-
        var idxThoughtStep int32
        var taskState TaskState
+
        for idxThoughtStep < cot.maxThoughtSteps {
-               action, response = cot.thinkStep(task, timeNow, callopt, opts)
+               // 检查是否超时(提前退出循环)
+               select {
+               case <-timeoutCtx.Done():
+                       // 记录超时日志(包含任务内容、超时时间、步骤数)
+                       // log.Printf(
+                       //   "Agent 任务超时:任务=%s,超时时间=%d秒,已执行步骤数=%d",
+                       //   task, timeoutSec, idxThoughtStep,
+                       // )

Review Comment:
   Remove commented-out code rather than leaving it in the codebase. If this 
logging is needed for debugging, consider using a proper logging level or 
configuration.
   ```suggestion
   
   ```



##########
book-flight-ai-agent/go-client/frontend/static/script.js:
##########
@@ -11,6 +11,35 @@ const inputInitHeight = chatInput.scrollHeight;
 let fileBlobArr = [];
 let fileArr = [];
 
+// 1. 页面初始化时请求后端配置(新增)
+async function loadConfig() {
+    try {
+        const res = await fetch("/api/config"); // 后端新增接口返回配置
+        window.CONFIG = await res.json();
+        // 统一超时字段名:使用与后端一致的 TIMEOUT_SECONDS(原 TIME_OUT_SECOND 废弃)
+        window.CONFIG.TIMEOUT_MS = window.CONFIG.TIMEOUT_SECONDS * 1000; // 
转为毫秒(方便定时器使用)
+    } catch (err) {
+        console.error("加载配置失败,使用默认超时(2分钟)", err);
+        window.CONFIG = window.CONFIG || {};
+        window.CONFIG.TIMEOUT_SECONDS = 120;
+        window.CONFIG.TIMEOUT_MS = 120 * 1000;
+    }
+}
+
+// 2. 页面加载完成后执行配置加载
+window.onload = async () => {
+    // 确保window.CONFIG已经存在,如果不存在则初始化
+    if (!window.CONFIG) {
+        await loadConfig();
+    } else {
+        // 确保window.CONFIG有TIMEOUT_MS属性
+        if (!window.CONFIG.TIMEOUT_MS && window.CONFIG.TIME_OUT_SECOND) {
+            window.CONFIG.TIMEOUT_MS = window.CONFIG.TIME_OUT_SECOND;

Review Comment:
   This assumes TIME_OUT_SECOND is already in milliseconds, but based on the 
naming convention it should be in seconds. The conversion logic is inconsistent 
with line 20 where TIMEOUT_SECONDS is multiplied by 1000.
   ```suggestion
               window.CONFIG.TIMEOUT_MS = window.CONFIG.TIME_OUT_SECOND * 1000;
   ```



##########
book-flight-ai-agent/go-client/frontend/static/script.js:
##########
@@ -64,17 +93,40 @@ const handleChat = () => {
     const incomingChatLi = createChatLi("Thinking...", "incoming", chatbox);
     const incomingRecordLi = createChatLi("Thinking...", "incoming", 
recordbox); // Add to recordbox
 
-    // timeout
-    const TIMEOUT_MS = CONFIG.TIME_OUT_SECOND;
-    let isTimeout = false;
-    const timeoutId = setTimeout(() => {
-        isTimeout = true;
-        incomingRecordLi.querySelector("p").textContent = "Request timed out. 
Please try again.";
-    }, TIMEOUT_MS);
 
-    // send request
+    // 超时逻辑优化
+    let timeoutId;
+    const startTimeout = () => {
+        // 清除已有定时器(避免重复)
+        if (timeoutId) clearTimeout(timeoutId);
+        // 启动新定时器(使用同步后的 window.CONFIG.TIMEOUT_MS)
+        timeoutId = setTimeout(() => {
+            const timeoutMsg = `
+        <div>
+          <p>请求超时(当前超时时间:${window.CONFIG.TIMEOUT_SECONDS || 
window.CONFIG.TIME_OUT_SECOND/1000}秒)</p>
+          <button class="retry-btn" style="margin-top:8px;padding:4px 
8px;">点击重试</button>
+        </div>
+      `;
+            // 更新超时提示(带重试按钮)
+            incomingChatLi.querySelector("p").innerHTML = timeoutMsg;
+            incomingRecordLi.querySelector("p").textContent = 
`请求超时(${window.CONFIG.TIMEOUT_SECONDS || window.CONFIG.TIME_OUT_SECOND/1000}秒)`;

Review Comment:
   The fallback logic `window.CONFIG.TIME_OUT_SECOND/1000` assumes 
TIME_OUT_SECOND is in milliseconds, but this is inconsistent with the new 
TIMEOUT_SECONDS field which is in seconds. Consider removing the legacy 
fallback or ensuring consistent units.
   ```suggestion
             <p>请求超时(当前超时时间:${typeof window.CONFIG.TIMEOUT_SECONDS === 'number' 
? window.CONFIG.TIMEOUT_SECONDS : (window.CONFIG.TIME_OUT_SECOND ? 
window.CONFIG.TIME_OUT_SECOND / 1000 : 120)}秒)</p>
             <button class="retry-btn" style="margin-top:8px;padding:4px 
8px;">点击重试</button>
           </div>
         `;
               // 更新超时提示(带重试按钮)
               incomingChatLi.querySelector("p").innerHTML = timeoutMsg;
               incomingRecordLi.querySelector("p").textContent = `请求超时(${typeof 
window.CONFIG.TIMEOUT_SECONDS === 'number' ? window.CONFIG.TIMEOUT_SECONDS : 
(window.CONFIG.TIME_OUT_SECOND ? window.CONFIG.TIME_OUT_SECOND / 1000 : 
120)}秒)`;
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@dubbo.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: notifications-unsubscribe@dubbo.apache.org
For additional commands, e-mail: notifications-help@dubbo.apache.org

Reply via email to