This is an automated email from the ASF dual-hosted git repository.

jamesnetherton pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-quarkus-examples.git

commit dfa4dd9729ce2bd1e6f6a81ec693632bb00fbcc7
Author: James Netherton <[email protected]>
AuthorDate: Fri Jul 11 16:08:57 2025 +0100

    Enable data-extract-langchain4j project to use a custom Ollama instance
---
 .../org/acme/extraction/OllamaTestResource.java    | 42 ++++++++++++----------
 1 file changed, 24 insertions(+), 18 deletions(-)

diff --git 
a/data-extract-langchain4j/src/test/java/org/acme/extraction/OllamaTestResource.java
 
b/data-extract-langchain4j/src/test/java/org/acme/extraction/OllamaTestResource.java
index 4b4728e..8c7d0d1 100644
--- 
a/data-extract-langchain4j/src/test/java/org/acme/extraction/OllamaTestResource.java
+++ 
b/data-extract-langchain4j/src/test/java/org/acme/extraction/OllamaTestResource.java
@@ -79,26 +79,32 @@ public class OllamaTestResource implements 
QuarkusTestResourceLifecycleManager {
                 LOG.info("Starting a fake Ollama server backed by wiremock");
                 initWireMockServer();
             } else {
-                LOG.info("Starting an Ollama server backed by testcontainers");
-                ollamaContainer = new OllamaContainer(OLLAMA_IMAGE)
-                        .withLogConsumer(new 
Slf4jLogConsumer(LOG).withPrefix("basicAuthContainer"));
-                ollamaContainer.start();
-
-                String ollamaModelId = 
getConfig().getValue("quarkus.langchain4j.ollama.chat-model.model-id", 
String.class);
-
-                ExecResult result = ollamaContainer.execInContainer("ollama", 
"pull", ollamaModelId);
-                long pullBegin = currentTimeMillis();
-                while ((currentTimeMillis() - pullBegin < 10000)
-                        && (result.getStderr() == null || 
!result.getStderr().contains("success"))) {
-                    LOG.info("Will retry ollama pull after sleeping 250ms");
-
-                    Thread.sleep(250);
-
-                    result = ollamaContainer.execInContainer("ollama", "pull", 
ollamaModelId);
+                baseUrl = System.getProperty("baseUrl", 
System.getenv("BASE_URL"));
+                if (baseUrl != null) {
+                    LOG.info("Using Ollama server at {}", baseUrl);
+                } else {
+                    LOG.info("Starting an Ollama server backed by 
testcontainers");
+                    ollamaContainer = new OllamaContainer(OLLAMA_IMAGE)
+                            .withLogConsumer(new 
Slf4jLogConsumer(LOG).withPrefix("basicAuthContainer"));
+                    ollamaContainer.start();
+
+                    String ollamaModelId = 
getConfig().getValue("quarkus.langchain4j.ollama.chat-model.model-id", 
String.class);
+
+                    ExecResult result = 
ollamaContainer.execInContainer("ollama", "pull", ollamaModelId);
+                    long pullBegin = currentTimeMillis();
+                    while ((currentTimeMillis() - pullBegin < 10000)
+                            && (result.getStderr() == null || 
!result.getStderr().contains("success"))) {
+                        LOG.info("Will retry ollama pull after sleeping 
250ms");
+
+                        Thread.sleep(250);
+
+                        result = ollamaContainer.execInContainer("ollama", 
"pull", ollamaModelId);
+                    }
+
+                    baseUrl = format(BASE_URL_FORMAT, 
ollamaContainer.getHost(),
+                            ollamaContainer.getMappedPort(OLLAMA_SERVER_PORT));
                 }
 
-                baseUrl = format(BASE_URL_FORMAT, ollamaContainer.getHost(), 
ollamaContainer.getMappedPort(OLLAMA_SERVER_PORT));
-
                 if (isRecordingMode) {
                     LOG.info("Recording interactions with the Ollama server 
backed by testcontainers");
                     initWireMockServer();

Reply via email to the mailing list.