This is an automated email from the ASF dual-hosted git repository.

davsclaus pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel.git


The following commit(s) were added to refs/heads/main by this push:
     new 47caf67d3af3 Replace deprecated langchain4j maxTokens(String) method 
(#21695)
47caf67d3af3 is described below

commit 47caf67d3af395cf3b74bf305e0a8ba96b92af92
Author: Aurélien Pupier <[email protected]>
AuthorDate: Tue Mar 3 15:02:16 2026 +0100

    Replace deprecated langchain4j maxTokens(String) method (#21695)
    
    Signed-off-by: Aurélien Pupier <[email protected]>
---
 .../langchain4j/tokenizer/LangChain4JParagraphTokenizerTest.java        | 2 +-
 .../langchain4j/tokenizer/LangChain4JSentenceTokenizerTest.java         | 2 +-
 .../component/langchain4j/tokenizer/LangChain4JWordTokenizerTest.java   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JParagraphTokenizerTest.java
 
b/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JParagraphTokenizerTest.java
index 5173905def96..95a3eb2a2fd1 100644
--- 
a/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JParagraphTokenizerTest.java
+++ 
b/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JParagraphTokenizerTest.java
@@ -43,7 +43,7 @@ public class LangChain4JParagraphTokenizerTest extends 
LangChain4JTokenizerTestS
                 from("direct:start")
                         .tokenize(tokenizer()
                                 .byParagraph()
-                                    .maxTokens(1024)
+                                    .maxSegmentSize(1024)
                                     .maxOverlap(10)
                                     
.using(LangChain4jTokenizerDefinition.TokenizerType.OPEN_AI)
                                     .end())
diff --git 
a/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JSentenceTokenizerTest.java
 
b/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JSentenceTokenizerTest.java
index f8d906fb1192..6d4524255dbf 100644
--- 
a/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JSentenceTokenizerTest.java
+++ 
b/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JSentenceTokenizerTest.java
@@ -43,7 +43,7 @@ public class LangChain4JSentenceTokenizerTest extends 
LangChain4JTokenizerTestSu
                 from("direct:start")
                         .tokenize(tokenizer()
                                 .bySentence()
-                                    .maxTokens(1024)
+                                    .maxSegmentSize(1024)
                                     .maxOverlap(10)
                                     
.using(LangChain4jTokenizerDefinition.TokenizerType.OPEN_AI)
                                     .end())
diff --git 
a/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JWordTokenizerTest.java
 
b/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JWordTokenizerTest.java
index 6aa7ea5ea00d..5489cc293538 100644
--- 
a/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JWordTokenizerTest.java
+++ 
b/components/camel-ai/camel-langchain4j-tokenizer/src/test/java/org/apache/camel/component/langchain4j/tokenizer/LangChain4JWordTokenizerTest.java
@@ -43,7 +43,7 @@ public class LangChain4JWordTokenizerTest extends 
LangChain4JTokenizerTestSuppor
                 from("direct:start")
                         .tokenize(tokenizer()
                                 .byWord()
-                                    .maxTokens(1024)
+                                    .maxSegmentSize(1024)
                                     .maxOverlap(10)
                                     
.using(LangChain4jTokenizerDefinition.TokenizerType.OPEN_AI)
                                     .end())

Reply via email to