This is an automated email from the ASF dual-hosted git repository.

jpoth pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel.git


The following commit(s) were added to refs/heads/main by this push:
     new b5b8cb992f60 CAMEL-22545 initialize transformers pipeline in the 
handle function and increase timeouts in tests in case the models are being used for the first time
b5b8cb992f60 is described below

commit b5b8cb992f60f1a314124b45dadd7d2c7e47c823
Author: John Poth <[email protected]>
AuthorDate: Tue Feb 24 14:57:47 2026 +0100

    CAMEL-22545 initialize transformers pipeline in the handle function and
    increase timeouts in tests in case the models are being used for the first time
---
 components/camel-ai/camel-huggingface/pom.xml      |  2 +-
 .../tasks/automatic_speech_recognition.py          | 20 +++++++++++++-------
 .../camel/component/huggingface/tasks/chat.py      | 10 ++++++++--
 .../huggingface/tasks/question_answering.py        | 20 +++++++++++++-------
 .../huggingface/tasks/sentence_embeddings.py       | 17 +++++++++++------
 .../component/huggingface/tasks/summarization.py   | 20 +++++++++++++-------
 .../huggingface/tasks/text_classification.py       | 22 ++++++++++++++--------
 .../component/huggingface/tasks/text_generation.py | 20 +++++++++++++-------
 .../component/huggingface/tasks/text_to_image.py   | 20 +++++++++++++-------
 .../component/huggingface/tasks/text_to_speech.py  | 20 +++++++++++++-------
 .../huggingface/tasks/zero_shot_classification.py  | 20 +++++++++++++-------
 .../camel/component/huggingface/HuggingFaceIT.java | 12 ++++++------
 .../huggingface/TranslationPredictor.java          | 20 +++++++++++++-------
 13 files changed, 144 insertions(+), 79 deletions(-)

diff --git a/components/camel-ai/camel-huggingface/pom.xml 
b/components/camel-ai/camel-huggingface/pom.xml
index f2592a82a112..07cd56973e55 100644
--- a/components/camel-ai/camel-huggingface/pom.xml
+++ b/components/camel-ai/camel-huggingface/pom.xml
@@ -73,7 +73,7 @@
                         <groupId>org.apache.maven.plugins</groupId>
                         <artifactId>maven-failsafe-plugin</artifactId>
                         <configuration>
-                            
<forkedProcessTimeoutInSeconds>1200</forkedProcessTimeoutInSeconds>
+                            
<forkedProcessTimeoutInSeconds>7200</forkedProcessTimeoutInSeconds>
                             <skipITs>${skipITs}</skipITs>
                             <skipTests>${skipTests}</skipTests>
                         </configuration>
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/automatic_speech_recognition.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/automatic_speech_recognition.py
index d759b6439445..7af61d4f437b 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/automatic_speech_recognition.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/automatic_speech_recognition.py
@@ -21,17 +21,23 @@ import json
 import numpy as np
 import logging
 
-pipe = pipeline(
-    task='automatic-speech-recognition',
-    model='%s',
-    revision='%s',
-    device_map='%s'
-)
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = pipeline(
+                task='automatic-speech-recognition',
+                model='%s',
+                revision='%s',
+                device_map='%s'
+            )
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
 
         input_str = inputs.get_as_string("data")
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/chat.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/chat.py
index 2c2b6244b2a5..56e4ef5bb18c 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/chat.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/chat.py
@@ -21,12 +21,18 @@ import json
 import torch
 import logging
 
-pipe = pipeline(task='text-generation', model='%s', revision='%s', 
device_map='%s'%s)
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = pipeline(task='text-generation', model='%s', revision='%s', 
device_map='%s'%s)
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
         input_str = inputs.get_as_string("data")
         messages = json.loads(input_str)  # List of dicts
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/question_answering.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/question_answering.py
index 827a93716f33..f7015cd9660e 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/question_answering.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/question_answering.py
@@ -21,17 +21,23 @@ import json
 import torch
 import logging
 
-pipe = pipeline(
-    task='question-answering',
-    model='%s',
-    revision='%s',
-    device_map='%s'
-)
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = pipeline(
+                task='question-answering',
+                model='%s',
+                revision='%s',
+                device_map='%s'
+            )
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
 
         input_str = inputs.get_as_string("data")
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/sentence_embeddings.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/sentence_embeddings.py
index 9ab4ad2ea93c..6fe310f35f24 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/sentence_embeddings.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/sentence_embeddings.py
@@ -21,16 +21,21 @@ import json
 import torch
 import logging
 
-device = '%s'
-if device == 'auto':
-    device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-model = SentenceTransformer('%s', device=device)
+model = None
 
 def handle(inputs: Input):
+    global model
     try:
+        if not model:
+            logging.debug("Initializing model")
+            device = '%s'
+            if device == 'auto':
+                device = 'cuda' if torch.cuda.is_available() else 'cpu'
+            model = SentenceTransformer('%s', device=device)
+            logging.debug("Model initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
 
         input_str = inputs.get_as_string("data")
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/summarization.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/summarization.py
index 530bd6868213..53896e6d2b5d 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/summarization.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/summarization.py
@@ -21,17 +21,23 @@ import json
 import torch
 import logging
 
-pipe = pipeline(
-    task='summarization',
-    model='%s',
-    revision='%s',
-    device_map='%s'
-)
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = pipeline(
+                task='summarization',
+                model='%s',
+                revision='%s',
+                device_map='%s'
+            )
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
         input_str = inputs.get_as_string("data")
         torch.manual_seed(42)
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_classification.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_classification.py
index ff3ed27a093b..be85e8e1e05d 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_classification.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_classification.py
@@ -22,18 +22,24 @@ import torch
 import sys
 import logging
 
-pipe = pipeline(
-    task='text-classification',
-    model='%s',
-    revision='%s',
-    device_map='%s',
-    top_k=%s
-)
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = pipeline(
+                task='text-classification',
+                model='%s',
+                revision='%s',
+                device_map='%s',
+                top_k=%s
+            )
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
         input_str = inputs.get_as_string("data")
         torch.manual_seed(42)
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_generation.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_generation.py
index 0fc5afdf5a31..cbf79fe71447 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_generation.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_generation.py
@@ -21,17 +21,23 @@ import json
 import torch
 import logging
 
-pipe = pipeline(
-    task='text-generation',
-    model='%s',
-    revision='%s',
-    device_map='%s'
-)
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = pipeline(
+                task='text-generation',
+                model='%s',
+                revision='%s',
+                device_map='%s'
+            )
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
         input_str = inputs.get_as_string("data")
         torch.manual_seed(42)
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_to_image.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_to_image.py
index c89945b9510c..e8b41788c2b5 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_to_image.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_to_image.py
@@ -22,17 +22,23 @@ import json
 import torch
 import logging
 
-pipe = StableDiffusionPipeline.from_pretrained(
-    '%s',
-    torch_dtype=torch.float32,  # CPU-safe
-    safety_checker=None
-)
-pipe = pipe.to('%s')
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = StableDiffusionPipeline.from_pretrained(
+                '%s',
+                torch_dtype=torch.float32,  # CPU-safe
+                safety_checker=None
+            )
+            pipe = pipe.to('%s')
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
         input_str = inputs.get_as_string("data")
         torch.manual_seed(42)
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_to_speech.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_to_speech.py
index 37b1efb07679..be77d5545a30 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_to_speech.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/text_to_speech.py
@@ -22,17 +22,23 @@ import torch
 import numpy as np
 import logging
 
-pipe = pipeline(
-    task='text-to-speech',
-    model='%s',
-    revision='%s',
-    device_map='%s'
-)
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = pipeline(
+                task='text-to-speech',
+                model='%s',
+                revision='%s',
+                device_map='%s'
+            )
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
 
         input_str = inputs.get_as_string("data")
diff --git 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/zero_shot_classification.py
 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/zero_shot_classification.py
index 25347d7a1106..4dd306f353f5 100644
--- 
a/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/zero_shot_classification.py
+++ 
b/components/camel-ai/camel-huggingface/src/main/resources/org/apache/camel/component/huggingface/tasks/zero_shot_classification.py
@@ -21,17 +21,23 @@ import json
 import torch
 import logging
 
-pipe = pipeline(
-    task='zero-shot-classification',
-    model='%s',
-    revision='%s',
-    device_map='%s'
-)
+pipe = None
 
 def handle(inputs: Input):
+    global pipe
     try:
+        if not pipe:
+            logging.debug("Initializing pipeline")
+            pipe = pipeline(
+                task='zero-shot-classification',
+                model='%s',
+                revision='%s',
+                device_map='%s'
+            )
+            logging.debug("Pipeline initialized")
+
         if inputs.content.size() == 0:
-            logging.info("Handling warmup call - returning empty output")
+            logging.debug("Handling warmup call - returning empty output")
             return Output()
         input_str = inputs.get_as_string("data")
         input_data = json.loads(input_str)  # [text, label1, label2, ...]
diff --git 
a/components/camel-ai/camel-huggingface/src/test/java/org/apache/camel/component/huggingface/HuggingFaceIT.java
 
b/components/camel-ai/camel-huggingface/src/test/java/org/apache/camel/component/huggingface/HuggingFaceIT.java
index c8835ee99e02..58a89a4f580d 100644
--- 
a/components/camel-ai/camel-huggingface/src/test/java/org/apache/camel/component/huggingface/HuggingFaceIT.java
+++ 
b/components/camel-ai/camel-huggingface/src/test/java/org/apache/camel/component/huggingface/HuggingFaceIT.java
@@ -169,7 +169,7 @@ public class HuggingFaceIT extends CamelTestSupport {
                         .to("mock:result");
 
                 from("direct:start-sum")
-                        
.to("huggingface:summarization?modelId=facebook/bart-large-cnn&device=cpu&maxTokens=50&minLength=20&modelLoadingTimeout=360&predictTimeout=360")
+                        
.to("huggingface:summarization?modelId=facebook/bart-large-cnn&device=cpu&maxTokens=50&minLength=20&modelLoadingTimeout=720&predictTimeout=360")
                         .to("mock:result");
 
                 from("direct:start-zero")
@@ -181,23 +181,23 @@ public class HuggingFaceIT extends CamelTestSupport {
                         .to("mock:result");
 
                 from("direct:start-tti")
-                        
.to("huggingface:text-to-image?modelId=CompVis/stable-diffusion-v1-4&device=cpu&modelLoadingTimeout=650&predictTimeout=10000")
+                        
.to("huggingface:text-to-image?modelId=CompVis/stable-diffusion-v1-4&device=cpu&modelLoadingTimeout=1200&predictTimeout=720")
                         .to("mock:result");
 
                 from("direct:start-asr")
-                        
.to("huggingface:automatic-speech-recognition?modelId=openai/whisper-medium&device=cpu&modelLoadingTimeout=360&predictTimeout=360")
+                        
.to("huggingface:automatic-speech-recognition?modelId=openai/whisper-medium&device=cpu&modelLoadingTimeout=720&predictTimeout=360")
                         .to("mock:result");
 
                 from("direct:start-tts")
-                        
.to("huggingface:text-to-speech?modelId=facebook/mms-tts-eng&device=cpu&modelLoadingTimeout=360&predictTimeout=360")
+                        
.to("huggingface:text-to-speech?modelId=facebook/mms-tts-eng&device=cpu&modelLoadingTimeout=1200&predictTimeout=360")
                         .to("mock:result");
 
                 from("direct:start-chat")
-                        
.to("huggingface:chat?modelId=Qwen/Qwen2.5-3B-Instruct&systemPrompt=You are a 
helpful assistant named 
Alan&device=cpu&maxTokens=50&temperature=0.7&modelLoadingTimeout=360&predictTimeout=700")
+                        
.to("huggingface:chat?modelId=Qwen/Qwen2.5-3B-Instruct&systemPrompt=You are a 
helpful assistant named 
Alan&device=cpu&maxTokens=50&temperature=0.7&modelLoadingTimeout=1200&predictTimeout=700")
                         .to("mock:result");
 
                 from("direct:start-custom")
-                        
.to("huggingface:translation?modelId=Helsinki-NLP/opus-mt-en-fr&device=cpu&predictorBean=myCustomPredictor&modelLoadingTimeout=360&predictTimeout=360")
+                        
.to("huggingface:translation?modelId=Helsinki-NLP/opus-mt-en-fr&device=cpu&predictorBean=myCustomPredictor&modelLoadingTimeout=720&predictTimeout=360")
                         .to("mock:result");
 
             }
diff --git 
a/components/camel-ai/camel-huggingface/src/test/java/org/apache/camel/component/huggingface/TranslationPredictor.java
 
b/components/camel-ai/camel-huggingface/src/test/java/org/apache/camel/component/huggingface/TranslationPredictor.java
index 68dd306529bf..950633680bd8 100644
--- 
a/components/camel-ai/camel-huggingface/src/test/java/org/apache/camel/component/huggingface/TranslationPredictor.java
+++ 
b/components/camel-ai/camel-huggingface/src/test/java/org/apache/camel/component/huggingface/TranslationPredictor.java
@@ -42,17 +42,23 @@ public class TranslationPredictor extends 
AbstractTaskPredictor {
                 import torch
                 import logging
 
-                pipe = pipeline(
-                    task='translation',
-                    model='%s',
-                    revision='%s',
-                    device_map='%s'
-                )
+                pipe = None
 
                 def handle(inputs: Input):
+                    global pipe
                     try:
+                        if not pipe:
+                            logging.debug("Initializing pipeline")
+                            pipe = pipeline(
+                                task='translation',
+                                model='%s',
+                                revision='%s',
+                                device_map='%s'
+                            )
+                            logging.debug("Pipeline initialized")
+
                         if inputs.content.size() == 0:
-                            logging.info("Handling warmup call - returning 
empty output")
+                            logging.debug("Handling warmup call - returning 
empty output")
                             return Output()
 
                         input_str = inputs.get_as_string("data")

Reply via email to the commits mailing list.