This is an automated email from the ASF dual-hosted git repository.

yongzao pushed a commit to branch revert-transformer-update
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 6f3a2d108ec5242e7951a2718a8d6ca45ffaf561
Author: Yongzao <[email protected]>
AuthorDate: Thu Sep 11 13:39:58 2025 +0800

    Update inference_manager.py
---
 iotdb-core/ainode/iotdb/ainode/core/manager/inference_manager.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/iotdb-core/ainode/iotdb/ainode/core/manager/inference_manager.py b/iotdb-core/ainode/iotdb/ainode/core/manager/inference_manager.py
index db4995c55ba..088731ed541 100644
--- a/iotdb-core/ainode/iotdb/ainode/core/manager/inference_manager.py
+++ b/iotdb-core/ainode/iotdb/ainode/core/manager/inference_manager.py
@@ -82,7 +82,8 @@ class TimerXLStrategy(InferenceStrategy):
     def infer(self, full_data, predict_length=96, **_):
         data = full_data[1][0]
         if data.dtype.byteorder not in ("=", "|"):
-            data = data.byteswap().newbyteorder()
+            np_data = data.byteswap()
+            data = np_data.view(np_data.dtype.newbyteorder())
         seqs = torch.tensor(data).unsqueeze(0).float()
         # TODO: unify model inference input
         output = self.model.generate(seqs, max_new_tokens=predict_length, revin=True)
@@ -94,7 +95,8 @@ class SundialStrategy(InferenceStrategy):
     def infer(self, full_data, predict_length=96, **_):
         data = full_data[1][0]
         if data.dtype.byteorder not in ("=", "|"):
-            data = data.byteswap().newbyteorder()
+            np_data = data.byteswap()
+            data = np_data.view(np_data.dtype.newbyteorder())
         seqs = torch.tensor(data).unsqueeze(0).float()
         # TODO: unify model inference input
         output = self.model.generate(
@@ -249,7 +251,8 @@ class InferenceManager:
                 # TODO: TSBlock -> Tensor codes should be unified
                 data = full_data[1][0]
                 if data.dtype.byteorder not in ("=", "|"):
-                    data = data.byteswap().newbyteorder()
+                    np_data = data.byteswap()
+                    data = np_data.view(np_data.dtype.newbyteorder())
                 # the inputs should be on CPU before passing to the inference request
                 inputs = torch.tensor(data).unsqueeze(0).float().to("cpu")
                 if model_id == "sundial":

Reply via email to