This is an automated email from the ASF dual-hosted git repository.

damccorm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/beam.git


The following commit(s) were added to refs/heads/master by this push:
     new b2afbe0b26f Minor fix: move the getattr out of the loop as it is not needed to be inside (#25859)
b2afbe0b26f is described below

commit b2afbe0b26f7ea27d9073e582f913dd912b6d82c
Author: Amir Fayazi <amiral...@gmail.com>
AuthorDate: Fri Mar 17 05:00:36 2023 -0700

    Minor fix: move the getattr out of the loop as it is not needed to be inside (#25859)
---
 sdks/python/apache_beam/ml/inference/pytorch_inference.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdks/python/apache_beam/ml/inference/pytorch_inference.py b/sdks/python/apache_beam/ml/inference/pytorch_inference.py
index 818dd8325dc..cc6f3ea6be7 100644
--- a/sdks/python/apache_beam/ml/inference/pytorch_inference.py
+++ b/sdks/python/apache_beam/ml/inference/pytorch_inference.py
@@ -380,7 +380,7 @@ def make_keyed_tensor_model_fn(model_fn: str) -> KeyedTensorInferenceFn:
         batched_tensors = torch.stack(key_to_tensor_list[key])
         batched_tensors = _convert_to_device(batched_tensors, device)
         key_to_batched_tensors[key] = batched_tensors
-        pred_fn = getattr(model, model_fn)
+      pred_fn = getattr(model, model_fn)
       predictions = pred_fn(**key_to_batched_tensors, **inference_args)
     return utils._convert_to_result(batch, predictions, model_id)
 

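For context, the change hoists the getattr(model, model_fn) lookup out of the per-key batching loop: the resolved prediction function does not depend on the loop variable, so it only needs to be looked up once. A minimal sketch of the pattern is below (simplified, with hypothetical names; not the actual Beam source):

    import torch

    def run_keyed_inference(model, model_fn, key_to_tensor_list, device, inference_args):
        # Simplified illustration of the pattern in pytorch_inference.py:
        # batch each key's tensors, then call the model's prediction method once.
        key_to_batched_tensors = {}
        for key, tensor_list in key_to_tensor_list.items():
            key_to_batched_tensors[key] = torch.stack(tensor_list).to(device)
        # The attribute lookup is loop-invariant, so it is resolved once here
        # rather than on every iteration (the point of this commit).
        pred_fn = getattr(model, model_fn)
        return pred_fn(**key_to_batched_tensors, **inference_args)

The behavior is unchanged; the lookup result was simply being recomputed redundantly on every iteration before this fix.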