AnandInguva commented on code in PR #23266:
URL: https://github.com/apache/beam/pull/23266#discussion_r973107767


##########
sdks/python/apache_beam/ml/inference/base_test.py:
##########
@@ -48,13 +48,40 @@ def run_inference(
       self,
       batch: Sequence[int],
       model: FakeModel,
-      inference_args=None) -> Iterable[int]:
+      inference_args=None,
+      drop_example=False) -> Iterable[int]:
     if self._fake_clock:
       self._fake_clock.current_time_ns += 3_000_000  # 3 milliseconds
     for example in batch:
       yield model.predict(example)
 
 
+class FakeModelHandlerWithPredictionResult(base.ModelHandler[int,
+                                                             int,
+                                                             FakeModel]):
+  def __init__(self, clock=None):
+    self._fake_clock = clock
+
+  def load_model(self):
+    if self._fake_clock:
+      self._fake_clock.current_time_ns += 500_000_000  # 500ms
+    return FakeModel()
+
+  def run_inference(
+      self,
+      batch: Sequence[int],
+      model: FakeModel,
+      inference_args=None,
+      drop_example=False) -> Iterable[int]:
+    if self._fake_clock:
+      self._fake_clock.current_time_ns += 3_000_000  # 3 milliseconds
+    for example in batch:
+      if not drop_example:
+        yield base.PredictionResult(example, model.predict(example))
+      else:
+        yield base.PredictionResult(None, model.predict(example))
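
For reference, a minimal sketch of how this fake handler might be exercised in a test pipeline. This is not part of the diff; it assumes the `FakeModelHandlerWithPredictionResult` above is in scope and that `FakeModel.predict(x)` returns `x + 1`, as the other fakes in base_test.py do:

import apache_beam as beam
from apache_beam.ml.inference import base
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that, equal_to


def test_run_inference_with_prediction_result():
  examples = [1, 5, 3, 10]
  # Each PredictionResult should keep the input example alongside the
  # inference, assuming FakeModel.predict(x) == x + 1.
  expected = [base.PredictionResult(x, x + 1) for x in examples]
  with TestPipeline() as pipeline:
    pcoll = pipeline | 'start' >> beam.Create(examples)
    actual = pcoll | base.RunInference(FakeModelHandlerWithPredictionResult())
    assert_that(actual, equal_to(expected), label='assert:inferences')

With `drop_example=True`, each yielded `PredictionResult` instead carries `None` in place of the input example, which lets tests verify that examples can be omitted from the inference output.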

Review Comment:
   Done


