[ 
https://issues.apache.org/jira/browse/BEAM-7516?focusedWorklogId=376020&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-376020
 ]

ASF GitHub Bot logged work on BEAM-7516:
----------------------------------------

                Author: ASF GitHub Bot
            Created on: 23/Jan/20 01:28
            Start Date: 23/Jan/20 01:28
    Worklog Time Spent: 10m 
      Work Description: robertwb commented on pull request #10291: 
[BEAM-7516][BEAM-8823] FnApiRunner works with work queues, and a primitive 
watermark manager
URL: https://github.com/apache/beam/pull/10291#discussion_r369848654
 
 

 ##########
 File path: sdks/python/apache_beam/runners/portability/fn_api_runner.py
 ##########
 @@ -249,141 +250,68 @@ def done(self):
                    in self._req_worker_mapping.items()])
 
 
-class _ListBuffer(list):
-  """Used to support parititioning of a list."""
-  def partition(self, n):
-    # type: (int) -> List[List[bytes]]
-    return [self[k::n] for k in range(n)]
+class _ProcessingQueueManager(object):
+  """Manages the queues for ProcessBundle inputs.
 
+  There are three queues:
+   - ready_inputs(_ProcessingQueueManager.KeyedQueue). This queue contains 
input
+       data that is ready to be processed. These are data such as timers past
+       their trigger time, and data to be processed.
+       The ready_inputs_queue contains tuples of (stage_name, inputs), where
+       inputs are dictionaries mapping PCollection name to data buffers.
 
-class _GroupingBuffer(object):
-  """Used to accumulate groupded (shuffled) results."""
-  def __init__(self,
-               pre_grouped_coder,  # type: coders.Coder
-               post_grouped_coder,  # type: coders.Coder
-               windowing
-              ):
-    # type: (...) -> None
-    self._key_coder = pre_grouped_coder.key_coder()
-    self._pre_grouped_coder = pre_grouped_coder
-    self._post_grouped_coder = post_grouped_coder
-    self._table = collections.defaultdict(list)  # type: 
Optional[DefaultDict[bytes, List[Any]]]
-    self._windowing = windowing
-    self._grouped_output = None  # type: Optional[List[List[bytes]]]
-
-  def append(self, elements_data):
-    # type: (bytes) -> None
-    if self._grouped_output:
-      raise RuntimeError('Grouping table append after read.')
-    input_stream = create_InputStream(elements_data)
-    coder_impl = self._pre_grouped_coder.get_impl()
-    key_coder_impl = self._key_coder.get_impl()
-    # TODO(robertwb): We could optimize this even more by using a
-    # window-dropping coder for the data plane.
-    is_trivial_windowing = self._windowing.is_default()
-    while input_stream.size() > 0:
-      windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
-      key, value = windowed_key_value.value
-      self._table[key_coder_impl.encode(key)].append(
-          value if is_trivial_windowing
-          else windowed_key_value.with_value(value))
-
-  def partition(self, n):
-    # type: (int) -> List[List[bytes]]
-    """ It is used to partition _GroupingBuffer to N parts. Once it is
-    partitioned, it would not be re-partitioned with diff N. Re-partition
-    is not supported now.
-    """
-    if not self._grouped_output:
-      if self._windowing.is_default():
-        globally_window = GlobalWindows.windowed_value(
-            None,
-            timestamp=GlobalWindow().max_timestamp(),
-            pane_info=windowed_value.PaneInfo(
-                is_first=True,
-                is_last=True,
-                timing=windowed_value.PaneInfoTiming.ON_TIME,
-                index=0,
-                nonspeculative_index=0)).with_value
-        windowed_key_values = lambda key, values: [
-            globally_window((key, values))]
+   - watermark_pending_inputs(_ProcessingQueueManager.KeyedQueue). This queue
+       contains input data that is not yet ready to be processed, and is 
blocked
+       on the watermark advancing. ((stage_name, watermark), inputs), where
+       the watermark is the watermark at which the inputs should be scheduled,
+       and inputs are dictionaries mapping PCollection name to data buffers.
+  """
+  class KeyedQueue(object):
+    def __init__(self):
+      self._q = collections.deque()
+      self._keyed_elements = {}
+
+    def enque(
+        self,
+        elm  # type: Tuple[str, Dict[str, Union[_ListBuffer, _GroupingBuffer]]]
+    ):
+      # type: (...) -> None
+      key = elm[0]
 
 Review comment:
   `key, incoming_inputs = elm`
 
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 376020)

> Add a watermark manager for the fn_api_runner
> ---------------------------------------------
>
>                 Key: BEAM-7516
>                 URL: https://issues.apache.org/jira/browse/BEAM-7516
>             Project: Beam
>          Issue Type: Sub-task
>          Components: sdk-py-core
>            Reporter: Pablo Estrada
>            Assignee: Pablo Estrada
>            Priority: Major
>          Time Spent: 4h 50m
>  Remaining Estimate: 0h
>
> To track watermarks for each stage



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to