westonpace commented on a change in pull request #9533:
URL: https://github.com/apache/arrow/pull/9533#discussion_r581463629



##########
File path: cpp/src/arrow/util/async_generator.h
##########
@@ -177,6 +179,90 @@ class TransformingGenerator {
   std::shared_ptr<TransformingGeneratorState> state_;
 };
 
+template <typename T>
+class SerialReadaheadGenerator {
+ public:
+  SerialReadaheadGenerator(AsyncGenerator<T> source_generator, int 
max_readahead)
+      : state_(std::make_shared<State>(std::move(source_generator), 
max_readahead)) {}
+
+  Future<T> operator()() {
+    if (state_->first) {
+      // Lazy generator, need to wait for the first ask to prime the pump
+      state_->first = false;
+      auto next = state_->source();
+      return next.Then(Callback{state_});
+    } else {
+      // This generator is not async-reentrant.  We won't be called until the 
last
+      // future finished so we know there is something in the queue
+      auto finished = state_->finished.load();
+      if (finished && state_->readahead_queue.isEmpty()) {
+        return Future<T>::MakeFinished(IterationTraits<T>::End());
+      }
+      auto next_ptr = state_->readahead_queue.frontPtr();
+      DCHECK(next_ptr != NULLPTR);

Review comment:
       The first one would have been checked anyway, since shared_ptr should 
abort on nullptr.  The second one I converted into a proper Status check.

##########
File path: cpp/src/arrow/util/async_generator.h
##########
@@ -177,6 +179,90 @@ class TransformingGenerator {
   std::shared_ptr<TransformingGeneratorState> state_;
 };
 
+template <typename T>
+class SerialReadaheadGenerator {
+ public:
+  SerialReadaheadGenerator(AsyncGenerator<T> source_generator, int 
max_readahead)
+      : state_(std::make_shared<State>(std::move(source_generator), 
max_readahead)) {}
+
+  Future<T> operator()() {
+    if (state_->first) {
+      // Lazy generator, need to wait for the first ask to prime the pump
+      state_->first = false;
+      auto next = state_->source();
+      return next.Then(Callback{state_});
+    } else {
+      // This generator is not async-reentrant.  We won't be called until the 
last
+      // future finished so we know there is something in the queue
+      auto finished = state_->finished.load();
+      if (finished && state_->readahead_queue.isEmpty()) {
+        return Future<T>::MakeFinished(IterationTraits<T>::End());
+      }
+      auto next_ptr = state_->readahead_queue.frontPtr();
+      DCHECK(next_ptr != NULLPTR);
+      auto next = std::move(**next_ptr);
+      state_->readahead_queue.popFront();
+      auto last_available = state_->spaces_available.fetch_add(1);
+      if (last_available == 0 && !finished) {
+        // Reader idled out, we need to restart it
+        state_->Pump(state_);
+      }
+      return next;
+    }
+  }
+
+ private:
+  struct State {
+    State(AsyncGenerator<T> source_, int max_readahead)
+        : first(true),
+          source(std::move(source_)),
+          finished(false),
+          spaces_available(max_readahead),
+          readahead_queue(max_readahead) {}
+
+    void Pump(std::shared_ptr<State>& self) {
+      // Can't do readahead_queue.write(source().Then(Callback{self})) because 
then the
+      // callback might run immediately and add itself to the queue before 
this gets added
+      // to the queue messing up the order
+      auto next_slot = std::make_shared<Future<T>>();
+      auto written = readahead_queue.write(next_slot);
+      DCHECK(written);
+      *next_slot = source().Then(Callback{self});
+    }
+
+    // Only accessed by the consumer end
+    bool first;
+    // Accessed by both threads
+    AsyncGenerator<T> source;
+    std::atomic<bool> finished;
+    std::atomic<uint32_t> spaces_available;
+    util::SpscQueue<std::shared_ptr<Future<T>>> readahead_queue;
+  };
+
+  struct Callback {
+    Result<T> operator()(const Result<T>& next_result) {

Review comment:
       Done




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Reply via email to