lidavidm commented on a change in pull request #11793:
URL: https://github.com/apache/arrow/pull/11793#discussion_r761062860



##########
File path: cpp/src/arrow/compute/kernels/scalar_compare.cc
##########
@@ -439,6 +472,330 @@ struct ScalarMinMax {
   }
 };
 
+template <typename Type, typename Op>
+struct BinaryScalarMinMax {
+  using ArrayType = typename TypeTraits<Type>::ArrayType;
+  using BuilderType = typename TypeTraits<Type>::BuilderType;
+  using offset_type = typename Type::offset_type;
+
+  static Status Exec(KernelContext* ctx, const ExecBatch& batch, Datum* out) {
+    const ElementWiseAggregateOptions& options = MinMaxState::Get(ctx);
+    if (std::all_of(batch.values.begin(), batch.values.end(),
+                    [](const Datum& d) { return d.is_scalar(); })) {
+      return ExecOnlyScalar(ctx, options, batch, out);
+    }
+    return ExecContainingArrays(ctx, options, batch, out);
+  }
+
+  static Status ExecOnlyScalar(KernelContext* ctx,
+                               const ElementWiseAggregateOptions& options,
+                               const ExecBatch& batch, Datum* out) {
+    if (batch.values.empty()) {
+      return Status::OK();
+    }
+    auto output = checked_cast<BaseBinaryScalar*>(out->scalar().get());
+    if (!options.skip_nulls) {
+      // any nulls in the input will produce a null output
+      for (const auto& value : batch.values) {
+        if (!value.scalar()->is_valid) {
+          output->is_valid = false;
+          return Status::OK();
+        }
+      }
+    }
+    const auto& first_scalar = *batch.values.front().scalar();
+    string_view result = UnboxScalar<Type>::Unbox(first_scalar);
+    bool valid = first_scalar.is_valid;
+    for (size_t i = 1; i < batch.values.size(); i++) {
+      const auto& scalar = *batch[i].scalar();
+      if (!scalar.is_valid) {
+        DCHECK(options.skip_nulls);
+        continue;
+      } else {
+        string_view value = UnboxScalar<Type>::Unbox(scalar);
+        result = !valid ? value : Op::Call(result, value);
+        valid = true;
+      }
+    }
+    if (valid) {
+      ARROW_ASSIGN_OR_RAISE(output->value, ctx->Allocate(result.size()));
+      std::copy(result.begin(), result.end(), output->value->mutable_data());
+      output->is_valid = true;
+    } else {
+      output->is_valid = false;
+    }
+    return Status::OK();
+  }
+
+  static Status ExecContainingArrays(KernelContext* ctx,
+                                     const ElementWiseAggregateOptions& options,
+                                     const ExecBatch& batch, Datum* out) {
+    // Presize data to avoid reallocations, using an upper bound estimation of final size.
+    int64_t estimated_final_size = 0;
+    for (int64_t i = 0; i < batch.length; i++) {
+      auto size = CalculateRowSizeUpperBound(options, batch, i);

Review comment:
       Agreed, something like `max(max(len(array inputs)), batch.length * max(len(scalar inputs)))` should suffice.
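
       A rough sketch of how that estimate could be computed, assuming the surrounding template context (`Type`, `ArrayType`, `UnboxScalar`) and a hypothetical helper name; this is illustrative, not the final implementation:

```cpp
// Hypothetical helper: upper bound on the total output data size as
// max(largest total values length among array inputs,
//     batch.length * largest value length among valid scalar inputs).
static int64_t EstimateDataUpperBound(const ExecBatch& batch) {
  int64_t max_array_data = 0;   // largest total data length of any array input
  int64_t max_scalar_len = 0;   // largest value length of any valid scalar input
  for (const Datum& value : batch.values) {
    if (value.is_array()) {
      ArrayType array(value.array());
      max_array_data =
          std::max<int64_t>(max_array_data, array.total_values_length());
    } else if (value.scalar()->is_valid) {
      auto view = UnboxScalar<Type>::Unbox(*value.scalar());
      max_scalar_len =
          std::max<int64_t>(max_scalar_len, static_cast<int64_t>(view.size()));
    }
  }
  return std::max(max_array_data, batch.length * max_scalar_len);
}
```

       The builder could then reserve that many data bytes up front instead of estimating row by row.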



