This is an automated email from the ASF dual-hosted git repository.
westonpace pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow.git
The following commit(s) were added to refs/heads/master by this push:
new 3be5e11687 GH-34007: [C++] Add an array_span_mutable interface to ExecResult (#34008)
3be5e11687 is described below
commit 3be5e11687ea874b7293d17227943bd8244e8789
Author: Jin Shang <[email protected]>
AuthorDate: Sat Feb 4 03:00:05 2023 +0800
GH-34007: [C++] Add an array_span_mutable interface to ExecResult (#34008)
### Rationale for this change
As with Buffer, we split the interface for getting the underlying `ArraySpan` out of an
`ExecResult` into two methods, one const and one mutable. This avoids the awkward
`const_cast` we currently use.
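In sketch form, the split looks like this (a condensed illustration of the `ExecResult` change in `cpp/src/arrow/compute/exec.h`; `ArraySpan` and `ArrayData` are reduced to stand-in structs so the snippet is self-contained):
```cpp
#include <cstdint>
#include <memory>
#include <variant>

struct ArraySpan {  // stand-in for arrow::ArraySpan
  int64_t length = 0;
};
struct ArrayData {};  // stand-in for arrow::ArrayData

struct ExecResult {
  std::variant<ArraySpan, std::shared_ptr<ArrayData>> value;

  // Before: a single const accessor handed out a writable pointer by
  // const_cast'ing away its own constness:
  //   ArraySpan* array_span() const {
  //     return const_cast<ArraySpan*>(&std::get<ArraySpan>(this->value));
  //   }

  // After: a genuinely const accessor for readers...
  const ArraySpan* array_span() const { return &std::get<ArraySpan>(this->value); }
  // ...and an explicitly mutable one for kernels that fill output buffers.
  ArraySpan* array_span_mutable() { return &std::get<ArraySpan>(this->value); }

  bool is_array_span() const { return this->value.index() == 0; }
};
```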
### What changes are included in this PR?
Add the new mutable interface and update existing call sites to use it.
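For instance, a kernel body written against the new interface looks like the following (condensed from the `udf_example.cc` change below; `AddColumns` is a hypothetical name and the exact headers may vary by Arrow version):
```cpp
#include <arrow/compute/api.h>
#include <arrow/compute/exec.h>

namespace cp = arrow::compute;

// Sketch of an updated call site: sum two int64 columns into the
// preallocated output span.
arrow::Status AddColumns(cp::KernelContext* ctx, const cp::ExecSpan& batch,
                         cp::ExecResult* out) {
  const int64_t* x = batch[0].array.GetValues<int64_t>(1);
  const int64_t* y = batch[1].array.GetValues<int64_t>(1);
  // Previously: out->array_span()->GetValues<int64_t>(1), backed by a
  // hidden const_cast inside array_span().
  int64_t* out_values = out->array_span_mutable()->GetValues<int64_t>(1);
  for (int64_t i = 0; i < batch.length; ++i) {
    *out_values++ = *x++ + *y++;
  }
  return arrow::Status::OK();
}
```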
### Are these changes tested?
Tested with existing tests.
### Are there any user-facing changes?
No.
* Closes: #34007
Authored-by: Jin Shang <[email protected]>
Signed-off-by: Weston Pace <[email protected]>
---
cpp/examples/arrow/udf_example.cc | 2 +-
cpp/src/arrow/compute/exec.cc | 4 +-
cpp/src/arrow/compute/exec.h | 6 +--
cpp/src/arrow/compute/exec_test.cc | 8 ++--
cpp/src/arrow/compute/function_benchmark.cc | 4 +-
cpp/src/arrow/compute/kernels/codegen_internal.h | 18 ++++-----
cpp/src/arrow/compute/kernels/scalar_boolean.cc | 30 +++++++--------
.../arrow/compute/kernels/scalar_cast_numeric.cc | 8 ++--
.../arrow/compute/kernels/scalar_cast_temporal.cc | 12 +++---
cpp/src/arrow/compute/kernels/scalar_compare.cc | 2 +-
cpp/src/arrow/compute/kernels/scalar_if_else.cc | 44 +++++++++++-----------
cpp/src/arrow/compute/kernels/scalar_nested.cc | 4 +-
cpp/src/arrow/compute/kernels/scalar_random.cc | 2 +-
cpp/src/arrow/compute/kernels/scalar_set_lookup.cc | 4 +-
.../arrow/compute/kernels/scalar_string_ascii.cc | 4 +-
.../arrow/compute/kernels/scalar_string_internal.h | 2 +-
.../arrow/compute/kernels/scalar_temporal_unary.cc | 2 +-
cpp/src/arrow/compute/kernels/scalar_validity.cc | 8 ++--
18 files changed, 82 insertions(+), 82 deletions(-)
diff --git a/cpp/examples/arrow/udf_example.cc b/cpp/examples/arrow/udf_example.cc
index 573b5ccc78..bcb70973d2 100644
--- a/cpp/examples/arrow/udf_example.cc
+++ b/cpp/examples/arrow/udf_example.cc
@@ -56,7 +56,7 @@ arrow::Status SampleFunction(cp::KernelContext* ctx, const cp::ExecSpan& batch,
const int64_t* x = batch[0].array.GetValues<int64_t>(1);
const int64_t* y = batch[1].array.GetValues<int64_t>(1);
const int64_t* z = batch[2].array.GetValues<int64_t>(1);
- int64_t* out_values = out->array_span()->GetValues<int64_t>(1);
+ int64_t* out_values = out->array_span_mutable()->GetValues<int64_t>(1);
for (int64_t i = 0; i < batch.length; ++i) {
*out_values++ = *x++ + *y++ + *z++;
}
diff --git a/cpp/src/arrow/compute/exec.cc b/cpp/src/arrow/compute/exec.cc
index ad915e09c3..15f8b263ed 100644
--- a/cpp/src/arrow/compute/exec.cc
+++ b/cpp/src/arrow/compute/exec.cc
@@ -831,7 +831,7 @@ class ScalarExecutor : public KernelExecutorImpl<ScalarKernel> {
std::shared_ptr<ArrayData> preallocation;
ExecSpan input;
ExecResult output;
- ArraySpan* output_span = output.array_span();
+ ArraySpan* output_span = output.array_span_mutable();
if (preallocate_contiguous_) {
// Make one big output allocation
@@ -866,7 +866,7 @@ class ScalarExecutor : public KernelExecutorImpl<ScalarKernel> {
}
Status ExecuteSingleSpan(const ExecSpan& input, ExecResult* out) {
- ArraySpan* result_span = out->array_span();
+ ArraySpan* result_span = out->array_span_mutable();
if (output_type_.type->id() == Type::NA) {
result_span->null_count = result_span->length;
} else if (kernel_->null_handling == NullHandling::INTERSECTION) {
diff --git a/cpp/src/arrow/compute/exec.h b/cpp/src/arrow/compute/exec.h
index 487b8d120a..c3ccdd4f87 100644
--- a/cpp/src/arrow/compute/exec.h
+++ b/cpp/src/arrow/compute/exec.h
@@ -324,9 +324,9 @@ struct ARROW_EXPORT ExecResult {
}
}
- ArraySpan* array_span() const {
- return const_cast<ArraySpan*>(&std::get<ArraySpan>(this->value));
- }
+ const ArraySpan* array_span() const { return &std::get<ArraySpan>(this->value); }
+ ArraySpan* array_span_mutable() { return &std::get<ArraySpan>(this->value); }
+
bool is_array_span() const { return this->value.index() == 0; }
const std::shared_ptr<ArrayData>& array_data() const {
diff --git a/cpp/src/arrow/compute/exec_test.cc b/cpp/src/arrow/compute/exec_test.cc
index cab9bd6a1d..defc848c5e 100644
--- a/cpp/src/arrow/compute/exec_test.cc
+++ b/cpp/src/arrow/compute/exec_test.cc
@@ -874,7 +874,7 @@ Status ExecCopyArraySpan(KernelContext*, const ExecSpan& batch, ExecResult* out)
DCHECK_EQ(1, batch.num_values());
int value_size = batch[0].type()->byte_width();
const ArraySpan& arg0 = batch[0].array;
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
uint8_t* dst = out_arr->buffers[1].data + out_arr->offset * value_size;
const uint8_t* src = arg0.buffers[1].data + arg0.offset * value_size;
std::memcpy(dst, src, batch.length * value_size);
@@ -885,7 +885,7 @@ Status ExecComputedBitmap(KernelContext* ctx, const ExecSpan& batch, ExecResult*
// Propagate nulls not used. Check that the out bitmap isn't the same already
// as the input bitmap
const ArraySpan& arg0 = batch[0].array;
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
if (CountSetBits(arg0.buffers[0].data, arg0.offset, batch.length) > 0) {
// Check that the bitmap has not been already copied over
DCHECK(!BitmapEquals(arg0.buffers[0].data, arg0.offset, out_arr->buffers[0].data,
@@ -968,7 +968,7 @@ Status ExecStateful(KernelContext* ctx, const ExecSpan& batch, ExecResult* out)
int32_t multiplier = checked_cast<const Int32Scalar&>(*state->value).value;
const ArraySpan& arg0 = batch[0].array;
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
const int32_t* arg0_data = arg0.GetValues<int32_t>(1);
int32_t* dst = out_arr->GetValues<int32_t>(1);
for (int64_t i = 0; i < arg0.length; ++i) {
@@ -980,7 +980,7 @@ Status ExecStateful(KernelContext* ctx, const ExecSpan& batch, ExecResult* out)
Status ExecAddInt32(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
const int32_t* left_data = batch[0].array.GetValues<int32_t>(1);
const int32_t* right_data = batch[1].array.GetValues<int32_t>(1);
- int32_t* out_data = out->array_span()->GetValues<int32_t>(1);
+ int32_t* out_data = out->array_span_mutable()->GetValues<int32_t>(1);
for (int64_t i = 0; i < batch.length; ++i) {
*out_data++ = *left_data++ + *right_data++;
}
diff --git a/cpp/src/arrow/compute/function_benchmark.cc b/cpp/src/arrow/compute/function_benchmark.cc
index c7850b841c..3dfd590c7e 100644
--- a/cpp/src/arrow/compute/function_benchmark.cc
+++ b/cpp/src/arrow/compute/function_benchmark.cc
@@ -89,7 +89,7 @@ void BM_CastDispatchBaseline(benchmark::State& state) {
ExecResult result;
ASSERT_OK_AND_ASSIGN(std::shared_ptr<Array> result_space,
MakeArrayOfNull(double_type, 1));
- result.array_span()->SetMembers(*result_space->data());
+ result.array_span_mutable()->SetMembers(*result_space->data());
for (auto _ : state) {
ABORT_NOT_OK(exec(&kernel_context, input, &result));
}
@@ -163,7 +163,7 @@ void BM_ExecuteScalarKernelOnScalar(benchmark::State& state) {
ExecResult output;
ASSERT_OK_AND_ASSIGN(std::shared_ptr<Array> output_arr, MakeArrayOfNull(int64(), 1));
- output.array_span()->SetMembers(*output_arr->data());
+ output.array_span_mutable()->SetMembers(*output_arr->data());
const int64_t N = 10000;
for (auto _ : state) {
diff --git a/cpp/src/arrow/compute/kernels/codegen_internal.h b/cpp/src/arrow/compute/kernels/codegen_internal.h
index dd40b7ae2b..b05a47b600 100644
--- a/cpp/src/arrow/compute/kernels/codegen_internal.h
+++ b/cpp/src/arrow/compute/kernels/codegen_internal.h
@@ -582,7 +582,7 @@ struct ScalarUnary {
Status st = Status::OK();
ArrayIterator<Arg0Type> arg0_it(arg0);
RETURN_NOT_OK(
- OutputAdapter<OutType>::Write(ctx, out->array_span(), [&]() -> OutValue {
+ OutputAdapter<OutType>::Write(ctx, out->array_span_mutable(), [&]() -> OutValue {
return Op::template Call<OutValue, Arg0Value>(ctx, arg0_it(), &st);
}));
return st;
@@ -617,7 +617,7 @@ struct ScalarUnaryNotNullStateful {
static Status Exec(const ThisType& functor, KernelContext* ctx, const ArraySpan& arg0,
ExecResult* out) {
Status st = Status::OK();
- auto out_data = out->array_span()->GetValues<OutValue>(1);
+ auto out_data = out->array_span_mutable()->GetValues<OutValue>(1);
VisitArrayValuesInline<Arg0Type>(
arg0,
[&](Arg0Value v) {
@@ -658,7 +658,7 @@ struct ScalarUnaryNotNullStateful {
static Status Exec(const ThisType& functor, KernelContext* ctx, const ArraySpan& arg0,
ExecResult* out) {
Status st = Status::OK();
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
FirstTimeBitmapWriter out_writer(out_arr->buffers[1].data, out_arr->offset,
out_arr->length);
VisitArrayValuesInline<Arg0Type>(
@@ -731,7 +731,7 @@ struct ScalarBinary {
ArrayIterator<Arg0Type> arg0_it(arg0);
ArrayIterator<Arg1Type> arg1_it(arg1);
RETURN_NOT_OK(
- OutputAdapter<OutType>::Write(ctx, out->array_span(), [&]() -> OutValue {
+ OutputAdapter<OutType>::Write(ctx, out->array_span_mutable(), [&]() -> OutValue {
return Op::template Call<OutValue, Arg0Value, Arg1Value>(ctx, arg0_it(),
arg1_it(), &st);
}));
@@ -744,7 +744,7 @@ struct ScalarBinary {
ArrayIterator<Arg0Type> arg0_it(arg0);
auto arg1_val = UnboxScalar<Arg1Type>::Unbox(arg1);
RETURN_NOT_OK(
- OutputAdapter<OutType>::Write(ctx, out->array_span(), [&]() -> OutValue {
+ OutputAdapter<OutType>::Write(ctx, out->array_span_mutable(), [&]() -> OutValue {
return Op::template Call<OutValue, Arg0Value, Arg1Value>(ctx, arg0_it(),
arg1_val, &st);
}));
@@ -757,7 +757,7 @@ struct ScalarBinary {
auto arg0_val = UnboxScalar<Arg0Type>::Unbox(arg0);
ArrayIterator<Arg1Type> arg1_it(arg1);
RETURN_NOT_OK(
- OutputAdapter<OutType>::Write(ctx, out->array_span(), [&]() -> OutValue {
+ OutputAdapter<OutType>::Write(ctx, out->array_span_mutable(), [&]() -> OutValue {
return Op::template Call<OutValue, Arg0Value, Arg1Value>(ctx, arg0_val,
arg1_it(), &st);
}));
@@ -799,7 +799,7 @@ struct ScalarBinaryNotNullStateful {
Status ArrayArray(KernelContext* ctx, const ArraySpan& arg0, const ArraySpan& arg1,
ExecResult* out) {
Status st = Status::OK();
- OutputArrayWriter<OutType> writer(out->array_span());
+ OutputArrayWriter<OutType> writer(out->array_span_mutable());
VisitTwoArrayValuesInline<Arg0Type, Arg1Type>(
arg0, arg1,
[&](Arg0Value u, Arg1Value v) {
@@ -812,7 +812,7 @@ struct ScalarBinaryNotNullStateful {
Status ArrayScalar(KernelContext* ctx, const ArraySpan& arg0, const Scalar& arg1,
ExecResult* out) {
Status st = Status::OK();
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
OutputArrayWriter<OutType> writer(out_span);
if (arg1.is_valid) {
const auto arg1_val = UnboxScalar<Arg1Type>::Unbox(arg1);
@@ -832,7 +832,7 @@ struct ScalarBinaryNotNullStateful {
Status ScalarArray(KernelContext* ctx, const Scalar& arg0, const ArraySpan& arg1,
ExecResult* out) {
Status st = Status::OK();
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
OutputArrayWriter<OutType> writer(out_span);
if (arg0.is_valid) {
const auto arg0_val = UnboxScalar<Arg0Type>::Unbox(arg0);
diff --git a/cpp/src/arrow/compute/kernels/scalar_boolean.cc b/cpp/src/arrow/compute/kernels/scalar_boolean.cc
index fb23106b6b..27002b9180 100644
--- a/cpp/src/arrow/compute/kernels/scalar_boolean.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_boolean.cc
@@ -96,7 +96,7 @@ inline Bitmap GetBitmap(const ArraySpan& arr, int index) {
}
Status InvertOpExec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
GetBitmap(*out_span, 1).CopyFromInverted(GetBitmap(batch[0].array, 1));
return Status::OK();
}
@@ -114,7 +114,7 @@ struct AndOp : Commutative<AndOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const Scalar& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (right.is_valid) {
checked_cast<const BooleanScalar&>(right).value
? GetBitmap(*out_span, 1).CopyFrom(GetBitmap(left, 1))
@@ -125,7 +125,7 @@ struct AndOp : Commutative<AndOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
::arrow::internal::BitmapAnd(left.buffers[1].data, left.offset, right.buffers[1].data,
right.offset, right.length, out_span->offset,
out_span->buffers[1].data);
@@ -138,7 +138,7 @@ struct KleeneAndOp : Commutative<KleeneAndOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const Scalar& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
bool right_true = right.is_valid && checked_cast<const BooleanScalar&>(right).value;
bool right_false = right.is_valid && !checked_cast<const BooleanScalar&>(right).value;
@@ -176,7 +176,7 @@ struct KleeneAndOp : Commutative<KleeneAndOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (left.GetNullCount() == 0 && right.GetNullCount() == 0) {
GetBitmap(*out_span, 0).SetBitsTo(true);
out_span->null_count = 0;
@@ -199,7 +199,7 @@ struct OrOp : Commutative<OrOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const Scalar& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (right.is_valid) {
checked_cast<const BooleanScalar&>(right).value
? GetBitmap(*out_span, 1).SetBitsTo(true)
@@ -210,7 +210,7 @@ struct OrOp : Commutative<OrOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
::arrow::internal::BitmapOr(left.buffers[1].data, left.offset, right.buffers[1].data,
right.offset, right.length, out_span->offset,
out_span->buffers[1].data);
@@ -223,7 +223,7 @@ struct KleeneOrOp : Commutative<KleeneOrOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const Scalar& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
bool right_true = right.is_valid && checked_cast<const BooleanScalar&>(right).value;
bool right_false = right.is_valid && !checked_cast<const BooleanScalar&>(right).value;
@@ -261,7 +261,7 @@ struct KleeneOrOp : Commutative<KleeneOrOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (left.GetNullCount() == 0 && right.GetNullCount() == 0) {
out_span->null_count = 0;
GetBitmap(*out_span, 0).SetBitsTo(true);
@@ -285,7 +285,7 @@ struct XorOp : Commutative<XorOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const Scalar& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (right.is_valid) {
checked_cast<const BooleanScalar&>(right).value
? GetBitmap(*out_span, 1).CopyFromInverted(GetBitmap(left, 1))
@@ -296,7 +296,7 @@ struct XorOp : Commutative<XorOp> {
static Status Call(KernelContext* ctx, const ArraySpan& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
::arrow::internal::BitmapXor(left.buffers[1].data, left.offset, right.buffers[1].data,
right.offset, right.length, out_span->offset,
out_span->buffers[1].data);
@@ -307,7 +307,7 @@ struct XorOp : Commutative<XorOp> {
struct AndNotOp {
static Status Call(KernelContext* ctx, const Scalar& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (left.is_valid) {
checked_cast<const BooleanScalar&>(left).value
? GetBitmap(*out_span, 1).CopyFromInverted(GetBitmap(right, 1))
@@ -323,7 +323,7 @@ struct AndNotOp {
static Status Call(KernelContext* ctx, const ArraySpan& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
::arrow::internal::BitmapAndNot(left.buffers[1].data, left.offset,
right.buffers[1].data, right.offset, right.length,
out_span->offset, out_span->buffers[1].data);
@@ -334,7 +334,7 @@ struct AndNotOp {
struct KleeneAndNotOp {
static Status Call(KernelContext* ctx, const Scalar& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
bool left_true = left.is_valid && checked_cast<const BooleanScalar&>(left).value;
bool left_false = left.is_valid && !checked_cast<const BooleanScalar&>(left).value;
@@ -377,7 +377,7 @@ struct KleeneAndNotOp {
static Status Call(KernelContext* ctx, const ArraySpan& left, const ArraySpan& right,
ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (left.GetNullCount() == 0 && right.GetNullCount() == 0) {
GetBitmap(*out_span, 0).SetBitsTo(true);
out_span->null_count = 0;
diff --git a/cpp/src/arrow/compute/kernels/scalar_cast_numeric.cc b/cpp/src/arrow/compute/kernels/scalar_cast_numeric.cc
index 00c7cacf9c..4334a39c06 100644
--- a/cpp/src/arrow/compute/kernels/scalar_cast_numeric.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_cast_numeric.cc
@@ -44,13 +44,13 @@ Status CastIntegerToInteger(KernelContext* ctx, const ExecSpan& batch, ExecResul
RETURN_NOT_OK(IntegersCanFit(batch[0].array, *out->type()));
}
CastNumberToNumberUnsafe(batch[0].type()->id(), out->type()->id(), batch[0].array,
- out->array_span());
+ out->array_span_mutable());
return Status::OK();
}
Status CastFloatingToFloating(KernelContext*, const ExecSpan& batch, ExecResult* out) {
CastNumberToNumberUnsafe(batch[0].type()->id(), out->type()->id(), batch[0].array,
- out->array_span());
+ out->array_span_mutable());
return Status::OK();
}
@@ -161,7 +161,7 @@ Status CheckFloatToIntTruncation(const ExecValue& input, const ExecResult& outpu
Status CastFloatingToInteger(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
const auto& options = checked_cast<const CastState*>(ctx->state())->options;
CastNumberToNumberUnsafe(batch[0].type()->id(), out->type()->id(), batch[0].array,
- out->array_span());
+ out->array_span_mutable());
if (!options.allow_float_truncate) {
RETURN_NOT_OK(CheckFloatToIntTruncation(batch[0], *out));
}
@@ -245,7 +245,7 @@ Status CastIntegerToFloating(KernelContext* ctx, const ExecSpan& batch, ExecResu
RETURN_NOT_OK(CheckForIntegerToFloatingTruncation(batch[0], out_type));
}
CastNumberToNumberUnsafe(batch[0].type()->id(), out_type, batch[0].array,
- out->array_span());
+ out->array_span_mutable());
return Status::OK();
}
diff --git a/cpp/src/arrow/compute/kernels/scalar_cast_temporal.cc b/cpp/src/arrow/compute/kernels/scalar_cast_temporal.cc
index 6b25d66da1..845fad3004 100644
--- a/cpp/src/arrow/compute/kernels/scalar_cast_temporal.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_cast_temporal.cc
@@ -148,7 +148,7 @@ struct CastFunctor<
(is_duration_type<O>::value && is_duration_type<I>::value)>> {
static Status Exec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
const ArraySpan& input = batch[0].array;
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
const auto& in_type = checked_cast<const I&>(*batch[0].type());
const auto& out_type = checked_cast<const O&>(*output->type);
@@ -339,7 +339,7 @@ struct CastFunctor<O, I, enable_if_t<is_time_type<I>::value && is_time_type<O>::
static Status Exec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
const ArraySpan& input = batch[0].array;
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
// If units are the same, zero copy, otherwise convert
const auto& in_type = checked_cast<const I&>(*input.type);
@@ -358,7 +358,7 @@ template <>
struct CastFunctor<Date64Type, Date32Type> {
static Status Exec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
return ShiftTime<int32_t, int64_t>(ctx, util::MULTIPLY, kMillisecondsInDay,
- batch[0].array, out->array_span());
+ batch[0].array, out->array_span_mutable());
}
};
@@ -366,7 +366,7 @@ template <>
struct CastFunctor<Date32Type, Date64Type> {
static Status Exec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
return ShiftTime<int64_t, int32_t>(ctx, util::DIVIDE, kMillisecondsInDay,
- batch[0].array, out->array_span());
+ batch[0].array, out->array_span_mutable());
}
};
@@ -384,7 +384,7 @@ struct CastFunctor<TimestampType, Date32Type> {
// multiply to achieve days -> unit
conversion.second *= kMillisecondsInDay / 1000;
return ShiftTime<int32_t, int64_t>(ctx, util::MULTIPLY, conversion.second,
- batch[0].array, out->array_span());
+ batch[0].array, out->array_span_mutable());
}
};
@@ -396,7 +396,7 @@ struct CastFunctor<TimestampType, Date64Type> {
// date64 is ms since epoch
auto conversion = util::GetTimestampConversion(TimeUnit::MILLI, out_type.unit());
return ShiftTime<int64_t, int64_t>(ctx, conversion.first, conversion.second,
- batch[0].array, out->array_span());
+ batch[0].array, out->array_span_mutable());
}
};
diff --git a/cpp/src/arrow/compute/kernels/scalar_compare.cc b/cpp/src/arrow/compute/kernels/scalar_compare.cc
index 798123cdc8..6ff8981588 100644
--- a/cpp/src/arrow/compute/kernels/scalar_compare.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_compare.cc
@@ -261,7 +261,7 @@ struct CompareKernel {
DCHECK(kernel);
const auto kernel_data = checked_cast<const CompareData*>(kernel->data.get());
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
// TODO: implement path for offset not multiple of 8
const bool out_is_byte_aligned = out_arr->offset % 8 == 0;
diff --git a/cpp/src/arrow/compute/kernels/scalar_if_else.cc b/cpp/src/arrow/compute/kernels/scalar_if_else.cc
index 3603ad63fc..0dd176b5d4 100644
--- a/cpp/src/arrow/compute/kernels/scalar_if_else.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_if_else.cc
@@ -165,7 +165,7 @@ struct IfElseNullPromoter {
}
Status ExecIntoArraySpan() {
- ArraySpan* out_span = output->array_span();
+ ArraySpan* out_span = output->array_span_mutable();
// cond.valid & (cond.data & left.valid | ~cond.data & right.valid)
// In the following cases, we dont need to allocate out_valid bitmap
@@ -375,7 +375,7 @@ Status RunIfElseScalar(const BooleanScalar& cond, const ExecValue& left,
const CopyArrayData& copy_array_data,
const BroadcastScalar& broadcast_scalar) {
// either left or right is an array. Output is always an array`
- ArraySpan* out_array = out->array_span();
+ ArraySpan* out_array = out->array_span_mutable();
if (!cond.is_valid) {
// cond is null; output is all null --> clear validity buffer
bit_util::ClearBitmap(out_array->buffers[0].data, out_array->offset,
@@ -450,7 +450,7 @@ struct IfElseFunctor<Type,
// AAA
static Status Call(KernelContext* ctx, const ArraySpan& cond, const ArraySpan& left,
const ArraySpan& right, ExecResult* out) {
- T* out_values = out->array_span()->GetValues<T>(1);
+ T* out_values = out->array_span_mutable()->GetValues<T>(1);
// copy right data to out_buff
std::memcpy(out_values, right.GetValues<T>(1), right.length * sizeof(T));
@@ -468,7 +468,7 @@ struct IfElseFunctor<Type,
// ASA
static Status Call(KernelContext* ctx, const ArraySpan& cond, const Scalar& left,
const ArraySpan& right, ExecResult* out) {
- T* out_values = out->array_span()->GetValues<T>(1);
+ T* out_values = out->array_span_mutable()->GetValues<T>(1);
// copy right data to out_buff
std::memcpy(out_values, right.GetValues<T>(1), right.length * sizeof(T));
@@ -491,7 +491,7 @@ struct IfElseFunctor<Type,
// AAS
static Status Call(KernelContext* ctx, const ArraySpan& cond, const ArraySpan& left,
const Scalar& right, ExecResult* out) {
- T* out_values = out->array_span()->GetValues<T>(1);
+ T* out_values = out->array_span_mutable()->GetValues<T>(1);
// copy left data to out_buff
const T* left_data = left.GetValues<T>(1);
@@ -514,7 +514,7 @@ struct IfElseFunctor<Type,
// ASS
static Status Call(KernelContext* ctx, const ArraySpan& cond, const Scalar& left,
const Scalar& right, ExecResult* out) {
- T* out_values = out->array_span()->GetValues<T>(1);
+ T* out_values = out->array_span_mutable()->GetValues<T>(1);
// copy right data to out_buff
T right_data = internal::UnboxScalar<Type>::Unbox(right);
@@ -557,7 +557,7 @@ struct IfElseFunctor<Type, enable_if_boolean<Type>> {
// AAA
static Status Call(KernelContext* ctx, const ArraySpan& cond, const ArraySpan& left,
const ArraySpan& right, ExecResult* out) {
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
// out_buff = right & ~cond
arrow::internal::BitmapAndNot(right.buffers[1].data, right.offset,
cond.buffers[1].data, cond.offset, cond.length,
@@ -578,7 +578,7 @@ struct IfElseFunctor<Type, enable_if_boolean<Type>> {
// ASA
static Status Call(KernelContext* ctx, const ArraySpan& cond, const Scalar& left,
const ArraySpan& right, ExecResult* out) {
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
// out_buff = right & ~cond
arrow::internal::BitmapAndNot(right.buffers[1].data, right.offset,
@@ -599,7 +599,7 @@ struct IfElseFunctor<Type, enable_if_boolean<Type>> {
// AAS
static Status Call(KernelContext* ctx, const ArraySpan& cond, const ArraySpan& left,
const Scalar& right, ExecResult* out) {
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
// out_buff = left & cond
arrow::internal::BitmapAnd(left.buffers[1].data, left.offset, cond.buffers[1].data,
@@ -621,7 +621,7 @@ struct IfElseFunctor<Type, enable_if_boolean<Type>> {
// ASS
static Status Call(KernelContext* ctx, const ArraySpan& cond, const Scalar& left,
const Scalar& right, ExecResult* out) {
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
bool left_data = internal::UnboxScalar<BooleanType>::Unbox(left);
bool right_data = internal::UnboxScalar<BooleanType>::Unbox(right);
@@ -889,7 +889,7 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {
// AAA
static Status Call(KernelContext* ctx, const ArraySpan& cond, const ArraySpan& left,
const ArraySpan& right, ExecResult* out) {
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
ARROW_ASSIGN_OR_RAISE(auto byte_width, GetByteWidth(*left.type, *right.type));
auto* out_values = out_arr->buffers[1].data + out_arr->offset * byte_width;
@@ -912,7 +912,7 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {
// ASA
static Status Call(KernelContext* ctx, const ArraySpan& cond, const Scalar& left,
const ArraySpan& right, ExecResult* out) {
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
ARROW_ASSIGN_OR_RAISE(auto byte_width, GetByteWidth(*left.type, *right.type));
auto* out_values = out_arr->buffers[1].data + out_arr->offset * byte_width;
@@ -938,7 +938,7 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {
// AAS
static Status Call(KernelContext* ctx, const ArraySpan& cond, const ArraySpan& left,
const Scalar& right, ExecResult* out) {
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
ARROW_ASSIGN_OR_RAISE(auto byte_width, GetByteWidth(*left.type, *right.type));
auto* out_values = out_arr->buffers[1].data + out_arr->offset * byte_width;
@@ -964,7 +964,7 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {
// ASS
static Status Call(KernelContext* ctx, const ArraySpan& cond, const Scalar& left,
const Scalar& right, ExecResult* out) {
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
ARROW_ASSIGN_OR_RAISE(auto byte_width, GetByteWidth(*left.type, *right.type));
auto* out_values = out_arr->buffers[1].data + out_arr->offset * byte_width;
@@ -1498,7 +1498,7 @@ Status ExecScalarCaseWhen(KernelContext* ctx, const ExecSpan& batch, ExecResult*
output->GetMutableValues<uint8_t>(1, 0), output->offset);
} else {
// ArraySpan
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
if (is_dictionary_type<Type>::value) {
const ExecValue& dict_from = has_result ? result : batch[1];
output->child_data.resize(1);
@@ -1527,7 +1527,7 @@ Status ExecArrayCaseWhen(KernelContext* ctx, const ExecSpan& batch, ExecResult*
"cond struct must not be a null scalar or "
"have top-level nulls");
}
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
const int64_t out_offset = output->offset;
const auto num_value_args = batch.values.size() - 1;
const bool have_else_arg =
@@ -2028,7 +2028,7 @@ void InitializeNullSlots(const DataType& type, uint8_t* out_valid, uint8_t* out_
// Implement 'coalesce' for any mix of scalar/array arguments for any fixed-width type
template <typename Type>
Status ExecArrayCoalesce(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
const int64_t out_offset = output->offset;
// Use output validity buffer as mask to decide what values to copy
uint8_t* out_valid = output->buffers[0].data;
@@ -2108,7 +2108,7 @@ Status ExecArrayCoalesce(KernelContext* ctx, const ExecSpan& batch, ExecResult*
template <typename Type>
Status ExecArrayScalarCoalesce(KernelContext* ctx, const ExecValue& left,
const ExecValue& right, int64_t length, ExecResult* out) {
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
const int64_t out_offset = output->offset;
uint8_t* out_valid = output->buffers[0].data;
uint8_t* out_values = output->buffers[1].data;
@@ -2174,7 +2174,7 @@ Status ExecArrayScalarCoalesce(KernelContext* ctx, const ExecValue& left,
template <typename Type>
Status ExecBinaryCoalesce(KernelContext* ctx, const ExecValue& left,
const ExecValue& right, int64_t length, ExecResult* out) {
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
const int64_t out_offset = output->offset;
uint8_t* out_valid = output->buffers[0].data;
uint8_t* out_values = output->buffers[1].data;
@@ -2480,7 +2480,7 @@ Status ExecScalarChoose(KernelContext* ctx, const ExecSpan& batch, ExecResult* o
// TODO(wesm): more graceful implementation than using
// MakeNullScalar, which is a little bit lazy
std::shared_ptr<Scalar> source = MakeNullScalar(out->type()->GetSharedPtr());
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
ExecValue copy_source;
copy_source.SetScalar(source.get());
CopyValues<Type>(copy_source, /*row=*/0, batch.length,
@@ -2495,7 +2495,7 @@ Status ExecScalarChoose(KernelContext* ctx, const ExecSpan& batch, ExecResult* o
return Status::IndexError("choose: index ", index, " out of range");
}
auto source = batch[index + 1];
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
CopyValues<Type>(source, /*row=*/0, batch.length,
output->GetValues<uint8_t>(0, /*absolute_offset=*/0),
output->GetValues<uint8_t>(1, /*absolute_offset=*/0),
output->offset);
@@ -2504,7 +2504,7 @@ Status ExecScalarChoose(KernelContext* ctx, const ExecSpan& batch, ExecResult* o
template <typename Type>
Status ExecArrayChoose(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
- ArraySpan* output = out->array_span();
+ ArraySpan* output = out->array_span_mutable();
const int64_t out_offset = output->offset;
// Need a null bitmap if any input has nulls
uint8_t* out_valid = nullptr;
diff --git a/cpp/src/arrow/compute/kernels/scalar_nested.cc b/cpp/src/arrow/compute/kernels/scalar_nested.cc
index 1934212a08..8d25017e40 100644
--- a/cpp/src/arrow/compute/kernels/scalar_nested.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_nested.cc
@@ -39,7 +39,7 @@ namespace {
template <typename Type, typename offset_type = typename Type::offset_type>
Status ListValueLength(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
const ArraySpan& arr = batch[0].array;
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
auto out_values = out_arr->GetValues<offset_type>(1);
const offset_type* offsets = arr.GetValues<offset_type>(1);
// Offsets are always well-defined and monotonic, even for null values
@@ -53,7 +53,7 @@ Status FixedSizeListValueLength(KernelContext* ctx, const ExecSpan& batch,
ExecResult* out) {
auto width = checked_cast<const FixedSizeListType&>(*batch[0].type()).list_size();
const ArraySpan& arr = batch[0].array;
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
int32_t* out_values = out_arr->GetValues<int32_t>(1);
std::fill(out_values, out_values + arr.length, width);
return Status::OK();
diff --git a/cpp/src/arrow/compute/kernels/scalar_random.cc b/cpp/src/arrow/compute/kernels/scalar_random.cc
index b1ebfd312c..f145f6b603 100644
--- a/cpp/src/arrow/compute/kernels/scalar_random.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_random.cc
@@ -69,7 +69,7 @@ Status ExecRandom(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
std::lock_guard<std::mutex> seed_gen_lock(seed_gen_mutex);
gen.seed(seed_gen());
}
- double* out_values = out->array_span()->GetValues<double>(1);
+ double* out_values = out->array_span_mutable()->GetValues<double>(1);
for (int64_t i = 0; i < batch.length; ++i) {
out_values[i] = generate_uniform(&gen);
}
diff --git a/cpp/src/arrow/compute/kernels/scalar_set_lookup.cc b/cpp/src/arrow/compute/kernels/scalar_set_lookup.cc
index 292a924233..28a3b37996 100644
--- a/cpp/src/arrow/compute/kernels/scalar_set_lookup.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_set_lookup.cc
@@ -326,7 +326,7 @@ struct IndexInVisitor {
};
Status ExecIndexIn(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
- return IndexInVisitor(ctx, batch[0].array, out->array_span()).Execute();
+ return IndexInVisitor(ctx, batch[0].array, out->array_span_mutable()).Execute();
}
// ----------------------------------------------------------------------
@@ -408,7 +408,7 @@ struct IsInVisitor {
};
Status ExecIsIn(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
- return IsInVisitor(ctx, batch[0].array, out->array_span()).Execute();
+ return IsInVisitor(ctx, batch[0].array, out->array_span_mutable()).Execute();
}
// Unary set lookup kernels available for the following input types
diff --git a/cpp/src/arrow/compute/kernels/scalar_string_ascii.cc b/cpp/src/arrow/compute/kernels/scalar_string_ascii.cc
index d36ef26d9a..a90a7282bb 100644
--- a/cpp/src/arrow/compute/kernels/scalar_string_ascii.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_string_ascii.cc
@@ -860,7 +860,7 @@ struct BinaryLength {
static Status FixedSizeExec(KernelContext*, const ExecSpan& batch, ExecResult* out) {
// Output is preallocated and validity buffer is precomputed
const int32_t width = batch[0].type()->byte_width();
- int32_t* buffer = out->array_span()->GetValues<int32_t>(1);
+ int32_t* buffer = out->array_span_mutable()->GetValues<int32_t>(1);
std::fill(buffer, buffer + batch.length, width);
return Status::OK();
}
@@ -1206,7 +1206,7 @@ void StringBoolTransform(KernelContext* ctx, const ExecSpan& batch,
StrToBoolTransformFunc transform, ExecResult* out) {
using offset_type = typename Type::offset_type;
const ArraySpan& input = batch[0].array;
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
if (input.length > 0) {
transform(reinterpret_cast<const offset_type*>(input.buffers[1].data) + input.offset,
input.buffers[2].data, input.length, out_arr->offset,
diff --git a/cpp/src/arrow/compute/kernels/scalar_string_internal.h b/cpp/src/arrow/compute/kernels/scalar_string_internal.h
index 522853564a..910b3dbdbd 100644
--- a/cpp/src/arrow/compute/kernels/scalar_string_internal.h
+++ b/cpp/src/arrow/compute/kernels/scalar_string_internal.h
@@ -221,7 +221,7 @@ struct StringPredicateFunctor {
EnsureUtf8LookupTablesFilled();
const ArraySpan& input = batch[0].array;
ArrayIterator<Type> input_it(input);
- ArraySpan* out_arr = out->array_span();
+ ArraySpan* out_arr = out->array_span_mutable();
::arrow::internal::GenerateBitsUnrolled(
out_arr->buffers[1].data, out_arr->offset, input.length, [&]() -> bool {
std::string_view val = input_it();
diff --git a/cpp/src/arrow/compute/kernels/scalar_temporal_unary.cc b/cpp/src/arrow/compute/kernels/scalar_temporal_unary.cc
index c0dc747e49..affdf31d2e 100644
--- a/cpp/src/arrow/compute/kernels/scalar_temporal_unary.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_temporal_unary.cc
@@ -1248,7 +1248,7 @@ struct Strptime {
const ArraySpan& in = batch[0].array;
ARROW_ASSIGN_OR_RAISE(auto self, Make(ctx, *in.type));
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
int64_t* out_data = out_span->GetValues<int64_t>(1);
if (self.error_is_null) {
diff --git a/cpp/src/arrow/compute/kernels/scalar_validity.cc b/cpp/src/arrow/compute/kernels/scalar_validity.cc
index 32f9b4ef9b..e5fdb7d856 100644
--- a/cpp/src/arrow/compute/kernels/scalar_validity.cc
+++ b/cpp/src/arrow/compute/kernels/scalar_validity.cc
@@ -34,7 +34,7 @@ namespace {
Status IsValidExec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
const ArraySpan& arr = batch[0].array;
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (arr.type->id() == Type::NA) {
// Input is all nulls => output is entirely false.
bit_util::SetBitsTo(out_span->buffers[1].data, out_span->offset, out_span->length,
@@ -84,7 +84,7 @@ static void SetNanBits(const ArraySpan& arr, uint8_t* out_bitmap, int64_t out_of
Status IsNullExec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
const ArraySpan& arr = batch[0].array;
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (arr.type->id() == Type::NA) {
bit_util::SetBitsTo(out_span->buffers[1].data, out_span->offset, out_span->length,
true);
@@ -150,7 +150,7 @@ void AddFloatValidityKernel(const std::shared_ptr<DataType>& ty, ScalarFunction*
template <bool kConstant>
Status ConstBoolExec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
- ArraySpan* array = out->array_span();
+ ArraySpan* array = out->array_span_mutable();
bit_util::SetBitsTo(array->buffers[1].data, array->offset, array->length, kConstant);
return Status::OK();
}
@@ -210,7 +210,7 @@ std::shared_ptr<ScalarFunction> MakeIsNanFunction(std::string name, FunctionDoc
}
Status TrueUnlessNullExec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
- ArraySpan* out_span = out->array_span();
+ ArraySpan* out_span = out->array_span_mutable();
if (out_span->buffers[0].data) {
// If there is a validity bitmap computed above the kernel
// invocation, we copy it to the output buffers