vibhatha commented on a change in pull request #12033:
URL: https://github.com/apache/arrow/pull/12033#discussion_r774947652
##########
File path: cpp/examples/arrow/exec_plan_examples.cc
##########
@@ -0,0 +1,1122 @@

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <iostream>
#include <memory>
#include <utility>

#include "arrow/compute/api.h"
#include "arrow/compute/api_scalar.h"
#include "arrow/compute/api_vector.h"
#include "arrow/compute/cast.h"
#include "arrow/compute/exec/exec_plan.h"
#include "arrow/compute/exec/ir_consumer.h"
#include "arrow/compute/exec/test_util.h"

#include <arrow/dataset/dataset.h>
#include <arrow/dataset/file_parquet.h>
#include "arrow/dataset/file_base.h"
#include "arrow/dataset/plan.h"
#include "arrow/dataset/scanner.h"
#include "arrow/dataset/dataset_writer.h"

#include "arrow/io/interfaces.h"
#include "arrow/io/memory.h"
#include "arrow/io/slow.h"
#include "arrow/io/transform.h"

#include <arrow/result.h>
#include <arrow/status.h>
#include <arrow/table.h>

#include <arrow/ipc/api.h>

#include <arrow/util/future.h>
#include "arrow/util/range.h"
#include "arrow/util/thread_pool.h"
#include "arrow/util/vector.h"

// Demonstrates various operators in the Arrow Streaming Execution Engine.

#define ABORT_ON_FAILURE(expr)                     \
  do {                                             \
    arrow::Status status_ = (expr);                \
    if (!status_.ok()) {                           \
      std::cerr << status_.message() << std::endl; \
      abort();                                     \
    }                                              \
  } while (0);

#define CHECK_AND_RETURN(expr)                     \
  do {                                             \
    arrow::Status status_ = (expr);                \
    if (!status_.ok()) {                           \
      std::cerr << status_.message() << std::endl; \
      return EXIT_FAILURE;                         \
    } else {                                       \
      return EXIT_SUCCESS;                         \
    }                                              \
  } while (0);

#define CHECK_AND_CONTINUE(expr)                   \
  do {                                             \
    arrow::Status status_ = (expr);                \
    if (!status_.ok()) {                           \
      std::cerr << status_.message() << std::endl; \
      return EXIT_FAILURE;                         \
    }                                              \
  } while (0);

#define SEP_STR "******"

#define PRINT_BLOCK(msg)                                                     \
  std::cout << "" << std::endl;                                              \
  std::cout << "\t" << SEP_STR << " " << msg << " " << SEP_STR << std::endl; \
  std::cout << "" << std::endl;

#define PRINT_LINE(msg) std::cout << msg << std::endl;

namespace cp = ::arrow::compute;

std::shared_ptr<arrow::Array> GetArrayFromJSON(
    const std::shared_ptr<arrow::DataType>& type, arrow::util::string_view json) {
  std::shared_ptr<arrow::Array> out;
  ABORT_ON_FAILURE(arrow::ipc::internal::json::ArrayFromJSON(type, json, &out));
  return out;
}

std::shared_ptr<arrow::RecordBatch> GetRecordBatchFromJSON(
    const std::shared_ptr<arrow::Schema>& schema, arrow::util::string_view json) {
  // Parse as a StructArray
  auto struct_type = arrow::struct_(schema->fields());
  std::shared_ptr<arrow::Array> struct_array = GetArrayFromJSON(struct_type, json);

  // Convert StructArray to RecordBatch
  return *arrow::RecordBatch::FromStructArray(struct_array);
}
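// A minimal usage sketch for the JSON helpers above. The schema, field name,
// and JSON payload here are hypothetical, chosen only to illustrate the call
// shape; this helper is not part of the original example set:
void JsonHelpersUsageSketch() {
  auto schema = arrow::schema({arrow::field("x", arrow::int32())});
  std::shared_ptr<arrow::RecordBatch> batch =
      GetRecordBatchFromJSON(schema, R"([{"x": 1}, {"x": 2}])");
  PRINT_LINE("Batch parsed from JSON : " << batch->ToString());
}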
std::shared_ptr<arrow::Table> GetTableFromJSON(
    const std::shared_ptr<arrow::Schema>& schema, const std::vector<std::string>& json) {
  std::vector<std::shared_ptr<arrow::RecordBatch>> batches;
  for (const std::string& batch_json : json) {
    batches.push_back(GetRecordBatchFromJSON(schema, batch_json));
  }
  return *arrow::Table::FromRecordBatches(schema, std::move(batches));
}

std::shared_ptr<arrow::Table> CreateTable() {
  auto schema =
      arrow::schema({arrow::field("a", arrow::int64()), arrow::field("b", arrow::int64()),
                     arrow::field("c", arrow::int64())});
  std::shared_ptr<arrow::Array> array_a;
  std::shared_ptr<arrow::Array> array_b;
  std::shared_ptr<arrow::Array> array_c;
  arrow::NumericBuilder<arrow::Int64Type> builder;
  ABORT_ON_FAILURE(builder.AppendValues({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
  ABORT_ON_FAILURE(builder.Finish(&array_a));
  builder.Reset();
  ABORT_ON_FAILURE(builder.AppendValues({9, 8, 7, 6, 5, 4, 3, 2, 1, 0}));
  ABORT_ON_FAILURE(builder.Finish(&array_b));
  builder.Reset();
  ABORT_ON_FAILURE(builder.AppendValues({1, 2, 1, 2, 1, 2, 1, 2, 1, 2}));
  ABORT_ON_FAILURE(builder.Finish(&array_c));
  return arrow::Table::Make(schema, {array_a, array_b, array_c});
}

std::shared_ptr<arrow::dataset::Dataset> CreateDataset() {
  return std::make_shared<arrow::dataset::InMemoryDataset>(GetTableFromJSON(
      arrow::schema({arrow::field("a", arrow::int32()),
                     arrow::field("b", arrow::boolean())}),
      {
          R"([{"a": 1, "b": null},
              {"a": 2, "b": true}])",
          R"([{"a": null, "b": true},
              {"a": 3, "b": false}])",
          R"([{"a": null, "b": true},
              {"a": 4, "b": false}])",
          R"([{"a": 5, "b": null},
              {"a": 6, "b": false},
              {"a": 7, "b": false},
              {"a": 8, "b": true}])",
      }));
}

std::shared_ptr<arrow::Table> CreateSimpleTable() {
  auto schema = arrow::schema(
      {arrow::field("a", arrow::int32()), arrow::field("b", arrow::boolean())});
  std::shared_ptr<arrow::Array> array_a;
  std::shared_ptr<arrow::Array> array_b;
  arrow::NumericBuilder<arrow::Int32Type> builder;
  arrow::BooleanBuilder b_builder;
  ABORT_ON_FAILURE(builder.AppendValues({1, 2, 3, 4, 5, 6, 7}));
  ABORT_ON_FAILURE(builder.Finish(&array_a));

  std::vector<bool> bool_vec{false, true, false, true, false, true, false};
  ABORT_ON_FAILURE(b_builder.AppendValues(bool_vec));
  ABORT_ON_FAILURE(b_builder.Finish(&array_b));
  return arrow::Table::Make(schema, {array_a, array_b});
}
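// Illustrative sketch (the helper name is hypothetical, not part of the
// example file): any of the tables above can be exposed to a scan node by
// wrapping it in an InMemoryDataset, just as CreateDataset() does with its
// JSON-built table:
std::shared_ptr<arrow::dataset::Dataset> CreateDatasetFromSimpleTable() {
  return std::make_shared<arrow::dataset::InMemoryDataset>(CreateSimpleTable());
}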
arrow::Status exec_plan_end_to_end_sample() {
  cp::ExecContext exec_context(arrow::default_memory_pool(),
                               ::arrow::internal::GetCpuThreadPool());

  // ensure arrow::dataset node factories are in the registry
  arrow::dataset::internal::Initialize();

  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<cp::ExecPlan> plan,
                        cp::ExecPlan::Make(&exec_context));

  std::shared_ptr<arrow::dataset::Dataset> dataset = CreateDataset();

  auto options = std::make_shared<arrow::dataset::ScanOptions>();
  // sync scanning is not supported by ScanNode
  options->use_async = true;
  // specify the filter
  cp::Expression b_is_true = cp::field_ref("b");
  options->filter = b_is_true;
  // for now, specify the projection as the full project expression (eventually
  // this can just be a list of materialized field names)
  cp::Expression a_times_2 = cp::call("multiply", {cp::field_ref("a"), cp::literal(2)});
  options->projection =
      cp::call("make_struct", {a_times_2}, cp::MakeStructOptions{{"a * 2"}});

  // construct the scan node
  cp::ExecNode* scan;
  auto scan_node_options = arrow::dataset::ScanNodeOptions{dataset, options};
  ARROW_ASSIGN_OR_RAISE(scan,
                        cp::MakeExecNode("scan", plan.get(), {}, scan_node_options));

  // pipe the scan node into a filter node
  cp::ExecNode* filter;
  ARROW_ASSIGN_OR_RAISE(filter, cp::MakeExecNode("filter", plan.get(), {scan},
                                                 cp::FilterNodeOptions{b_is_true}));

  cp::ExecNode* project;
  ARROW_ASSIGN_OR_RAISE(project,
                        cp::MakeExecNode("augmented_project", plan.get(), {filter},
                                         cp::ProjectNodeOptions{{a_times_2}}));

  // finally, pipe the project node into a sink node
  arrow::AsyncGenerator<arrow::util::optional<cp::ExecBatch>> sink_gen;
  ARROW_ASSIGN_OR_RAISE(cp::ExecNode * sink,
                        cp::MakeExecNode("sink", plan.get(), {project},
                                         cp::SinkNodeOptions{&sink_gen}));

  ABORT_ON_FAILURE(sink->Validate());

  // translate sink_gen (async) to sink_reader (sync)
  std::shared_ptr<arrow::RecordBatchReader> sink_reader =
      cp::MakeGeneratorReader(arrow::schema({arrow::field("a * 2", arrow::int32())}),
                              std::move(sink_gen), exec_context.memory_pool());

  // validate the plan
  ABORT_ON_FAILURE(plan->Validate());
  PRINT_LINE("Exec Plan created: " << plan->ToString());
  // start the ExecPlan
  ABORT_ON_FAILURE(plan->StartProducing());

  // collect sink_reader into a Table
  std::shared_ptr<arrow::Table> response_table;
  ARROW_ASSIGN_OR_RAISE(response_table,
                        arrow::Table::FromRecordBatchReader(sink_reader.get()));

  std::cout << "Results : " << response_table->ToString() << std::endl;

  // stop producing
  plan->StopProducing();
  // mark the plan finished
  plan->finished().Wait();

  return arrow::Status::OK();
}

cp::Expression Materialize(std::vector<std::string> names,
                           bool include_aug_fields = false) {
  if (include_aug_fields) {
    for (auto aug_name : {"__fragment_index", "__batch_index", "__last_in_fragment"}) {
      names.emplace_back(aug_name);
    }
  }

  std::vector<cp::Expression> exprs;
  for (const auto& name : names) {
    exprs.push_back(cp::field_ref(name));
  }

  return cp::project(exprs, names);
}

arrow::Status consume(
    std::shared_ptr<arrow::Schema> schema,
    std::function<arrow::Future<arrow::util::optional<cp::ExecBatch>>()>* sink_gen) {
  auto iterator = arrow::MakeGeneratorIterator(*sink_gen);
  while (true) {
    ARROW_ASSIGN_OR_RAISE(auto exec_batch, iterator.Next());
    if (!exec_batch.has_value()) {
      break;
    }
    ARROW_ASSIGN_OR_RAISE(auto record_batch, exec_batch->ToRecordBatch(schema));
    std::cout << record_batch->ToString() << '\n';
  }
  return arrow::Status::OK();
}
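// Hypothetical convenience wrapper (a sketch only; the name and shape are not
// from the example file) showing how consume() is meant to be driven: start
// the plan, drain the sink generator synchronously, then stop producing and
// wait for the plan to finish.
arrow::Status ConsumeAndFinish(
    const std::shared_ptr<cp::ExecPlan>& plan,
    const std::shared_ptr<arrow::Schema>& schema,
    arrow::AsyncGenerator<arrow::util::optional<cp::ExecBatch>>* sink_gen) {
  ARROW_RETURN_NOT_OK(plan->StartProducing());
  ARROW_RETURN_NOT_OK(consume(schema, sink_gen));
  plan->StopProducing();
  return plan->finished().status();
}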
arrow::Status scan_sink_node_example() {
  cp::ExecContext exec_context(arrow::default_memory_pool(),
                               ::arrow::internal::GetCpuThreadPool());

  // ensure arrow::dataset node factories are in the registry
  arrow::dataset::internal::Initialize();

  // create the execution plan
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<cp::ExecPlan> plan,
                        cp::ExecPlan::Make(&exec_context));

  std::shared_ptr<arrow::dataset::Dataset> dataset = CreateDataset();

  auto options = std::make_shared<arrow::dataset::ScanOptions>();
  // sync scanning is not supported by ScanNode
  options->use_async = true;
  options->projection = Materialize({});  // create empty projection

  // construct the scan node
  cp::ExecNode* scan;
  auto scan_node_options = arrow::dataset::ScanNodeOptions{dataset, options};
  ARROW_ASSIGN_OR_RAISE(scan,
                        cp::MakeExecNode("scan", plan.get(), {}, scan_node_options));

  arrow::AsyncGenerator<arrow::util::optional<cp::ExecBatch>> sink_gen;

  cp::ExecNode* sink;
  ARROW_ASSIGN_OR_RAISE(sink, cp::MakeExecNode("sink", plan.get(), {scan},
                                               cp::SinkNodeOptions{&sink_gen}));

  // translate sink_gen (async) to sink_reader (sync)
  std::shared_ptr<arrow::RecordBatchReader> sink_reader = cp::MakeGeneratorReader(
      dataset->schema(), std::move(sink_gen), exec_context.memory_pool());

  // validate the ExecPlan
  ABORT_ON_FAILURE(plan->Validate());
  PRINT_LINE("ExecPlan created : " << plan->ToString());
  // start the ExecPlan
  ABORT_ON_FAILURE(plan->StartProducing());

  // collect sink_reader into a Table
  std::shared_ptr<arrow::Table> response_table;
  ARROW_ASSIGN_OR_RAISE(response_table,
                        arrow::Table::FromRecordBatchReader(sink_reader.get()));

  PRINT_LINE("Results : " << response_table->ToString());

  // stop producing
  plan->StopProducing();
  // mark the plan finished
  plan->finished().Wait();

  return arrow::Status::OK();
}

cp::ExecBatch GetExecBatchFromJSON(const std::vector<arrow::ValueDescr>& descrs,
                                   arrow::util::string_view json) {
  auto fields = ::arrow::internal::MapVector(
      [](const arrow::ValueDescr& descr) { return arrow::field("", descr.type); },
      descrs);

  cp::ExecBatch batch{*GetRecordBatchFromJSON(arrow::schema(std::move(fields)), json)};

  auto value_it = batch.values.begin();
  for (const auto& descr : descrs) {
    if (descr.shape == arrow::ValueDescr::SCALAR) {
      if (batch.length == 0) {
        *value_it = arrow::MakeNullScalar(value_it->type());
      } else {
        *value_it = value_it->make_array()->GetScalar(0).ValueOrDie();
      }
    }
    ++value_it;
  }

  return batch;
}

struct BatchesWithSchema {
  std::vector<cp::ExecBatch> batches;
  std::shared_ptr<arrow::Schema> schema;

  arrow::AsyncGenerator<arrow::util::optional<cp::ExecBatch>> gen(bool parallel) const {
    auto opt_batches = ::arrow::internal::MapVector(
        [](cp::ExecBatch batch) { return arrow::util::make_optional(std::move(batch)); },
        batches);

    arrow::AsyncGenerator<arrow::util::optional<cp::ExecBatch>> gen;

    if (parallel) {
      // emulate batches completing initial decode-after-scan on a cpu thread
      gen = arrow::MakeBackgroundGenerator(
                arrow::MakeVectorIterator(std::move(opt_batches)),
                ::arrow::internal::GetCpuThreadPool())
                .ValueOrDie();

      // ensure that callbacks are not executed immediately on a background thread
      gen = arrow::MakeTransferredGenerator(std::move(gen),
                                            ::arrow::internal::GetCpuThreadPool());
    } else {
      gen = arrow::MakeVectorGenerator(std::move(opt_batches));
    }

    return gen;
  }
};

BatchesWithSchema MakeBasicBatches() {
  BatchesWithSchema out;
  out.batches = {GetExecBatchFromJSON({arrow::int32(), arrow::boolean()},
                                      "[[null, true], [4, false]]"),
                 GetExecBatchFromJSON({arrow::int32(), arrow::boolean()},
                                      "[[5, null], [6, false], [7, false]]")};
  out.schema = arrow::schema(
      {arrow::field("a", arrow::int32()), arrow::field("b", arrow::boolean())});
  return out;
}

BatchesWithSchema MakeSortTestBasicBatches() {
  BatchesWithSchema out;
  out.batches = {
      GetExecBatchFromJSON(
          {arrow::int32(), arrow::int32(), arrow::int32(), arrow::int32()},
          "[[1, 3, 0, 2], [121, 101, 120, 12], [10, 110, 210, 121], [51, 101, 2, 34]]"),
      GetExecBatchFromJSON(
          {arrow::int32(), arrow::int32(), arrow::int32(), arrow::int32()},
          "[[11, 31, 1, 12], [12, 101, 120, 12], [0, 110, 210, 11], [51, 10, 2, 3]]")};
  out.schema = arrow::schema(
      {arrow::field("a", arrow::int32()), arrow::field("b", arrow::int32()),
       arrow::field("c", arrow::int32()), arrow::field("d", arrow::int32())});
  return out;
}
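// Sketch of how a BatchesWithSchema instance can feed an ExecPlan through a
// "source" node. The helper name is hypothetical (not part of the example
// file); "source" and cp::SourceNodeOptions are the compute API used
// throughout this file:
arrow::Result<cp::ExecNode*> MakeTestSourceNode(cp::ExecPlan* plan,
                                                const BatchesWithSchema& data,
                                                bool parallel) {
  return cp::MakeExecNode("source", plan, {},
                          cp::SourceNodeOptions{data.schema, data.gen(parallel)});
}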
BatchesWithSchema MakeGroupableBatches(int multiplicity = 1) {
  BatchesWithSchema out;

  out.batches = {GetExecBatchFromJSON({arrow::int32(), arrow::utf8()}, R"([
                   [12, "alfa"],
                   [7,  "beta"],
                   [3,  "alfa"]
                 ])"),
                 GetExecBatchFromJSON({arrow::int32(), arrow::utf8()}, R"([
                   [-2, "alfa"],
                   [-1, "gama"],
                   [3,  "alfa"]
                 ])"),
                 GetExecBatchFromJSON({arrow::int32(), arrow::utf8()}, R"([
                   [5,  "gama"],
                   [3,  "beta"],
                   [-8, "alfa"]
                 ])")};

  size_t batch_count = out.batches.size();
  for (int repeat = 1; repeat < multiplicity; ++repeat) {
    for (size_t i = 0; i < batch_count; ++i) {
      out.batches.push_back(out.batches[i]);
    }
  }

  out.schema = arrow::schema(
      {arrow::field("i32", arrow::int32()), arrow::field("str", arrow::utf8())});

  return out;
}

std::shared_ptr<arrow::internal::ThreadPool> MakeIOThreadPool() {
  auto maybe_pool = arrow::internal::ThreadPool::MakeEternal(/*threads=*/8);
  if (!maybe_pool.ok()) {
    maybe_pool.status().Abort("Failed to create global IO thread pool");
  }
  return *std::move(maybe_pool);
}

arrow::internal::ThreadPool* GetIOThreadPool() {
  static std::shared_ptr<arrow::internal::ThreadPool> pool = MakeIOThreadPool();
  return pool.get();
}

Review comment:
       Removed.

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscr...@arrow.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org