2010YOUY01 commented on code in PR #7337: URL: https://github.com/apache/arrow-datafusion/pull/7337#discussion_r1298886837
########## datafusion/physical-expr/src/aggregate/percentile.rs: ########## @@ -0,0 +1,712 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//! # Percentile + +use crate::aggregate::utils::{down_cast_any_ref, validate_input_percentile_expr}; +use crate::expressions::format_state_name; +use crate::{AggregateExpr, PhysicalExpr}; +use arrow::array::{Array, ArrayRef, UInt32Array}; +use arrow::compute::sort_to_indices; +use arrow::datatypes::{DataType, Field}; +use datafusion_common::{internal_err, DataFusionError, Result, ScalarValue}; +use datafusion_expr::Accumulator; +use std::any::Any; +use std::convert::TryFrom; +use std::sync::Arc; + +#[derive(PartialEq, Debug, Clone, Copy)] +/// Enum representing if interpolation is used for the percentile aggregate expression. +pub enum PercentileInterpolationType { + /// Interpolates between adjacent values if the desired percentile lies between them. + Continuous, + /// Always returns an actual data point from the dataset. + Discrete, +} + +/// QUANTILE_CONT/QUANTILE_DISC expression +/// +/// This uses a lot of memory because all values need to be +/// stored in memory before a result can be computed. 
If an approximation is sufficient +/// then APPROX_PERCENTILE_CONT provides a much more efficient solution. +#[derive(Debug)] +pub struct Quantile { + name: String, + quantile_type: PercentileInterpolationType, + expr_value: Arc<dyn PhysicalExpr>, + percentile_score: f64, + data_type: DataType, +} + +impl Quantile { + pub fn new( + name: impl Into<String>, + quantile_type: PercentileInterpolationType, + expr_value: Arc<dyn PhysicalExpr>, + expr_percentile_score: Arc<dyn PhysicalExpr>, + data_type: DataType, + ) -> Result<Self> { + let percentile_score = validate_input_percentile_expr(&expr_percentile_score)?; + + Ok(Self { + name: name.into(), + quantile_type, + expr_value, + percentile_score, + data_type, + }) + } +} + +impl AggregateExpr for Quantile { + fn as_any(&self) -> &dyn Any { + self + } + + fn field(&self) -> Result<Field> { + Ok(Field::new(&self.name, self.data_type.clone(), true)) + } + + fn create_accumulator(&self) -> Result<Box<dyn Accumulator>> { + Ok(Box::new(PercentileAccumulator { + percentile_score: self.percentile_score, + interpolation_type: self.quantile_type, + data_type: self.data_type.clone(), + arrays: vec![], + all_values: vec![], + })) + } + + fn state_fields(&self) -> Result<Vec<Field>> { + //Intermediate state is a list of the elements we have collected so far + let field = Field::new("item", self.data_type.clone(), true); + let data_type = DataType::List(Arc::new(field)); + + Ok(vec![Field::new( + format_state_name(&self.name, "median"), + data_type, + true, + )]) + } + + fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> { + vec![self.expr_value.clone()] + } + + fn name(&self) -> &str { + &self.name + } +} + +impl PartialEq<dyn Any> for Quantile { + fn eq(&self, other: &dyn Any) -> bool { + down_cast_any_ref(other) + .downcast_ref::<Self>() + .map(|x| { + self.name == x.name + && self.data_type == x.data_type + && self.expr_value.eq(&x.expr_value) + && self.quantile_type == x.quantile_type + && self.percentile_score == 
x.percentile_score + }) + .unwrap_or(false) + } +} + +/// MEDIAN aggregate expression. +/// MEDIAN(x) is equivalent to QUANTILE_CONT(x, 0.5) +/// +/// This uses a lot of memory because all values need to be +/// stored in memory before a result can be computed. If an approximation is sufficient +/// then APPROX_MEDIAN provides a much more efficient solution. +#[derive(Debug)] +pub struct Median { + name: String, + expr: Arc<dyn PhysicalExpr>, + data_type: DataType, +} + +impl Median { + /// Create a new MEDIAN aggregate function + pub fn new( + expr: Arc<dyn PhysicalExpr>, + name: impl Into<String>, + data_type: DataType, + ) -> Self { + Self { + name: name.into(), + expr, + data_type, + } + } +} + +impl AggregateExpr for Median { + /// Return a reference to Any that can be used for downcasting + fn as_any(&self) -> &dyn Any { + self + } + + fn field(&self) -> Result<Field> { + Ok(Field::new(&self.name, self.data_type.clone(), true)) + } + + fn create_accumulator(&self) -> Result<Box<dyn Accumulator>> { + Ok(Box::new(PercentileAccumulator { + percentile_score: 0.5, + interpolation_type: PercentileInterpolationType::Continuous, + data_type: self.data_type.clone(), + arrays: vec![], + all_values: vec![], + })) + } + + fn state_fields(&self) -> Result<Vec<Field>> { + //Intermediate state is a list of the elements we have collected so far + let field = Field::new("item", self.data_type.clone(), true); + let data_type = DataType::List(Arc::new(field)); + + Ok(vec![Field::new( + format_state_name(&self.name, "median"), + data_type, + true, + )]) + } + + fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> { + vec![self.expr.clone()] + } + + fn name(&self) -> &str { + &self.name + } +} + +impl PartialEq<dyn Any> for Median { + fn eq(&self, other: &dyn Any) -> bool { + down_cast_any_ref(other) + .downcast_ref::<Self>() + .map(|x| { + self.name == x.name + && self.data_type == x.data_type + && self.expr.eq(&x.expr) + }) + .unwrap_or(false) + } +} + +#[derive(Debug)] +/// The 
accumulator for median/quantile_cont/quantile_disc aggregate functions +/// It accumulates the raw input values as `ScalarValue`s +/// +/// The intermediate state is represented as a List of scalar values updated by +/// `merge_batch` and a `Vec` of `ArrayRef` that are converted to scalar values +/// in the final evaluation step so that we avoid expensive conversions and +/// allocations during `update_batch`. +struct PercentileAccumulator { +    percentile_score: f64, +    interpolation_type: PercentileInterpolationType, +    data_type: DataType, +    arrays: Vec<ArrayRef>, +    all_values: Vec<ScalarValue>, +} + +macro_rules! safe_average { +    (f32, $v1:expr, $v2:expr) => { +        $v1 / 2.0 + $v2 / 2.0 +    }; +    ( f64, $v1:expr, $v2:expr) => { +        $v1 / 2.0 + $v2 / 2.0 +    }; +    ($val_type:ty, $v1:expr, $v2:expr) => { +        match $v1.checked_add($v2) { +            Some(sum) => sum / (2 as $val_type), +            None => $v1 / (2 as $val_type) + $v2 / (2 as $val_type), +        } +    }; +} + +// Example: `target_percentile` is 0.12 and it's landed between dp1 and dp2 +// dp1 has percentile 0.10 and value 0 +// dp2 has percentile 0.20 and value 100 +// `quantile_cont()` does linear interpolation: +// Then interpolation result = 0 + (0.12 - 0.10) / (0.20 - 0.10) * (100 - 0) +// = 20 +// `quantile_disc()` chooses the closer dp (pick the one with the lower percentile if equally close) +// `target_percentile` is closer to dp1's percentile, result = 0 +macro_rules! interpolate_logic { Review Comment: This complex macro is used to 1. reduce float arithmetic errors and 2. avoid potential overflows. It looks like it could be avoided if multiply and divide operations on `ScalarValue`s, like this one, were available: https://github.com/apache/arrow-datafusion/blob/672f5bdfe4df2270f5ad7b1a49bb1135acd5f9e7/datafusion/common/src/scalar.rs#L2057-L2060C4 They seem to be available in `arrow-rs`, but would require adding them into `datafusion` -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
