Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-13 Thread via GitHub


duongcongtoai commented on code in PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#discussion_r2616814828


##
arrow-data/src/transform/mod.rs:
##
@@ -672,12 +674,24 @@ impl<'a> MutableArrayData<'a> {
 next_offset += dict_len;
 }
 
-build_extend_dictionary(array, offset, offset + 
dict_len)
+// -1 since offset is exclusive
+build_extend_dictionary(array, offset, 1.max(offset + 
dict_len) - 1)
 .ok_or(ArrowError::DictionaryKeyOverflowError)
 })
-.collect();
-
-extend_values.expect("MutableArrayData::new is infallible")
+.collect::<Result<Vec<_>, ArrowError>>();
+match result {
+Err(_) => {

Review Comment:
   i added some more comments



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-13 Thread via GitHub


duongcongtoai commented on code in PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#discussion_r2616807753


##
arrow-data/src/transform/utils.rs:
##
@@ -58,6 +61,37 @@ pub(super) unsafe fn get_last_offset(offset_buffer: &Mutable
 *unsafe { offsets.get_unchecked(offsets.len() - 1) }
 }
 
+fn iter_in_bytes_variable_sized<T: ArrowNativeType>(data: &ArrayData) -> Vec<&[u8]> {
+let offsets = data.buffer::<T>(0);
+
+// the offsets of the `ArrayData` are ignored as they are only applied to the offset buffer.
+let values = data.buffers()[1].as_slice();
+(0..data.len())
+.map(move |i| {
+let start = offsets[i].to_usize().unwrap();
+let end = offsets[i + 1].to_usize().unwrap();
+&values[start..end]
+})
+.collect::<Vec<_>>()
+}
+
+fn iter_in_bytes_fixed_sized(data: &ArrayData, size: usize) -> Vec<&[u8]> {
+let values = &data.buffers()[0].as_slice()[data.offset() * size..];
+values.chunks(size).collect::<Vec<_>>()
+}
+
+/// iterate values in raw bytes regardless of nullability
+pub(crate) fn iter_in_bytes<'a>(data_type: &DataType, data: &'a ArrayData) -> 
Vec<&'a [u8]> {

Review Comment:
   i renamed



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-11 Thread via GitHub


alamb-ghbot commented on PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#issuecomment-3644014062

   🤖: Benchmark completed
   
   Details
   
   
   
   ```
   group
fix-overflow-on-interleave-list-of-dictmain
   -
---
   interleave dict(20, 0.0) 100 [0..100, 100..230, 450..1000]   
1.01801.7±9.75ns? ?/sec1.00792.5±3.37ns 
   ? ?/sec
   interleave dict(20, 0.0) 1024 [0..100, 100..230, 450..1000, 0..1000] 
1.01  2.3±0.01µs? ?/sec1.00  2.3±0.01µs 
   ? ?/sec
   interleave dict(20, 0.0) 1024 [0..100, 100..230, 450..1000]  
1.00  2.2±0.01µs? ?/sec1.00  2.2±0.02µs 
   ? ?/sec
   interleave dict(20, 0.0) 400 [0..100, 100..230, 450..1000]   
1.00   1255.9±7.48ns? ?/sec1.01  1269.4±70.44ns 
   ? ?/sec
   interleave dict_distinct 100 
1.00  3.0±0.02µs? ?/sec1.00  3.0±0.04µs 
   ? ?/sec
   interleave dict_distinct 1024
1.00  2.9±0.04µs? ?/sec1.01  3.0±0.03µs 
   ? ?/sec
   interleave dict_distinct 2048
1.00  2.9±0.02µs? ?/sec1.01  3.0±0.02µs 
   ? ?/sec
   interleave dict_sparse(20, 0.0) 100 [0..100, 100..230, 450..1000]
1.00  2.8±0.18µs? ?/sec1.02  2.8±0.21µs 
   ? ?/sec
   interleave dict_sparse(20, 0.0) 1024 [0..100, 100..230, 450..1000, 0..1000]  
1.10  5.2±0.31µs? ?/sec1.00  4.7±0.33µs 
   ? ?/sec
   interleave dict_sparse(20, 0.0) 1024 [0..100, 100..230, 450..1000]   
1.03  4.2±0.24µs? ?/sec1.00  4.1±0.24µs 
   ? ?/sec
   interleave dict_sparse(20, 0.0) 400 [0..100, 100..230, 450..1000]
1.00  3.2±0.23µs? ?/sec1.07  3.4±0.27µs 
   ? ?/sec
   interleave i32(0.0) 100 [0..100, 100..230, 450..1000]
1.01310.7±3.11ns? ?/sec1.00308.1±1.38ns 
   ? ?/sec
   interleave i32(0.0) 1024 [0..100, 100..230, 450..1000, 0..1000]  
1.00  1854.9±43.74ns? ?/sec1.00   1851.8±4.58ns 
   ? ?/sec
   interleave i32(0.0) 1024 [0..100, 100..230, 450..1000]   
1.00  1832.5±27.87ns? ?/sec1.00   1826.6±5.84ns 
   ? ?/sec
   interleave i32(0.0) 400 [0..100, 100..230, 450..1000]
1.00840.2±9.09ns? ?/sec1.09   912.1±19.76ns 
   ? ?/sec
   interleave i32(0.5) 100 [0..100, 100..230, 450..1000]
1.01   603.2±23.23ns? ?/sec1.00599.7±4.63ns 
   ? ?/sec
   interleave i32(0.5) 1024 [0..100, 100..230, 450..1000, 0..1000]  
1.00  4.3±0.04µs? ?/sec1.00  4.3±0.03µs 
   ? ?/sec
   interleave i32(0.5) 1024 [0..100, 100..230, 450..1000]   
1.01  4.3±0.35µs? ?/sec1.00  4.3±0.02µs 
   ? ?/sec
   interleave i32(0.5) 400 [0..100, 100..230, 450..1000]
1.05  1928.5±19.80ns? ?/sec1.00  1841.0±13.53ns 
   ? ?/sec
   interleave list(0.0,0.0,20) 100 [0..100, 100..230, 450..1000]   
1.00  4.7±0.13µs? ?/sec1.01  4.8±0.15µs 
   ? ?/sec
   interleave list(0.0,0.0,20) 1024 [0..100, 100..230, 450..1000, 0..1000] 
1.01 30.1±0.26µs? ?/sec1.00 29.8±0.55µs 
   ? ?/sec
   interleave list(0.0,0.0,20) 1024 [0..100, 100..230, 450..1000]  
1.03 30.9±1.15µs? ?/sec1.00 29.9±0.36µs 
   ? ?/sec
   interleave list(0.0,0.0,20) 400 [0..100, 100..230, 450..1000]   
1.00 12.3±0.17µs? ?/sec1.00 12.3±0.15µs 
   ? ?/sec
   interleave list(0.1,0.1,20) 100 [0..100, 100..230, 450..1000]   
1.01  7.9±0.05µs? ?/sec1.00  7.8±0.04µs 
   ? ?/sec
   interleave list(0.1,0.1,20) 1024 [0..100, 100..230, 450..1000, 0..1000] 
1.00 59.8±0.42µs? ?/sec1.00 59.8±1.16µs 
   ? ?/sec
   interleave list(0.1,0.1,20) 1024 [0..100, 100..230, 450..1000]  
1.01 60.3±0.72µs? ?/sec1.00   

Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-11 Thread via GitHub


alamb-ghbot commented on PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#issuecomment-3643946406

   🤖 `./gh_compare_arrow.sh` 
[gh_compare_arrow.sh](https://github.com/alamb/datafusion-benchmarking/blob/main/scripts/gh_compare_arrow.sh)
 Running
   Linux aal-dev 6.14.0-1018-gcp #19~24.04.1-Ubuntu SMP Wed Sep 24 23:23:09 UTC 
2025 x86_64 x86_64 x86_64 GNU/Linux
   Comparing fix-overflow-on-interleave-list-of-dict 
(8a5a02873baebe81686d51f4147766238d0ad357) to 
6ff8cc4d04750332208931b483c4703025ced079 
[diff](https://github.com/apache/arrow-rs/compare/6ff8cc4d04750332208931b483c4703025ced079..8a5a02873baebe81686d51f4147766238d0ad357)
   BENCH_NAME=interleave_kernels
   BENCH_COMMAND=cargo bench --features=arrow,async,test_common,experimental 
--bench interleave_kernels 
   BENCH_FILTER=
   BENCH_BRANCH_NAME=fix-overflow-on-interleave-list-of-dict
   Results will be posted here when complete
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-11 Thread via GitHub


alamb commented on PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#issuecomment-3643946137

   run benchmark interleave_kernels


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-11 Thread via GitHub


duongcongtoai commented on code in PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#discussion_r2612060222


##
arrow-data/src/transform/mod.rs:
##
@@ -672,12 +674,24 @@ impl<'a> MutableArrayData<'a> {
 next_offset += dict_len;
 }
 
-build_extend_dictionary(array, offset, offset + 
dict_len)
+// -1 since offset is exclusive
+build_extend_dictionary(array, offset, 1.max(offset + 
dict_len) - 1)

Review Comment:
   comment addressed



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-11 Thread via GitHub


duongcongtoai commented on code in PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#discussion_r2612059060


##
arrow-select/src/interleave.rs:
##
@@ -1182,4 +1188,296 @@ mod tests {
 assert_eq!(v.len(), 1);
 assert_eq!(v.data_type(), &DataType::Struct(fields));
 }
+fn create_dict_arr(
+keys: Vec,
+null_keys: Option>,
+values: Vec,
+) -> ArrayRef {
+let input_keys =
+PrimitiveArrayfrom_iter_values_with_nulls(keys, 
null_keys.map(NullBuffer::from));
+let input_values = UInt16Array::from_iter_values(values);
+let input = DictionaryArray::new(input_keys, Arc::new(input_values));
+Arc::new(input) as ArrayRef
+}
+
+fn create_dict_list_arr(
+keys: Vec<u8>,
+null_keys: Option<Vec<bool>>,
+values: Vec<u16>,
+lengths: Vec<usize>,
+list_nulls: Option<Vec<bool>>,
+) -> ArrayRef {
+let dict_arr = {
+let input_1_keys =
+UInt8Array::from_iter_values_with_nulls(keys, 
null_keys.map(NullBuffer::from));
+let input_1_values = UInt16Array::from_iter_values(values);
+DictionaryArray::new(input_1_keys, Arc::new(input_1_values))
+};
+
+let offset_buffer = OffsetBuffer::<i32>::from_lengths(lengths);
+let list_arr = GenericListArray::new(
+Arc::new(Field::new_dictionary(
+"item",
+DataType::UInt8,
+DataType::UInt16,
+true,
+)),
+offset_buffer,
+Arc::new(dict_arr) as ArrayRef,
+list_nulls.map(NullBuffer::from_iter),
+);
+Arc::new(list_arr) as ArrayRef
+}
+
+#[test]

Review Comment:
   i added 2 new tests for non nested
   - test_total_distinct_keys_in_input_arrays_greater_than_key_size
   - 
test_total_distinct_keys_in_input_arrays_and_after_interleave_are_greater_than_key_size



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-09 Thread via GitHub


duongcongtoai commented on code in PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#discussion_r2604389541


##
arrow-data/src/transform/dictionary.rs:
##
@@ -0,0 +1,140 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::collections::HashMap;
+
+use arrow_buffer::ArrowNativeType;
+use arrow_schema::{ArrowError, DataType};
+
+use crate::{
+ArrayData,
+transform::{_MutableArrayData, Extend, MutableArrayData, 
utils::iter_in_bytes},
+};
+
+pub(crate) fn merge_dictionaries<'a>(
+key_data_type: &DataType,
+value_data_type: &DataType,
+dicts: &[&'a ArrayData],
+) -> Result<(Vec<Extend<'a>>, ArrayData), ArrowError> {
+match key_data_type {
+DataType::UInt8 => merge_dictionaries_casted::<u8>(value_data_type, dicts),
+DataType::UInt16 => merge_dictionaries_casted::<u16>(value_data_type, dicts),
+DataType::UInt32 => merge_dictionaries_casted::<u32>(value_data_type, dicts),
+DataType::UInt64 => merge_dictionaries_casted::<u64>(value_data_type, dicts),
+DataType::Int8 => merge_dictionaries_casted::<i8>(value_data_type, dicts),
+DataType::Int16 => merge_dictionaries_casted::<i16>(value_data_type, dicts),
+DataType::Int32 => merge_dictionaries_casted::<i32>(value_data_type, dicts),
+DataType::Int64 => merge_dictionaries_casted::<i64>(value_data_type, dicts),
+_ => unreachable!(),
+}
+}
+
+fn merge_dictionaries_casted<'a, K: ArrowNativeType>(
+data_type: &DataType,
+dicts: &[&'a ArrayData],
+) -> Result<(Vec<Extend<'a>>, ArrayData), ArrowError> {
+let mut dedup = HashMap::new();
+let mut indices = vec![];
+let mut data_refs = vec![];
+let new_dict_keys = dicts
+.iter()
+.enumerate()
+.map(|(dict_idx, dict)| {
+let value_data = dict.child_data().first().unwrap();
+let old_keys = dict.buffer::<K>(0);
+data_refs.push(value_data);
+let mut new_keys = vec![K::usize_as(0); old_keys.len()];
+let values = iter_in_bytes(data_type, value_data);
+for (key_index, old_key) in old_keys.iter().enumerate() {
+if dict.is_valid(key_index) {
+let value = values[old_key.as_usize()];
+match K::from_usize(dedup.len()) {
+Some(idx) => {
+let idx_for_value = 
dedup.entry(value).or_insert(idx);
+// a new entry
+if *idx_for_value == idx {
+indices.push((dict_idx, old_key.as_usize()));
+}
+
+new_keys[key_index] = *idx_for_value;
+}
+// the built dictionary has reached the cap of the key type
+None => match dedup.get(value) {
+// as long as this value has already been indexed,
+// the merged dictionary is still valid
+Some(previous_key) => {
+new_keys[key_index] = *previous_key;
+}
+None => return 
Err(ArrowError::DictionaryKeyOverflowError),

Review Comment:
   let me add more coverage on this



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-09 Thread via GitHub


duongcongtoai commented on code in PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#discussion_r2604008829


##
arrow-data/src/transform/mod.rs:
##
@@ -672,12 +674,24 @@ impl<'a> MutableArrayData<'a> {
 next_offset += dict_len;
 }
 
-build_extend_dictionary(array, offset, offset + 
dict_len)
+// -1 since offset is exclusive

Review Comment:
   this will trigger the error part
   ```
   #[test]
   fn test_uint8_dictionary_overflow_with_256_items() {
   let dict_arr = {
   let input_1_keys = UInt8Array::from_iter_values(0..=255);
   let input_1_values = UInt8Array::from_iter_values(0..=255);
   let input_1 = DictionaryArray::new(input_1_keys, 
Arc::new(input_1_values));
   input_1
   };
   
   let arr1 = Arc::new(dict_arr) as ArrayRef;
   let arr2 = arr1.clone();
   
   concat(&[&arr1, &arr2]).unwrap();
   }
   ```
   when it reaches this function
   ```
   build_extend_dictionary(array, offset, offset + 
dict_len)
   .ok_or(ArrowError::DictionaryKeyOverflowError)
   ```
offset will be 0, dict_len is 256, and build_extend_dictionary will try to cast 
256 as u8, which will throw DictionaryKeyOverflowError, even though it 
shouldn't. The test passes anyway because we already added a fallback for 
this error.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



Re: [PR] fix: try merging dictionary as a fallback on overflow error [arrow-rs]

2025-12-03 Thread via GitHub


alamb commented on code in PR #8652:
URL: https://github.com/apache/arrow-rs/pull/8652#discussion_r2586669463


##
arrow-data/src/transform/mod.rs:
##
@@ -672,12 +674,24 @@ impl<'a> MutableArrayData<'a> {
 next_offset += dict_len;
 }
 
-build_extend_dictionary(array, offset, offset + 
dict_len)
+// -1 since offset is exclusive
+build_extend_dictionary(array, offset, 1.max(offset + 
dict_len) - 1)
 .ok_or(ArrowError::DictionaryKeyOverflowError)
 })
-.collect();
-
-extend_values.expect("MutableArrayData::new is infallible")
+.collect::<Result<Vec<_>, ArrowError>>();
+match result {
+Err(_) => {

Review Comment:
   I think we should only retry when the Err is `DictionaryKeyOverflowError` -- 
this code retries regardless of the underlying error



##
arrow-data/src/transform/dictionary.rs:
##
@@ -0,0 +1,140 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::collections::HashMap;
+
+use arrow_buffer::ArrowNativeType;
+use arrow_schema::{ArrowError, DataType};
+
+use crate::{
+ArrayData,
+transform::{_MutableArrayData, Extend, MutableArrayData, 
utils::iter_in_bytes},
+};
+
+pub(crate) fn merge_dictionaries<'a>(

Review Comment:
   Could we please leave some comments about what this function does and why it 
is needed? (aka explain the overflow backup case)



##
arrow-data/src/transform/utils.rs:
##
@@ -58,6 +61,37 @@ pub(super) unsafe fn get_last_offset(offset_buffer: &Mutable
 *unsafe { offsets.get_unchecked(offsets.len() - 1) }
 }
 
+fn iter_in_bytes_variable_sized<T: ArrowNativeType>(data: &ArrayData) -> Vec<&[u8]> {
+let offsets = data.buffer::<T>(0);
+
+// the offsets of the `ArrayData` are ignored as they are only applied to the offset buffer.
+let values = data.buffers()[1].as_slice();
+(0..data.len())
+.map(move |i| {
+let start = offsets[i].to_usize().unwrap();
+let end = offsets[i + 1].to_usize().unwrap();
+&values[start..end]
+})
+.collect::<Vec<_>>()
+}
+
+fn iter_in_bytes_fixed_sized(data: &ArrayData, size: usize) -> Vec<&[u8]> {
+let values = &data.buffers()[0].as_slice()[data.offset() * size..];
+values.chunks(size).collect::<Vec<_>>()
+}
+
+/// iterate values in raw bytes regardless of nullability
+pub(crate) fn iter_in_bytes<'a>(data_type: &DataType, data: &'a ArrayData) -> 
Vec<&'a [u8]> {

Review Comment:
   this is called `iter_in_bytes...` but it returns a vec 🤔 



##
arrow-data/src/transform/dictionary.rs:
##
@@ -0,0 +1,140 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::collections::HashMap;
+
+use arrow_buffer::ArrowNativeType;
+use arrow_schema::{ArrowError, DataType};
+
+use crate::{
+ArrayData,
+transform::{_MutableArrayData, Extend, MutableArrayData, 
utils::iter_in_bytes},
+};
+
+pub(crate) fn merge_dictionaries<'a>(
+key_data_type: &DataType,
+value_data_type: &DataType,
+dicts: &[&'a ArrayData],
+) -> Result<(Vec<Extend<'a>>, ArrayData), ArrowError> {
+match key_data_type {
+DataType::UInt8 => merge_dictionaries_casted::<u8>(value_data_type, dicts),
+DataType::UInt16 => merge_dictionaries_casted::<u16>(value_data_type, dicts),
+DataType::UInt32 =>