Dandandan commented on code in PR #7873:
URL: https://github.com/apache/arrow-rs/pull/7873#discussion_r2192094925


##########
arrow-array/src/array/byte_view_array.rs:
##########
@@ -473,13 +473,114 @@ impl<T: ByteViewType + ?Sized> GenericByteViewArray<T> {
     /// Note: this function does not attempt to canonicalize / deduplicate values. For this
     /// feature see  [`GenericByteViewBuilder::with_deduplicate_strings`].
     pub fn gc(&self) -> Self {
-        let mut builder = GenericByteViewBuilder::<T>::with_capacity(self.len());
+        // 1) Read basic properties once
+        let len = self.len(); // number of elements
+        let views = self.views(); // raw u128 "view" values per slot
+        let nulls = self.nulls().cloned(); // reuse & clone existing null bitmap
+
+        // 2) Calculate the total size of all non-inline data
+        let mut total_large = 0;
+        if let Some(nbm) = &nulls {
+            for i in nbm.valid_indices() {
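+                // SAFETY: valid_indices() only yields i < len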
+                let raw_view: u128 = unsafe { *views.get_unchecked(i) };
+                let bv = ByteView::from(raw_view);
+                if bv.length > MAX_INLINE_VIEW_LEN {
+                    total_large += bv.length as usize;
+                }
+            }
+        } else {
+            for i in 0..len {
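+                // SAFETY: i < len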
+                let raw_view: u128 = unsafe { *views.get_unchecked(i) };
+                let bv = ByteView::from(raw_view);
+                if bv.length > MAX_INLINE_VIEW_LEN {
+                    total_large += bv.length as usize;
+                }
+            }
+        }
 
-        for v in self.iter() {
-            builder.append_option(v);
+        // allocate exactly the capacity needed for all non-inline data
+        let mut data_buf = Vec::with_capacity(total_large);
+
+        // 3) Iterate over all views and rewrite each one into a new views buffer
+        let mut views_buf = vec![0u128; len];
+
+        if let Some(nbm) = &nulls {
+            for i in nbm.valid_indices() {
+                // SAFETY: i < len
+                let raw_view: u128 = unsafe { *views.get_unchecked(i) };
+                let mut bv = ByteView::from(raw_view);
+
+                let new_view = if bv.length <= MAX_INLINE_VIEW_LEN {
+                    raw_view
+                } else {
+                    // OUT-OF-LINE CASE:
+                    //  a) fetch the original data slice from the appropriate buffer
+                    let buffer = unsafe { self.buffers.get_unchecked(bv.buffer_index as usize) };
+                    let start = bv.offset as usize;
+                    let end = start + bv.length as usize;
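+                    // SAFETY: start..end is within buffer bounds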
+                    let slice: &[u8] = unsafe { buffer.get_unchecked(start..end) };
+
+                    //  b) append that slice into our new single data_buf
+                    let new_offset = data_buf.len() as u32;
+                    data_buf.extend_from_slice(slice);
+
+                    //  c) update ByteView metadata to point into the new data_buf
+                    bv.buffer_index = 0;
+                    bv.offset = new_offset;
+                    // length and prefix remain unchanged
+
+                    //  d) convert updated ByteView back into its u128 representation
+                    bv.into()
+                };
+
+                views_buf[i] = new_view;
+            }
+        } else {
+            for (i, &raw_view) in views.iter().enumerate().take(len) {
+                let mut bv = ByteView::from(raw_view);
+
+                let new_view = if bv.length <= MAX_INLINE_VIEW_LEN {
+                    raw_view
+                } else {
+                    // OUT-OF-LINE CASE:
+                    // a) fetch the original data slice from the appropriate buffer
+                    let buffer = unsafe { self.buffers.get_unchecked(bv.buffer_index as usize) };
+                    let start = bv.offset as usize;
+                    let end = start + bv.length as usize;
+                    // SAFETY: start..end is within buffer bounds
+                    let slice: &[u8] = unsafe { buffer.get_unchecked(start..end) };
+
+                    // b) append that slice into our new single data_buf
+                    let new_offset = data_buf.len() as u32;
+                    data_buf.extend_from_slice(slice);
+
+                    // c) update ByteView metadata to point into the new data_buf
+                    bv.buffer_index = 0;
+                    bv.offset = new_offset;
+                    // length and prefix remain unchanged
+
+                    // d) convert updated ByteView back into its u128 representation
+                    bv.into()
+                };
+
+                views_buf[i] = new_view;

Review Comment:
   Nice - whether this being faster is noticeable in benchmarks depends a bit on how expensive the rest of the function is, and on whether it improves the code gen for the rest of the code.
   I see we can still avoid one allocation / vec zeroing.
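
   A minimal, self-contained sketch of that idea (`rewrite_view` / `rewrite_views` are hypothetical stand-ins, not code from this PR): building the new views buffer with an iterator and `collect()` writes each slot exactly once, whereas `vec![0u128; len]` first zero-fills the allocation and then overwrites it. Null handling is omitted here; the real `gc()` would still need to write a valid (e.g. zeroed) view for null slots.

   ```rust
   // Hypothetical stand-in for the per-view rewrite in gc(); the real function
   // would copy out-of-line data into data_buf and patch the ByteView metadata.
   fn rewrite_view(raw: u128) -> u128 {
       raw
   }

   fn rewrite_views(views: &[u128]) -> Vec<u128> {
       // Because the source is a slice, the iterator reports an exact size, so
       // collect() allocates once and writes each element exactly once -- no
       // zero-initialization pass as with vec![0u128; len].
       views.iter().map(|&raw| rewrite_view(raw)).collect()
   }

   fn main() {
       let views = vec![1u128, 2, 3];
       assert_eq!(rewrite_views(&views), views);
   }
   ```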


