viirya commented on code in PR #7935:
URL: https://github.com/apache/arrow-rs/pull/7935#discussion_r2217933884


##########
parquet-variant/src/builder.rs:
##########
@@ -1134,39 +1197,86 @@ impl<'a> ObjectBuilder<'a> {
             )));
         }
 
-        let data_size = self.buffer.offset();
-        let num_fields = self.fields.len();
-        let is_large = num_fields > u8::MAX as usize;
+        let metadata_builder = self.parent_state.metadata_builder();
 
         self.fields.sort_by(|&field_a_id, _, &field_b_id, _| {
-            let key_a = &metadata_builder.field_name(field_a_id as usize);
-            let key_b = &metadata_builder.field_name(field_b_id as usize);
-            key_a.cmp(key_b)
+            let field_a_name = metadata_builder.field_name(field_a_id as usize);
+            let field_b_name = metadata_builder.field_name(field_b_id as usize);
+            field_a_name.cmp(field_b_name)
         });
 
-        let max_id = self.fields.iter().map(|(i, _)| *i).max().unwrap_or(0);
+        // The length of the metadata's field name list is a very cheap-to-compute upper bound.
+        // It will almost always be a tight upper bound as well -- it would take a pretty
+        // carefully crafted object to use only the early field ids of a large dictionary.
+        let max_id = metadata_builder.field_names.len();
+        let id_size = int_size(max_id);
 
-        let id_size = int_size(max_id as usize);
+        let parent_buffer = self.parent_state.buffer();
+        let current_offset = parent_buffer.offset();
+        // current object starts from `object_start_offset`
+        let data_size = current_offset - self.object_start_offset;
         let offset_size = int_size(data_size);
 
-        // Get parent's buffer
-        let parent_buffer = self.parent_state.buffer();
-        let starting_offset = parent_buffer.offset();
+        let num_fields = self.fields.len();
+        let is_large = num_fields > u8::MAX as usize;
 
-        // Write header
+        let header_size = 1 + // header byte
+            (if is_large { 4 } else { 1 }) + // num_fields
+            (num_fields * id_size as usize) + // field IDs
+            ((num_fields + 1) * offset_size as usize); // field offsets + data_size
+
+        let starting_offset = self.object_start_offset;
+
+        // Shift existing data to make room for the header
+        let buffer = parent_buffer.inner_mut();
+        buffer.splice(
+            starting_offset..starting_offset,
+            std::iter::repeat_n(0u8, header_size),
+        );
+
+        // Write header at the original start position
+        let mut header_pos = starting_offset;
+
+        // Write header byte
         let header = object_header(is_large, id_size, offset_size);
-        parent_buffer.append_header(header, is_large, num_fields);
+        buffer[header_pos] = header;
+        header_pos += 1;
 
-        // Write field IDs (sorted order)
-        let ids = self.fields.keys().map(|id| *id as usize);
-        parent_buffer.append_offset_array(ids, None, id_size);
+        // Write number of fields
+        if is_large {
+            buffer[header_pos..header_pos + 4].copy_from_slice(&(num_fields as u32).to_le_bytes());
+            header_pos += 4;
+        } else {
+            buffer[header_pos] = num_fields as u8;
+            header_pos += 1;
+        }

Review Comment:
   This is basically what `append_header` did, right? Maybe we can also extract this into a function instead of inlining it here.
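
   A minimal sketch of what such an extracted helper could look like -- the name `write_object_header_at` is hypothetical, and it assumes the header bytes have already been reserved in the buffer (e.g. via the `splice` above) and that the PR's existing `object_header` function is in scope:

   ```rust
   /// Writes the object header (header byte plus field count) into `buffer`
   /// starting at `pos`, returning the position just past what was written.
   /// Hypothetical extraction of the inline logic in this PR.
   fn write_object_header_at(
       buffer: &mut [u8],
       mut pos: usize,
       is_large: bool,
       id_size: u8,
       offset_size: u8,
       num_fields: usize,
   ) -> usize {
       // Header byte encoding the object layout flags and sizes.
       buffer[pos] = object_header(is_large, id_size, offset_size);
       pos += 1;

       // Number of fields: 4 little-endian bytes when large, 1 byte otherwise.
       if is_large {
           buffer[pos..pos + 4].copy_from_slice(&(num_fields as u32).to_le_bytes());
           pos += 4;
       } else {
           buffer[pos] = num_fields as u8;
           pos += 1;
       }

       pos // next write position (start of the field id array)
   }
   ```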


