etseidl commented on code in PR #9357:
URL: https://github.com/apache/arrow-rs/pull/9357#discussion_r2784218134
##########
parquet/src/arrow/arrow_writer/mod.rs:
##########
@@ -331,18 +341,57 @@ impl<W: Write + Send> ArrowWriter<W> {
),
};
-        // If would exceed max_row_group_size, split batch
-        if in_progress.buffered_rows + batch.num_rows() > self.max_row_group_size {
-            let to_write = self.max_row_group_size - in_progress.buffered_rows;
-            let a = batch.slice(0, to_write);
-            let b = batch.slice(to_write, batch.num_rows() - to_write);
-            self.write(&a)?;
-            return self.write(&b);
+        if let Some(max_rows) = self.max_row_group_row_count {
+            if in_progress.buffered_rows + batch.num_rows() > max_rows {
+                let to_write = max_rows - in_progress.buffered_rows;
+                let a = batch.slice(0, to_write);
+                let b = batch.slice(to_write, batch.num_rows() - to_write);
+                self.write(&a)?;
+                return self.write(&b);
+            }
+        }
+
+        // Check byte limit: if we have buffered data, use measured average row size
+        // to split batch proactively before exceeding byte limit
+        if let Some(max_bytes) = self.max_row_group_bytes {
+            if in_progress.buffered_rows > 0 {
+                let current_bytes = in_progress.get_estimated_total_bytes();
+
+                if current_bytes >= max_bytes {
+                    self.flush()?;
+                    return self.write(batch);
+                }
+
+                let avg_row_bytes = current_bytes / in_progress.buffered_rows;
+                if avg_row_bytes > 0 {
+                    let remaining_bytes = max_bytes - current_bytes;
Review Comment:
Perhaps add a comment here that we've already checked that `current_bytes`
is less than `max_bytes` by this line...at first glance I was thinking this
should be `saturating_sub`.
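For what it's worth, here is a minimal standalone sketch of this branch with that comment spelled out. The free-function shape, names, and example values are mine for illustration only; just the arithmetic mirrors the diff above:

```rust
/// Illustrative sketch only, not the PR's actual structure: estimate how many
/// more rows fit in the in-progress row group before the byte limit is hit.
fn rows_until_byte_limit(
    max_bytes: usize,
    current_bytes: usize,
    buffered_rows: usize,
) -> Option<usize> {
    if buffered_rows == 0 || current_bytes >= max_bytes {
        // Nothing buffered yet, or already at/over the limit: caller flushes instead.
        return None;
    }

    let avg_row_bytes = current_bytes / buffered_rows;
    if avg_row_bytes == 0 {
        return None;
    }

    // `current_bytes < max_bytes` was checked above, so this subtraction
    // cannot underflow; no need for `saturating_sub`.
    let remaining_bytes = max_bytes - current_bytes;
    Some(remaining_bytes / avg_row_bytes)
}

fn main() {
    // ~100 bytes per buffered row and 300 bytes of headroom => roughly 3 more rows fit.
    assert_eq!(rows_until_byte_limit(1_000, 700, 7), Some(3));
    // Already at the limit: the caller should flush the row group first.
    assert_eq!(rows_until_byte_limit(1_000, 1_000, 10), None);
}
```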