mapleFU commented on code in PR #35825:
URL: https://github.com/apache/arrow/pull/35825#discussion_r1229448674
##########
cpp/src/parquet/encoding.cc:
##########
@@ -1484,6 +1521,39 @@ class DictDecoderImpl : public DecoderImpl, virtual public DictDecoder<Type> {
// Perform type-specific initialization
void SetDict(TypedDecoder<Type>* dictionary) override;
+ template <typename T = Type,
+ typename = std::enable_if_t<std::is_same_v<T, ByteArrayType> ||
+ std::is_same_v<T, LargeByteArrayType>>>
+ void SetByteArrayDict(TypedDecoder<Type>* dictionary) {
+ DecodeDict(dictionary);
+
+ auto dict_values = reinterpret_cast<ByteArray*>(dictionary_->mutable_data());
+
+ int total_size = 0;
Review Comment:
Would you mind explicitly using `int32_t` here?
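Something like this, as a sketch (assuming `AddWithOverflow` here is `::arrow::internal::AddWithOverflow` from `arrow/util/int_util_overflow.h`, and that `len` fits in 32 bits per the Parquet limits):

```cpp
// Accumulate dictionary value lengths in an explicit 32-bit counter, so the
// overflow check runs in the same width the offsets are stored in.
int32_t total_size = 0;
for (int i = 0; i < dictionary_length_; ++i) {
  if (AddWithOverflow(total_size, static_cast<int32_t>(dict_values[i].len),
                      &total_size)) {
    throw ParquetException("String/Binary length too large");
  }
}
```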
##########
cpp/src/parquet/encoding.cc:
##########
@@ -1484,6 +1521,39 @@ class DictDecoderImpl : public DecoderImpl, virtual public DictDecoder<Type> {
// Perform type-specific initialization
void SetDict(TypedDecoder<Type>* dictionary) override;
+ template <typename T = Type,
+ typename = std::enable_if_t<std::is_same_v<T, ByteArrayType> ||
+ std::is_same_v<T, LargeByteArrayType>>>
+ void SetByteArrayDict(TypedDecoder<Type>* dictionary) {
+ DecodeDict(dictionary);
+
+ auto dict_values = reinterpret_cast<ByteArray*>(dictionary_->mutable_data());
+
+ int total_size = 0;
+ for (int i = 0; i < dictionary_length_; ++i) {
+ if (AddWithOverflow(total_size, dict_values[i].len, &total_size)) {
+ throw ParquetException("String/Binary length to large");
+ }
+ }
+ PARQUET_THROW_NOT_OK(byte_array_data_->Resize(total_size,
+ /*shrink_to_fit=*/false));
+ PARQUET_THROW_NOT_OK(
+ byte_array_offsets_->Resize((dictionary_length_ + 1) * sizeof(int32_t),
Review Comment:
So this offset would not use `int64_t` even when `LargeByteArray` is enabled,
because `total_size` must not overflow `int`?
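If the large variant is really meant to carry 64-bit offsets, one way to express that (hypothetical `offset_type` alias using `std::conditional_t` from `<type_traits>`, only to illustrate the question):

```cpp
// Hypothetical: derive the offset width from the decoder's physical type, so
// the LargeByteArrayType path sizes its offset buffer (and would run its
// overflow check) in 64 bits instead of 32.
using offset_type =
    std::conditional_t<std::is_same_v<Type, LargeByteArrayType>, int64_t, int32_t>;

PARQUET_THROW_NOT_OK(byte_array_offsets_->Resize(
    (dictionary_length_ + 1) * sizeof(offset_type),
    /*shrink_to_fit=*/false));
```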
##########
cpp/src/parquet/arrow/reader.cc:
##########
@@ -462,7 +463,8 @@ class LeafReader : public ColumnReaderImpl {
input_(std::move(input)),
descr_(input_->descr()) {
record_reader_ = RecordReader::Make(
Review Comment:
Should we check that `field_` is a "large" type when
`use_large_binary_variants` is set?
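One possible shape for the check (hypothetical, assuming the Arrow `field_` and the `use_large_binary_variants` flag are both visible at this point):

```cpp
// Sketch: reject a mismatch between the requested large-binary decoding and
// the Arrow field type before building the record reader.
const auto type_id = field_->type()->id();
if (use_large_binary_variants && type_id != ::arrow::Type::LARGE_BINARY &&
    type_id != ::arrow::Type::LARGE_STRING) {
  throw ParquetException("use_large_binary_variants expects a large binary/string field: " +
                         field_->ToString());
}
```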
##########
cpp/src/parquet/encoding.cc:
##########
@@ -3503,7 +3591,11 @@ std::unique_ptr<Decoder> MakeDictDecoder(Type::type type_num,
case Type::DOUBLE:
return std::make_unique<DictDecoderImpl<DoubleType>>(descr, pool);
case Type::BYTE_ARRAY:
- return std::make_unique<DictByteArrayDecoderImpl>(descr, pool);
+ if (use_large_binary_variants) {
+ return std::make_unique<DictLargeByteArrayDecoderImpl>(descr, pool);
+ } else {
+ return std::make_unique<DictByteArrayDecoderImpl<>>(descr, pool);
Review Comment:
Why not explicitly write the type or an alias, like
`DictByteArrayDecoderImpl<ByteArrayType>`?
##########
cpp/src/parquet/encoding.cc:
##########
@@ -1854,15 +1920,30 @@ void DictDecoderImpl<ByteArrayType>::InsertDictionary(::arrow::ArrayBuilder* bui
PARQUET_THROW_NOT_OK(binary_builder->InsertMemoValues(*arr));
}
-class DictByteArrayDecoderImpl : public DictDecoderImpl<ByteArrayType>,
- virtual public ByteArrayDecoder {
+template <>
+void DictDecoderImpl<LargeByteArrayType>::InsertDictionary(
+ ::arrow::ArrayBuilder* builder) {
+ auto binary_builder = checked_cast<::arrow::LargeBinaryDictionary32Builder*>(builder);
+
+ // Make a LargeBinaryArray referencing the internal dictionary data
+ auto arr = std::make_shared<::arrow::LargeBinaryArray>(
+ dictionary_length_, byte_array_offsets_, byte_array_data_);
Review Comment:
Here `byte_array_offsets_` is still `int32_t`, but `LargeBinaryArray` uses
`int64_t` offsets; wouldn't that be a problem?
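If the internal offsets stay 32-bit, they would presumably need widening before being handed to `LargeBinaryArray`; a minimal sketch (hypothetical, assuming the decoder's memory pool is reachable as `pool_`):

```cpp
// Hypothetical: copy the int32_t dictionary offsets into the int64_t offsets
// LargeBinaryArray expects, rather than aliasing the 32-bit buffer.
PARQUET_ASSIGN_OR_THROW(
    std::shared_ptr<::arrow::Buffer> large_offsets,
    ::arrow::AllocateBuffer((dictionary_length_ + 1) * sizeof(int64_t), pool_));
const auto* src = reinterpret_cast<const int32_t*>(byte_array_offsets_->data());
auto* dst = reinterpret_cast<int64_t*>(large_offsets->mutable_data());
for (int32_t i = 0; i <= dictionary_length_; ++i) {
  dst[i] = src[i];  // widen each offset to 64 bits
}
auto arr = std::make_shared<::arrow::LargeBinaryArray>(dictionary_length_, large_offsets,
                                                       byte_array_data_);
```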