HonahX commented on issue #14:
URL: https://github.com/apache/iceberg-python/issues/14#issuecomment-1742537487
For reference, I used the following code (added in `test/avro/test_file.py`)
to reproduce the decimal error:
```python
from fastavro import reader, writer

# Inner struct with a single required decimal(10, 2) column — the type that
# triggers the serialization bug being reproduced.
inner_struct_type = StructType(
    fields=[
        NestedField(
            field_id=1000,
            name="decimalCol",
            field_type=DecimalType(precision=10, scale=2),
            required=True,
        )
    ]
)

# Manifest-style schema: a nested partition struct plus a plain long column.
schema = Schema(
    NestedField(
        field_id=102,
        name="partition",
        field_type=inner_struct_type,
        required=True,
        doc="Partition data tuple, schema based on the partition spec",
    ),
    NestedField(
        field_id=103,
        name="record_count",
        field_type=LongType(),
        required=True,
        doc="Number of records in the file",
    ),
)


class InnerDecimalRecord(Record):
    """Record bound to ``inner_struct_type``; carries one Decimal field."""

    decimalCol: Decimal

    def __init__(self, *data: Any, **named_data: Any) -> None:
        super().__init__(*data, **{"struct": inner_struct_type, **named_data})


class NestedRecord(Record):
    """Record bound to the top-level ``schema`` struct."""

    partition: InnerDecimalRecord
    record_count: int

    def __init__(self, *data: Any, **named_data: Any) -> None:
        super().__init__(*data, **{"struct": schema.as_struct(), **named_data})


record = NestedRecord(
    partition=InnerDecimalRecord(Decimal("0.00")),
    record_count=131327,
)

with TemporaryDirectory() as tmpdir:
    tmp_avro_file = tmpdir + "/nested_decimal.avro"

    # Write the record to disk with the PyIceberg Avro writer.
    # NOTE(review): the original snippet had a stray `assert record == avro_entry`
    # here, but `avro_entry` was never defined (NameError) — removed.
    with avro.AvroOutputFile[NestedRecord](
        PyArrowFileIO().new_output(tmp_avro_file), schema, "nested_decimal_schema"
    ) as out:
        out.write_block([record])

    # Read the file back with fastavro and compare against the original record
    # converted to a plain dict; a mismatch demonstrates the decimal error.
    with open(tmp_avro_file, "rb") as fo:
        r = reader(fo=fo)
        fa_entry = next(iter(r))
        assert todict(record) == fa_entry
```
```python
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]