ayushtkn commented on code in PR #5261:
URL: https://github.com/apache/hive/pull/5261#discussion_r1615319595
##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -498,30 +500,58 @@ public boolean
canSetColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsT
@Override
public boolean setColStatistics(org.apache.hadoop.hive.ql.metadata.Table
hmsTable, List<ColumnStatistics> colStats) {
Table tbl = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
- String snapshotId = String.format("%s-STATS-%d", tbl.name(),
tbl.currentSnapshot().snapshotId());
- return writeColStats(colStats.get(0), tbl, snapshotId);
+ return writeColStats(colStats.get(0), tbl);
}
- private boolean writeColStats(ColumnStatistics tableColStats, Table tbl,
String snapshotId) {
+ private boolean writeColStats(ColumnStatistics tableColStats, Table tbl) {
try {
- boolean rewriteStats = removeColStatsIfExists(tbl);
- if (!rewriteStats) {
+ if (!shouldRewriteColStats(tbl)) {
checkAndMergeColStats(tableColStats, tbl);
}
// Currently, we are only serializing table level stats.
byte[] serializeColStats = SerializationUtils.serialize(tableColStats);
- try (PuffinWriter writer =
Puffin.write(tbl.io().newOutputFile(getColStatsPath(tbl).toString()))
+ StatisticsFile statisticsFile;
+ String statsPath = tbl.location() + STATS + UUID.randomUUID();
Review Comment:
Maybe overthinking, but what happens if there is a clash with ``UUID.randomUUID()`` —
i.e., ``UUID.randomUUID()`` returns the same id as some other existing stats file?
##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -498,30 +500,58 @@ public boolean
canSetColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsT
@Override
public boolean setColStatistics(org.apache.hadoop.hive.ql.metadata.Table
hmsTable, List<ColumnStatistics> colStats) {
Table tbl = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
- String snapshotId = String.format("%s-STATS-%d", tbl.name(),
tbl.currentSnapshot().snapshotId());
- return writeColStats(colStats.get(0), tbl, snapshotId);
+ return writeColStats(colStats.get(0), tbl);
}
- private boolean writeColStats(ColumnStatistics tableColStats, Table tbl,
String snapshotId) {
+ private boolean writeColStats(ColumnStatistics tableColStats, Table tbl) {
try {
- boolean rewriteStats = removeColStatsIfExists(tbl);
- if (!rewriteStats) {
+ if (!shouldRewriteColStats(tbl)) {
checkAndMergeColStats(tableColStats, tbl);
}
// Currently, we are only serializing table level stats.
byte[] serializeColStats = SerializationUtils.serialize(tableColStats);
- try (PuffinWriter writer =
Puffin.write(tbl.io().newOutputFile(getColStatsPath(tbl).toString()))
+ StatisticsFile statisticsFile;
+ String statsPath = tbl.location() + STATS + UUID.randomUUID();
+
+ try (PuffinWriter puffinWriter =
Puffin.write(tbl.io().newOutputFile(statsPath))
.createdBy(Constants.HIVE_ENGINE).build()) {
- writer.add(new Blob(tbl.name() + "-" + snapshotId,
ImmutableList.of(1), tbl.currentSnapshot().snapshotId(),
- tbl.currentSnapshot().sequenceNumber(),
ByteBuffer.wrap(serializeColStats), PuffinCompressionCodec.NONE,
- ImmutableMap.of()));
- writer.finish();
- return true;
+ long snapshotId = tbl.currentSnapshot().snapshotId();
+ long snapshotSequenceNumber = tbl.currentSnapshot().sequenceNumber();
+ puffinWriter.add(
+ new Blob(
+ ColumnStatisticsObj.class.getSimpleName(),
+ ImmutableList.of(1),
+ snapshotId,
+ snapshotSequenceNumber,
+ ByteBuffer.wrap(serializeColStats),
+ PuffinCompressionCodec.ZSTD,
+ ImmutableMap.of()
+ ));
+ puffinWriter.finish();
+
+ statisticsFile =
+ new GenericStatisticsFile(
+ snapshotId,
+ statsPath,
+ puffinWriter.fileSize(),
+ puffinWriter.footerSize(),
+ puffinWriter.writtenBlobsMetadata().stream()
+ .map(GenericBlobMetadata::from)
+ .collect(ImmutableList.toImmutableList())
+ );
} catch (IOException e) {
LOG.warn("Unable to write stats to puffin file {}", e.getMessage());
return false;
}
- } catch (InvalidObjectException | IOException e) {
+ Transaction transaction = tbl.newTransaction();
+ transaction
+ .updateStatistics()
+ .setStatistics(statisticsFile.snapshotId(), statisticsFile)
+ .commit();
+ transaction.commitTransaction();
Review Comment:
Why do we need a transaction here? Why don't we directly update the
statistics?
```
tbl.updateStatistics().setStatistics(statisticsFile.snapshotId(),
statisticsFile).commit();
```
##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -498,30 +500,58 @@ public boolean
canSetColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsT
@Override
public boolean setColStatistics(org.apache.hadoop.hive.ql.metadata.Table
hmsTable, List<ColumnStatistics> colStats) {
Table tbl = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
- String snapshotId = String.format("%s-STATS-%d", tbl.name(),
tbl.currentSnapshot().snapshotId());
- return writeColStats(colStats.get(0), tbl, snapshotId);
+ return writeColStats(colStats.get(0), tbl);
}
- private boolean writeColStats(ColumnStatistics tableColStats, Table tbl,
String snapshotId) {
+ private boolean writeColStats(ColumnStatistics tableColStats, Table tbl) {
try {
- boolean rewriteStats = removeColStatsIfExists(tbl);
- if (!rewriteStats) {
+ if (!shouldRewriteColStats(tbl)) {
checkAndMergeColStats(tableColStats, tbl);
}
// Currently, we are only serializing table level stats.
byte[] serializeColStats = SerializationUtils.serialize(tableColStats);
- try (PuffinWriter writer =
Puffin.write(tbl.io().newOutputFile(getColStatsPath(tbl).toString()))
+ StatisticsFile statisticsFile;
+ String statsPath = tbl.location() + STATS + UUID.randomUUID();
+
+ try (PuffinWriter puffinWriter =
Puffin.write(tbl.io().newOutputFile(statsPath))
.createdBy(Constants.HIVE_ENGINE).build()) {
- writer.add(new Blob(tbl.name() + "-" + snapshotId,
ImmutableList.of(1), tbl.currentSnapshot().snapshotId(),
- tbl.currentSnapshot().sequenceNumber(),
ByteBuffer.wrap(serializeColStats), PuffinCompressionCodec.NONE,
- ImmutableMap.of()));
- writer.finish();
- return true;
+ long snapshotId = tbl.currentSnapshot().snapshotId();
+ long snapshotSequenceNumber = tbl.currentSnapshot().sequenceNumber();
+ puffinWriter.add(
+ new Blob(
+ ColumnStatisticsObj.class.getSimpleName(),
+ ImmutableList.of(1),
+ snapshotId,
+ snapshotSequenceNumber,
+ ByteBuffer.wrap(serializeColStats),
+ PuffinCompressionCodec.ZSTD,
Review Comment:
Earlier it was `none`. Are we sure we want to go with ZSTD, or should we
preserve `none` here like before? I think the Iceberg Puffin default is `none`:
https://github.com/apache/iceberg/blob/311dbbb10dfa41fd22966ec87a03d449d0d84f96/core/src/main/java/org/apache/iceberg/puffin/Puffin.java#L39
##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -534,34 +564,24 @@ public boolean
canProvideColStatistics(org.apache.hadoop.hive.ql.metadata.Table
}
private boolean canProvideColStats(Table table, long snapshotId) {
- Path statsPath = getColStatsPath(table, snapshotId);
- try {
- FileSystem fs = statsPath.getFileSystem(conf);
- return fs.exists(statsPath);
- } catch (Exception e) {
- LOG.warn("Exception when trying to find Iceberg column stats for
table:{} , snapshot:{} , " +
- "statsPath: {} , stack trace: {}", table.name(),
table.currentSnapshot(), statsPath, e);
- }
- return false;
+ return IcebergTableUtil.getColStatsPath(table, snapshotId).isPresent();
Review Comment:
Do we know how many times we call ``getColStatsPath()`` in one query? I think
at least twice: once for `canProvideColStats`, and a second time when we do
`getColStatistics`. If it is called more often than that, or is not that cheap,
then maybe we should explore caching it or some other way to avoid calculating
it multiple times.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]