[ https://issues.apache.org/jira/browse/HIVE-27158?focusedWorklogId=853119&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-853119 ]

ASF GitHub Bot logged work on HIVE-27158:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 27/Mar/23 09:24
            Start Date: 27/Mar/23 09:24
    Worklog Time Spent: 10m 
      Work Description: deniskuzZ commented on code in PR #4131:
URL: https://github.com/apache/hive/pull/4131#discussion_r1149035181


##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -349,6 +365,96 @@ public Map<String, String> getBasicStatistics(Partish partish) {
     return stats;
   }
 
+
+  @Override
+  public boolean canSetColStatistics() {
+    String statsSource = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_USE_STATS_FROM).toLowerCase();
+    return statsSource.equals(PUFFIN);
+  }
+
+  @Override
+  public boolean canProvideColStatistics(org.apache.hadoop.hive.ql.metadata.Table tbl) {
+
+    org.apache.hadoop.hive.ql.metadata.Table hmsTable = tbl;
+    TableDesc tableDesc = Utilities.getTableDesc(hmsTable);
+    Table table = Catalogs.loadTable(conf, tableDesc.getProperties());
+    if (table.currentSnapshot() != null) {
+      String statsSource = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_COL_STATS_SOURCE).toLowerCase();
+      String statsPath = table.location() + STATS + table.name() + table.currentSnapshot().snapshotId();
+      if (statsSource.equals(PUFFIN)) {
+        try (FileSystem fs = new Path(table.location()).getFileSystem(conf)) {
+          if (fs.exists(new Path(statsPath))) {
+            return true;
+          }
+        } catch (IOException e) {
+          LOG.warn(e.getMessage());
+        }
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public List<ColumnStatisticsObj> getColStatistics(org.apache.hadoop.hive.ql.metadata.Table tbl) {
+
+    org.apache.hadoop.hive.ql.metadata.Table hmsTable = tbl;
+    TableDesc tableDesc = Utilities.getTableDesc(hmsTable);
+    Table table = Catalogs.loadTable(conf, tableDesc.getProperties());
+    String statsSource = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_COL_STATS_SOURCE).toLowerCase();
+    switch (statsSource) {
+      case ICEBERG:
+        // Place holder for iceberg stats
+        break;
+      case PUFFIN:
+        String snapshotId = table.name() + table.currentSnapshot().snapshotId();
+        String statsPath = table.location() + STATS + snapshotId;
+        LOG.info("Using stats from puffin file at:" + statsPath);
+        try (PuffinReader reader = Puffin.read(table.io().newInputFile(statsPath)).build()) {
+          BlobMetadata blobMetadata = reader.fileMetadata().blobs().get(0);
+          Map<BlobMetadata, List<ColumnStatistics>> collect =
+              Streams.stream(reader.readAll(ImmutableList.of(blobMetadata))).collect(Collectors.toMap(Pair::first,
+                  blobMetadataByteBufferPair -> SerializationUtils.deserialize(
+                      ByteBuffers.toByteArray(blobMetadataByteBufferPair.second()))));
+
+          return collect.entrySet().stream().iterator().next().getValue().get(0).getStatsObj();
+        } catch (IOException e) {
+          LOG.info(String.valueOf(e));
+        }
+        break;
+      default:
+        // fall back to metastore
+    }
+    return null;
+  }
+
+
+  @Override
+  public boolean setColStatistics(org.apache.hadoop.hive.ql.metadata.Table table,
+      List<ColumnStatistics> colStats) {
+    TableDesc tableDesc = Utilities.getTableDesc(table);
+    Table tbl = Catalogs.loadTable(conf, tableDesc.getProperties());
+    String snapshotId = tbl.name() + tbl.currentSnapshot().snapshotId();
+    byte[] serializeColStats = SerializationUtils.serialize((Serializable) colStats);
+
+    try (PuffinWriter writer = Puffin.write(tbl.io().newOutputFile(tbl.location() + STATS + snapshotId))
+        .createdBy("Hive").build()) {
+      writer.add(
+          new Blob(
+              tbl.name() + "-" + snapshotId,
+              ImmutableList.of(1),
+              tbl.currentSnapshot().snapshotId(),
+              tbl.currentSnapshot().sequenceNumber(),
+              ByteBuffer.wrap(serializeColStats),
+              PuffinCompressionCodec.NONE,
+              ImmutableMap.of()));
+      writer.finish();
+    } catch (IOException e) {
+      LOG.info(String.valueOf(e));

Review Comment:
   do not swallow exception
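
A minimal sketch of the kind of change this comment asks for (a hypothetical illustration, not the committed fix): log the IOException at ERROR with its stack trace and rethrow it, e.g. wrapped in an UncheckedIOException, so setColStatistics cannot report success after a failed Puffin write. The class, interface and method names below are invented for the example; only SLF4J and java.io are assumed.

import java.io.IOException;
import java.io.UncheckedIOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical sketch of the pattern the review asks for: keep the stack
// trace and propagate the failure instead of swallowing it.
final class PropagateIoFailureSketch {
  private static final Logger LOG = LoggerFactory.getLogger(PropagateIoFailureSketch.class);

  interface IoAction {
    void run() throws IOException;   // stands in for writer.add(...) + writer.finish()
  }

  static void writeColStats(String statsPath, IoAction puffinWrite) {
    try {
      puffinWrite.run();
    } catch (IOException e) {
      // Log at ERROR with the full stack trace, then rethrow so the caller
      // learns the stats were not persisted (instead of LOG.info(String.valueOf(e))).
      LOG.error("Failed to write Puffin stats file at {}", statsPath, e);
      throw new UncheckedIOException(e);
    }
  }
}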





Issue Time Tracking
-------------------

    Worklog Id:     (was: 853119)
    Time Spent: 5h 10m  (was: 5h)

> Store hive columns stats in puffin files for iceberg tables
> -----------------------------------------------------------
>
>                 Key: HIVE-27158
>                 URL: https://issues.apache.org/jira/browse/HIVE-27158
>             Project: Hive
>          Issue Type: Improvement
>            Reporter: Simhadri Govindappa
>            Assignee: Simhadri Govindappa
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 5h 10m
>  Remaining Estimate: 0h
>




--
This message was sent by Atlassian Jira
(v8.20.10#820010)
