lirui-apache commented on a change in pull request #8449: [FLINK-12235][hive] 
Support partition related operations in HiveCatalog
URL: https://github.com/apache/flink/pull/8449#discussion_r286297791
 
 

 ##########
 File path: 
flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/catalog/hive/HiveCatalog.java
 ##########
 @@ -607,44 +609,231 @@ private  static Table instantiateHiveTable(ObjectPath 
tablePath, CatalogBaseTabl
        // ------ partitions ------
 
        @Override
-       public void createPartition(ObjectPath tablePath, CatalogPartitionSpec 
partitionSpec, CatalogPartition partition, boolean ignoreIfExists)
-                       throws TableNotExistException, 
TableNotPartitionedException, PartitionSpecInvalidException, 
PartitionAlreadyExistsException, CatalogException {
-               throw new UnsupportedOperationException();
+       public boolean partitionExists(ObjectPath tablePath, 
CatalogPartitionSpec partitionSpec)
+               throws CatalogException {
+               try {
+                       return getHivePartition(tablePath, partitionSpec) != 
null;
+               } catch (NoSuchObjectException | TableNotExistException | 
PartitionSpecInvalidException e) {
+                       return false;
+               } catch (TException e) {
+                       throw new CatalogException(
+                               String.format("Failed to get partition %s of 
table %s", partitionSpec, tablePath), e);
+               }
        }
 
        @Override
-       public void dropPartition(ObjectPath tablePath, CatalogPartitionSpec 
partitionSpec, boolean ignoreIfNotExists)
-                       throws PartitionNotExistException, CatalogException {
-               throw new UnsupportedOperationException();
+       public void createPartition(ObjectPath tablePath, CatalogPartitionSpec 
partitionSpec, CatalogPartition partition, boolean ignoreIfExists)
+               throws TableNotExistException, TableNotPartitionedException, 
PartitionSpecInvalidException, PartitionAlreadyExistsException, 
CatalogException {
+
+               Table hiveTable = getHiveTable(tablePath);
+
+               ensurePartitionedTable(tablePath, hiveTable);
+
+               try {
+                       client.add_partition(createHivePartition(hiveTable, 
partitionSpec, partition));
+               } catch (AlreadyExistsException e) {
+                       if (!ignoreIfExists) {
+                               throw new 
PartitionAlreadyExistsException(catalogName, tablePath, partitionSpec);
+                       }
+               } catch (TException e) {
+                       throw new CatalogException(
+                               String.format("Failed to create partition %s of 
table %s", partitionSpec, tablePath));
+               }
        }
 
        @Override
-       public void alterPartition(ObjectPath tablePath, CatalogPartitionSpec 
partitionSpec, CatalogPartition newPartition, boolean ignoreIfNotExists)
-                       throws PartitionNotExistException, CatalogException {
-               throw new UnsupportedOperationException();
+       public void dropPartition(ObjectPath tablePath, CatalogPartitionSpec 
partitionSpec, boolean ignoreIfNotExists)
+               throws PartitionNotExistException, CatalogException {
+               try {
+                       Table hiveTable = getHiveTable(tablePath);
+                       client.dropPartition(tablePath.getDatabaseName(), 
tablePath.getObjectName(),
+                               getOrderedFullPartitionValues(partitionSpec, 
getFieldNames(hiveTable.getPartitionKeys()), tablePath), true);
+               } catch (NoSuchObjectException e) {
+                       if (!ignoreIfNotExists) {
+                               throw new 
PartitionNotExistException(catalogName, tablePath, partitionSpec, e);
+                       }
+               } catch (MetaException | TableNotExistException | 
PartitionSpecInvalidException e) {
+                       throw new PartitionNotExistException(catalogName, 
tablePath, partitionSpec, e);
+               } catch (TException e) {
+                       throw new CatalogException(
+                               String.format("Failed to drop partition %s of 
table %s", partitionSpec, tablePath));
+               }
        }
 
        @Override
        public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath)
-                       throws TableNotExistException, 
TableNotPartitionedException, CatalogException {
-               throw new UnsupportedOperationException();
+               throws TableNotExistException, TableNotPartitionedException, 
CatalogException {
+               Table hiveTable = getHiveTable(tablePath);
+
+               ensurePartitionedTable(tablePath, hiveTable);
+
+               try {
+                       return 
client.listPartitionNames(tablePath.getDatabaseName(), 
tablePath.getObjectName(), (short) -1).stream()
+                               
.map(HiveCatalog::createPartitionSpec).collect(Collectors.toList());
+               } catch (TException e) {
+                       throw new CatalogException(
+                               String.format("Failed to list partitions of 
table %s", tablePath), e);
+               }
        }
 
        @Override
        public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath, 
CatalogPartitionSpec partitionSpec)
-                       throws TableNotExistException, 
TableNotPartitionedException, CatalogException {
-               throw new UnsupportedOperationException();
+               throws TableNotExistException, TableNotPartitionedException, 
CatalogException {
+               Table hiveTable = getHiveTable(tablePath);
+
+               ensurePartitionedTable(tablePath, hiveTable);
+
+               try {
+                       // partition spec can be partial
+                       List<String> partialVals = 
MetaStoreUtils.getPvals(hiveTable.getPartitionKeys(), 
partitionSpec.getPartitionSpec());
+                       return 
client.listPartitionNames(tablePath.getDatabaseName(), 
tablePath.getObjectName(), partialVals,
+                               (short) 
-1).stream().map(HiveCatalog::createPartitionSpec).collect(Collectors.toList());
+               } catch (TException e) {
+                       throw new CatalogException(
+                               String.format("Failed to list partitions of 
table %s", tablePath), e);
+               }
        }
 
        @Override
        public CatalogPartition getPartition(ObjectPath tablePath, 
CatalogPartitionSpec partitionSpec)
-                       throws PartitionNotExistException, CatalogException {
-               throw new UnsupportedOperationException();
+               throws PartitionNotExistException, CatalogException {
+               try {
+                       Partition hivePartition = getHivePartition(tablePath, 
partitionSpec);
+                       return createCatalogPartition(hivePartition);
+               } catch (NoSuchObjectException | MetaException | 
TableNotExistException | PartitionSpecInvalidException e) {
+                       throw new PartitionNotExistException(catalogName, 
tablePath, partitionSpec, e);
+               } catch (TException e) {
+                       throw new CatalogException(
+                               String.format("Failed to get partition %s of 
table %s", partitionSpec, tablePath), e);
+               }
        }
 
        @Override
-       public boolean partitionExists(ObjectPath tablePath, 
CatalogPartitionSpec partitionSpec) throws CatalogException {
-               throw new UnsupportedOperationException();
+       public void alterPartition(ObjectPath tablePath, CatalogPartitionSpec 
partitionSpec, CatalogPartition newPartition, boolean ignoreIfNotExists)
+               throws PartitionNotExistException, CatalogException {
+               // Explicitly check if the partition exists or not
+               // because alter_partition() doesn't throw 
NoSuchObjectException like dropPartition() when the target doesn't exist
+               if (partitionExists(tablePath, partitionSpec)) {
+                       try {
+                               Table hiveTable = getHiveTable(tablePath);
+                               Partition newHivePartition = 
createHivePartition(hiveTable, partitionSpec, newPartition);
+                               if (newHivePartition.getSd().getLocation() == 
null) {
+                                       Partition oldHivePartition = 
getHivePartition(tablePath, partitionSpec);
+                                       
newHivePartition.getSd().setLocation(oldHivePartition.getSd().getLocation());
+                               }
+                               client.alter_partition(
+                                       tablePath.getDatabaseName(),
+                                       tablePath.getObjectName(),
+                                       newHivePartition
+                               );
+                       } catch (InvalidOperationException | MetaException | 
TableNotExistException | PartitionSpecInvalidException e) {
+                               throw new 
PartitionNotExistException(catalogName, tablePath, partitionSpec, e);
+                       } catch (TException e) {
+                               throw new CatalogException(
+                                       String.format("Failed to alter existing 
partition with new partition %s of table %s",
+                                               partitionSpec, tablePath), e);
+                       }
+               } else if (!ignoreIfNotExists) {
+                       throw new PartitionNotExistException(catalogName, 
tablePath, partitionSpec);
+               }
+       }
+
+       private Partition createHivePartition(Table hiveTable, 
CatalogPartitionSpec partitionSpec, CatalogPartition catalogPartition)
 
 Review comment:
   OK

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

Reply via email to