This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 21b32280306 [fix](catalog) gen partition id by name (#39325)
21b32280306 is described below
commit 21b322803060cbf2db9b48ddbca0c25e2f2a1363
Author: Mingyu Chen <[email protected]>
AuthorDate: Mon Aug 19 14:18:05 2024 +0800
[fix](catalog) gen partition id by name (#39325)
Followup #38525
Previously, we used a sequence number to generate the partition id of a table in
the hive metastore.
for example, if there are 2 partitions: `dt=2024-10-02` and
`dt=2024-10-03`, the partition ids will be 0 and 1.
But if a new partition is added: `dt=2024-10-01`, the partition ids
will be 0, 1, and 2.
You can see, before, the id `0` is for `dt=2024-10-02`, but now `0` is
for `dt=2024-10-01`.
This PR uses the catalog/db/table/partition name to generate an id for the
partition,
so that each partition will have a unique id.
---
.../apache/doris/datasource/hive/HiveMetaStoreCache.java | 14 ++++----------
.../java/org/apache/doris/datasource/CatalogMgrTest.java | 2 +-
.../apache/doris/planner/ListPartitionPrunerV2Test.java | 12 ++++++------
3 files changed, 11 insertions(+), 17 deletions(-)
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
index 312f2382b0d..ad36dc221d8 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
@@ -34,6 +34,7 @@ import org.apache.doris.common.UserException;
import org.apache.doris.common.security.authentication.AuthenticationConfig;
import org.apache.doris.common.util.CacheBulkLoader;
import org.apache.doris.common.util.LocationPath;
+import org.apache.doris.common.util.Util;
import org.apache.doris.datasource.CacheException;
import org.apache.doris.datasource.hive.AcidInfo.DeleteDeltaInfo;
import org.apache.doris.datasource.property.PropertyConverter;
@@ -250,9 +251,8 @@ public class HiveMetaStoreCache {
Map<Long, PartitionItem> idToPartitionItem =
Maps.newHashMapWithExpectedSize(partitionNames.size());
BiMap<String, Long> partitionNameToIdMap =
HashBiMap.create(partitionNames.size());
Map<Long, List<UniqueId>> idToUniqueIdsMap =
Maps.newHashMapWithExpectedSize(partitionNames.size());
- long idx = 0;
for (String partitionName : partitionNames) {
- long partitionId = idx++;
+ long partitionId = Util.genIdByName(catalog.getName(), key.dbName,
key.tblName, partitionName);
ListPartitionItem listPartitionItem =
toListPartitionItem(partitionName, key.types);
idToPartitionItem.put(partitionId, listPartitionItem);
partitionNameToIdMap.put(partitionName, partitionId);
@@ -273,7 +273,7 @@ public class HiveMetaStoreCache {
singleUidToColumnRangeMap =
ListPartitionPrunerV2.genSingleUidToColumnRange(singleColumnRangeMap);
}
Map<Long, List<String>> partitionValuesMap =
ListPartitionPrunerV2.getPartitionValuesMap(idToPartitionItem);
- return new HivePartitionValues(idToPartitionItem, uidToPartitionRange,
rangeToId, singleColumnRangeMap, idx,
+ return new HivePartitionValues(idToPartitionItem, uidToPartitionRange,
rangeToId, singleColumnRangeMap,
partitionNameToIdMap, idToUniqueIdsMap,
singleUidToColumnRangeMap, partitionValuesMap);
}
@@ -638,13 +638,12 @@ public class HiveMetaStoreCache {
Map<String, Long> partitionNameToIdMapBefore =
copy.getPartitionNameToIdMap();
Map<Long, List<UniqueId>> idToUniqueIdsMap =
copy.getIdToUniqueIdsMap();
Map<Long, PartitionItem> idToPartitionItem = new HashMap<>();
- long idx = copy.getNextPartitionId();
for (String partitionName : partitionNames) {
if (partitionNameToIdMapBefore.containsKey(partitionName)) {
LOG.info("addPartitionsCache partitionName:[{}] has exist in
table:[{}]", partitionName, tblName);
continue;
}
- long partitionId = idx++;
+ long partitionId = Util.genIdByName(catalog.getName(), dbName,
tblName, partitionName);
ListPartitionItem listPartitionItem =
toListPartitionItem(partitionName, key.types);
idToPartitionItemBefore.put(partitionId, listPartitionItem);
idToPartitionItem.put(partitionId, listPartitionItem);
@@ -653,7 +652,6 @@ public class HiveMetaStoreCache {
Map<Long, List<String>> partitionValuesMapBefore =
copy.getPartitionValuesMap();
Map<Long, List<String>> partitionValuesMap =
ListPartitionPrunerV2.getPartitionValuesMap(idToPartitionItem);
partitionValuesMapBefore.putAll(partitionValuesMap);
- copy.setNextPartitionId(idx);
if (key.types.size() > 1) {
Map<UniqueId, Range<PartitionKey>> uidToPartitionRangeBefore =
copy.getUidToPartitionRange();
// uidToPartitionRange and rangeToId are only used for
multi-column partition
@@ -1075,7 +1073,6 @@ public class HiveMetaStoreCache {
@Data
public static class HivePartitionValues {
- private long nextPartitionId;
private BiMap<String, Long> partitionNameToIdMap;
private Map<Long, List<UniqueId>> idToUniqueIdsMap;
private Map<Long, PartitionItem> idToPartitionItem;
@@ -1094,7 +1091,6 @@ public class HiveMetaStoreCache {
Map<UniqueId, Range<PartitionKey>> uidToPartitionRange,
Map<Range<PartitionKey>, UniqueId> rangeToId,
RangeMap<ColumnBound, UniqueId> singleColumnRangeMap,
- long nextPartitionId,
BiMap<String, Long> partitionNameToIdMap,
Map<Long, List<UniqueId>> idToUniqueIdsMap,
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap,
@@ -1103,7 +1099,6 @@ public class HiveMetaStoreCache {
this.uidToPartitionRange = uidToPartitionRange;
this.rangeToId = rangeToId;
this.singleColumnRangeMap = singleColumnRangeMap;
- this.nextPartitionId = nextPartitionId;
this.partitionNameToIdMap = partitionNameToIdMap;
this.idToUniqueIdsMap = idToUniqueIdsMap;
this.singleUidToColumnRangeMap = singleUidToColumnRangeMap;
@@ -1112,7 +1107,6 @@ public class HiveMetaStoreCache {
public HivePartitionValues copy() {
HivePartitionValues copy = new HivePartitionValues();
- copy.setNextPartitionId(nextPartitionId);
copy.setPartitionNameToIdMap(partitionNameToIdMap == null ? null :
HashBiMap.create(partitionNameToIdMap));
copy.setIdToUniqueIdsMap(idToUniqueIdsMap == null ? null :
Maps.newHashMap(idToUniqueIdsMap));
copy.setIdToPartitionItem(idToPartitionItem == null ? null :
Maps.newHashMap(idToPartitionItem));
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/datasource/CatalogMgrTest.java
b/fe/fe-core/src/test/java/org/apache/doris/datasource/CatalogMgrTest.java
index 5f1e19f3284..e5e8a9d71a5 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/CatalogMgrTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/CatalogMgrTest.java
@@ -548,7 +548,7 @@ public class CatalogMgrTest extends TestWithFeService {
singleUidToColumnRangeMap =
ListPartitionPrunerV2.genSingleUidToColumnRange(singleColumnRangeMap);
}
Map<Long, List<String>> partitionValuesMap =
ListPartitionPrunerV2.getPartitionValuesMap(idToPartitionItem);
- return new HivePartitionValues(idToPartitionItem, uidToPartitionRange,
rangeToId, singleColumnRangeMap, idx,
+ return new HivePartitionValues(idToPartitionItem, uidToPartitionRange,
rangeToId, singleColumnRangeMap,
partitionNameToIdMap, idToUniqueIdsMap,
singleUidToColumnRangeMap, partitionValuesMap);
}
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/planner/ListPartitionPrunerV2Test.java
b/fe/fe-core/src/test/java/org/apache/doris/planner/ListPartitionPrunerV2Test.java
index 6c1accce675..5af7b831e98 100644
---
a/fe/fe-core/src/test/java/org/apache/doris/planner/ListPartitionPrunerV2Test.java
+++
b/fe/fe-core/src/test/java/org/apache/doris/planner/ListPartitionPrunerV2Test.java
@@ -103,8 +103,8 @@ public class ListPartitionPrunerV2Test {
String tblName = "tb";
HiveMetaStoreCache.HivePartitionValues partitionValues =
cache.getPartitionValues(dbName, tblName, types);
Assert.assertEquals(1, partitionValues.getIdToPartitionItem().size());
-
Assert.assertTrue(partitionValues.getIdToPartitionItem().containsKey(0L));
- List<PartitionKey> items =
partitionValues.getIdToPartitionItem().get(0L).getItems();
+
Assert.assertTrue(partitionValues.getIdToPartitionItem().containsKey(8882801933302843777L));
+ List<PartitionKey> items =
partitionValues.getIdToPartitionItem().get(8882801933302843777L).getItems();
Assert.assertEquals(1, items.size());
PartitionKey partitionKey = items.get(0);
Assert.assertEquals("1.234", partitionKey.getKeys().get(0).toString());
@@ -116,8 +116,8 @@ public class ListPartitionPrunerV2Test {
cache.addPartitionsCache(dbName, tblName, values, types);
HiveMetaStoreCache.HivePartitionValues partitionValues2 =
cache.getPartitionValues(dbName, tblName, types);
Assert.assertEquals(2, partitionValues2.getIdToPartitionItem().size());
-
Assert.assertTrue(partitionValues2.getIdToPartitionItem().containsKey(1L));
- List<PartitionKey> items2 =
partitionValues2.getIdToPartitionItem().get(1L).getItems();
+
Assert.assertTrue(partitionValues2.getIdToPartitionItem().containsKey(7070400225537799947L));
+ List<PartitionKey> items2 =
partitionValues2.getIdToPartitionItem().get(7070400225537799947L).getItems();
Assert.assertEquals(1, items2.size());
PartitionKey partitionKey2 = items2.get(0);
Assert.assertEquals("5.678",
partitionKey2.getKeys().get(0).toString());
@@ -128,8 +128,8 @@ public class ListPartitionPrunerV2Test {
cache.invalidateTableCache(dbName, tblName);
HiveMetaStoreCache.HivePartitionValues partitionValues3 =
cache.getPartitionValues(dbName, tblName, types);
Assert.assertEquals(1, partitionValues3.getIdToPartitionItem().size());
-
Assert.assertTrue(partitionValues3.getIdToPartitionItem().containsKey(0L));
- List<PartitionKey> items3 =
partitionValues3.getIdToPartitionItem().get(0L).getItems();
+
Assert.assertTrue(partitionValues3.getIdToPartitionItem().containsKey(8882801933302843777L));
+ List<PartitionKey> items3 =
partitionValues3.getIdToPartitionItem().get(8882801933302843777L).getItems();
Assert.assertEquals(1, items3.size());
PartitionKey partitionKey3 = items3.get(0);
Assert.assertEquals("1.234",
partitionKey3.getKeys().get(0).toString());
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]