This is an automated email from the ASF dual-hosted git repository.
jinsongzhou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/amoro.git
The following commit(s) were added to refs/heads/master by this push:
new aa379e276 [AMORO-3252] Fix primary key duplicate exception when
concurrently inserting table optimization process entries into database (#3520)
aa379e276 is described below
commit aa379e276055ea4faa9b18d0da60b4525af9fe64
Author: Jzjsnow <[email protected]>
AuthorDate: Tue Jul 22 17:41:23 2025 +0800
[AMORO-3252] Fix primary key duplicate exception when concurrently
inserting table optimization process entries into database (#3520)
* [AMORO-3252] Use snowflake ID for optimizing process id
* fixup! [AMORO-3252] Use snowflake ID for optimizing process id
---
.../persistence/mapper/OptimizingMapper.java | 23 ++--
.../inline/OptimizingExpiringExecutor.java | 8 +-
.../amoro/server/table/DefaultOptimizingState.java | 4 +-
.../amoro/server/table/DefaultTableManager.java | 7 +-
.../amoro/server/utils/IcebergTableUtil.java | 4 +-
.../amoro/server/utils/SnowflakeIdGenerator.java | 139 +++++++++++++++++++++
amoro-ams/src/main/resources/mysql/upgrade.sql | 8 +-
amoro-ams/src/main/resources/postgres/upgrade.sql | 8 +-
.../server/util/TestSnowflakeIdGenerator.java | 71 +++++++++++
amoro-web/src/services/table.service.ts | 2 +-
10 files changed, 251 insertions(+), 23 deletions(-)
diff --git
a/amoro-ams/src/main/java/org/apache/amoro/server/persistence/mapper/OptimizingMapper.java
b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/mapper/OptimizingMapper.java
index 81f22b15e..3124a6670 100644
---
a/amoro-ams/src/main/java/org/apache/amoro/server/persistence/mapper/OptimizingMapper.java
+++
b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/mapper/OptimizingMapper.java
@@ -54,8 +54,9 @@ public interface OptimizingMapper {
/** OptimizingProcess operation below */
@Delete(
- "DELETE FROM table_optimizing_process WHERE table_id = #{tableId} and
process_id < #{time}")
- void deleteOptimizingProcessBefore(@Param("tableId") long tableId,
@Param("time") long time);
+ "DELETE FROM table_optimizing_process WHERE table_id = #{tableId} and
process_id < #{expireId}")
+ void deleteOptimizingProcessBefore(
+ @Param("tableId") long tableId, @Param("expireId") long expireId);
@Insert(
"INSERT INTO table_optimizing_process(table_id, catalog_name, db_name,
table_name ,process_id,"
@@ -256,8 +257,8 @@ public interface OptimizingMapper {
void updateTaskRuntime(
@Param("taskRuntime") TaskRuntime<? extends StagedTaskDescriptor<?, ?,
?>> taskRuntime);
- @Delete("DELETE FROM task_runtime WHERE table_id = #{tableId} AND process_id
< #{time}")
- void deleteTaskRuntimesBefore(@Param("tableId") long tableId, @Param("time")
long time);
+ @Delete("DELETE FROM task_runtime WHERE table_id = #{tableId} AND process_id
< #{expireId}")
+ void deleteTaskRuntimesBefore(@Param("tableId") long tableId,
@Param("expireId") long expireId);
/** Optimizing rewrite input and output operations below */
@Update(
@@ -274,7 +275,7 @@ public interface OptimizingMapper {
/** Optimizing task quota operations below */
@Select(
"SELECT process_id, task_id, retry_num, table_id, start_time, end_time,
fail_reason "
- + "FROM optimizing_task_quota WHERE table_id = #{tableId} AND
process_id >= #{startTime}")
+ + "FROM optimizing_task_quota WHERE table_id = #{tableId} AND
process_id >= #{minProcessId}")
@Results(
id = "taskQuota",
value = {
@@ -290,15 +291,15 @@ public interface OptimizingMapper {
@Result(property = "failReason", column = "fail_reason")
})
List<TaskRuntime.TaskQuota> selectTaskQuotasByTime(
- @Param("tableId") long tableId, @Param("startTime") long startTime);
+ @Param("tableId") long tableId, @Param("minProcessId") long
minProcessId);
@Select(
"SELECT process_id, task_id, retry_num, table_id, start_time, end_time,
fail_reason "
- + "FROM optimizing_task_quota WHERE table_id in
(#{tables::number[]}) AND process_id >= #{startTime}")
+ + "FROM optimizing_task_quota WHERE table_id in
(#{tables::number[]}) AND process_id >= #{minProcessId}")
@Lang(InListExtendedLanguageDriver.class)
@ResultMap("taskQuota")
List<TaskRuntime.TaskQuota> selectTableQuotas(
- @Param("tables") Collection<Long> tables, @Param("startTime") long
startTime);
+ @Param("tables") Collection<Long> tables, @Param("minProcessId") long
minProcessId);
@Insert(
"INSERT INTO optimizing_task_quota (process_id, task_id, retry_num,
table_id, start_time, end_time,"
@@ -309,6 +310,8 @@ public interface OptimizingMapper {
+ " #{taskQuota.failReason, jdbcType=VARCHAR})")
void insertTaskQuota(@Param("taskQuota") TaskRuntime.TaskQuota taskQuota);
- @Delete("DELETE FROM optimizing_task_quota WHERE table_id = #{table_id} AND
process_id < #{time}")
- void deleteOptimizingQuotaBefore(@Param("table_id") long tableId,
@Param("time") long timestamp);
+ @Delete(
+ "DELETE FROM optimizing_task_quota WHERE table_id = #{table_id} AND
process_id < #{expireId}")
+ void deleteOptimizingQuotaBefore(
+ @Param("table_id") long tableId, @Param("expireId") long expireId);
}
diff --git
a/amoro-ams/src/main/java/org/apache/amoro/server/scheduler/inline/OptimizingExpiringExecutor.java
b/amoro-ams/src/main/java/org/apache/amoro/server/scheduler/inline/OptimizingExpiringExecutor.java
index e0f5c824e..4a0c330a9 100644
---
a/amoro-ams/src/main/java/org/apache/amoro/server/scheduler/inline/OptimizingExpiringExecutor.java
+++
b/amoro-ams/src/main/java/org/apache/amoro/server/scheduler/inline/OptimizingExpiringExecutor.java
@@ -23,6 +23,7 @@ import
org.apache.amoro.server.persistence.mapper.OptimizingMapper;
import org.apache.amoro.server.scheduler.PeriodicTableScheduler;
import org.apache.amoro.server.table.DefaultTableRuntime;
import org.apache.amoro.server.table.TableService;
+import org.apache.amoro.server.utils.SnowflakeIdGenerator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -62,25 +63,26 @@ public class OptimizingExpiringExecutor extends
PeriodicTableScheduler {
private class Persistency extends PersistentBase {
public void doExpiring(DefaultTableRuntime tableRuntime) {
long expireTime = System.currentTimeMillis() - keepTime;
+ long minProcessId = SnowflakeIdGenerator.getMinSnowflakeId(expireTime);
doAsTransaction(
() ->
doAs(
OptimizingMapper.class,
mapper ->
mapper.deleteOptimizingProcessBefore(
- tableRuntime.getTableIdentifier().getId(),
expireTime)),
+ tableRuntime.getTableIdentifier().getId(),
minProcessId)),
() ->
doAs(
OptimizingMapper.class,
mapper ->
mapper.deleteTaskRuntimesBefore(
- tableRuntime.getTableIdentifier().getId(),
expireTime)),
+ tableRuntime.getTableIdentifier().getId(),
minProcessId)),
() ->
doAs(
OptimizingMapper.class,
mapper ->
mapper.deleteOptimizingQuotaBefore(
- tableRuntime.getTableIdentifier().getId(),
expireTime)));
+ tableRuntime.getTableIdentifier().getId(),
minProcessId)));
}
}
}
diff --git
a/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultOptimizingState.java
b/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultOptimizingState.java
index 12c3106ae..b36de90f2 100644
---
a/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultOptimizingState.java
+++
b/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultOptimizingState.java
@@ -43,6 +43,7 @@ import
org.apache.amoro.server.persistence.mapper.TableBlockerMapper;
import org.apache.amoro.server.persistence.mapper.TableMetaMapper;
import org.apache.amoro.server.table.blocker.TableBlocker;
import org.apache.amoro.server.utils.IcebergTableUtil;
+import org.apache.amoro.server.utils.SnowflakeIdGenerator;
import org.apache.amoro.shade.guava32.com.google.common.base.MoreObjects;
import org.apache.amoro.shade.guava32.com.google.common.base.Preconditions;
import org.apache.amoro.table.BaseTable;
@@ -327,11 +328,12 @@ public class DefaultOptimizingState extends
StatedPersistentBase implements Proc
public void resetTaskQuotas(long startTimeMills) {
tableLock.lock();
try {
+ long minProcessId =
SnowflakeIdGenerator.getMinSnowflakeId(startTimeMills);
taskQuotas.clear();
taskQuotas.addAll(
getAs(
OptimizingMapper.class,
- mapper -> mapper.selectTaskQuotasByTime(tableIdentifier.getId(),
startTimeMills)));
+ mapper -> mapper.selectTaskQuotasByTime(tableIdentifier.getId(),
minProcessId)));
} finally {
tableLock.unlock();
}
diff --git
a/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultTableManager.java
b/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultTableManager.java
index 8eaeb9db7..558443963 100644
---
a/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultTableManager.java
+++
b/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultTableManager.java
@@ -45,6 +45,7 @@ import
org.apache.amoro.server.persistence.mapper.OptimizingMapper;
import org.apache.amoro.server.persistence.mapper.TableBlockerMapper;
import org.apache.amoro.server.persistence.mapper.TableMetaMapper;
import org.apache.amoro.server.table.blocker.TableBlocker;
+import org.apache.amoro.server.utils.SnowflakeIdGenerator;
import org.apache.amoro.shade.guava32.com.google.common.base.Preconditions;
import org.apache.amoro.shade.guava32.com.google.common.collect.Lists;
import org.apache.amoro.shade.guava32.com.google.common.collect.Maps;
@@ -300,11 +301,9 @@ public class DefaultTableManager extends PersistentBase
implements TableManager
}
long calculatingEndTime = System.currentTimeMillis();
long calculatingStartTime = calculatingEndTime -
AmoroServiceConstants.QUOTA_LOOK_BACK_TIME;
-
+ long minProcessId =
SnowflakeIdGenerator.getMinSnowflakeId(calculatingStartTime);
List<TaskRuntime.TaskQuota> quotas =
- getAs(
- OptimizingMapper.class,
- mapper -> mapper.selectTableQuotas(tableIds,
calculatingStartTime));
+ getAs(OptimizingMapper.class, mapper ->
mapper.selectTableQuotas(tableIds, minProcessId));
return quotas.stream()
.collect(Collectors.groupingBy(TaskRuntime.TaskQuota::getTableId,
Collectors.toList()));
diff --git
a/amoro-ams/src/main/java/org/apache/amoro/server/utils/IcebergTableUtil.java
b/amoro-ams/src/main/java/org/apache/amoro/server/utils/IcebergTableUtil.java
index 2393b2949..e378df326 100644
---
a/amoro-ams/src/main/java/org/apache/amoro/server/utils/IcebergTableUtil.java
+++
b/amoro-ams/src/main/java/org/apache/amoro/server/utils/IcebergTableUtil.java
@@ -79,6 +79,7 @@ import java.util.stream.Collectors;
public class IcebergTableUtil {
private static final Logger LOG =
LoggerFactory.getLogger(IcebergTableUtil.class);
+ private static final SnowflakeIdGenerator snowflakeIdGenerator = new
SnowflakeIdGenerator();
public static long getSnapshotId(Table table, boolean refresh) {
Snapshot currentSnapshot = getSnapshot(table, refresh);
@@ -266,8 +267,7 @@ public class IcebergTableUtil {
table, entry.getKey(), entry.getValue()))
.reduce(Expressions::or)
.orElse(Expressions.alwaysTrue());
- long planTime = System.currentTimeMillis();
- long processId = Math.max(optimizingState.getNewestProcessId() + 1,
planTime);
+ long processId = snowflakeIdGenerator.generateId();
ServerTableIdentifier identifier = tableRuntime.getTableIdentifier();
OptimizingConfig config = optimizingState.getOptimizingConfig();
long lastMinor = optimizingState.getLastMinorOptimizingTime();
diff --git
a/amoro-ams/src/main/java/org/apache/amoro/server/utils/SnowflakeIdGenerator.java
b/amoro-ams/src/main/java/org/apache/amoro/server/utils/SnowflakeIdGenerator.java
new file mode 100644
index 000000000..96b7285cb
--- /dev/null
+++
b/amoro-ams/src/main/java/org/apache/amoro/server/utils/SnowflakeIdGenerator.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.amoro.server.utils;
+
+/** SnowflakeId generator */
+public class SnowflakeIdGenerator {
+ // Base timestamp offset subtracted before encoding (currently 0). Note: despite the
+ private static final long EPOCH_SECONDS = 0L;
+
+ // Given that the maximum int value in javascript is 2^53-1, here the length
of the SnowflakeId is
+ // compressed to 54 bits (fixed header 1 bit + timestamp part + machine ID
part + sequence part)
+ // Number of bits allocated for the timestamp part
+ private static final long TIMESTAMP_BITS = 40L;
+
+ // Number of bits allocated for the machine ID part
+ private static final long MACHINE_ID_BITS = 5L;
+
+ // Number of bits allocated for the sequence number part
+ private static final long SEQUENCE_BITS = 8L;
+
+ // Left shift amount for the timestamp part
+ private static final long TIMESTAMP_LEFT_SHIFT = MACHINE_ID_BITS +
SEQUENCE_BITS;
+
+ // Left shift amount for the machine ID part
+ private static final long MACHINE_ID_LEFT_SHIFT = SEQUENCE_BITS;
+
+ // Maximum value for the machine ID
+ private static final long MAX_MACHINE_ID = ~(-1L << MACHINE_ID_BITS);
+
+ // Maximum value for the sequence number
+ private static final long SEQUENCE_MASK = ~(-1L << SEQUENCE_BITS);
+
+ // Machine ID and Sequence
+ private final long machineId;
+ private static final long DEFAULT_MACHINE_ID = 0L;
+ private long sequence = 0L;
+ private long lastTimestamp = -1L;
+
+ /** Constructor with default machine ID */
+ public SnowflakeIdGenerator() {
+ this(DEFAULT_MACHINE_ID);
+ }
+
+ /**
+ * Constructor to set Machine ID
+ *
+ * @param machineId Machine ID, must be between 0 and 31 (inclusive)
+ */
+ public SnowflakeIdGenerator(long machineId) {
+ if (machineId > MAX_MACHINE_ID || machineId < 0) {
+ throw new IllegalArgumentException("Machine ID must be between 0 and " +
MAX_MACHINE_ID);
+ }
+ this.machineId = machineId;
+ }
+
+ /** Generate a unique Snowflake ID */
+ public synchronized long generateId() {
+ long timestamp = currentTime();
+
+ if (timestamp < lastTimestamp) {
+ throw new RuntimeException("Clock moved backwards!");
+ }
+
+ // If the timestamp is the same as the last generated, increment sequence
number
+ if (timestamp == lastTimestamp) {
+ sequence = (sequence + 1) & SEQUENCE_MASK;
+ if (sequence == 0) {
+ timestamp =
+ waitForNextMillis(lastTimestamp); // If sequence overflows, wait
for next millisecond
+ }
+ } else {
+ sequence = 0;
+ }
+
+ lastTimestamp = timestamp;
+
+ // Return Snowflake ID by shifting the parts to the correct positions
+ return ((timestamp - EPOCH_SECONDS) << TIMESTAMP_LEFT_SHIFT)
+ | (machineId << MACHINE_ID_LEFT_SHIFT)
+ | sequence;
+ }
+
+ /** Get the current timestamp in units of 10 ms (System.currentTimeMillis() / 10) */
+ private long currentTime() {
+ return System.currentTimeMillis() / 10;
+ }
+
+ private long waitForNextMillis(long lastTimestamp) {
+ long timestamp = currentTime();
+ while (timestamp <= lastTimestamp) {
+ timestamp = currentTime();
+ }
+ return timestamp;
+ }
+
+ /**
+ * Get the minimum Snowflake ID for a specific timestamp (for example,
1735689600
+ * (2025-01-01T00:00:00Z))
+ *
+ * @param timestamp Specified timestamp (in milliseconds or seconds)
+ * @return Minimum Snowflake ID
+ */
+ public static long getMinSnowflakeId(long timestamp) {
+ // Normalize to 10 ms units: values below 1e10 are treated as seconds, otherwise as
+ if (timestamp < 10000000000L) {
+ timestamp *= 100; // Convert seconds to 10ms
+ } else {
+ timestamp /= 10; // Convert milliseconds to 10ms
+ }
+
+ return (timestamp << TIMESTAMP_LEFT_SHIFT);
+ }
+
+ /**
+ * Extract the timestamp part from a Snowflake ID
+ *
+ * @param snowflakeId Snowflake ID
+ * @return Timestamp part in seconds
+ */
+ public static long extractTimestamp(long snowflakeId) {
+ return (snowflakeId >> TIMESTAMP_LEFT_SHIFT) / 100 + EPOCH_SECONDS;
+ }
+}
diff --git a/amoro-ams/src/main/resources/mysql/upgrade.sql
b/amoro-ams/src/main/resources/mysql/upgrade.sql
index 15ce40098..71bc576c5 100644
--- a/amoro-ams/src/main/resources/mysql/upgrade.sql
+++ b/amoro-ams/src/main/resources/mysql/upgrade.sql
@@ -17,4 +17,10 @@
-- We will confirm the corresponding version of these upgrade scripts when
releasing.
-- Update the precision from s level to ms.
- ALTER TABLE `table_runtime` MODIFY COLUMN `optimizing_status_start_time`
TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP(3) COMMENT 'Table optimize status start
time';
+ALTER TABLE `table_runtime` MODIFY COLUMN `optimizing_status_start_time`
TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP(3) COMMENT 'Table optimize status start
time';
+
+-- Update processId to SnowflakeId
+UPDATE `table_optimizing_process` SET `process_id` = `process_id` /10 << 13;
+UPDATE `task_runtime` SET `process_id` = `process_id` /10 << 13;
+UPDATE `optimizing_task_quota` SET `process_id` = `process_id` /10 << 13;
+UPDATE `table_runtime` SET `optimizing_process_id` = `optimizing_process_id`
/10 << 13;
\ No newline at end of file
diff --git a/amoro-ams/src/main/resources/postgres/upgrade.sql
b/amoro-ams/src/main/resources/postgres/upgrade.sql
index de690614b..ec89d292b 100644
--- a/amoro-ams/src/main/resources/postgres/upgrade.sql
+++ b/amoro-ams/src/main/resources/postgres/upgrade.sql
@@ -19,4 +19,10 @@
-- Update the precision from s level to ms.
ALTER TABLE table_runtime
ALTER COLUMN optimizing_status_start_time TYPE TIMESTAMP(3),
- ALTER COLUMN optimizing_status_start_time SET DEFAULT CURRENT_TIMESTAMP(3);
\ No newline at end of file
+ ALTER COLUMN optimizing_status_start_time SET DEFAULT CURRENT_TIMESTAMP(3);
+
+-- Update processId to SnowflakeId
+UPDATE table_optimizing_process SET process_id = process_id /10 << 13;
+UPDATE task_runtime SET process_id = process_id /10 << 13;
+UPDATE optimizing_task_quota SET process_id = process_id /10 << 13;
+UPDATE table_runtime SET optimizing_process_id = optimizing_process_id /10 <<
13;
diff --git
a/amoro-ams/src/test/java/org/apache/amoro/server/util/TestSnowflakeIdGenerator.java
b/amoro-ams/src/test/java/org/apache/amoro/server/util/TestSnowflakeIdGenerator.java
new file mode 100644
index 000000000..fd0a63c7d
--- /dev/null
+++
b/amoro-ams/src/test/java/org/apache/amoro/server/util/TestSnowflakeIdGenerator.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.amoro.server.util;
+
+import org.apache.amoro.server.utils.SnowflakeIdGenerator;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSnowflakeIdGenerator {
+ private SnowflakeIdGenerator generator;
+
+ private static final long TEST_TIMESTAMP_S = 1735689600;
+ private static final long TEST_TIMESTAMP_MS = 1735689600000L;
+ private static final long TEST_MACHINE_ID = 0;
+ private static final long TEST_SEQUENCE = 0;
+ private static final long TEST_ID =
+ (TEST_TIMESTAMP_S * 100 << 13) | (TEST_MACHINE_ID << 5) | TEST_SEQUENCE;
+ private static final long TEST_MIN_ID = 1421876920320000L;
+
+ @Before
+ public void setUp() {
+ generator = new SnowflakeIdGenerator(0);
+ }
+
+ @Test
+ public void testConstructor_InvalidMachineId_ThrowsException() {
+ Assert.assertThrows(IllegalArgumentException.class, () -> new
SnowflakeIdGenerator(32));
+ }
+
+ @Test
+ public void testGenerateId() {
+ long id1 = generator.generateId();
+ long id2 = generator.generateId();
+ Assert.assertTrue(id2 > id1);
+ }
+
+ @Test
+ public void testGetMinSnowflakeId_From_Timestamp_S() {
+ long minId = SnowflakeIdGenerator.getMinSnowflakeId(TEST_TIMESTAMP_S);
+ Assert.assertEquals(TEST_MIN_ID, minId);
+ }
+
+ @Test
+ public void testGetMinSnowflakeId_From_Timestamp_Ms() {
+ long minId = SnowflakeIdGenerator.getMinSnowflakeId(TEST_TIMESTAMP_MS);
+ Assert.assertEquals(TEST_MIN_ID, minId);
+ }
+
+ @Test
+ public void testExtractTimestamp() {
+ long extractedTimestamp = SnowflakeIdGenerator.extractTimestamp(TEST_ID);
+ Assert.assertEquals(TEST_TIMESTAMP_S, extractedTimestamp);
+ }
+}
diff --git a/amoro-web/src/services/table.service.ts
b/amoro-web/src/services/table.service.ts
index ded3d1f7f..08f046cca 100644
--- a/amoro-web/src/services/table.service.ts
+++ b/amoro-web/src/services/table.service.ts
@@ -181,7 +181,7 @@ export function getTableOptimizingTypes(
return
request.get(`api/ams/v1/tables/catalogs/${catalog}/dbs/${db}/tables/${table}/optimizing-types`,
{ params: { token } })
}
-// get optimizing taskes
+// get optimizing tasks
export function getTasksByOptimizingProcessId(
params: {
catalog: string