This is an automated email from the ASF dual-hosted git repository.
tkhurana pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git
The following commit(s) were added to refs/heads/master by this push:
new 1115c7c306 PHOENIX-300 Add support for TRUNCATE TABLE (#1409)
1115c7c306 is described below
commit 1115c7c306e3981a8c14a1fdd26063e55bdf4b6e
Author: Rahul Agarkar <[email protected]>
AuthorDate: Sat Jan 31 00:17:29 2026 +0530
PHOENIX-300 Add support for TRUNCATE TABLE (#1409)
Co-authored-by: Abhishek Kothalikar <[email protected]>
---
phoenix-core-client/src/main/antlr3/PhoenixSQL.g | 20 +-
.../apache/phoenix/exception/SQLExceptionCode.java | 7 +
.../org/apache/phoenix/jdbc/PhoenixStatement.java | 34 ++
.../org/apache/phoenix/parse/ParseNodeFactory.java | 14 +
.../phoenix/parse/TruncateTableStatement.java | 59 +++
.../phoenix/query/ConnectionQueryServices.java | 9 +-
.../phoenix/query/ConnectionQueryServicesImpl.java | 23 +
.../query/ConnectionlessQueryServicesImpl.java | 5 +
.../query/DelegateConnectionQueryServices.java | 6 +
.../org/apache/phoenix/schema/MetaDataClient.java | 128 +++++
.../apache/phoenix/end2end/TruncateTableIT.java | 534 +++++++++++++++++++++
11 files changed, 837 insertions(+), 2 deletions(-)
diff --git a/phoenix-core-client/src/main/antlr3/PhoenixSQL.g
b/phoenix-core-client/src/main/antlr3/PhoenixSQL.g
index a2e913093e..b905c43f3e 100644
--- a/phoenix-core-client/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core-client/src/main/antlr3/PhoenixSQL.g
@@ -161,6 +161,9 @@ tokens
REGIONS = 'regions';
NOVERIFY = 'noverify';
RETURNING = 'returning';
+ TRUNCATE = 'truncate';
+ PRESERVE='preserve';
+ SPLITS='splits';
}
@@ -432,6 +435,7 @@ oneStatement returns [BindableStatement ret]
| s=upsert_node
| s=delete_node
| s=create_table_node
+ | s=truncate_table_node
| s=create_schema_node
| s=create_view_node
| s=create_index_node
@@ -487,7 +491,21 @@ create_table_node returns [CreateTableStatement ret]
(COLUMN_QUALIFIER_COUNTER LPAREN cqc=initializiation_list RPAREN)?
{ret = factory.createTable(t, p, c, pk, s, PTableType.TABLE, ex!=null,
null, null, getBindCount(), im!=null ? true : null, cqc, noverify!=null); }
;
-
+
+// Parse a truncate table statement.
+truncate_table_node returns [TruncateTableStatement ret]
+ : TRUNCATE TABLE t=from_table_name
+ (
+ // Case 1: Explicitly DROP SPLITS
+ DROP SPLITS
+ { $ret = factory.truncateTable(t, PTableType.TABLE, false); }
+ |
+ // Default Case: PRESERVE SPLITS or Nothing (Both mean
preserve=true)
+ (PRESERVE SPLITS)?
+ { $ret = factory.truncateTable(t, PTableType.TABLE, true); }
+ )
+ ;
+
// Parse a create schema statement.
create_schema_node returns [CreateSchemaStatement ret]
: CREATE SCHEMA (IF NOT ex=EXISTS)? s=identifier
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 7ecd02528d..5d9fde1659 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -620,6 +620,13 @@ public enum SQLExceptionCode {
// 1153 code is taken by CANNOT_DROP_CDC_INDEX
SPLIT_FILE_DONT_EXIST(1154, "XCL54", "Either split file don't exist or is
not a file"),
UNABLE_TO_OPEN_SPLIT_FILE(1155, "XCL55", "Exception occurred while opening
splits file"),
+ CANNOT_TRUNCATE_MULTITENANT_TABLE(1158, "XCL58",
+ "Cannot truncate a multi-tenant table from a tenant-specific connection."),
+ TRUNCATE_NOT_ALLOWED_ON_VIEW(1159, "XCL59", "Truncate is not allowed on
views."),
+ TRUNCATE_NOT_ALLOWED_ON_SYSTEM_TABLE(1160, "XCL60",
+ "Cannot truncate tables of type" + PTableType.SYSTEM),
+ TRUNCATE_MUST_PRESERVE_SPLITS_FOR_SALTED_TABLE(1161, "XCL61",
+ "Truncate must preserve splits for salted tables."),
/**
* Implementation defined class. Phoenix internal error. (errorcode 20,
sqlstate INT).
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 8ef621d535..b527ecd042 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -189,6 +189,7 @@ import org.apache.phoenix.parse.ShowTablesStatement;
import org.apache.phoenix.parse.TableName;
import org.apache.phoenix.parse.TableNode;
import org.apache.phoenix.parse.TraceStatement;
+import org.apache.phoenix.parse.TruncateTableStatement;
import org.apache.phoenix.parse.UDFParseNode;
import org.apache.phoenix.parse.UpdateStatisticsStatement;
import org.apache.phoenix.parse.UpsertStatement;
@@ -1245,6 +1246,33 @@ public class PhoenixStatement implements
PhoenixMonitoredStatement, SQLCloseable
}
}
+ private static class ExecutableTruncateTableStatement extends
TruncateTableStatement
+ implements CompilableStatement {
+ ExecutableTruncateTableStatement(TableName tableName, PTableType tableType,
+ boolean preserveSplits) {
+ super(tableName, tableType, preserveSplits);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp
seqAction)
+ throws SQLException {
+ final StatementContext context = new StatementContext(stmt);
+ return new BaseMutationPlan(context, this.getOperation()) {
+ @Override
+ public ExplainPlan getExplainPlan() throws SQLException {
+ return new ExplainPlan(Collections.singletonList("Truncate Table"));
+ }
+
+ @Override
+ public MutationState execute() throws SQLException {
+ MetaDataClient client = new
MetaDataClient(getContext().getConnection());
+ return client.truncateTable(ExecutableTruncateTableStatement.this);
+ }
+ };
+ }
+ }
+
private static class ExecutableCreateTableStatement extends
CreateTableStatement
implements CompilableStatement {
ExecutableCreateTableStatement(TableName tableName,
@@ -2201,6 +2229,12 @@ public class PhoenixStatement implements
PhoenixMonitoredStatement, SQLCloseable
bindCount);
}
+ @Override
+ public TruncateTableStatement truncateTable(TableName tableName,
PTableType tableType,
+ boolean preserveSplits) {
+ return new ExecutableTruncateTableStatement(tableName, tableType,
preserveSplits);
+ }
+
@Override
public CreateSchemaStatement createSchema(String schemaName, boolean
ifNotExists) {
return new ExecutableCreateSchemaStatement(schemaName, ifNotExists);
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 6a22664695..716dbd566a 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -379,6 +379,20 @@ public class ParseNodeFactory {
baseTableName, tableTypeIdNode, bindCount, immutableRows, null, false);
}
+ public TruncateTableStatement truncateTable(TableName tableName, PTableType
tableType,
+ boolean preserveSplits) {
+ return new TruncateTableStatement(tableName, tableType, preserveSplits);
+ }
+
+  // Two-argument overload kept for backward compatibility; defaults to preserving splits.
+ public TruncateTableStatement truncateTable(TableName tableName, PTableType
tableType) {
+ return new TruncateTableStatement(tableName, tableType, true);
+ }
+
+ // public TruncateTableStatement truncateTable(TableName tableName,
PTableType tableType) {
+ // return new TruncateTableStatement(tableName, tableType);
+ // }
+
public CreateSchemaStatement createSchema(String schemaName, boolean
ifNotExists) {
return new CreateSchemaStatement(schemaName, ifNotExists);
}
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TruncateTableStatement.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TruncateTableStatement.java
new file mode 100644
index 0000000000..cdf1339d5d
--- /dev/null
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TruncateTableStatement.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.schema.PTableType;
+
+public class TruncateTableStatement extends MutableStatement {
+ private final TableName tableName;
+ private final PTableType tableType;
+ private final boolean preserveSplits;
+
+ public TruncateTableStatement(TruncateTableStatement truncateTableStatement)
{
+ this.tableName = truncateTableStatement.tableName;
+ this.tableType = truncateTableStatement.tableType;
+ this.preserveSplits = true;
+ }
+
+ protected TruncateTableStatement(TableName tableName, PTableType tableType,
+ boolean preserveSplits) {
+ this.tableName = tableName;
+ this.tableType =
PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(tableName.getSchemaName())
+ ? PTableType.SYSTEM
+ : tableType;
+ this.preserveSplits = preserveSplits;
+ }
+
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ public PTableType getTableType() {
+ return tableType;
+ }
+
+ public boolean preserveSplits() {
+ return preserveSplits;
+ }
+
+ @Override
+ public int getBindCount() {
+ return 0;
+ }
+}
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 45340d88d5..8808d67d76 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -298,7 +298,14 @@ public interface ConnectionQueryServices extends
QueryServices, MetaDataMutated
String columnName, String familyName) throws SQLException;
/**
- * Close all phoenix connections created using this CQS.
+ * Truncate a phoenix table
+ */
+ void truncateTable(String schemaName, String tableName, boolean
isNamespaceMapped,
+ boolean preserveSplits) throws SQLException;
+
+ /**
+   * Close all phoenix connections created using this CQS.
* @param reasonBuilder exception builder for building reasons why
connection is closed.
*/
default void closeAllConnections(SQLExceptionInfo.Builder reasonBuilder) {
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 4ab5b6336b..8b5a4a3870 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2819,6 +2819,29 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices
dropTables(Collections.<byte[]> singletonList(tableNameToDelete));
}
+ @Override
+ public void truncateTable(String schemaName, String tableName, boolean
isNamespaceMapped,
+ boolean preserveSplits) throws SQLException {
+ SQLException sqlE = null;
+ TableName hbaseTableName = SchemaUtil.getPhysicalTableName(
+ SchemaUtil.getTableName(schemaName,
tableName).getBytes(StandardCharsets.UTF_8),
+ isNamespaceMapped);
+ try {
+ Admin admin = getAdmin();
+ admin.disableTable(hbaseTableName);
+ admin.truncateTable(hbaseTableName, preserveSplits);
+ assert admin.isTableEnabled(hbaseTableName);
+ // Invalidate the region cache post truncation
+ clearTableRegionCache(hbaseTableName);
+ } catch (Exception e) {
+ sqlE = ClientUtil.parseServerException(e);
+ } finally {
+ if (sqlE != null) {
+ throw sqlE;
+ }
+ }
+ }
+
@VisibleForTesting
void dropTables(final List<byte[]> tableNamesToDelete) throws SQLException {
SQLException sqlE = null;
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index ad5df1e39e..d0fe82ee19 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -885,6 +885,11 @@ public class ConnectionlessQueryServicesImpl extends
DelegateQueryServices
String columnName, String familyName) throws SQLException {
}
+ @Override
+ public void truncateTable(String schemaName, String tableName, boolean
isNamespaceMapped,
+ boolean preserveSplits) throws SQLException {
+ }
+
@Override
public PMetaData getMetaDataCache() {
return metaData;
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index fdb78faa00..24a4229709 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -64,6 +64,12 @@ public class DelegateConnectionQueryServices extends
DelegateQueryServices
super(delegate);
}
+ @Override
+ public void truncateTable(String schemaName, String tableName, boolean
isNamespaceMapped,
+ boolean preserveSplits) throws SQLException {
+ getDelegate().truncateTable(schemaName, tableName, isNamespaceMapped,
preserveSplits);
+ }
+
@Override
public ConnectionQueryServices getDelegate() {
return (ConnectionQueryServices) super.getDelegate();
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index edb38da970..1b8e898bd3 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -23,12 +23,14 @@ import static
org.apache.phoenix.coprocessorclient.tasks.IndexRebuildTaskConstan
import static
org.apache.phoenix.coprocessorclient.tasks.IndexRebuildTaskConstants.REBUILD_ALL;
import static
org.apache.phoenix.exception.SQLExceptionCode.CANNOT_SET_CONDITIONAL_TTL_ON_TABLE_WITH_MULTIPLE_COLUMN_FAMILIES;
import static
org.apache.phoenix.exception.SQLExceptionCode.CANNOT_TRANSFORM_TRANSACTIONAL_TABLE;
+import static
org.apache.phoenix.exception.SQLExceptionCode.CANNOT_TRUNCATE_MULTITENANT_TABLE;
import static
org.apache.phoenix.exception.SQLExceptionCode.CDC_ALREADY_ENABLED;
import static
org.apache.phoenix.exception.SQLExceptionCode.ERROR_WRITING_TO_SCHEMA_REGISTRY;
import static
org.apache.phoenix.exception.SQLExceptionCode.INSUFFICIENT_MULTI_TENANT_COLUMNS;
import static
org.apache.phoenix.exception.SQLExceptionCode.PARENT_TABLE_NOT_FOUND;
import static
org.apache.phoenix.exception.SQLExceptionCode.SALTING_NOT_ALLOWED_FOR_CDC;
import static
org.apache.phoenix.exception.SQLExceptionCode.TABLE_ALREADY_EXIST;
+import static
org.apache.phoenix.exception.SQLExceptionCode.TRUNCATE_NOT_ALLOWED_ON_SYSTEM_TABLE;
import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
@@ -253,6 +255,7 @@ import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.parse.PrimaryKeyConstraint;
import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.parse.TruncateTableStatement;
import org.apache.phoenix.parse.UpdateStatisticsStatement;
import org.apache.phoenix.parse.UseSchemaStatement;
import org.apache.phoenix.query.ConnectionQueryServices;
@@ -4034,6 +4037,131 @@ public class MetaDataClient {
}
}
+ private void deleteStatistics(List<String> truncatedObjectList) throws
SQLException {
+ if (truncatedObjectList == null || truncatedObjectList.isEmpty()) {
+ return;
+ }
+ String deleteStatsSql = "DELETE FROM " +
PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " WHERE "
+ + PhoenixDatabaseMetaData.PHYSICAL_NAME + " = ?";
+ try (PreparedStatement stmt = connection.prepareStatement(deleteStatsSql))
{
+ for (String physicalName : truncatedObjectList) {
+ stmt.setString(1, physicalName);
+ stmt.executeUpdate();
+ }
+ connection.commit();
+ }
+ }
+
+ private PTable getPTableRef(String schemaName, String tableName) throws
SQLException {
+ return connection.getTable(
+ new PTableKey(connection.getTenantId(),
SchemaUtil.getTableName(schemaName, tableName)));
+ }
+
+ private void validateObjectTypeBeingTruncated(PTable pTable, String
tableName, String schemaName,
+ boolean preserveSplits) throws SQLException {
+ // Cannot drop splits on a salted table
+ if (!preserveSplits && pTable.getBucketNum() != null) {
+ throw new SQLExceptionInfo.Builder(
+
SQLExceptionCode.TRUNCATE_MUST_PRESERVE_SPLITS_FOR_SALTED_TABLE).setSchemaName(schemaName)
+ .setTableName(tableName).build().buildException();
+ }
+ // disallow truncating a view
+ if (pTable.getType() == PTableType.VIEW) {
+ throw new
SQLExceptionInfo.Builder(SQLExceptionCode.TRUNCATE_NOT_ALLOWED_ON_VIEW)
+
.setSchemaName(schemaName).setTableName(tableName).build().buildException();
+ }
+ // block if the table type is SYSTEM or if it resides in the SYSTEM schema
+ if (
+ pTable.getType() == PTableType.SYSTEM
+ || PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(schemaName)
+ ) {
+ throw new SQLExceptionInfo.Builder(TRUNCATE_NOT_ALLOWED_ON_SYSTEM_TABLE)
+
.setSchemaName(schemaName).setTableName(tableName).build().buildException();
+ }
+ // Block truncate as it would wipe data for ALL tenants
+ if (pTable.isMultiTenant() && connection.getTenantId() != null) {
+ throw new SQLExceptionInfo.Builder(CANNOT_TRUNCATE_MULTITENANT_TABLE)
+
.setSchemaName(schemaName).setTableName(tableName).build().buildException();
+ }
+ }
+
+ private void truncateGlobalIndexes(PTable pTable, List<String>
truncatedObjectList,
+ boolean preserveSplits) throws SQLException {
+ for (PTable index : pTable.getIndexes()) {
+ if (index.getIndexType() != IndexType.LOCAL) {
+ if (!preserveSplits && index.getBucketNum() != null) {
+ throw new SQLExceptionInfo.Builder(
+ SQLExceptionCode.TRUNCATE_MUST_PRESERVE_SPLITS_FOR_SALTED_TABLE)
+
.setTableName(index.getName().getString()).build().buildException();
+ }
+ String indexSchemaName = index.getSchemaName().getString();
+ String indexTableName = index.getTableName().getString();
+ connection.getQueryServices().truncateTable(indexSchemaName,
indexTableName,
+ index.isNamespaceMapped(), preserveSplits);
+ truncatedObjectList.add(index.getPhysicalName().getString());
+ }
+ }
+ }
+
+ private void truncateSharedViewIndexTable(PTable pTable, List<String>
truncatedObjectList,
+ boolean preserveSplits) throws SQLException {
+ if (pTable.getType() == PTableType.TABLE) {
+ byte[] viewIndexPhysicalNameBytes =
+
MetaDataUtil.getViewIndexPhysicalName(pTable.getPhysicalName().getBytes());
+ String viewIndexPhysicalName =
Bytes.toString(viewIndexPhysicalNameBytes);
+
+ try (Admin admin = connection.getQueryServices().getAdmin()) {
+ org.apache.hadoop.hbase.TableName viewIdxTableName =
+ org.apache.hadoop.hbase.TableName.valueOf(viewIndexPhysicalName);
+ if (admin.tableExists(viewIdxTableName)) {
+ String viewIdxSchema =
SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName);
+ String viewIdxTableStr =
SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName);
+ connection.getQueryServices().truncateTable(viewIdxSchema,
viewIdxTableStr, false,
+ preserveSplits);
+
connection.getQueryServices().clearTableRegionCache(viewIdxTableName);
+ truncatedObjectList.add(viewIndexPhysicalName);
+ }
+ } catch (IOException e) {
+ throw ClientUtil.parseServerException(e);
+ }
+ }
+ }
+
+ public MutationState truncateTable(TruncateTableStatement statement) throws
SQLException {
+ String schemaName =
+ connection.getSchema() != null &&
statement.getTableName().getSchemaName() == null
+ ? connection.getSchema()
+ : statement.getTableName().getSchemaName();
+ boolean isNamespaceMapped =
SchemaUtil.isNamespaceMappingEnabled(statement.getTableType(),
+ connection.getQueryServices().getProps());
+ String tableName = statement.getTableName().getTableName();
+ boolean preserveSplits = statement.preserveSplits();
+ PTable pTable = getPTableRef(schemaName, tableName);
+
+ // Allow truncate only for valid object types
+ validateObjectTypeBeingTruncated(pTable, tableName, schemaName,
preserveSplits);
+
+ try {
+ List<String> truncatedObjectList = new ArrayList<>();
+ // Truncate the Base Table
+ connection.getQueryServices().truncateTable(schemaName, tableName,
isNamespaceMapped,
+ preserveSplits);
+ // Mark for stats deletion
+ truncatedObjectList.add(pTable.getPhysicalName().getString());
+ // Identify and Truncate Global Indexes
+ truncateGlobalIndexes(pTable, truncatedObjectList, preserveSplits);
+ // Identify and Truncate Shared View Index Table
+ truncateSharedViewIndexTable(pTable, truncatedObjectList,
preserveSplits);
+ // Delete Statistics of all the tables being truncated
+ deleteStatistics(truncatedObjectList);
+ // Update cache post truncation
+ updateCache(schemaName, tableName);
+ return new MutationState(0, 0, connection);
+ } catch (SQLException e) {
+ throw e;
+ }
+ }
+
public MutationState dropTable(DropTableStatement statement) throws
SQLException {
String schemaName =
connection.getSchema() != null &&
statement.getTableName().getSchemaName() == null
diff --git
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateTableIT.java
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateTableIT.java
new file mode 100644
index 0000000000..a1f8cd8eca
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TruncateTableIT.java
@@ -0,0 +1,534 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Properties;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(ParallelStatsDisabledTest.class)
+public class TruncateTableIT extends ParallelStatsDisabledIT {
+
+ @Test
+ public void testTruncateTable() throws SQLException {
+ Properties props = new Properties();
+ String tableName = generateUniqueName();
+
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ String createTableDDL = "CREATE TABLE " + tableName + " (pk char(2) not
null primary key)";
+ conn.createStatement().execute(createTableDDL);
+
+ try {
+ conn.setAutoCommit(true);
+ try (PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + tableName + " VALUES('a')")) {
+ stmt.execute();
+ }
+
+ try (
+ ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*)
FROM " + tableName)) {
+ assertTrue(rs.next());
+ assertEquals(1, rs.getInt(1));
+ }
+
+ conn.createStatement().execute("TRUNCATE TABLE " + tableName);
+
+ try (
+ ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*)
FROM " + tableName)) {
+ assertTrue(rs.next());
+ assertEquals(0, rs.getInt(1));
+ }
+ } finally {
+ conn.createStatement().execute("DROP TABLE IF EXISTS " + tableName);
+ }
+ } catch (SQLException e) {
+ fail(e.getMessage());
+ }
+ }
+
+ @Test
+ public void testTruncateTableNotExist() throws Exception {
+ Properties props = new Properties();
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ String tableName = "nonExistentTable";
+ try {
+ conn.createStatement().execute("TRUNCATE TABLE " + tableName);
+ fail("Should have thrown TableNotFoundException");
+ } catch (TableNotFoundException e) {
+ // Expected
+ }
+ }
+ }
+
+ @Test
+ public void testTruncateTableNonExistentSchema() throws SQLException {
+ Properties props = new Properties();
+ String schemaName = "nonExistentSchema";
+ String tableName = generateUniqueName();
+
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.createStatement().execute("CREATE TABLE " + tableName + " (C1
INTEGER PRIMARY KEY)");
+ try {
+ conn.setAutoCommit(true);
+
+ try (PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(1)")) {
+ stmt.execute();
+ }
+
+ try (
+ ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*)
FROM " + tableName)) {
+ assertTrue(rs.next());
+ assertEquals(1, rs.getInt(1));
+ }
+
+ try {
+ conn.createStatement().execute("TRUNCATE TABLE " + schemaName + "."
+ tableName);
+ fail("Should have failed due to non-existent schema/table");
+ } catch (SQLException e) {
+ // Expected
+ }
+ } finally {
+ conn.createStatement().execute("DROP TABLE IF EXISTS " + tableName);
+ }
+ }
+ }
+
+ @Test
+ public void testTruncateTableWithImplicitSchema() throws SQLException {
+ Properties props = new Properties();
+ String schemaName = generateUniqueName();
+ String tableName = generateUniqueName();
+ String fullTableName = schemaName + "." + tableName;
+ String createTableWithSchema =
+ "CREATE TABLE " + fullTableName + " (C1 char(2) NOT NULL PRIMARY KEY)";
+
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.createStatement().execute(createTableWithSchema);
+ try {
+ conn.setAutoCommit(true);
+
+ try (PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + fullTableName + "
values('a')")) {
+ stmt.execute();
+ }
+
+ try (ResultSet rs =
+ conn.createStatement().executeQuery("SELECT COUNT(*) FROM " +
fullTableName)) {
+ assertTrue(rs.next());
+ assertEquals(1, rs.getInt(1));
+ }
+
+ try (PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + fullTableName + "
values('b')")) {
+ stmt.execute();
+ }
+
+ try (ResultSet rs =
+ conn.createStatement().executeQuery("SELECT COUNT(*) FROM " +
fullTableName)) {
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ }
+
+ conn.createStatement().execute("TRUNCATE TABLE " + fullTableName);
+
+ try (ResultSet rs =
+ conn.createStatement().executeQuery("SELECT COUNT(*) FROM " +
fullTableName)) {
+ assertTrue(rs.next());
+ assertEquals(0, rs.getInt(1));
+ }
+ } finally {
+ conn.createStatement().execute("DROP TABLE IF EXISTS " +
fullTableName);
+ }
+ } catch (SQLException e) {
+ fail(e.getMessage());
+ }
+ }
+
+ @Test
+ public void testTruncateTableWithExplicitSchema() throws SQLException {
+ Properties props = new Properties();
+ props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED,
Boolean.toString(true));
+ props.setProperty(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE,
Boolean.toString(false));
+
+ String schemaName = generateUniqueName();
+ String tableName = generateUniqueName();
+
+ String schemaCreateStmt = "CREATE SCHEMA IF NOT EXISTS " + schemaName;
+ String tableCreateStmt = "CREATE TABLE IF NOT EXISTS " + tableName
+ + " (C1 char(2) NOT NULL PRIMARY KEY) SALT_BUCKETS=4";
+
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ conn.createStatement().execute(schemaCreateStmt);
+ conn.createStatement().execute("USE " + schemaName);
+ conn.createStatement().execute(tableCreateStmt);
+
+ try {
+ try (PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + tableName + " VALUES ('a')"))
{
+ stmt.execute();
+ }
+
+ try (
+ ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*)
FROM " + tableName)) {
+ assertTrue(rs.next());
+ assertEquals(1, rs.getInt(1));
+ }
+
+ try (PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + tableName + " VALUES ('b')"))
{
+ stmt.execute();
+ }
+
+ try (
+ ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*)
FROM " + tableName)) {
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ }
+
+ conn.createStatement().execute("TRUNCATE TABLE " + tableName);
+
+ try (
+ ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*)
FROM " + tableName)) {
+ assertTrue(rs.next());
+ assertEquals(0, rs.getInt(1));
+ }
+ } finally {
+ conn.createStatement().execute("DROP TABLE IF EXISTS " + schemaName +
"." + tableName);
+ conn.createStatement().execute("DROP SCHEMA IF EXISTS " + schemaName);
+ }
+ } catch (SQLException e) {
+ fail(e.getMessage());
+ }
+ }
+
+ @Test
+ public void testTruncateViewNotAllowed() throws SQLException {
+ Properties props = new Properties();
+ String tableName = generateUniqueName();
+ String viewName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.createStatement()
+ .execute("CREATE TABLE " + tableName + " (pk char(2) not null primary
key)");
+ conn.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT *
FROM " + tableName);
+ try {
+ conn.createStatement().execute("TRUNCATE TABLE " + viewName);
+ fail("Should not allow truncating a VIEW");
+ } catch (SQLException e) {
+
assertEquals(SQLExceptionCode.TRUNCATE_NOT_ALLOWED_ON_VIEW.getErrorCode(),
+ e.getErrorCode());
+ } finally {
+ conn.createStatement().execute("DROP VIEW IF EXISTS " + viewName);
+ conn.createStatement().execute("DROP TABLE IF EXISTS " + tableName);
+ }
+ }
+ }
+
+ @Test
+ public void testTruncateSystemTable() throws SQLException {
+ Properties props = new Properties();
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ try {
+ conn.createStatement().execute("TRUNCATE TABLE SYSTEM.CATALOG");
+ fail("Should not be able to truncate SYSTEM.CATALOG");
+ } catch (SQLException e) {
+
assertEquals(SQLExceptionCode.TRUNCATE_NOT_ALLOWED_ON_SYSTEM_TABLE.getErrorCode(),
+ e.getErrorCode());
+ }
+ }
+ }
+
+ @Test
+ public void testTruncateClearsRegionCache() throws Exception {
+ Properties props = new Properties();
+ String tableName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ // 1. Create Table and Insert Data
+ conn.createStatement()
+ .execute("CREATE TABLE " + tableName + " (K VARCHAR PRIMARY KEY, V
VARCHAR)");
+ try {
+ conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES
('a', 'a')");
+ conn.commit();
+
+ PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+ org.apache.hadoop.hbase.TableName hbaseTableName =
+ SchemaUtil.getPhysicalTableName(tableName.getBytes(), false);
+
+ try (org.apache.hadoop.hbase.client.Admin admin =
pconn.getQueryServices().getAdmin()) {
+ org.apache.hadoop.hbase.client.Connection hbaseConn =
admin.getConnection();
+
+ try (org.apache.hadoop.hbase.client.RegionLocator locator =
+ hbaseConn.getRegionLocator(hbaseTableName)) {
+ // Populate the cache by retrieving the region location
+ org.apache.hadoop.hbase.HRegionLocation loc1 =
+
locator.getRegionLocation(org.apache.hadoop.hbase.util.Bytes.toBytes("a"),
false);
+ Assert.assertNotNull("Region location should not be null", loc1);
+ // Truncating table creates new regions with new Region IDs
(timestamps) and should
+ // invalidate the client cache
+ conn.createStatement().execute("TRUNCATE TABLE " + tableName);
+ // Retrieve the region location again
+ // If the cache was cleared, this will fetch the NEW region from
.META.
+ // If the cache was NOT cleared, this will return the OLD (stale)
region from the cache.
+ org.apache.hadoop.hbase.HRegionLocation loc2 =
+
locator.getRegionLocation(org.apache.hadoop.hbase.util.Bytes.toBytes("a"),
false);
+ Assert.assertNotNull("Region location should not be null after
truncate", loc2);
+
+ // Verify that the Region IDs are different
+ // HBase Truncate (preserve splits) drops and recreates regions,
so the Region ID
+ // (timestamp) must change.
+ if (loc1.getRegion().getRegionId() ==
loc2.getRegion().getRegionId()) {
+ fail(
+ "Region cache was not cleared after truncate. Client returned
stale region location: "
+ + loc1);
+ }
+ }
+ }
+ } finally {
+ conn.createStatement().execute("DROP TABLE IF EXISTS " + tableName);
+ }
+ }
+ }
+
+ @Test
+ public void testTruncateTableDropSplits() throws Exception {
+ Properties props = new Properties();
+ String tableName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ // Create table with splits
+ conn.createStatement().execute("CREATE TABLE " + tableName
+ + " (ID INTEGER PRIMARY KEY, V1 VARCHAR) SPLIT ON (10, 20, 30)");
+ try {
+ // Verify splits exist
+ org.apache.hadoop.hbase.TableName physName =
+ SchemaUtil.getPhysicalTableName(tableName.getBytes(), false);
+ PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+ try (org.apache.hadoop.hbase.client.Admin admin =
pconn.getQueryServices().getAdmin()) {
+ assertEquals(4, admin.getRegions(physName).size());
+ }
+
+ // Truncate with DROP SPLITS
+ conn.createStatement().execute("TRUNCATE TABLE " + tableName + " DROP
SPLITS");
+
+ // Verify table is empty and has only 1 region
+ try (org.apache.hadoop.hbase.client.Admin admin =
pconn.getQueryServices().getAdmin()) {
+ assertEquals(1, admin.getRegions(physName).size());
+ }
+ } finally {
+ conn.createStatement().execute("DROP TABLE IF EXISTS " + tableName);
+ }
+ }
+ }
+
+ @Test
+ public void testTruncateSaltedTableDropSplitsFails() throws Exception {
+ Properties props = new Properties();
+ String tableName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.createStatement().execute(
+ "CREATE TABLE " + tableName + " (ID INTEGER PRIMARY KEY, V1 VARCHAR)
SALT_BUCKETS=4");
+
+ try {
+ conn.createStatement().execute("TRUNCATE TABLE " + tableName + " DROP
SPLITS");
+ fail("Should not allow DROP SPLITS on salted table");
+ } catch (SQLException e) {
+
assertEquals(SQLExceptionCode.TRUNCATE_MUST_PRESERVE_SPLITS_FOR_SALTED_TABLE.getErrorCode(),
+ e.getErrorCode());
+ } finally {
+ conn.createStatement().execute("DROP TABLE IF EXISTS " + tableName);
+ }
+ }
+ }
+
+ @Test
+ public void testTruncateMultiTenantTableBlockedForTenantConnection() throws
SQLException {
+ String tableName = generateUniqueName();
+ String tenantId = "tenant1";
+
+ // Create a Multi-Tenant Table using a Global Connection
+ try (Connection globalConn = DriverManager.getConnection(getUrl())) {
+ String ddl = "CREATE TABLE " + tableName
+ + " (TENANT_ID VARCHAR NOT NULL, ID INTEGER NOT NULL, V1 VARCHAR "
+ + "CONSTRAINT PK PRIMARY KEY (TENANT_ID, ID)) MULTI_TENANT=true";
+ globalConn.createStatement().execute(ddl);
+
+ try {
+ // Insert data for "tenant1"
+ globalConn.createStatement()
+ .execute("UPSERT INTO " + tableName + " VALUES ('" + tenantId + "',
1, 'data1')");
+ // Insert data for "tenant2"
+ globalConn.createStatement()
+ .execute("UPSERT INTO " + tableName + " VALUES ('tenant2', 2,
'data2')");
+ globalConn.commit();
+
+ Properties props = new Properties();
+ props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+ try (Connection tenantConn = DriverManager.getConnection(getUrl(),
props)) {
+
+ // Verify tenant sees their data
+ ResultSet rs = tenantConn.createStatement().executeQuery("SELECT *
FROM " + tableName);
+ assertTrue(rs.next());
+ assertEquals(1, rs.getInt("ID"));
+
+ // Attempt Truncate from Tenant Connection
+ try {
+ tenantConn.createStatement().execute("TRUNCATE TABLE " +
tableName);
+ fail("Truncate should be blocked for multi-tenant table from
tenant connection");
+ } catch (SQLException e) {
+
assertEquals(SQLExceptionCode.CANNOT_TRUNCATE_MULTITENANT_TABLE.getErrorCode(),
+ e.getErrorCode());
+ }
+ }
+
+ // Verify Data Intact (via Global Connection)
+ ResultSet rs =
+ globalConn.createStatement().executeQuery("SELECT COUNT(*) FROM " +
tableName);
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1)); // Both tenant rows should still exist
+
+ } finally {
+ globalConn.createStatement().execute("DROP TABLE IF EXISTS " +
tableName);
+ }
+ }
+ }
+
+ @Test
+ public void testTruncateTableWithSplitsAndIndexes() throws Exception {
+ String tableName = generateUniqueName();
+ int numRegions = 15;
+ int rowsPerRegion = 25;
+ int numGlobalIndexes = 5;
+ int numViews = 5;
+ int numViewIndexes = 4;
+
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ // Create Table with Splits (14 split points for 15 regions)
+ StringBuilder splitPoints = new StringBuilder();
+ for (int i = 1; i < numRegions; i++) {
+ if (i > 1) splitPoints.append(", ");
+ // Use a prefix to ensure distribution, e.g., '01', '02', ... '14'
+ splitPoints.append(String.format("'%02d'", i * rowsPerRegion));
+ }
+
+ String ddl = "CREATE TABLE " + tableName
+ + " (ID CHAR(3) NOT NULL PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) SPLIT ON
("
+ + splitPoints.toString() + ")";
+ conn.createStatement().execute(ddl);
+
+ try {
+ // Create Global Indexes on Base Table
+ for (int i = 0; i < numGlobalIndexes; i++) {
+ conn.createStatement().execute(
+ "CREATE INDEX " + tableName + "_IDX_" + i + " ON " + tableName + "
(V1) INCLUDE (V2)");
+ }
+
+ // Create Views and Global Indexes on Views
+ for (int i = 0; i < numViews; i++) {
+ String viewName = tableName + "_VIEW_" + i;
+ conn.createStatement()
+ .execute("CREATE VIEW " + viewName + " AS SELECT * FROM " +
tableName);
+ for (int j = 0; j < numViewIndexes; j++) {
+ conn.createStatement().execute(
+ "CREATE INDEX " + viewName + "_IDX_" + j + " ON " + viewName + "
(V2) INCLUDE (V1)");
+ }
+ }
+
+ // Insert Data (distributed across regions)
+ conn.setAutoCommit(false);
+ int totalRows = numRegions * rowsPerRegion;
+ try (PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + tableName + " VALUES (?, ?,
?)")) {
+ for (int i = 0; i < totalRows; i++) {
+ // Key format ensures distribution: 000, 001, ..., 374
+ // Split points: '025', '050', ...
+ String key = String.format("%03d", i);
+ stmt.setString(1, key);
+ stmt.setString(2, "v1_" + i);
+ stmt.setString(3, "v2_" + i);
+ stmt.execute();
+ }
+ }
+ conn.commit();
+
+ // Verify Region Count Before Truncate
+ PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+ org.apache.hadoop.hbase.TableName physName =
+ SchemaUtil.getPhysicalTableName(tableName.getBytes(), false);
+ try (org.apache.hadoop.hbase.client.Admin admin =
pconn.getQueryServices().getAdmin()) {
+ assertEquals("Region count before truncate should be " + numRegions,
numRegions,
+ admin.getRegions(physName).size());
+ }
+
+ // Verify Data Count
+ ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*)
FROM " + tableName);
+ assertTrue(rs.next());
+ assertEquals(totalRows, rs.getInt(1));
+
+ // Perform Truncate (Preserve Splits is default)
+ conn.createStatement().execute("TRUNCATE TABLE " + tableName);
+
+ // Verify Table Empty
+ rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " +
tableName);
+ assertTrue(rs.next());
+ assertEquals(0, rs.getInt(1));
+
+ // Verify Region Count After Truncate (Should still be 15)
+ try (org.apache.hadoop.hbase.client.Admin admin =
pconn.getQueryServices().getAdmin()) {
+ assertEquals("Region count after truncate should be " + numRegions,
numRegions,
+ admin.getRegions(physName).size());
+ }
+
+ // Verify stats are deleted/reset (optional but good practice given
previous context)
+ conn.createStatement().execute("UPDATE STATISTICS " + tableName);
+ // Stats verification logic if needed
+
+ } finally {
+ // Cleanup
+ for (int i = 0; i < numViews; i++) {
+ String viewName = tableName + "_VIEW_" + i;
+ for (int j = 0; j < numViewIndexes; j++) {
+ conn.createStatement()
+ .execute("DROP INDEX IF EXISTS " + viewName + "_IDX_" + j + " ON
" + viewName);
+ }
+ conn.createStatement().execute("DROP VIEW IF EXISTS " + viewName);
+ }
+ for (int i = 0; i < numGlobalIndexes; i++) {
+ conn.createStatement()
+ .execute("DROP INDEX IF EXISTS " + tableName + "_IDX_" + i + " ON
" + tableName);
+ }
+ conn.createStatement().execute("DROP TABLE IF EXISTS " + tableName);
+ }
+ }
+ }
+}