yihua commented on code in PR #13669:
URL: https://github.com/apache/hudi/pull/13669#discussion_r2252254335
########## hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -0,0 +1,653 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.table.upgrade;
+
+import org.apache.hudi.common.config.RecordMergeMode;
+import org.apache.hudi.common.model.HoodieIndexMetadata;
+import org.apache.hudi.common.table.HoodieTableConfig;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.HoodieTableVersion;
+import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
+import org.apache.hudi.common.table.timeline.InstantFileNameGenerator;
+import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
+import org.apache.hudi.common.testutils.HoodieTestUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.metadata.HoodieTableMetadata;
+import org.apache.hudi.storage.StoragePath;
+import org.apache.hudi.testutils.SparkClientFunctionalTestHarness;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Properties;
+import java.util.stream.Stream;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Test class for upgrade/downgrade operations using pre-created fixture tables
+ * from different Hudi releases.
+ */
+public class TestUpgradeDowngrade extends SparkClientFunctionalTestHarness {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestUpgradeDowngrade.class);
+  private static final String FIXTURES_BASE_PATH = "/upgrade-downgrade-fixtures/mor-tables/";
+
+  @TempDir
+  java.nio.file.Path tempDir;
+
+  private HoodieTableMetaClient metaClient;
+
+  @ParameterizedTest
+  @MethodSource("upgradeVersions")
+  public void testUpgradeOnly(HoodieTableVersion originalVersion) throws Exception {
+    LOG.info("Testing upgrade for version {}", originalVersion);
+
+    HoodieTableMetaClient originalMetaClient = loadFixtureTable(originalVersion);
+    assertEquals(originalVersion, originalMetaClient.getTableConfig().getTableVersion(),
+        "Fixture table should be at expected version");
+
+    Option<HoodieTableVersion> targetVersionOpt = getNextVersion(originalVersion);
+    if (!targetVersionOpt.isPresent()) {
+      LOG.info("Skipping upgrade test for version {} (no higher version available)", originalVersion);
+      return;
+    }
+    HoodieTableVersion targetVersion = targetVersionOpt.get();
+
+    HoodieWriteConfig config = createWriteConfig(originalMetaClient, true);
+
+    int initialPendingCommits = originalMetaClient.getCommitsTimeline().filterPendingExcludingCompaction().countInstants();
+    int initialCompletedCommits = originalMetaClient.getCommitsTimeline().filterCompletedInstants().countInstants();
+
+    // Read original data before upgrade for validation
+    Dataset<Row> originalData = readTableData(originalMetaClient, "before upgrade");
+
+    LOG.info("Upgrading from {} to {}", originalVersion, targetVersion);
+    new UpgradeDowngrade(originalMetaClient, config, context(), SparkUpgradeDowngradeHelper.getInstance())
+        .run(targetVersion, null);
+
+    HoodieTableMetaClient upgradedMetaClient = HoodieTableMetaClient.builder()
+        .setConf(storageConf().newInstance())
+        .setBasePath(originalMetaClient.getBasePath())
+        .build();
+
+    assertTableVersionOnDataAndMetadataTable(upgradedMetaClient, targetVersion);
+    validateVersionSpecificProperties(upgradedMetaClient, originalVersion, targetVersion);
+    validateDataConsistency(originalData, upgradedMetaClient, "after upgrade");
+
+    int finalPendingCommits = upgradedMetaClient.getCommitsTimeline().filterPendingExcludingCompaction().countInstants();
+    assertTrue(finalPendingCommits <= initialPendingCommits,
+        "Pending commits should be cleaned up or reduced after upgrade");
+
+    int finalCompletedCommits = upgradedMetaClient.getCommitsTimeline().filterCompletedInstants().countInstants();
+    assertTrue(finalCompletedCommits >= initialCompletedCommits,
+        "Completed commits should be preserved or increased after upgrade");
+
+    LOG.info("Successfully completed upgrade test for version {} -> {}", originalVersion, targetVersion);
+  }
+
+  @ParameterizedTest
+  @MethodSource("downgradeVersions")
+  public void testDowngradeOnly(HoodieTableVersion targetVersion) throws Exception {

Review Comment:
   nit: `targetVersion` is still the original version?
   ```suggestion
     public void testDowngradeOnly(HoodieTableVersion targetVersion) throws Exception {
   ```
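   The suggestion block above is identical to the original line, so the concrete fix is left implicit; one hypothetical reading of the nit is a rename like the following sketch (the name `originalVersion` is an assumption, chosen to mirror `testUpgradeOnly`, not taken from the PR):
   ```java
     // Hypothetical rename: the parameter is the version the fixture table starts at,
     // and the actual downgrade target is derived from it via getPreviousVersion(...).
     public void testDowngradeOnly(HoodieTableVersion originalVersion) throws Exception {
   ```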
########## hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -0,0 +1,653 @@
+  /**
+   * Create write config for test operations (upgrade/downgrade/validation).
+   */
+  private HoodieWriteConfig createWriteConfig(HoodieTableMetaClient metaClient, boolean autoUpgrade) {
+    Properties props = new Properties();
+    props.putAll(metaClient.getTableConfig().getProps());

Review Comment:
   It is not necessary to add all table configs to the write config. Might be OK for now.
########## hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -0,0 +1,653 @@
+  /**
+   * Get the next version up from the current version.
+   */
+  private Option<HoodieTableVersion> getNextVersion(HoodieTableVersion current) {
+    switch (current) {
+      case FOUR:
+        return Option.of(HoodieTableVersion.FIVE);
+      case FIVE:
+        return Option.of(HoodieTableVersion.SIX);
+      case SIX:
+        // even though there is a table version 7, this is not an official release and serves as a bridge
+        // so the next version should be 8
+        return Option.of(HoodieTableVersion.EIGHT);
+      case EIGHT:
+        return Option.of(HoodieTableVersion.NINE);
+      case NINE:
+        return Option.empty(); // No higher version available

Review Comment:
   nit: remove this line to fall through
########## hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -0,0 +1,653 @@
+    HoodieWriteConfig.Builder builder = HoodieWriteConfig.newBuilder()
+        .withPath(metaClient.getBasePath().toString())
+        .withAutoUpgradeVersion(autoUpgrade)
+        .withProps(props);
+
+    // Add timeline layout version only if available (needed for upgrade operations)
+    if (metaClient.getTableConfig().getTimelineLayoutVersion().isPresent()) {
+      builder.withTimelineLayoutVersion(metaClient.getTableConfig().getTimelineLayoutVersion().get().getVersion());
+    }

Review Comment:
   The timeline layout version should not be directly set by the user.
########## hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -0,0 +1,653 @@
+  /**
+   * Get the previous version down from the current version.
+   */
+  private Option<HoodieTableVersion> getPreviousVersion(HoodieTableVersion current) {
+    switch (current) {
+      case NINE:
+        return Option.of(HoodieTableVersion.EIGHT);
+      case EIGHT:
+        return Option.of(HoodieTableVersion.SIX);
+      case SIX:
+        return Option.of(HoodieTableVersion.FIVE);
+      case FIVE:
+        return Option.of(HoodieTableVersion.FOUR);
+      case FOUR:
+        return Option.empty(); // for now we are focusing on V4-V9

Review Comment:
   nit: remove this line to fall through

--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use
the URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
