This is an automated email from the ASF dual-hosted git repository.

klcopp pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git

commit 999ac7ac01eca723a35d5077930bf2c9405644f9
Author: Karen Coppage <karen.copp...@cloudera.com>
AuthorDate: Mon Aug 10 13:37:02 2020 +0200

    Revert "HIVE-24021: Read insert-only tables truncated by Impala correctly"
    
    This reverts commit 0df679664d12a60d52050e074aa4ea6ed614bd02.
---
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java    |  4 --
 .../hadoop/hive/ql/TestTxnCommandsForMmTable.java  | 44 ----------------------
 2 files changed, 48 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index f1afceb..991f151 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -220,10 +220,6 @@ public class AcidUtils {
       if (name.startsWith(OrcAcidVersion.ACID_FORMAT)) {
         return true;
       }
-      // Don't filter out empty files
-      if (name.startsWith("_empty")) {
-        return true;
-      }
       return !name.startsWith("_") && !name.startsWith(".");
     }
   };
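
For context, the hunk above removes the special case that let a file literally named "_empty" pass the hidden-file filter (Impala writes such a file when truncating an insert-only table). A minimal sketch of the filter's post-revert behavior follows; the class and constant names are simplified assumptions, not the actual AcidUtils source:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    // Simplified illustration only: after the revert, any file whose name
    // starts with "_" or "." is treated as hidden again, including "_empty".
    public class HiddenFileFilterSketch {
      static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
        @Override
        public boolean accept(Path p) {
          String name = p.getName();
          // The reverted change had an extra name.startsWith("_empty") check
          // here that returned true, so Impala's empty marker file was kept.
          return !name.startsWith("_") && !name.startsWith(".");
        }
      };

      public static void main(String[] args) {
        System.out.println(HIDDEN_FILES_PATH_FILTER.accept(new Path("000000_0"))); // true
        System.out.println(HIDDEN_FILES_PATH_FILTER.accept(new Path("_empty")));   // false after the revert
      }
    }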
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java
index d92b40e..535bf11 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql;
 
 import java.io.File;
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -482,49 +481,6 @@ public class TestTxnCommandsForMmTable extends TxnCommandsBaseForTests {
     verifyDirAndResult(0, true);
   }
 
-  /**
-   * Impala truncates insert-only tables by writing a base directory (like insert overwrite) containing an empty file
-   * named "_empty". Generally in Hive files beginning with an underscore are hidden, so here we make sure that Hive
-   * reads these bases correctly.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testImpalaTruncatedMmTable() throws Exception {
-    FileSystem fs = FileSystem.get(hiveConf);
-    FileStatus[] status;
-
-    Path tblLocation = new Path(TEST_WAREHOUSE_DIR + "/" +
-        (TableExtended.MMTBL).toString().toLowerCase());
-
-    // 1. Insert two rows to an MM table
-    runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) 
values(1,2)");
-    runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) 
values(3,4)");
-    status = fs.listStatus(tblLocation, FileUtils.STAGING_DIR_PATH_FILTER);
-    // There should be 2 delta dirs in the location
-    Assert.assertEquals(2, status.length);
-    for (int i = 0; i < status.length; i++) {
-      Assert.assertTrue(status[i].getPath().getName().matches("delta_.*"));
-    }
-
-    // 2. Simulate Impala truncating the table: write a base dir (base_0000003) containing an empty file.
-    // Hive will name the empty file "000000_0"
-    runStatementOnDriver("insert overwrite  table " + TableExtended.MMTBL + " select * from "
-        + TableExtended.MMTBL + " where 1=2");
-    status = fs.listStatus(tblLocation, FileUtils.STAGING_DIR_PATH_FILTER);
-    // There should be 2 delta dirs, plus 1 base dir in the location
-    Assert.assertEquals(3, status.length);
-    verifyDirAndResult(2, true);
-
-    // rename empty file to "_empty"
-    Path basePath = new Path(tblLocation, "base_0000003");
-    fs.rename(new Path(basePath, "000000_0"), new Path(basePath, "_empty"));
-
-    // 3. Verify query result. Selecting from a truncated table should return nothing.
-    List<String> rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
-    Assert.assertEquals(Collections.emptyList(), rs);
-  }
-
   private void verifyDirAndResult(int expectedDeltas) throws Exception {
     verifyDirAndResult(expectedDeltas, false);
   }

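The reverted test above simulated an Impala truncate by leaving a base directory that contains only an empty file named "_empty". A rough, self-contained sketch of that layout follows; the local filesystem and the /tmp path are illustrative assumptions, not the test's actual setup:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Recreates the directory layout Impala leaves behind after truncating
    // an insert-only table: a base dir holding a single empty "_empty" file.
    public class ImpalaTruncateLayoutSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path base = new Path("/tmp/mmtbl/base_0000003"); // illustrative path
        fs.mkdirs(base);
        fs.create(new Path(base, "_empty")).close();     // empty marker file

        // A hidden-file filter that rejects names starting with "_" (the
        // post-revert behavior) would skip "_empty" when listing this dir.
        for (FileStatus st : fs.listStatus(base)) {
          System.out.println(st.getPath().getName());
        }
      }
    }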