pvary commented on a change in pull request #2038:
URL: https://github.com/apache/iceberg/pull/2038#discussion_r559626416
##########
File path:
mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
##########
@@ -206,33 +207,42 @@ public static PartitionSpec spec(Configuration config) {
* @param config The target configuration to store to
* @param table The table which we want to store to the configuration
*/
+
@VisibleForTesting
- static void put(Configuration config, Table table) {
- // The Table contains a FileIO and the FileIO serializes the configuration
so we might end up recursively
- // serializing the objects. To avoid this unset the values for now before
serializing.
- config.unset(InputFormatConfig.SERIALIZED_TABLE);
- config.unset(InputFormatConfig.FILE_IO);
- config.unset(InputFormatConfig.LOCATION_PROVIDER);
- config.unset(InputFormatConfig.ENCRYPTION_MANAGER);
- config.unset(InputFormatConfig.TABLE_LOCATION);
- config.unset(InputFormatConfig.TABLE_SCHEMA);
- config.unset(InputFormatConfig.PARTITION_SPEC);
-
- String base64Table = table instanceof Serializable ?
SerializationUtil.serializeToBase64(table) : null;
- String base64Io = SerializationUtil.serializeToBase64(table.io());
- String base64LocationProvider =
SerializationUtil.serializeToBase64(table.locationProvider());
- String base64EncryptionManager =
SerializationUtil.serializeToBase64(table.encryption());
-
- if (base64Table != null) {
- config.set(InputFormatConfig.SERIALIZED_TABLE, base64Table);
+ static void overlayTableProperties(Configuration configuration, TableDesc
tableDesc, Map<String, String> map) {
+ Properties props = tableDesc.getProperties();
+ Table table = Catalogs.loadTable(configuration, props);
+ String schemaJson = SchemaParser.toJson(table.schema());
+
+ Map<String, String> original = new HashMap<>(map);
+ map.clear();
+
+ map.putAll(Maps.fromProperties(props));
+ map.putAll(original);
Review comment:
I was not sure which version was more readable. The one you suggested
is much nicer!
Changed
##########
File path:
mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java
##########
@@ -256,4 +265,293 @@ public void testSelectDistinctFromTable() throws
IOException {
Assert.assertEquals(tableName, size, distinctIds);
}
}
+
+ @Test
+ public void testInsert() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+
+ Table table = testTables.createTable(shell, "customers",
HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+ fileFormat, ImmutableList.of());
+
+ // The expected query is like
+ // INSERT INTO customers VALUES (0, 'Alice'), (1, 'Bob'), (2, 'Trudy')
+ StringBuilder query = new StringBuilder().append("INSERT INTO customers
VALUES ");
+ HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS.forEach(record ->
query.append("(")
+ .append(record.get(0)).append(",'")
+ .append(record.get(1)).append("','")
+ .append(record.get(2)).append("'),"));
+ query.setLength(query.length() - 1);
+
+ shell.executeStatement(query.toString());
+
+ HiveIcebergTestUtils.validateData(table, new
ArrayList<>(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS), 0);
+ }
+
+ @Test
+ public void testInsertFromSelect() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+
+ Table table = testTables.createTable(shell, "customers",
HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+ fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
+
+ shell.executeStatement("INSERT INTO customers SELECT * FROM customers");
+
+ // Check that everything is duplicated as expected
+ List<Record> records = new
ArrayList<>(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
+ records.addAll(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
+ HiveIcebergTestUtils.validateData(table, records, 0);
+ }
+
+ @Test
+ public void testInsertFromSelectWithOrderBy() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+
+ // We expect that there will be Mappers and Reducers here
+ Table table = testTables.createTable(shell, "customers",
HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+ fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
+
+ shell.executeStatement("INSERT INTO customers SELECT * FROM customers
ORDER BY customer_id");
+
+ // Check that everything is duplicated as expected
+ List<Record> records = new
ArrayList<>(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
+ records.addAll(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
+ HiveIcebergTestUtils.validateData(table, records, 0);
+ }
+
+ @Test
+ public void testWriteArrayOfPrimitivesInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "arrayofprimitives",
+ Types.ListType.ofRequired(3, Types.StringType.get())));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5, 0L);
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteArrayOfArraysInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema =
+ new Schema(
+ required(1, "id", Types.LongType.get()),
+ required(2, "arrayofarrays",
+ Types.ListType.ofRequired(3, Types.ListType.ofRequired(4,
Types.StringType.get()))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5,
0L).stream()
+ .filter(r -> !((List<String>)
r.get(1)).isEmpty()).collect(Collectors.toList());
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteArrayOfMapsInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema =
+ new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "arrayofmaps", Types.ListType
+ .ofRequired(3, Types.MapType.ofRequired(4, 5,
Types.StringType.get(),
+ Types.StringType.get()))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5,
0L).stream()
+ .filter(r -> !((List<Map<String, String>>)
r.get(1)).isEmpty()).collect(Collectors.toList());
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteArrayOfStructsInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema =
+ new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "arrayofstructs", Types.ListType.ofRequired(3,
Types.StructType
+ .of(required(4, "something", Types.StringType.get()),
required(5, "someone",
+ Types.StringType.get()), required(6, "somewhere",
Types.StringType.get())))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5,
0L).stream()
+ .filter(r -> !((List<?>)
r.get(1)).isEmpty()).collect(Collectors.toList());
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteMapOfPrimitivesInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "mapofprimitives", Types.MapType.ofRequired(3, 4,
Types.StringType.get(),
+ Types.StringType.get())));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5,
0L).stream()
+ .filter(r -> !((Map<String, String>)
r.get(1)).isEmpty()).collect(Collectors.toList());
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteMapOfArraysInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "mapofarrays",
+ Types.MapType.ofRequired(3, 4, Types.StringType.get(),
Types.ListType.ofRequired(5,
+ Types.StringType.get()))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5,
0L).stream()
+ .filter(r -> !((Map<String, List<String>>)
r.get(1)).isEmpty()).collect(Collectors.toList());
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteMapOfMapsInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "mapofmaps", Types.MapType.ofRequired(3, 4,
Types.StringType.get(),
+ Types.MapType.ofRequired(5, 6, Types.StringType.get(),
Types.StringType.get()))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5,
0L).stream()
+ .filter(r -> !((Map<String, Map<String, String>>)
r.get(1)).isEmpty()).collect(Collectors.toList());
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteMapOfStructsInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "mapofstructs", Types.MapType.ofRequired(3, 4,
Types.StringType.get(),
+ Types.StructType.of(required(5, "something",
Types.StringType.get()),
+ required(6, "someone", Types.StringType.get()),
+ required(7, "somewhere", Types.StringType.get())))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5,
0L).stream()
+ .filter(r -> !((Map<String, GenericRecord>)
r.get(1)).isEmpty()).collect(Collectors.toList());
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteStructOfPrimitivesInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "structofprimitives",
+ Types.StructType.of(required(3, "key", Types.StringType.get()),
required(4, "value",
+ Types.StringType.get()))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5, 0L);
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteStructOfArraysInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "structofarrays", Types.StructType
+ .of(required(3, "names", Types.ListType.ofRequired(4,
Types.StringType.get())),
+ required(5, "birthdays", Types.ListType.ofRequired(6,
+ Types.StringType.get())))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5, 0L);
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteStructOfMapsInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "structofmaps", Types.StructType
+ .of(required(3, "map1", Types.MapType.ofRequired(4, 5,
+ Types.StringType.get(), Types.StringType.get())), required(6,
"map2",
+ Types.MapType.ofRequired(7, 8, Types.StringType.get(),
+ Types.StringType.get())))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5, 0L);
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testWriteStructOfStructsInTable() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+ Schema schema = new Schema(required(1, "id", Types.LongType.get()),
+ required(2, "structofstructs", Types.StructType.of(required(3,
"struct1", Types.StructType
+ .of(required(4, "key", Types.StringType.get()), required(5,
"value",
+ Types.StringType.get()))))));
+ List<Record> records = TestHelper.generateRandomRecords(schema, 5, 0L);
+ testComplexTypeWrite(schema, records);
+ }
+
+ @Test
+ public void testPartitionedWrite() throws IOException {
+ Assume.assumeTrue("Tez write is not implemented yet",
executionEngine.equals("mr"));
+
+ PartitionSpec spec =
+
PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA).bucket("customer_id",
3).build();
+
+ TableIdentifier identifier = TableIdentifier.of("default",
"partitioned_customers");
+
+ shell.executeStatement("CREATE EXTERNAL TABLE " + identifier +
+ " STORED BY '" + HiveIcebergStorageHandler.class.getName() + "' " +
+ testTables.locationForCreateTableSQL(identifier) +
+ "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" +
+
SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "', "
+
+ "'" + InputFormatConfig.PARTITION_SPEC + "'='" +
+ PartitionSpecParser.toJson(spec) + "', " +
+ "'" + InputFormatConfig.WRITE_FILE_FORMAT + "'='" + fileFormat + "')");
+
+ List<Record> records =
TestHelper.generateRandomRecords(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
4, 0L);
+
+ StringBuilder query = new StringBuilder().append("INSERT INTO " +
identifier + " VALUES ");
+ records.forEach(record -> query.append("(")
+ .append(record.get(0)).append(",'")
+ .append(record.get(1)).append("','")
+ .append(record.get(2)).append("'),"));
+ query.setLength(query.length() - 1);
+
+ shell.executeStatement(query.toString());
+
+ Table table = testTables.loadTable(identifier);
+ HiveIcebergTestUtils.validateData(table, records, 0);
+ }
+
+ private void testComplexTypeWrite(Schema schema, List<Record> records)
throws IOException {
+ String tableName = "complex_table";
+ Table table = testTables.createTable(shell, "complex_table", schema,
+ fileFormat, ImmutableList.of());
Review comment:
Fixed
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]