This is an automated email from the ASF dual-hosted git repository.

progers pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit 41d4334ba67ad439e0b8f64935f80ee68e16dedd
Author: Paul Rogers <par0...@gmail.com>
AuthorDate: Sun Jul 17 18:07:39 2022 -0700

    Snapshot after first dummy union writer attempt
---
 .../impl/scan/v3/schema/MutableTupleSchema.java    |   9 +-
 .../physical/resultSet/impl/BuildFromSchema.java   |   9 -
 .../physical/resultSet/impl/ColumnBuilder.java     |   9 +-
 .../table/function/WithOptionsTableMacro.java      |   2 +-
 .../impl/TestResultSetLoaderUnprojected.java       | 333 +++++++++++++++++++++
 .../drill/exec/record/metadata/ColumnMetadata.java |  14 +-
 .../vector/accessor/writer/DummyUnionWriter.java   | 140 +++++++++
 .../exec/vector/accessor/writer/MapWriter.java     |   3 -
 .../vector/accessor/writer/SimpleListShim.java     |   7 +-
 .../vector/accessor/writer/UnionVectorShim.java    |   6 +-
 .../exec/vector/accessor/writer/UnionWriter.java   | 149 +++++++++
 .../vector/accessor/writer/UnionWriterImpl.java    | 138 +--------
 12 files changed, 656 insertions(+), 163 deletions(-)

diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/v3/schema/MutableTupleSchema.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/v3/schema/MutableTupleSchema.java
index 71a465b8e5..089ec17da4 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/v3/schema/MutableTupleSchema.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/v3/schema/MutableTupleSchema.java
@@ -173,7 +173,14 @@ public class MutableTupleSchema {
   }
 
   public ColumnHandle insert(ColumnMetadata col) {
-    return insert(insertPoint++, col);
+    switch (projType) {
+    case SOME:
+      return insert(columns.size(), col);
+    case ALL:
+      return insert(insertPoint++, col);
+    default:
+      throw new IllegalArgumentException("No projection, should not have 
materialized: " + col.name());
+    }
   }
 
   /**
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/BuildFromSchema.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/BuildFromSchema.java
index 84f28264b1..1df1390e62 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/BuildFromSchema.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/BuildFromSchema.java
@@ -48,7 +48,6 @@ import 
org.apache.drill.exec.vector.accessor.writer.RepeatedListWriter;
  * This class contrasts with the {@link ColumnBuilder} class which
  * builds the structure within a single vector and writer.
  */
-
 public class BuildFromSchema {
 
   /**
@@ -59,7 +58,6 @@ public class BuildFromSchema {
    * but have slightly different semantics. This shim wraps
    * the semantics so the builder code is simpler.
    */
-
   private interface ParentShim {
     ObjectWriter add(ColumnMetadata colSchema);
   }
@@ -140,7 +138,6 @@ public class BuildFromSchema {
    *
    * @param schema desired tuple schema to be materialized
    */
-
   public void buildTuple(TupleWriter writer, TupleMetadata schema) {
     final ParentShim tupleShim = new TupleShim(writer);
     for (int i = 0; i < schema.size(); i++) {
@@ -157,7 +154,6 @@ public class BuildFromSchema {
    * @param colSchema the schema of the column to add
    * @return the object writer for the added column
    */
-
   public ObjectWriter buildColumn(TupleState state, ColumnMetadata colSchema) {
     return buildColumn(new TupleStateShim(state), colSchema);
   }
@@ -171,7 +167,6 @@ public class BuildFromSchema {
    * @param colSchema the schema of the column to add
    * @return the object writer for the added column
    */
-
   private ObjectWriter buildColumn(ParentShim parent, ColumnMetadata 
colSchema) {
     if (colSchema.isMultiList()) {
       return buildRepeatedList(parent, colSchema);
@@ -197,7 +192,6 @@ public class BuildFromSchema {
    * @return true if the column is of type LIST with a single
    * element type
    */
-
   private boolean isSingleList(ColumnMetadata colSchema) {
     return colSchema.isVariant() && colSchema.isArray() && 
colSchema.variantSchema().isSingleType();
   }
@@ -231,7 +225,6 @@ public class BuildFromSchema {
    * the common behavior
    * @param colSchema the schema of the variant (LIST or UNION) column
    */
-
   private ObjectWriter buildVariant(ParentShim parent, ColumnMetadata 
colSchema) {
     final ObjectWriter colWriter = parent.add(colSchema.cloneEmpty());
     expandVariant(colWriter, colSchema);
@@ -271,7 +264,6 @@ public class BuildFromSchema {
    * @param parent tuple writer for the tuple that holds the array
    * @param colSchema schema definition of the array
    */
-
   private ObjectWriter buildRepeatedList(ParentShim parent, ColumnMetadata 
colSchema) {
     final ObjectWriter objWriter = parent.add(colSchema.cloneEmpty());
     final RepeatedListWriter listWriter = (RepeatedListWriter) 
objWriter.array();
@@ -292,7 +284,6 @@ public class BuildFromSchema {
    * @param colWriter the writer for the (possibly structured) column
    * @param colSchema the schema definition for the column
    */
-
   private void expandColumn(ObjectWriter colWriter, ColumnMetadata colSchema) {
 
     if (colSchema.isMultiList()) {
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ColumnBuilder.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ColumnBuilder.java
index 45f4b1f456..04fe811b71 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ColumnBuilder.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ColumnBuilder.java
@@ -319,7 +319,14 @@ public class ColumnBuilder {
     // have content that varies from batch to batch. Only the leaf
     // vectors can be cached.
     assert columnSchema.variantSchema().size() == 0;
-    final UnionVector vector = new UnionVector(columnSchema.schema(), 
parent.loader().allocator(), null);
+    final UnionVector vector;
+    final UnionWriterImpl unionWriter;
+    if (parent.projection().projection(columnSchema).isProjected || 
allowCreation(parent)) {
+      vector = new UnionVector(columnSchema.schema(), 
parent.loader().allocator(), null);
+      unionWriter = new UnionWriterImpl(columnSchema, vector, null);
+    } else {
+      vector = null;
+    }
 
     // Then the union writer.
     final UnionWriterImpl unionWriter = new UnionWriterImpl(columnSchema, 
vector, null);
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/table/function/WithOptionsTableMacro.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/table/function/WithOptionsTableMacro.java
index abcd9ef4ab..69b86e1d09 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/table/function/WithOptionsTableMacro.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/table/function/WithOptionsTableMacro.java
@@ -34,7 +34,7 @@ import java.util.function.Function;
  */
 public class WithOptionsTableMacro implements TableMacro {
 
-  private static final org.slf4j.Logger logger = 
org.slf4j.LoggerFactory.getLogger(WithOptionsTableMacro.class);
+  private static final Logger logger = 
LoggerFactory.getLogger(WithOptionsTableMacro.class);
 
   private final TableSignature sig;
   private final Function<List<Object>, DrillTable> function;
diff --git 
a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestResultSetLoaderUnprojected.java
 
b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestResultSetLoaderUnprojected.java
new file mode 100644
index 0000000000..d0415b615c
--- /dev/null
+++ 
b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestResultSetLoaderUnprojected.java
@@ -0,0 +1,333 @@
+package org.apache.drill.exec.physical.resultSet.impl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import 
org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions;
+import org.apache.drill.exec.physical.resultSet.project.Projections;
+import org.apache.drill.exec.physical.rowSet.RowSet;
+import org.apache.drill.exec.physical.rowSet.RowSetTestUtils;
+import org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.vector.accessor.ArrayWriter;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.apache.drill.test.SubOperatorTest;
+import org.apache.drill.test.rowSet.RowSetUtilities;
+import org.junit.Test;
+
+/**
+ * Verify the correct functioning of the "dummy" columns created
+ * for unprojected columns.
+ */
+public class TestResultSetLoaderUnprojected  extends SubOperatorTest {
+
+  @Test
+  public void testScalar()
+  {
+    List<SchemaPath> selection = RowSetTestUtils.projectList("a");
+    TupleMetadata schema = new SchemaBuilder()
+        .add("a", MinorType.INT)
+        .add("b", MinorType.INT)
+        .buildSchema();
+    ResultSetOptions options = new ResultSetOptionBuilder()
+        .projection(Projections.parse(selection))
+        .readerSchema(schema)
+        .build();
+    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), 
options);
+
+    RowSetLoader rootWriter = rsLoader.writer();
+    TupleMetadata actualSchema = rootWriter.tupleSchema();
+    assertEquals(2, actualSchema.size());
+    assertEquals("a", actualSchema.column(0).getName());
+    assertEquals("b", actualSchema.column(1).getName());
+    assertTrue(rootWriter.column("a").isProjected());
+    assertFalse(rootWriter.column("b").isProjected());
+    rsLoader.startBatch();
+    for (int i = 1; i < 3; i++) {
+      rootWriter.start();
+      rootWriter.scalar(0).setInt(i);
+      rootWriter.scalar(1).setInt(i * 5);
+      rootWriter.save();
+    }
+    TupleMetadata expectedSchema = new SchemaBuilder()
+        .add("a", MinorType.INT)
+        .buildSchema();
+    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
+        .addRow(1)
+        .addRow(2)
+        .build();
+    RowSet actual = fixture.wrap(rsLoader.harvest());
+    RowSetUtilities.verify(expected, actual);
+    rsLoader.close();
+  }
+
+  @Test
+  public void testScalarArray()
+  {
+    List<SchemaPath> selection = RowSetTestUtils.projectList("a");
+    TupleMetadata schema = new SchemaBuilder()
+        .add("a", MinorType.INT)
+        .addArray("b", MinorType.INT)
+        .buildSchema();
+    ResultSetOptions options = new ResultSetOptionBuilder()
+        .projection(Projections.parse(selection))
+        .readerSchema(schema)
+        .build();
+    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), 
options);
+
+    RowSetLoader rootWriter = rsLoader.writer();
+    TupleMetadata actualSchema = rootWriter.tupleSchema();
+    assertEquals(2, actualSchema.size());
+    assertEquals("a", actualSchema.column(0).getName());
+    assertEquals("b", actualSchema.column(1).getName());
+    assertTrue(rootWriter.column("a").isProjected());
+    assertFalse(rootWriter.column("b").isProjected());
+    rsLoader.startBatch();
+    for (int i = 1; i < 3; i++) {
+      rootWriter.start();
+      rootWriter.scalar(0).setInt(i);
+      for (int j = 0; j < 3; j++) {
+        ArrayWriter aw = rootWriter.array(1);
+        ScalarWriter sw = rootWriter.array(1).scalar();
+        sw.setInt(i * 5 + j);
+        aw.save();
+      }
+      rootWriter.save();
+    }
+    TupleMetadata expectedSchema = new SchemaBuilder()
+        .add("a", MinorType.INT)
+        .buildSchema();
+    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
+        .addRow(1)
+        .addRow(2)
+        .build();
+    RowSet actual = fixture.wrap(rsLoader.harvest());
+    RowSetUtilities.verify(expected, actual);
+    rsLoader.close();
+  }
+
+  @Test
+  public void testMap()
+  {
+    List<SchemaPath> selection = RowSetTestUtils.projectList("a");
+    TupleMetadata schema = new SchemaBuilder()
+        .addMap("a")
+          .add("foo", MinorType.INT)
+          .resumeSchema()
+        .addMap("b")
+          .add("foo", MinorType.INT)
+          .resumeSchema()
+        .buildSchema();
+    ResultSetOptions options = new ResultSetOptionBuilder()
+        .projection(Projections.parse(selection))
+        .readerSchema(schema)
+        .build();
+    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), 
options);
+
+    RowSetLoader rootWriter = rsLoader.writer();
+    TupleMetadata actualSchema = rootWriter.tupleSchema();
+    assertEquals(2, actualSchema.size());
+    assertEquals("a", actualSchema.column(0).getName());
+    assertEquals("b", actualSchema.column(1).getName());
+    assertTrue(rootWriter.column("a").isProjected());
+    assertFalse(rootWriter.column("b").isProjected());
+    rsLoader.startBatch();
+    for (int i = 1; i < 3; i++) {
+      rootWriter.start();
+      rootWriter.tuple(0).scalar("foo").setInt(i);
+      rootWriter.tuple(1).scalar("foo").setInt(i * 5);
+      rootWriter.save();
+    }
+    TupleMetadata expectedSchema = new SchemaBuilder()
+        .addMap("a")
+          .add("foo", MinorType.INT)
+          .resumeSchema()
+        .buildSchema();
+    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
+        .addSingleCol(RowSetUtilities.mapValue(1))
+        .addSingleCol(RowSetUtilities.mapValue(2))
+        .build();
+    RowSet actual = fixture.wrap(rsLoader.harvest());
+    RowSetUtilities.verify(expected, actual);
+    rsLoader.close();
+  }
+
+  @Test
+  public void testMapElements()
+  {
+    List<SchemaPath> selection = RowSetTestUtils.projectList("a.foo");
+    TupleMetadata schema = new SchemaBuilder()
+        .addMap("a")
+          .add("foo", MinorType.INT)
+          .add("bar", MinorType.INT)
+          .resumeSchema()
+        .buildSchema();
+    ResultSetOptions options = new ResultSetOptionBuilder()
+        .projection(Projections.parse(selection))
+        .readerSchema(schema)
+        .build();
+    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), 
options);
+
+    RowSetLoader rootWriter = rsLoader.writer();
+    TupleMetadata actualSchema = rootWriter.tupleSchema();
+    assertEquals(1, actualSchema.size());
+    assertEquals("a", actualSchema.metadata(0).name());
+    assertEquals(2, actualSchema.metadata(0).tupleSchema().size());
+    assertEquals("foo", 
actualSchema.metadata(0).tupleSchema().metadata(0).name());
+    assertEquals("bar", 
actualSchema.metadata(0).tupleSchema().metadata(1).name());
+    assertTrue(rootWriter.column("a").isProjected());
+    assertTrue(rootWriter.tuple("a").column(0).isProjected());
+    assertFalse(rootWriter.tuple("a").column(1).isProjected());
+    rsLoader.startBatch();
+    for (int i = 1; i < 3; i++) {
+      rootWriter.start();
+      TupleWriter aWriter = rootWriter.tuple(0);
+      aWriter.scalar("foo").setInt(i);
+      aWriter.scalar("bar").setInt(i * 5);
+      rootWriter.save();
+    }
+    TupleMetadata expectedSchema = new SchemaBuilder()
+        .addMap("a")
+          .add("foo", MinorType.INT)
+          .resumeSchema()
+        .buildSchema();
+    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
+        .addSingleCol(RowSetUtilities.mapValue(1))
+        .addSingleCol(RowSetUtilities.mapValue(2))
+        .build();
+    RowSet actual = fixture.wrap(rsLoader.harvest());
+    RowSetUtilities.verify(expected, actual);
+    rsLoader.close();
+  }
+
+  @Test
+  public void testMapArray()
+  {
+    List<SchemaPath> selection = RowSetTestUtils.projectList("a");
+    TupleMetadata schema = new SchemaBuilder()
+        .addMapArray("a")
+          .add("foo", MinorType.INT)
+          .resumeSchema()
+        .addMapArray("b")
+          .add("foo", MinorType.INT)
+          .resumeSchema()
+        .buildSchema();
+    ResultSetOptions options = new ResultSetOptionBuilder()
+        .projection(Projections.parse(selection))
+        .readerSchema(schema)
+        .build();
+    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), 
options);
+
+    RowSetLoader rootWriter = rsLoader.writer();
+    TupleMetadata actualSchema = rootWriter.tupleSchema();
+    assertEquals(2, actualSchema.size());
+    assertEquals("a", actualSchema.column(0).getName());
+    assertEquals("b", actualSchema.column(1).getName());
+    assertTrue(rootWriter.column("a").isProjected());
+    assertFalse(rootWriter.column("b").isProjected());
+    rsLoader.startBatch();
+    for (int i = 0; i < 2; i++) {
+      rootWriter.start();
+      ArrayWriter aWriter = rootWriter.array(0);
+      ArrayWriter bWriter = rootWriter.array(1);
+      for (int j = 0; j < 2; j++) {
+        aWriter.tuple().scalar(0).setInt(i * 2 + j + 1);
+        bWriter.tuple().scalar(0).setInt((i * 2 + j) * 5);
+        aWriter.save();
+        bWriter.save();
+      }
+      rootWriter.save();
+    }
+    TupleMetadata expectedSchema = new SchemaBuilder()
+        .addMapArray("a")
+          .add("foo", MinorType.INT)
+          .resumeSchema()
+        .buildSchema();
+    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
+        .addSingleCol(
+            RowSetUtilities.mapArray(
+                RowSetUtilities.mapValue(1),
+                RowSetUtilities.mapValue(2)))
+        .addSingleCol(
+            RowSetUtilities.mapArray(
+                RowSetUtilities.mapValue(3),
+                RowSetUtilities.mapValue(4)))
+        .build();
+    RowSet actual = fixture.wrap(rsLoader.harvest());
+    RowSetUtilities.verify(expected, actual);
+    rsLoader.close();
+  }
+
+  @Test
+  public void testVariant()
+  {
+    List<SchemaPath> selection = RowSetTestUtils.projectList("a");
+    TupleMetadata schema = new SchemaBuilder()
+        .addUnion("a")
+          .resumeSchema()
+        .addUnion("b")
+          .resumeSchema()
+        .buildSchema();
+    ResultSetOptions options = new ResultSetOptionBuilder()
+        .projection(Projections.parse(selection))
+        .readerSchema(schema)
+        .build();
+    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), 
options);
+
+    RowSetLoader rootWriter = rsLoader.writer();
+    TupleMetadata actualSchema = rootWriter.tupleSchema();
+    assertEquals(2, actualSchema.size());
+    assertEquals("a", actualSchema.column(0).getName());
+    assertEquals("b", actualSchema.column(1).getName());
+    assertTrue(rootWriter.column("a").isProjected());
+    assertFalse(rootWriter.column("b").isProjected());
+    rsLoader.startBatch();
+    rootWriter.start();
+    rootWriter.scalar(0).setInt(1);
+    rootWriter.scalar(1).setInt(5);
+    rootWriter.save();
+    rootWriter.start();
+    rootWriter.scalar(0).setString("2");
+    rootWriter.scalar(1).setString("10");
+    rootWriter.save();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+        .addUnion("a")
+          .addType(MinorType.INT)
+          .addType(MinorType.VARCHAR)
+          .resumeSchema()
+        .buildSchema();
+    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
+        .addRow(1)
+        .addRow("2")
+        .build();
+    RowSet actual = fixture.wrap(rsLoader.harvest());
+    RowSetUtilities.verify(expected, actual);
+    rsLoader.close();
+  }
+
+  public void testList()
+  {
+
+  }
+
+  public void test2DList()
+  {
+
+  }
+
+  public void testDict()
+  {
+
+  }
+
+}
diff --git 
a/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/ColumnMetadata.java
 
b/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/ColumnMetadata.java
index 0f78f55ee9..d80b2fe4fb 100644
--- 
a/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/ColumnMetadata.java
+++ 
b/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/ColumnMetadata.java
@@ -79,9 +79,9 @@ public interface ColumnMetadata extends Propertied {
    * columns may be available only when explicitly requested. For example,
    * the log reader has a "_raw" column which includes the entire input
    * line before parsing. This column can be requested explicitly:<br>
-   * <tt>SELECT foo, bar, _raw FROM ...</tt><br>
+   * {@code SELECT foo, bar, _raw FROM ...}<br>
    * but the column will <i>not</i> be included when using the wildcard:<br>
-   * <tt>SELECT * FROM ...</tt>
+   * {@code SELECT * FROM ...}
    * <p>
    * Marking a column (either in the provided schema or the reader schema)
    * will prevent that column from appearing in a wildcard expansion.
@@ -192,25 +192,25 @@ public interface ColumnMetadata extends Propertied {
   StructureType structureType();
 
   /**
-   * Schema for <tt>TUPLE</tt> columns.
+   * Schema for {@code TUPLE} columns.
    *
    * @return the tuple schema
    */
   TupleMetadata tupleSchema();
 
   /**
-   * Schema for <tt>VARIANT</tt> columns.
+   * Schema for {@code VARIANT} columns.
    *
    * @return the variant schema
    */
   VariantMetadata variantSchema();
 
   /**
-   * Schema of inner dimension for <tt>MULTI_ARRAY<tt> columns.
+   * Schema of inner dimension for {@code MULTI_ARRAY} columns.
    * If an array is 3D, the outer column represents all 3 dimensions.
-   * <tt>outer.childSchema()</tt> gives another <tt>MULTI_ARRAY</tt>
+   * {@code outer.childSchema()} gives another {@code MULTI_ARRAY}
    * for the inner 2D array.
-   * <tt>outer.childSchema().childSchema()</tt> gives a column
+   * {@code outer.childSchema().childSchema()} gives a column
    * of some other type (but repeated) for the 1D array.
    * <p>
    * Sorry for the mess, but it is how the code works and we are not
diff --git 
a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/DummyUnionWriter.java
 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/DummyUnionWriter.java
new file mode 100644
index 0000000000..fd72d54006
--- /dev/null
+++ 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/DummyUnionWriter.java
@@ -0,0 +1,140 @@
+package org.apache.drill.exec.vector.accessor.writer;
+
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.vector.accessor.ArrayWriter;
+import org.apache.drill.exec.vector.accessor.ColumnReader;
+import org.apache.drill.exec.vector.accessor.ColumnWriterIndex;
+import org.apache.drill.exec.vector.accessor.ObjectWriter;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.apache.drill.exec.vector.accessor.impl.HierarchicalFormatter;
+
+public class DummyUnionWriter extends UnionWriter {
+
+  @Override
+  public ObjectWriter member(MinorType type) {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  @Override
+  public ScalarWriter scalar(MinorType type) {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  @Override
+  public TupleWriter tuple() {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  @Override
+  public ArrayWriter array() {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  @Override
+  public boolean isProjected() {
+    // TODO Auto-generated method stub
+    return false;
+  }
+
+  @Override
+  public void copy(ColumnReader from) {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void setObject(Object value) {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void bindIndex(ColumnWriterIndex index) {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void startWrite() {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void startRow() {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void endArrayValue() {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void restartRow() {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void saveRow() {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void endWrite() {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void preRollover() {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void postRollover() {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public void dump(HierarchicalFormatter format) {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public int rowStartIndex() {
+    // TODO Auto-generated method stub
+    return 0;
+  }
+
+  @Override
+  public int lastWriteIndex() {
+    // TODO Auto-generated method stub
+    return 0;
+  }
+
+  @Override
+  public int writeIndex() {
+    // TODO Auto-generated method stub
+    return 0;
+  }
+
+  @Override
+  public void bindShim(UnionShim shim) {
+    // TODO Auto-generated method stub
+
+  }
+
+}
diff --git 
a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/MapWriter.java
 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/MapWriter.java
index 3245323c33..e183ca9f0a 100644
--- 
a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/MapWriter.java
+++ 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/MapWriter.java
@@ -32,7 +32,6 @@ import org.apache.drill.exec.vector.complex.RepeatedMapVector;
 /**
  * Writer for a Drill Map type. Maps are actually tuples, just like rows.
  */
-
 public abstract class MapWriter extends AbstractTupleWriter {
 
   /**
@@ -40,7 +39,6 @@ public abstract class MapWriter extends AbstractTupleWriter {
    * rather, this writer is a holder for the columns within the map, and those
    * columns are what is written.
    */
-
   protected static class SingleMapWriter extends MapWriter {
     private final MapVector mapVector;
 
@@ -83,7 +81,6 @@ public abstract class MapWriter extends AbstractTupleWriter {
    * Since the map is an array, it has an associated offset vector, which the
    * parent array writer is responsible for maintaining.
    */
-
   protected static class ArrayMapWriter extends MapWriter {
 
     protected ArrayMapWriter(ColumnMetadata schema, List<AbstractObjectWriter> 
writers) {
diff --git 
a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/SimpleListShim.java
 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/SimpleListShim.java
index a6a1e6c7bb..23daef41a1 100644
--- 
a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/SimpleListShim.java
+++ 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/SimpleListShim.java
@@ -22,7 +22,7 @@ import org.apache.drill.exec.record.metadata.ColumnMetadata;
 import org.apache.drill.exec.vector.accessor.ColumnWriterIndex;
 import org.apache.drill.exec.vector.accessor.ObjectWriter;
 import org.apache.drill.exec.vector.accessor.impl.HierarchicalFormatter;
-import org.apache.drill.exec.vector.accessor.writer.UnionWriterImpl.UnionShim;
+import org.apache.drill.exec.vector.accessor.writer.UnionWriter.UnionShim;
 import com.google.common.base.Preconditions;
 
 /**
@@ -33,10 +33,9 @@ import com.google.common.base.Preconditions;
  * to present a uniform variant interface for a list that holds zero,
  * one (this case), or many types.
  */
-
 public class SimpleListShim implements UnionShim {
 
-  private UnionWriterImpl writer;
+  private UnionWriter writer;
   private AbstractObjectWriter colWriter;
 
   public SimpleListShim() { }
@@ -46,7 +45,7 @@ public class SimpleListShim implements UnionShim {
   }
 
   @Override
-  public void bindWriter(UnionWriterImpl writer) {
+  public void bindWriter(UnionWriter writer) {
     this.writer = writer;
   }
 
diff --git 
a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionVectorShim.java
 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionVectorShim.java
index f9d0996764..566b7d46d0 100644
--- 
a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionVectorShim.java
+++ 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionVectorShim.java
@@ -25,7 +25,7 @@ import org.apache.drill.exec.vector.accessor.ObjectWriter;
 import 
org.apache.drill.exec.vector.accessor.VariantWriter.VariantWriterListener;
 import org.apache.drill.exec.vector.accessor.impl.HierarchicalFormatter;
 import 
org.apache.drill.exec.vector.accessor.writer.AbstractFixedWidthWriter.BaseFixedWidthWriter;
-import org.apache.drill.exec.vector.accessor.writer.UnionWriterImpl.UnionShim;
+import org.apache.drill.exec.vector.accessor.writer.UnionWriter.UnionShim;
 import org.apache.drill.exec.vector.complex.UnionVector;
 
 /**
@@ -68,7 +68,7 @@ public class UnionVectorShim implements UnionShim {
 
   private final UnionVector vector;
   private final AbstractObjectWriter variants[];
-  private UnionWriterImpl writer;
+  private UnionWriter writer;
 
   /**
    * Writer for the type vector associated with the union. The type vector
@@ -95,7 +95,7 @@ public class UnionVectorShim implements UnionShim {
   }
 
   @Override
-  public void bindWriter(UnionWriterImpl writer) {
+  public void bindWriter(UnionWriter writer) {
     this.writer = writer;
     final ColumnWriterIndex index = writer.index();
     if (index != null) {
diff --git 
a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriter.java
 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriter.java
new file mode 100644
index 0000000000..859f30f8a7
--- /dev/null
+++ 
b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriter.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.vector.accessor.writer;
+
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.VariantMetadata;
+import org.apache.drill.exec.vector.accessor.ColumnWriter;
+import org.apache.drill.exec.vector.accessor.ObjectType;
+import org.apache.drill.exec.vector.accessor.ObjectWriter;
+import org.apache.drill.exec.vector.accessor.VariantWriter;
+import org.apache.drill.exec.vector.accessor.VariantWriter.VariantWriterListener;
+import org.apache.drill.exec.vector.accessor.impl.HierarchicalFormatter;
+import org.apache.drill.exec.vector.accessor.writer.UnionWriter.UnionShim;
+import org.apache.drill.exec.vector.accessor.writer.WriterEvents.ColumnWriterListener;
+
+/**
+ * Writer to a union vector.
+ */
+public abstract class UnionWriter implements VariantWriter, WriterEvents {
+
+  public interface UnionShim extends WriterEvents {
+    void bindWriter(UnionWriter writer);
+    void setNull();
+    boolean hasType(MinorType type);
+
+    /**
+     * Return an existing writer for the given type, or create a new one
+     * if needed.
+     *
+     * @param type desired variant type
+     * @return a writer for that type
+     */
+    ObjectWriter member(MinorType type);
+    void setType(MinorType type);
+    @Override
+    int lastWriteIndex();
+    @Override
+    int rowStartIndex();
+    AbstractObjectWriter addMember(ColumnMetadata colSchema);
+    AbstractObjectWriter addMember(MinorType type);
+    void addMember(AbstractObjectWriter colWriter);
+  }
+
+  public static class VariantObjectWriter extends AbstractObjectWriter {
+
+    private final UnionWriterImpl writer;
+
+    public VariantObjectWriter(UnionWriterImpl writer) {
+      this.writer = writer;
+    }
+
+    @Override
+    public ColumnWriter writer() { return writer; }
+
+    @Override
+    public VariantWriter variant() { return writer; }
+
+    @Override
+    public WriterEvents events() { return writer; }
+
+    @Override
+    public void dump(HierarchicalFormatter format) {
+      writer.dump(format);
+    }
+  }
+
+  private final ColumnMetadata schema;
+  protected UnionShim shim;
+  private VariantWriterListener listener;
+
+  public UnionWriter(ColumnMetadata schema) {
+    this.schema = schema;
+  }
+
+  public abstract void bindShim(UnionShim shim);
+  public VariantWriterListener listener() { return listener; }
+  public UnionShim shim() { return shim; }
+
+  public void bindListener(VariantWriterListener listener) {
+    this.listener = listener;
+  }
+
+  // Unions are complex: listeners should bind to the components as they
+  // are created.
+
+  @Override
+  public void bindListener(ColumnWriterListener listener) { }
+
+  @Override
+  public ColumnMetadata schema() { return schema; }
+
+  @Override
+  public VariantMetadata variantSchema() { return schema.variantSchema(); }
+
+  @Override
+  public int size() { return variantSchema().size(); }
+
+  @Override
+  public ObjectType type() { return ObjectType.VARIANT; }
+
+  @Override
+  public boolean nullable() { return true; }
+
+  @Override
+  public boolean hasType(MinorType type) {
+    return shim.hasType(type);
+  }
+
+  @Override
+  public void setNull() {
+    shim.setNull();
+  }
+
+  @Override
+  public ObjectWriter memberWriter(MinorType type) {
+    return shim.member(type);
+  }
+
+  @Override
+  public void setType(MinorType type) {
+    shim.setType(type);
+  }
+
+  @Override
+  public ObjectWriter addMember(ColumnMetadata colSchema) {
+    return shim.addMember(colSchema);
+  }
+
+  @Override
+  public ObjectWriter addMember(MinorType type) {
+    return shim.addMember(type);
+  }
+}
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriterImpl.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriterImpl.java
index 98f0798e22..b6967c8a3f 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriterImpl.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriterImpl.java
@@ -1,94 +1,25 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.drill.exec.vector.accessor.writer;
 
 import java.math.BigDecimal;
 
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.exec.record.metadata.ColumnMetadata;
-import org.apache.drill.exec.record.metadata.VariantMetadata;
 import org.apache.drill.exec.vector.accessor.ArrayWriter;
 import org.apache.drill.exec.vector.accessor.ColumnReader;
-import org.apache.drill.exec.vector.accessor.ColumnWriter;
 import org.apache.drill.exec.vector.accessor.ColumnWriterIndex;
-import org.apache.drill.exec.vector.accessor.ObjectType;
 import org.apache.drill.exec.vector.accessor.ObjectWriter;
 import org.apache.drill.exec.vector.accessor.ScalarWriter;
 import org.apache.drill.exec.vector.accessor.TupleWriter;
 import org.apache.drill.exec.vector.accessor.VariantReader;
-import org.apache.drill.exec.vector.accessor.VariantWriter;
 import org.apache.drill.exec.vector.accessor.WriterPosition;
 import org.apache.drill.exec.vector.accessor.impl.HierarchicalFormatter;
 import org.apache.drill.exec.vector.complex.UnionVector;
 import org.joda.time.Period;
 
 /**
- * Writer to a union vector.
+ * Writer to a materialized union.
  */
-
-public class UnionWriterImpl implements VariantWriter, WriterEvents {
-
-  public interface UnionShim extends WriterEvents {
-    void bindWriter(UnionWriterImpl writer);
-    void setNull();
-    boolean hasType(MinorType type);
-
-    /**
-     * Return an existing writer for the given type, or create a new one
-     * if needed.
-     *
-     * @param type desired variant type
-     * @return a writer for that type
-     */
-
-    ObjectWriter member(MinorType type);
-    void setType(MinorType type);
-    @Override
-    int lastWriteIndex();
-    @Override
-    int rowStartIndex();
-    AbstractObjectWriter addMember(ColumnMetadata colSchema);
-    AbstractObjectWriter addMember(MinorType type);
-    void addMember(AbstractObjectWriter colWriter);
-  }
-
-  public static class VariantObjectWriter extends AbstractObjectWriter {
-
-    private final UnionWriterImpl writer;
-
-    public VariantObjectWriter(UnionWriterImpl writer) {
-      this.writer = writer;
-    }
-
-    @Override
-    public ColumnWriter writer() { return writer; }
-
-    @Override
-    public VariantWriter variant() { return writer; }
-
-    @Override
-    public WriterEvents events() { return writer; }
-
-    @Override
-    public void dump(HierarchicalFormatter format) {
-      writer.dump(format);
-    }
-  }
+public class UnionWriterImpl extends UnionWriter {
 
   /**
    * The result set loader requires information about the child positions
@@ -102,7 +33,6 @@ public class UnionWriterImpl implements VariantWriter, WriterEvents {
    * need to implement the same methods, so we can't just implement these
    * methods on the union writer itself.
    */
-
   private class ElementPositions implements WriterPosition {
 
     @Override
@@ -117,15 +47,12 @@ public class UnionWriterImpl implements VariantWriter, WriterEvents {
     }
   }
 
-  private final ColumnMetadata schema;
-  private UnionShim shim;
   private ColumnWriterIndex index;
   private State state = State.IDLE;
-  private VariantWriterListener listener;
   private final WriterPosition elementPosition = new ElementPositions();
 
   public UnionWriterImpl(ColumnMetadata schema) {
-    this.schema = schema;
+    super(schema);
   }
 
   public UnionWriterImpl(ColumnMetadata schema, UnionVector vector,
@@ -140,24 +67,13 @@ public class UnionWriterImpl implements VariantWriter, WriterEvents {
     shim.bindIndex(index);
   }
 
-  public void bindListener(VariantWriterListener listener) {
-    this.listener = listener;
-  }
-
-  // Unions are complex: listeners should bind to the components as they
-  // are created.
-
-  @Override
-  public void bindListener(ColumnWriterListener listener) { }
-
   // The following are for coordinating with the shim.
 
   public State state() { return state; }
   public ColumnWriterIndex index() { return index; }
-  public VariantWriterListener listener() { return listener; }
-  public UnionShim shim() { return shim; }
   public WriterPosition elementPosition() { return elementPosition; }
 
+  @Override
   public void bindShim(UnionShim shim) {
     this.shim = shim;
     shim.bindWriter(this);
@@ -169,36 +85,6 @@ public class UnionWriterImpl implements VariantWriter, WriterEvents {
     }
   }
 
-  @Override
-  public ObjectType type() { return ObjectType.VARIANT; }
-
-  @Override
-  public boolean nullable() { return true; }
-
-  @Override
-  public ColumnMetadata schema() { return schema; }
-
-  @Override
-  public VariantMetadata variantSchema() { return schema.variantSchema(); }
-
-  @Override
-  public int size() { return variantSchema().size(); }
-
-  @Override
-  public boolean hasType(MinorType type) {
-    return shim.hasType(type);
-  }
-
-  @Override
-  public void setNull() {
-    shim.setNull();
-  }
-
-  @Override
-  public ObjectWriter memberWriter(MinorType type) {
-    return shim.member(type);
-  }
-
   @Override
   public ObjectWriter member(MinorType type) {
 
@@ -211,21 +97,6 @@ public class UnionWriterImpl implements VariantWriter, WriterEvents {
     return writer;
   }
 
-  @Override
-  public void setType(MinorType type) {
-    shim.setType(type);
-  }
-
-  @Override
-  public ObjectWriter addMember(ColumnMetadata colSchema) {
-    return shim.addMember(colSchema);
-  }
-
-  @Override
-  public ObjectWriter addMember(MinorType type) {
-    return shim.addMember(type);
-  }
-
   /**
    * Add a column writer to an existing union writer. Used for implementations
    * that support "live" schema evolution: column discovery while writing.
@@ -234,7 +105,6 @@ public class UnionWriterImpl implements VariantWriter, WriterEvents {
    *
    * @param writer the column writer to add
    */
-
   protected void addMember(AbstractObjectWriter writer) {
     final MinorType type = writer.schema().type();
 


Reply via email to