[ https://issues.apache.org/jira/browse/APEXMALHAR-2014?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15215212#comment-15215212 ]
ASF GitHub Bot commented on APEXMALHAR-2014:
--------------------------------------------
Github user devtagare commented on a diff in the pull request:
https://github.com/apache/incubator-apex-malhar/pull/219#discussion_r57661266
--- Diff: contrib/src/main/java/com/datatorrent/contrib/parquet/ParquetFilePOJOReader.java ---
@@ -0,0 +1,305 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.datatorrent.contrib.parquet;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+
+import org.apache.commons.lang3.ClassUtils;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import com.datatorrent.api.Context;
+import com.datatorrent.api.DefaultOutputPort;
+import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
+import com.datatorrent.lib.util.FieldInfo;
+import com.datatorrent.lib.util.FieldInfo.SupportType;
+import com.datatorrent.lib.util.PojoUtils;
+import com.datatorrent.lib.util.PojoUtils.Setter;
+
+import parquet.example.data.Group;
+import parquet.io.ParquetEncodingException;
+
+/**
+ * <p>
+ * ParquetFilePOJOReader
+ * </p>
+ * ParquetFilePOJOReader operator is a concrete implementation of
+ * AbstractParquetFileReader that reads Parquet files and emits records as
+ * POJOs. The POJO class name and field mapping should be provided by the
+ * user. If this mapping is not provided, reflection is used to determine it.
+ * As of now only primitive types (INT32, INT64, BOOLEAN, FLOAT, DOUBLE,
+ * BINARY) are supported.
+ *
+ * @displayName ParquetFilePOJOReader
+ * @tags parquet,input adapter
+ * @since 3.3.0
+ */
+public class ParquetFilePOJOReader extends AbstractParquetFileReader<Object>
+{
+
+ /**
+ * POJO class
+ */
+ protected transient Class<?> pojoClass;
+ /**
+ * Map containing setters for fields in POJO
+ */
+ protected transient Map<String, Setter> pojoSetters;
+ /**
+ * String representing Parquet TO POJO field mapping. If not provided, then
+ * reflection is used to determine the mapping. Format:
+ * PARQUET_FIELD_NAME:POJO_FIELD_NAME:TYPE
+ * E.g. event_id:event_id_v2:INTEGER,org_id:org_id_v2:STRING,
+ * long_id:long_id_v2:LONG,css_file_loaded:css_file_loaded_v2:BOOLEAN,
+ * float_val:float_val_v2:FLOAT,double_val:double_val_v2:DOUBLE
+ */
+ protected transient String groupToPOJOFieldsMapping = null;
+ protected transient List<FieldInfo> fieldInfos;
+ protected transient List<ActiveFieldInfo> columnFieldSetters = null;
+ protected static final String FIELD_SEPARATOR = ":";
+ protected static final String RECORD_SEPARATOR = ",";
+
+ @OutputPortFieldAnnotation(schemaRequired = true)
+ public final transient DefaultOutputPort<Object> output = new DefaultOutputPort<Object>()
+ {
+
+ @Override
+ public void setup(Context.PortContext context)
+ {
+ pojoClass = context.getValue(Context.PortContext.TUPLE_CLASS);
+ pojoSetters = Maps.newHashMap();
+ for (Field f : pojoClass.getDeclaredFields()) {
+ try {
+ pojoSetters.put(f.getName(), generateSettersForField(pojoClass, f.getName()));
+ } catch (NoSuchFieldException | SecurityException e) {
+ e.printStackTrace();
+ }
+ }
+
+ if (groupToPOJOFieldsMapping == null) {
+ fieldInfos = createFieldInfoMap(generateFieldInfoInputs());
+ } else {
+ fieldInfos = createFieldInfoMap(groupToPOJOFieldsMapping);
+ }
+ initColumnFieldSetters();
+ }
+
+ };
+
+ /**
+ * Converts Group to POJO
+ */
+ @Override
+ protected Object convertGroup(Group group)
+ {
+ Object obj;
+ try {
+ obj = pojoClass.newInstance();
+ } catch (InstantiationException | IllegalAccessException ex) {
+ throw new RuntimeException(ex);
+ }
+ for (int i = 0; i < columnFieldSetters.size(); i++) {
+ ParquetFilePOJOReader.ActiveFieldInfo afi = columnFieldSetters.get(i);
+ int fieldIndex = schema.getFieldIndex(afi.fieldInfo.getColumnName());
+ SupportType st = afi.fieldInfo.getType();
+
+ switch (st) {
+
+ case BOOLEAN:
+ Boolean booleanVal = Boolean.parseBoolean(group.getValueToString(fieldIndex, 0));
+ pojoSetters.get(afi.fieldInfo.getPojoFieldExpression()).set(obj, booleanVal);
+ break;
+
+ case INTEGER:
+ Integer intVal = Integer.parseInt(group.getValueToString(fieldIndex, 0));
+ pojoSetters.get(afi.fieldInfo.getPojoFieldExpression()).set(obj, intVal);
+ break;
+
+ case LONG:
+ Long longVal = Long.parseLong(group.getValueToString(fieldIndex, 0));
+ pojoSetters.get(afi.fieldInfo.getPojoFieldExpression()).set(obj, longVal);
+ break;
+
+ case FLOAT:
+ Float floatVal = Float.parseFloat(group.getValueToString(fieldIndex, 0));
+ pojoSetters.get(afi.fieldInfo.getPojoFieldExpression()).set(obj, floatVal);
+ break;
+
+ case DOUBLE:
+ Double doubleVal = Double.parseDouble(group.getValueToString(fieldIndex, 0));
+ pojoSetters.get(afi.fieldInfo.getPojoFieldExpression()).set(obj, doubleVal);
+ break;
+
+ case STRING:
+ pojoSetters.get(afi.fieldInfo.getPojoFieldExpression()).set(obj, group.getValueToString(fieldIndex, 0));
+ break;
+
+ default:
+ throw new ParquetEncodingException("Unsupported column type: " + schema.getType(fieldIndex));
--- End diff --
Can the default case be to serialize the record as a byte array and set it
on the object? This could help serialize non-primitive types as byte arrays,
which the user can then deserialize.
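As a rough illustration of that suggestion, the default case could look something like the sketch below. It reuses the names visible in the diff (group, fieldIndex, afi, pojoSetters, obj) and assumes the POJO declares a byte[] field for the column and that the value is stored as BINARY, so Group.getBinary and Binary.getBytes apply; it is only a sketch, not part of the patch.

    default:
      // Sketch only: hand the raw bytes to the POJO and let the user deserialize
      // them. Assumes the POJO field for this column is declared as byte[] and
      // the Parquet value is a BINARY, so getBinary(fieldIndex, 0) applies.
      byte[] rawBytes = group.getBinary(fieldIndex, 0).getBytes();
      pojoSetters.get(afi.fieldInfo.getPojoFieldExpression()).set(obj, rawBytes);
      break;

A fallback like this would keep convertGroup from failing on unsupported types, at the cost of pushing the deserialization of those columns onto the user.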
> ParquetReader operator
> ----------------------
>
> Key: APEXMALHAR-2014
> URL: https://issues.apache.org/jira/browse/APEXMALHAR-2014
> Project: Apache Apex Malhar
> Issue Type: New Feature
> Reporter: shubham pathak
> Assignee: shubham pathak
>
> Developing a ParquetReaderOperator which would allow Apex users to read
> records from Parquet files.
--
This message was sent by Atlassian JIRA
(v6.3.4#6332)