ambition119 commented on a change in pull request #657: [HUDI-57] support orc file
URL: https://github.com/apache/incubator-hudi/pull/657#discussion_r281007136
 
 

 ##########
 File path: hoodie-client/src/main/java/com/uber/hoodie/io/storage/HoodieOrcWriter.java
 ##########
 @@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2016 Uber Technologies, Inc. (hoodie-dev-gr...@uber.com)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *          http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.uber.hoodie.io.storage;
+
+import com.uber.hoodie.avro.HoodieAvroWriteSupport;
+import com.uber.hoodie.common.model.HoodieRecord;
+import com.uber.hoodie.common.model.HoodieRecordPayload;
+import com.uber.hoodie.common.util.FSUtils;
+import com.uber.hoodie.common.util.HoodieAvroUtils;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.generic.IndexedRecord;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.orc.CompressionKind;
+import org.apache.orc.OrcFile;
+import org.apache.orc.TypeDescription;
+import org.apache.orc.Writer;
+import org.apache.spark.TaskContext;
+
+/**
+ * HoodieOrcWriter uses ORC's Writer to help limit the size of the underlying file. Provides
+ * a way to check whether the current file can take more records with <code>canWrite()</code>.
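+ *
+ * <p>A minimal usage sketch (the variable names and the converter are illustrative,
+ * not part of this patch):
+ * <pre>{@code
+ *   HoodieOrcWriter<MyPayload, GenericRecord> writer =
+ *       new HoodieOrcWriter<>(commitTime, path, orcConfig, orcSchema);
+ *   while (writer.canWrite() && records.hasNext()) {
+ *     HoodieRecord rec = records.next();
+ *     writer.writeAvroWithMetadata(toAvro(rec), rec); // toAvro: hypothetical converter
+ *   }
+ *   writer.close();
+ * }</pre>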
+ */
+public class HoodieOrcWriter<T extends HoodieRecordPayload, R extends IndexedRecord>
+    implements HoodieStorageWriter<R> {
+
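+  // Shared counter combined with the commit time and partition id to build a unique
+  // _hoodie_commit_seqno for every record written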
+  private static final AtomicLong recordIndex = new AtomicLong(1);
+
+  private final Path file;
+  private final HoodieWrapperFileSystem fs;
+  private final long stripeSize;
+  private final HoodieAvroWriteSupport writeSupport;
+  private final String commitTime;
+  private final Writer writer;
+  private final TypeDescription orcSchema;
+  private final List<String> orcFieldNames;
+  private final VectorizedRowBatch orcBatch;
+
+  public HoodieOrcWriter(String commitTime, Path file, HoodieOrcConfig orcConfig,
+      TypeDescription orcSchema) throws IOException {
+    this.file = HoodieWrapperFileSystem.convertToHoodiePath(file, orcConfig.getHadoopConf());
+    this.fs = (HoodieWrapperFileSystem) this.file
+        .getFileSystem(registerFileSystem(file, orcConfig.getHadoopConf()));
+    this.writeSupport = orcConfig.getWriteSupport();
+    this.commitTime = commitTime;
+    this.stripeSize = orcConfig.getStripeSize();
+
+    this.orcSchema = orcSchema;
+    // append the hoodie metadata fields to the ORC schema
+    this.orcSchema.addField("_hoodie_commit_time", TypeDescription.createString())
+        .addField("_hoodie_commit_seqno", TypeDescription.createString())
+        .addField("_hoodie_record_key", TypeDescription.createString())
+        .addField("_hoodie_partition_path", TypeDescription.createString())
+        .addField("_hoodie_file_name", TypeDescription.createString());
+
+    // Write through the wrapped path and filesystem so HoodieWrapperFileSystem can
+    // track the bytes written (consumed by canWrite())
+    this.writer = OrcFile.createWriter(
+        this.file,
+        OrcFile.writerOptions(orcConfig.getHadoopConf())
+            .fileSystem(fs)
+            .setSchema(this.orcSchema)
+            .stripeSize(orcConfig.getStripeSize())
+            .bufferSize(orcConfig.getBufferSize())
+            .blockSize(orcConfig.getBlockSize())
+            .compress(CompressionKind.ZLIB)
+            .version(OrcFile.Version.V_0_12));
+
+    orcFieldNames = this.orcSchema.getFieldNames();
+    orcBatch = this.orcSchema.createRowBatch();
+  }
+
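+  /**
+   * Registers HoodieWrapperFileSystem as the implementation of the hoodie-wrapped
+   * scheme for this file's filesystem, so that writes through the converted path
+   * are intercepted and their byte counts tracked.
+   */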
+  public static Configuration registerFileSystem(Path file, Configuration conf) {
+    Configuration returnConf = new Configuration(conf);
+    String scheme = FSUtils.getFs(file.toString(), conf).getScheme();
+    returnConf.set("fs." + HoodieWrapperFileSystem.getHoodieScheme(scheme) + ".impl",
+        HoodieWrapperFileSystem.class.getName());
+
+    return returnConf;
+  }
+
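+  /**
+   * Stamps the hoodie metadata fields (record key, partition path, file name,
+   * commit time and sequence number) onto the record, copies it into the current
+   * row batch and hands full batches to the ORC writer.
+   */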
+  @Override
+  public void writeAvroWithMetadata(R avroRecord, HoodieRecord record) throws IOException {
+    GenericRecord genericRecord = (GenericRecord) avroRecord;
+    String seqId = HoodieRecord.generateSequenceId(commitTime, TaskContext.getPartitionId(),
+        recordIndex.getAndIncrement());
+
+    HoodieAvroUtils.addHoodieKeyToRecord(genericRecord, record.getRecordKey(),
+        record.getPartitionPath(), file.getName());
+
+    GenericRecord metadataToRecord = HoodieAvroUtils
+        .addCommitMetadataToRecord(genericRecord, commitTime, seqId);
+
+    Schema avroSchema = metadataToRecord.getSchema();
+
+    int row = orcBatch.size++;
+    setOrcFieldValue((R) metadataToRecord, avroSchema, row);
+
+    // flush the batch to the writer once it is full
+    if (orcBatch.size == orcBatch.getMaxSize()) {
+      writer.addRowBatch(orcBatch);
+      orcBatch.reset();
+    }
+    writeSupport.add(record.getRecordKey());
+  }
+
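+  /**
+   * The file can take more records while the bytes written through the wrapper
+   * filesystem stay below the configured stripe-size limit.
+   */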
+  @Override
+  public boolean canWrite() {
+    return fs.getBytesWritten(file) < stripeSize;
+  }
+
+  @Override
+  public void writeAvro(String key, R avroRecord) throws IOException {
+    Schema avroSchema = avroRecord.getSchema();
+
+    int row = orcBatch.size++;
+    setOrcFieldValue(avroRecord, avroSchema, row);
+
+    // flush the batch to the writer once it is full
+    if (orcBatch.size == orcBatch.getMaxSize()) {
+      writer.addRowBatch(orcBatch);
+      orcBatch.reset();
+    }
+
+    writeSupport.add(key);
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (null != this.writer) {
+      // flush any rows still buffered in the batch before closing the file
+      if (orcBatch.size != 0) {
+        writer.addRowBatch(orcBatch);
+        orcBatch.reset();
+      }
+      this.writer.close();
+    }
+  }
+
+  private void setOrcFieldValue(R avroRecord, Schema avroSchema, int row) throws IOException {
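+    // (Body elided by this review excerpt.) Judging from the column-vector imports
+    // above, a plausible sketch of the mapping -- not necessarily the patch's
+    // actual code:
+    //   for (int i = 0; i < orcFieldNames.size(); i++) {
+    //     Object value = ((GenericRecord) avroRecord).get(orcFieldNames.get(i));
+    //     switch (orcSchema.getChildren().get(i).getCategory()) {
+    //       case LONG:
+    //         ((LongColumnVector) orcBatch.cols[i]).vector[row] = (Long) value; break;
+    //       case DOUBLE:
+    //         ((DoubleColumnVector) orcBatch.cols[i]).vector[row] = (Double) value; break;
+    //       case STRING:
+    //         ((BytesColumnVector) orcBatch.cols[i]).setVal(row, value.toString().getBytes()); break;
+    //       default:
+    //         throw new IOException("unsupported ORC type: " + orcSchema.getChildren().get(i));
+    //     }
+    //   }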
 
 Review comment:
   thanks
