n3nash commented on a change in pull request #600:  Timeline Service with 
Incremental View Syncing support
URL: https://github.com/apache/incubator-hudi/pull/600#discussion_r274217845
 
 

 ##########
 File path: 
hoodie-common/src/main/java/com/uber/hoodie/common/table/view/AbstractTableFileSystemView.java
 ##########
 @@ -0,0 +1,828 @@
+/*
+ *  Copyright (c) 2018 Uber Technologies, Inc. (hoodie-dev-gr...@uber.com)
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *           http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package com.uber.hoodie.common.table.view;
+
+import com.google.common.base.Preconditions;
+import com.uber.hoodie.common.model.CompactionOperation;
+import com.uber.hoodie.common.model.FileSlice;
+import com.uber.hoodie.common.model.HoodieDataFile;
+import com.uber.hoodie.common.model.HoodieFileGroup;
+import com.uber.hoodie.common.model.HoodieFileGroupId;
+import com.uber.hoodie.common.model.HoodieLogFile;
+import com.uber.hoodie.common.table.HoodieTableMetaClient;
+import com.uber.hoodie.common.table.HoodieTimeline;
+import com.uber.hoodie.common.table.HoodieView;
+import com.uber.hoodie.common.table.timeline.HoodieInstant;
+import com.uber.hoodie.common.util.CompactionUtils;
+import com.uber.hoodie.common.util.FSUtils;
+import com.uber.hoodie.common.util.Option;
+import com.uber.hoodie.common.util.collection.Pair;
+import com.uber.hoodie.exception.HoodieIOException;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+
+/**
+ * Common thread-safe implementation for multiple TableFileSystemView 
Implementations.
+ * Provides uniform handling of
+ *   (a) Loading file-system views from underlying file-system
+ *   (b) Pending compaction operations and changing file-system views based on 
that
+ *   (c) Thread-safety in loading and managing file system views for this 
dataset.
+ *   (d) resetting file-system views
+ * The actual mechanism of fetching file slices from different view storages 
is delegated to sub-classes.
+ */
+public abstract class AbstractTableFileSystemView implements HoodieView, 
Serializable {
+
+  private static Logger log = 
LogManager.getLogger(AbstractTableFileSystemView.class);
+
+  protected HoodieTableMetaClient metaClient;
+
+  // This is the commits that will be visible for all views extending this view
+  protected HoodieTimeline visibleActiveTimeline;
+
+  // Used to concurrently load and populate partition views
+  private ConcurrentHashMap<String, Boolean> addedPartitions = new 
ConcurrentHashMap<>(4096);
+
+  // Locks to control concurrency. Sync operations use write-lock blocking all 
fetch operations.
+  // For the common-case, we allow concurrent read of single or multiple 
partitions
+  private final ReentrantReadWriteLock globalLock = new 
ReentrantReadWriteLock();
+  private final ReadLock readLock = globalLock.readLock();
+  private final WriteLock writeLock = globalLock.writeLock();
+
+  private String getPartitionPathFromFilePath(String fullPath) {
+    return FSUtils.getRelativePartitionPath(new 
Path(metaClient.getBasePath()), new Path(fullPath).getParent());
+  }
+
+  /**
+   * Inisitalize the view.
+   */
+  protected void init(HoodieTableMetaClient metaClient, HoodieTimeline 
visibleActiveTimeline) {
+    this.metaClient = metaClient;
+    this.visibleActiveTimeline = visibleActiveTimeline;
+    // Load Pending Compaction Operations
+    resetPendingCompactionOperations(
+        CompactionUtils.getAllPendingCompactionOperations(metaClient).values()
+            .stream().map(e -> Pair.of(e.getKey(),
+            CompactionOperation.convertFromAvroRecordInstance(e.getValue()))));
+  }
+
+  /**
+   * Adds the provided statuses into the file system view, and also caches it 
inside this object.
+   */
+  protected List<HoodieFileGroup> addFilesToView(FileStatus[] statuses) {
+    long beginFgTs = System.currentTimeMillis();
+    List<HoodieFileGroup> fileGroups = buildFileGroups(statuses, 
visibleActiveTimeline, true);
+    long endFgTs = System.currentTimeMillis();
+    // Make building FileGroup Map efficient for both InMemory and DiskBased 
stuctures.
+    
fileGroups.stream().collect(Collectors.groupingBy(HoodieFileGroup::getPartitionPath)).entrySet()
+        .forEach(entry -> {
+          String partition = entry.getKey();
+          if (!isPartitionAvailableInStore(partition)) {
+            storePartitionView(partition, entry.getValue());
+          }
+        });
+    long endStoreTs = System.currentTimeMillis();
+    log.info("addFilesToView: NumFiles=" + statuses.length + ", 
FileGroupsCreationTime=" + (endFgTs - beginFgTs)
+        + ", StoreTimeTaken=" + (endStoreTs - endFgTs));
+    return fileGroups;
+  }
+
+  /**
+   * Build FileGroups from passed in file-status
+   */
+  protected List<HoodieFileGroup> buildFileGroups(FileStatus[] statuses, 
HoodieTimeline timeline,
+      boolean addPendingCompactionFileSlice) {
+    return buildFileGroups(convertFileStatusesToDataFiles(statuses), 
convertFileStatusesToLogFiles(statuses), timeline,
+        addPendingCompactionFileSlice);
+  }
+
+  protected List<HoodieFileGroup> buildFileGroups(Stream<HoodieDataFile> 
dataFileStream,
+      Stream<HoodieLogFile> logFileStream, HoodieTimeline timeline, boolean 
addPendingCompactionFileSlice) {
+
+    Map<Pair<String, String>, List<HoodieDataFile>> dataFiles = dataFileStream
+        .collect(Collectors.groupingBy((dataFile) -> {
+          String partitionPathStr = 
getPartitionPathFromFilePath(dataFile.getPath());
+          return Pair.of(partitionPathStr, dataFile.getFileId());
+        }));
+
+    Map<Pair<String, String>, List<HoodieLogFile>> logFiles = logFileStream
+        .collect(Collectors.groupingBy((logFile) -> {
+          String partitionPathStr = FSUtils.getRelativePartitionPath(
+              new Path(metaClient.getBasePath()),
+              logFile.getPath().getParent());
+          return Pair.of(partitionPathStr, logFile.getFileId());
+        }));
+
+    Set<Pair<String, String>> fileIdSet = new HashSet<>(dataFiles.keySet());
+    fileIdSet.addAll(logFiles.keySet());
+
+    List<HoodieFileGroup> fileGroups = new ArrayList<>();
+    fileIdSet.forEach(pair -> {
+      String fileId = pair.getValue();
+      HoodieFileGroup group = new HoodieFileGroup(pair.getKey(), fileId, 
timeline);
+      if (dataFiles.containsKey(pair)) {
+        dataFiles.get(pair).forEach(group::addDataFile);
+      }
+      if (logFiles.containsKey(pair)) {
+        logFiles.get(pair).forEach(group::addLogFile);
+      }
+      if (addPendingCompactionFileSlice) {
+        addPendingCompactionSliceIfNeeded(group);
+      }
+      fileGroups.add(group);
+    });
+
+    return fileGroups;
+  }
+
+  /**
+   * Add a new file-slice if compaction is pending
+   */
+  private void addPendingCompactionSliceIfNeeded(HoodieFileGroup group) {
+    Option<Pair<String, CompactionOperation>> pendingCompaction =
+        getPendingCompactionOperationWithInstant(group.getFileGroupId());
+    if (pendingCompaction.isPresent()) {
+      // If there is no delta-commit after compaction request, this step would 
ensure a new file-slice appears
+      // so that any new ingestion uses the correct base-instant
+      group.addNewFileSliceAtInstant(pendingCompaction.get().getKey());
+    }
+  }
+
+  /**
+   * Clears the partition Map only
+   */
+  public void reset() {
+    reset(visibleActiveTimeline);
+  }
+
+  /**
+   * Resets the partition-path
+   */
+  void reset(HoodieTimeline timeline) {
+    try {
+      writeLock.lock();
+
+      addedPartitions.clear();
+      resetViewState();
+
+      // Initialize with new Hoodie timeline.
+      init(metaClient, timeline);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Allows all view metadata to be reset by subclasses
+   */
+  protected abstract void resetViewState();
+
+  /**
+   * Allows lazily loading the partitions if needed
+   *
+   * @param partition partition to be loaded if not present
+   */
+  private void ensurePartitionLoadedCorrectly(String partition) {
+
+    Preconditions.checkArgument(!isClosed(), "View is already closed");
+
+    // ensure we list files only once even in the face of concurrency
+    addedPartitions.computeIfAbsent(partition, (partitionPathStr) -> {
+      long beginTs = System.currentTimeMillis();
+      if (!isPartitionAvailableInStore(partitionPathStr)) {
+        // Not loaded yet
+        try {
+          log.info("Building file system view for partition (" + 
partitionPathStr + ")");
+
+          // Create the path if it does not exist already
+          Path partitionPath = new Path(metaClient.getBasePath(), 
partitionPathStr);
+          FSUtils.createPathIfNotExists(metaClient.getFs(), partitionPath);
+          long beginLsTs = System.currentTimeMillis();
+          FileStatus[] statuses = metaClient.getFs().listStatus(partitionPath);
+          long endLsTs = System.currentTimeMillis();
+          log.info("#files found in partition (" + partitionPathStr + ") =" + 
statuses.length
+              + ", Time taken =" + (endLsTs - beginLsTs));
+          List<HoodieFileGroup> groups = addFilesToView(statuses);
+
+          if (groups.isEmpty()) {
+            storePartitionView(partitionPathStr, new ArrayList<>());
+          }
+        } catch (IOException e) {
+          throw new HoodieIOException("Failed to list data files in partition 
" + partitionPathStr, e);
+        }
+      } else {
+        log.debug("View already built for Partition :" + partitionPathStr + ", 
FOUND is ");
+      }
+      long endTs = System.currentTimeMillis();
+      log.info("Time to load partition (" + partitionPathStr + ") =" + (endTs 
- beginTs));
+      return true;
+    });
+  }
+
+  /**
+   * Helper to convert file-status to data-files
+   *
+   * @param statuses List of File-Status
 
 Review comment:
   nit : File

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to