This is an automated email from the ASF dual-hosted git repository.

sivabalan pushed a commit to branch release-0.5.3
in repository https://gitbox.apache.org/repos/asf/hudi.git

commit cebce61a3496f8788f5b96d6aedce0a8394fc325
Author: Suneel Marthi <smar...@apache.org>
AuthorDate: Fri Mar 13 20:28:05 2020 -0400

    [HUDI-629]: Replace Guava's Hashing with an equivalent in NumericUtils.java (#1350)
    
    * [HUDI-629]: Replace Guava's Hashing with an equivalent in NumericUtils.java
---
 hudi-client/pom.xml                                |  5 --
 .../apache/hudi/client/CompactionAdminClient.java  | 22 +++----
 .../org/apache/hudi/client/HoodieCleanClient.java  |  4 +-
 .../org/apache/hudi/client/HoodieWriteClient.java  | 14 ++---
 .../apache/hudi/config/HoodieCompactionConfig.java | 12 ++--
 .../org/apache/hudi/config/HoodieWriteConfig.java  |  1 -
 .../bloom/BucketizedBloomCheckPartitioner.java     |  5 +-
 .../apache/hudi/table/HoodieCopyOnWriteTable.java  |  6 +-
 .../apache/hudi/table/HoodieMergeOnReadTable.java  |  8 +--
 .../compact/HoodieMergeOnReadTableCompactor.java   | 10 ++--
 .../apache/hudi/table/rollback/RollbackHelper.java |  4 +-
 .../apache/hudi/config/TestHoodieWriteConfig.java  |  2 +-
 .../hudi/common/model/TimelineLayoutVersion.java   |  6 +-
 .../hudi/common/table/HoodieTableMetaClient.java   |  8 +--
 .../hudi/common/table/log/HoodieLogFileReader.java |  7 +--
 .../table/timeline/HoodieActiveTimeline.java       | 48 +++++++--------
 .../table/view/AbstractTableFileSystemView.java    | 23 ++++---
 .../table/view/FileSystemViewStorageConfig.java    |  5 +-
 .../table/view/HoodieTableFileSystemView.java      | 19 +++---
 .../view/RemoteHoodieTableFileSystemView.java      | 12 ++--
 .../table/view/RocksDbBasedFileSystemView.java     |  6 +-
 .../org/apache/hudi/common/util/AvroUtils.java     |  3 +-
 .../java/org/apache/hudi/common/util/FSUtils.java  |  5 +-
 .../hudi/common/util/FailSafeConsistencyGuard.java |  7 +--
 .../org/apache/hudi/common/util/NumericUtils.java  | 30 ++++++++++
 .../org/apache/hudi/common/util/RocksDBDAO.java    | 17 +++---
 .../apache/hudi/common/util/ValidationUtils.java   | 70 ++++++++++++++++++++++
 .../common/util/queue/BoundedInMemoryQueue.java    |  4 +-
 .../hudi/common/versioning/MetadataMigrator.java   |  7 +--
 .../versioning/clean/CleanV1MigrationHandler.java  |  9 +--
 .../versioning/clean/CleanV2MigrationHandler.java  |  4 +-
 .../compaction/CompactionV1MigrationHandler.java   | 11 ++--
 .../compaction/CompactionV2MigrationHandler.java   | 11 ++--
 .../hudi/common/minicluster/HdfsTestService.java   |  4 +-
 .../common/minicluster/ZookeeperTestService.java   |  9 +--
 .../hudi/common/table/log/TestHoodieLogFormat.java |  2 +-
 .../table/view/TestIncrementalFSViewSync.java      | 16 ++---
 .../apache/hudi/common/util/TestNumericUtils.java  | 26 ++++++++
 .../realtime/HoodieParquetRealtimeInputFormat.java |  4 +-
 hudi-hive/pom.xml                                  |  5 --
 .../org/apache/hudi/hive/HoodieHiveClient.java     |  4 +-
 .../hudi/hive/MultiPartKeysValueExtractor.java     |  4 +-
 .../org/apache/hudi/hive/SchemaDifference.java     |  2 +-
 .../org/apache/hudi/hive/util/HiveTestService.java |  8 +--
 hudi-integ-test/pom.xml                            |  7 ---
 .../timeline/service/FileSystemViewHandler.java    |  4 +-
 .../hudi/utilities/HoodieWithTimelineServer.java   |  4 +-
 .../org/apache/hudi/utilities/UtilHelpers.java     |  4 +-
 .../hudi/utilities/deltastreamer/DeltaSync.java    | 10 ++--
 .../deltastreamer/HoodieDeltaStreamer.java         |  4 +-
 .../sources/helpers/IncrSourceHelper.java          | 14 +++--
 51 files changed, 308 insertions(+), 228 deletions(-)
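
The new ValidationUtils class listed above (+70 lines under hudi-common/.../util) takes over
the argument checks previously done with Guava's Preconditions, but its source appears later
in the patch and is not part of this excerpt. A minimal sketch of the checkArgument overloads
that the call sites below rely on could look like the following; the method bodies and the
choice of IllegalArgumentException are assumptions, not the committed code.

    package org.apache.hudi.common.util;

    public class ValidationUtils {

      // Stand-in for Guava's Preconditions.checkArgument(boolean): fail fast on a bad argument.
      public static void checkArgument(final boolean expression) {
        if (!expression) {
          throw new IllegalArgumentException();
        }
      }

      // Overload carrying an error message, matching call sites such as
      // ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant.").
      public static void checkArgument(final boolean expression, final String errorMessage) {
        if (!expression) {
          throw new IllegalArgumentException(errorMessage);
        }
      }
    }

A call such as ValidationUtils.checkArgument(maxInstantsToKeep > minInstantsToKeep) then behaves
like the Preconditions call it replaces, without the explicit guava dependency that this patch
drops from hudi-client/pom.xml and the other POMs touched below.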

diff --git a/hudi-client/pom.xml b/hudi-client/pom.xml
index a91a19f..7b665de 100644
--- a/hudi-client/pom.xml
+++ b/hudi-client/pom.xml
@@ -119,11 +119,6 @@
     </dependency>
 
     <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-
-    <dependency>
       <groupId>com.beust</groupId>
       <artifactId>jcommander</artifactId>
       <scope>test</scope>
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/client/CompactionAdminClient.java 
b/hudi-client/src/main/java/org/apache/hudi/client/CompactionAdminClient.java
index 713fed4..7d2d664 100644
--- 
a/hudi-client/src/main/java/org/apache/hudi/client/CompactionAdminClient.java
+++ 
b/hudi-client/src/main/java/org/apache/hudi/client/CompactionAdminClient.java
@@ -36,13 +36,13 @@ import org.apache.hudi.common.util.AvroUtils;
 import org.apache.hudi.common.util.CompactionUtils;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.table.compact.OperationResult;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.LogManager;
@@ -138,7 +138,7 @@ public class CompactionAdminClient extends 
AbstractHoodieClient {
       // TODO: Add a rollback instant but for compaction
       HoodieInstant instant = new HoodieInstant(State.REQUESTED, 
COMPACTION_ACTION, compactionInstant);
       boolean deleted = metaClient.getFs().delete(new 
Path(metaClient.getMetaPath(), instant.getFileName()), false);
-      Preconditions.checkArgument(deleted, "Unable to delete compaction 
instant.");
+      ValidationUtils.checkArgument(deleted, "Unable to delete compaction 
instant.");
     }
     return res;
   }
@@ -247,7 +247,7 @@ public class CompactionAdminClient extends 
AbstractHoodieClient {
     List<HoodieLogFile> logFilesToBeMoved =
         merged.getLogFiles().filter(lf -> lf.getLogVersion() > 
maxVersion).collect(Collectors.toList());
     return logFilesToBeMoved.stream().map(lf -> {
-      Preconditions.checkArgument(lf.getLogVersion() - maxVersion > 0, "Expect 
new log version to be sane");
+      ValidationUtils.checkArgument(lf.getLogVersion() - maxVersion > 0, 
"Expect new log version to be sane");
       HoodieLogFile newLogFile = new HoodieLogFile(new 
Path(lf.getPath().getParent(),
           FSUtils.makeLogFileName(lf.getFileId(), "." + 
FSUtils.getFileExtensionFromLog(lf.getPath()),
               compactionInstant, lf.getLogVersion() - maxVersion, 
HoodieLogFormat.UNKNOWN_WRITE_TOKEN)));
@@ -266,9 +266,9 @@ public class CompactionAdminClient extends 
AbstractHoodieClient {
   protected static void renameLogFile(HoodieTableMetaClient metaClient, 
HoodieLogFile oldLogFile,
       HoodieLogFile newLogFile) throws IOException {
     FileStatus[] statuses = 
metaClient.getFs().listStatus(oldLogFile.getPath());
-    Preconditions.checkArgument(statuses.length == 1, "Only one status must be 
present");
-    Preconditions.checkArgument(statuses[0].isFile(), "Source File must 
exist");
-    
Preconditions.checkArgument(oldLogFile.getPath().getParent().equals(newLogFile.getPath().getParent()),
+    ValidationUtils.checkArgument(statuses.length == 1, "Only one status must 
be present");
+    ValidationUtils.checkArgument(statuses[0].isFile(), "Source File must 
exist");
+    
ValidationUtils.checkArgument(oldLogFile.getPath().getParent().equals(newLogFile.getPath().getParent()),
         "Log file must only be moved within the parent directory");
     metaClient.getFs().rename(oldLogFile.getPath(), newLogFile.getPath());
   }
@@ -300,9 +300,9 @@ public class CompactionAdminClient extends 
AbstractHoodieClient {
                     new 
Path(FSUtils.getPartitionPath(metaClient.getBasePath(), 
operation.getPartitionPath()),
                         new Path(operation.getDataFileName().get())))
                 .getPath().toString();
-            Preconditions.checkArgument(df.isPresent(),
+            ValidationUtils.checkArgument(df.isPresent(),
                 "Data File must be present. File Slice was : " + fs + ", 
operation :" + operation);
-            Preconditions.checkArgument(df.get().getPath().equals(expPath),
+            ValidationUtils.checkArgument(df.get().getPath().equals(expPath),
                 "Base Path in operation is specified as " + expPath + " but 
got path " + df.get().getPath());
           }
           Set<HoodieLogFile> logFilesInFileSlice = 
fs.getLogFiles().collect(Collectors.toSet());
@@ -310,7 +310,7 @@ public class CompactionAdminClient extends 
AbstractHoodieClient {
             try {
               FileStatus[] fileStatuses = metaClient.getFs().listStatus(new 
Path(
                   FSUtils.getPartitionPath(metaClient.getBasePath(), 
operation.getPartitionPath()), new Path(dp)));
-              Preconditions.checkArgument(fileStatuses.length == 1, "Expect 
only 1 file-status");
+              ValidationUtils.checkArgument(fileStatuses.length == 1, "Expect 
only 1 file-status");
               return new HoodieLogFile(fileStatuses[0]);
             } catch (FileNotFoundException fe) {
               throw new CompactionValidationException(fe.getMessage());
@@ -320,12 +320,12 @@ public class CompactionAdminClient extends 
AbstractHoodieClient {
           }).collect(Collectors.toSet());
           Set<HoodieLogFile> missing = 
logFilesInCompactionOp.stream().filter(lf -> !logFilesInFileSlice.contains(lf))
               .collect(Collectors.toSet());
-          Preconditions.checkArgument(missing.isEmpty(),
+          ValidationUtils.checkArgument(missing.isEmpty(),
               "All log files specified in compaction operation is not present. 
Missing :" + missing + ", Exp :"
                   + logFilesInCompactionOp + ", Got :" + logFilesInFileSlice);
           Set<HoodieLogFile> diff = logFilesInFileSlice.stream().filter(lf -> 
!logFilesInCompactionOp.contains(lf))
               .collect(Collectors.toSet());
-          Preconditions.checkArgument(diff.stream().allMatch(lf -> 
lf.getBaseCommitTime().equals(compactionInstant)),
+          ValidationUtils.checkArgument(diff.stream().allMatch(lf -> 
lf.getBaseCommitTime().equals(compactionInstant)),
               "There are some log-files which are neither specified in 
compaction plan "
                   + "nor present after compaction request instant. Some of 
these :" + diff);
         } else {
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/client/HoodieCleanClient.java 
b/hudi-client/src/main/java/org/apache/hudi/client/HoodieCleanClient.java
index 019d264..15ba2a7 100644
--- a/hudi-client/src/main/java/org/apache/hudi/client/HoodieCleanClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/client/HoodieCleanClient.java
@@ -31,13 +31,13 @@ import 
org.apache.hudi.common.table.timeline.HoodieInstant.State;
 import org.apache.hudi.common.util.AvroUtils;
 import org.apache.hudi.common.util.CleanerUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.metrics.HoodieMetrics;
 import org.apache.hudi.table.HoodieTable;
 
 import com.codahale.metrics.Timer;
-import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.apache.spark.api.java.JavaSparkContext;
@@ -147,7 +147,7 @@ public class HoodieCleanClient<T extends 
HoodieRecordPayload> extends AbstractHo
 
   private HoodieCleanMetadata runClean(HoodieTable<T> table, HoodieInstant 
cleanInstant,
       HoodieCleanerPlan cleanerPlan) {
-    Preconditions.checkArgument(
+    ValidationUtils.checkArgument(
         cleanInstant.getState().equals(State.REQUESTED) || 
cleanInstant.getState().equals(State.INFLIGHT));
 
     try {
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/client/HoodieWriteClient.java 
b/hudi-client/src/main/java/org/apache/hudi/client/HoodieWriteClient.java
index 40be544..e201487 100644
--- a/hudi-client/src/main/java/org/apache/hudi/client/HoodieWriteClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/client/HoodieWriteClient.java
@@ -42,6 +42,7 @@ import org.apache.hudi.common.util.AvroUtils;
 import org.apache.hudi.common.util.CompactionUtils;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.config.HoodieCompactionConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieCommitException;
@@ -61,7 +62,6 @@ import org.apache.hudi.table.WorkloadProfile;
 import org.apache.hudi.table.WorkloadStat;
 
 import com.codahale.metrics.Timer;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -580,7 +580,7 @@ public class HoodieWriteClient<T extends 
HoodieRecordPayload> extends AbstractHo
       }
 
       // Cannot allow savepoint time on a commit that could have been cleaned
-      Preconditions.checkArgument(
+      ValidationUtils.checkArgument(
           HoodieTimeline.compareTimestamps(commitTime, lastCommitRetained, 
HoodieTimeline.GREATER_OR_EQUAL),
           "Could not savepoint commit " + commitTime + " as this is beyond the 
lookup window " + lastCommitRetained);
 
@@ -696,8 +696,8 @@ public class HoodieWriteClient<T extends 
HoodieRecordPayload> extends AbstractHo
     // Make sure the rollback was successful
     Option<HoodieInstant> lastInstant =
         
activeTimeline.reload().getCommitsAndCompactionTimeline().filterCompletedAndCompactionInstants().lastInstant();
-    Preconditions.checkArgument(lastInstant.isPresent());
-    
Preconditions.checkArgument(lastInstant.get().getTimestamp().equals(savepointTime),
+    ValidationUtils.checkArgument(lastInstant.isPresent());
+    
ValidationUtils.checkArgument(lastInstant.get().getTimestamp().equals(savepointTime),
         savepointTime + "is not the last commit after rolling back " + 
commitsToRollback + ", last commit was "
             + lastInstant.get().getTimestamp());
     return true;
@@ -868,7 +868,7 @@ public class HoodieWriteClient<T extends 
HoodieRecordPayload> extends AbstractHo
     HoodieTableMetaClient metaClient = createMetaClient(true);
     // if there are pending compactions, their instantTime must not be greater 
than that of this instant time
     
metaClient.getActiveTimeline().filterPendingCompactionTimeline().lastInstant().ifPresent(latestPending
 ->
-        Preconditions.checkArgument(
+        ValidationUtils.checkArgument(
             HoodieTimeline.compareTimestamps(latestPending.getTimestamp(), 
instantTime, HoodieTimeline.LESSER),
         "Latest pending compaction instant time must be earlier than this 
instant time. Latest Compaction :"
             + latestPending + ",  Ingesting at " + instantTime));
@@ -901,7 +901,7 @@ public class HoodieWriteClient<T extends 
HoodieRecordPayload> extends AbstractHo
     HoodieTableMetaClient metaClient = createMetaClient(true);
     // if there are inflight writes, their instantTime must not be less than 
that of compaction instant time
     
metaClient.getCommitsTimeline().filterPendingExcludingCompaction().firstInstant().ifPresent(earliestInflight
 -> {
-      Preconditions.checkArgument(
+      ValidationUtils.checkArgument(
           HoodieTimeline.compareTimestamps(earliestInflight.getTimestamp(), 
instantTime, HoodieTimeline.GREATER),
           "Earliest write inflight instant time must be later than compaction 
time. Earliest :" + earliestInflight
               + ", Compaction scheduled at " + instantTime);
@@ -911,7 +911,7 @@ public class HoodieWriteClient<T extends 
HoodieRecordPayload> extends AbstractHo
         
.getActiveTimeline().getCommitsAndCompactionTimeline().getInstants().filter(instant
 -> HoodieTimeline
             .compareTimestamps(instant.getTimestamp(), instantTime, 
HoodieTimeline.GREATER_OR_EQUAL))
         .collect(Collectors.toList());
-    Preconditions.checkArgument(conflictingInstants.isEmpty(),
+    ValidationUtils.checkArgument(conflictingInstants.isEmpty(),
         "Following instants have timestamps >= compactionInstant (" + 
instantTime + ") Instants :"
             + conflictingInstants);
     HoodieTable<T> table = HoodieTable.getHoodieTable(metaClient, config, jsc);
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java 
b/hudi-client/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
index 180dba1..074ea78 100644
--- 
a/hudi-client/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
+++ 
b/hudi-client/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
@@ -20,11 +20,10 @@ package org.apache.hudi.config;
 
 import org.apache.hudi.common.model.HoodieCleaningPolicy;
 import org.apache.hudi.common.model.OverwriteWithLatestAvroPayload;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.table.compact.strategy.CompactionStrategy;
 import 
org.apache.hudi.table.compact.strategy.LogFileSizeBasedCompactionStrategy;
 
-import com.google.common.base.Preconditions;
-
 import javax.annotation.concurrent.Immutable;
 
 import java.io.File;
@@ -121,12 +120,9 @@ public class HoodieCompactionConfig extends 
DefaultHoodieConfig {
     private final Properties props = new Properties();
 
     public Builder fromFile(File propertiesFile) throws IOException {
-      FileReader reader = new FileReader(propertiesFile);
-      try {
+      try (FileReader reader = new FileReader(propertiesFile)) {
         this.props.load(reader);
         return this;
-      } finally {
-        reader.close();
       }
     }
 
@@ -292,8 +288,8 @@ public class HoodieCompactionConfig extends 
DefaultHoodieConfig {
       int maxInstantsToKeep = 
Integer.parseInt(props.getProperty(HoodieCompactionConfig.MAX_COMMITS_TO_KEEP_PROP));
       int cleanerCommitsRetained =
           
Integer.parseInt(props.getProperty(HoodieCompactionConfig.CLEANER_COMMITS_RETAINED_PROP));
-      Preconditions.checkArgument(maxInstantsToKeep > minInstantsToKeep);
-      Preconditions.checkArgument(minInstantsToKeep > cleanerCommitsRetained,
+      ValidationUtils.checkArgument(maxInstantsToKeep > minInstantsToKeep);
+      ValidationUtils.checkArgument(minInstantsToKeep > cleanerCommitsRetained,
           String.format(
               "Increase %s=%d to be greater than %s=%d. Otherwise, there is 
risk of incremental pull "
                   + "missing data from few instants.",
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java 
b/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
index 489bdb2..f88d96a 100644
--- a/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
+++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
@@ -96,7 +96,6 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
   private static final String MAX_CONSISTENCY_CHECKS_PROP = 
"hoodie.consistency.check.max_checks";
   private static int DEFAULT_MAX_CONSISTENCY_CHECKS = 7;
 
-
   private ConsistencyGuardConfig consistencyGuardConfig;
 
   // Hoodie Write Client transparently rewrites File System View config when 
embedded mode is enabled
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
 
b/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
index 17b7506..798120c 100644
--- 
a/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
+++ 
b/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
@@ -18,14 +18,13 @@
 
 package org.apache.hudi.index.bloom;
 
+import org.apache.hudi.common.util.NumericUtils;
 import org.apache.hudi.common.util.collection.Pair;
 
-import com.google.common.hash.Hashing;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.apache.spark.Partitioner;
 
-import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -144,7 +143,7 @@ public class BucketizedBloomCheckPartitioner extends 
Partitioner {
   @Override
   public int getPartition(Object key) {
     final Pair<String, String> parts = (Pair<String, String>) key;
-    final long hashOfKey = Hashing.md5().hashString(parts.getRight(), 
StandardCharsets.UTF_8).asLong();
+    final long hashOfKey = NumericUtils.getMessageDigestHash("MD5", 
parts.getRight());
     final List<Integer> candidatePartitions = 
fileGroupToPartitions.get(parts.getLeft());
     final int idx = (int) Math.floorMod(hashOfKey, candidatePartitions.size());
     assert idx >= 0;
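
The NumericUtils.getMessageDigestHash helper called above is the replacement for Guava's
Hashing.md5().hashString(...).asLong(); its implementation (+30 lines in the diffstat) appears
later in the patch and is not shown in this excerpt. One way such a helper could compute a
stable long hash with java.security.MessageDigest is sketched below; the byte-folding and the
exception handling are assumptions, not the committed code.

    package org.apache.hudi.common.util;

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class NumericUtils {

      // Digest the UTF-8 bytes of the string with the named algorithm (e.g. "MD5")
      // and fold the first 8 bytes of the digest into a long.
      public static long getMessageDigestHash(final String algorithmName, final String string) {
        try {
          final MessageDigest md = MessageDigest.getInstance(algorithmName);
          final byte[] digest = md.digest(string.getBytes(StandardCharsets.UTF_8));
          return ByteBuffer.wrap(digest).getLong();
        } catch (NoSuchAlgorithmException e) {
          throw new IllegalArgumentException("Unknown digest algorithm: " + algorithmName, e);
        }
      }
    }

The long returned for a record key or file-group key is then mapped onto a bucket with
Math.floorMod, as in getPartition() above, mirroring the shape of the removed Guava call.
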
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java 
b/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
index 4efc0e6..82b08b7 100644
--- 
a/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
+++ 
b/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
@@ -37,6 +37,7 @@ import 
org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.table.timeline.HoodieInstant.State;
 import org.apache.hudi.common.util.FSUtils;
+import org.apache.hudi.common.util.NumericUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.common.util.queue.BoundedInMemoryExecutor;
@@ -52,7 +53,6 @@ import org.apache.hudi.execution.SparkBoundedInMemoryExecutor;
 import org.apache.hudi.io.HoodieCreateHandle;
 import org.apache.hudi.io.HoodieMergeHandle;
 
-import com.google.common.hash.Hashing;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
 import org.apache.hadoop.fs.FileSystem;
@@ -72,7 +72,6 @@ import org.apache.spark.api.java.function.PairFlatMapFunction;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.Serializable;
-import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -732,8 +731,7 @@ public class HoodieCopyOnWriteTable<T extends 
HoodieRecordPayload> extends Hoodi
         // pick the target bucket to use based on the weights.
         double totalWeight = 0.0;
         final long totalInserts = Math.max(1, globalStat.getNumInserts());
-        final long hashOfKey =
-            Hashing.md5().hashString(keyLocation._1().getRecordKey(), 
StandardCharsets.UTF_8).asLong();
+        final long hashOfKey = NumericUtils.getMessageDigestHash("MD5", 
keyLocation._1().getRecordKey());
         final double r = 1.0 * Math.floorMod(hashOfKey, totalInserts) / 
totalInserts;
         for (InsertBucket insertBucket : targetBuckets) {
           totalWeight += insertBucket.weight;
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java 
b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
index c36cdf4..50d41b3 100644
--- 
a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
+++ 
b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
@@ -34,6 +34,7 @@ import 
org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieCompactionException;
 import org.apache.hudi.exception.HoodieIOException;
@@ -42,7 +43,6 @@ import 
org.apache.hudi.execution.MergeOnReadLazyInsertIterable;
 import org.apache.hudi.io.HoodieAppendHandle;
 import org.apache.hudi.table.compact.HoodieMergeOnReadTableCompactor;
 
-import com.google.common.base.Preconditions;
 import org.apache.hudi.table.rollback.RollbackHelper;
 import org.apache.hudi.table.rollback.RollbackRequest;
 import org.apache.log4j.LogManager;
@@ -422,7 +422,7 @@ public class HoodieMergeOnReadTable<T extends 
HoodieRecordPayload> extends Hoodi
 
   private List<RollbackRequest> generateAppendRollbackBlocksAction(String 
partitionPath, HoodieInstant rollbackInstant,
       HoodieCommitMetadata commitMetadata) {
-    
Preconditions.checkArgument(rollbackInstant.getAction().equals(HoodieTimeline.DELTA_COMMIT_ACTION));
+    
ValidationUtils.checkArgument(rollbackInstant.getAction().equals(HoodieTimeline.DELTA_COMMIT_ACTION));
 
     // wStat.getPrevCommit() might not give the right commit time in the 
following
     // scenario : If a compaction was scheduled, the new commitTime associated 
with the requested compaction will be
@@ -439,9 +439,9 @@ public class HoodieMergeOnReadTable<T extends 
HoodieRecordPayload> extends Hoodi
 
       if (validForRollback) {
         // For sanity, log instant time can never be less than base-commit on 
which we are rolling back
-        Preconditions
+        ValidationUtils
             
.checkArgument(HoodieTimeline.compareTimestamps(fileIdToBaseCommitTimeForLogMap.get(wStat.getFileId()),
-                rollbackInstant.getTimestamp(), 
HoodieTimeline.LESSER_OR_EQUAL));
+              rollbackInstant.getTimestamp(), HoodieTimeline.LESSER_OR_EQUAL));
       }
 
       return validForRollback && 
HoodieTimeline.compareTimestamps(fileIdToBaseCommitTimeForLogMap.get(
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/table/compact/HoodieMergeOnReadTableCompactor.java
 
b/hudi-client/src/main/java/org/apache/hudi/table/compact/HoodieMergeOnReadTableCompactor.java
index 4795e90..e1d2706 100644
--- 
a/hudi-client/src/main/java/org/apache/hudi/table/compact/HoodieMergeOnReadTableCompactor.java
+++ 
b/hudi-client/src/main/java/org/apache/hudi/table/compact/HoodieMergeOnReadTableCompactor.java
@@ -35,14 +35,13 @@ import org.apache.hudi.common.util.CompactionUtils;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.HoodieAvroUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.table.compact.strategy.CompactionStrategy;
 import org.apache.hudi.table.HoodieCopyOnWriteTable;
 import org.apache.hudi.table.HoodieTable;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import org.apache.avro.Schema;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,6 +55,7 @@ import org.apache.spark.util.AccumulatorV2;
 import org.apache.spark.util.LongAccumulator;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
@@ -125,7 +125,7 @@ public class HoodieMergeOnReadTableCompactor implements 
HoodieCompactor {
         config.getCompactionReverseLogReadEnabled(), 
config.getMaxDFSStreamBufferSize(),
         config.getSpillableMapBasePath());
     if (!scanner.iterator().hasNext()) {
-      return Lists.<WriteStatus>newArrayList();
+      return new ArrayList<>();
     }
 
     Option<HoodieBaseFile> oldDataFileOpt =
@@ -169,7 +169,7 @@ public class HoodieMergeOnReadTableCompactor implements 
HoodieCompactor {
     jsc.sc().register(totalLogFiles);
     jsc.sc().register(totalFileSlices);
 
-    Preconditions.checkArgument(hoodieTable.getMetaClient().getTableType() == 
HoodieTableType.MERGE_ON_READ,
+    ValidationUtils.checkArgument(hoodieTable.getMetaClient().getTableType() 
== HoodieTableType.MERGE_ON_READ,
         "Can only compact table of type " + HoodieTableType.MERGE_ON_READ + " 
and not "
             + hoodieTable.getMetaClient().getTableType().name());
 
@@ -214,7 +214,7 @@ public class HoodieMergeOnReadTableCompactor implements 
HoodieCompactor {
     // compactions only
     HoodieCompactionPlan compactionPlan = 
config.getCompactionStrategy().generateCompactionPlan(config, operations,
         
CompactionUtils.getAllPendingCompactionPlans(metaClient).stream().map(Pair::getValue).collect(toList()));
-    Preconditions.checkArgument(
+    ValidationUtils.checkArgument(
         compactionPlan.getOperations().stream().noneMatch(
             op -> fgIdsInPendingCompactions.contains(new 
HoodieFileGroupId(op.getPartitionPath(), op.getFileId()))),
         "Bad Compaction Plan. FileId MUST NOT have multiple pending 
compactions. "
diff --git 
a/hudi-client/src/main/java/org/apache/hudi/table/rollback/RollbackHelper.java 
b/hudi-client/src/main/java/org/apache/hudi/table/rollback/RollbackHelper.java
index e559536..901c51e 100644
--- 
a/hudi-client/src/main/java/org/apache/hudi/table/rollback/RollbackHelper.java
+++ 
b/hudi-client/src/main/java/org/apache/hudi/table/rollback/RollbackHelper.java
@@ -28,10 +28,10 @@ import 
org.apache.hudi.common.table.log.block.HoodieCommandBlock.HoodieCommandBl
 import 
org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.util.FSUtils;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieRollbackException;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.PathFilter;
@@ -147,7 +147,7 @@ public class RollbackHelper implements Serializable {
    * @return Merged HoodieRollbackStat
    */
   private HoodieRollbackStat mergeRollbackStat(HoodieRollbackStat stat1, 
HoodieRollbackStat stat2) {
-    
Preconditions.checkArgument(stat1.getPartitionPath().equals(stat2.getPartitionPath()));
+    
ValidationUtils.checkArgument(stat1.getPartitionPath().equals(stat2.getPartitionPath()));
     final List<String> successDeleteFiles = new ArrayList<>();
     final List<String> failedDeleteFiles = new ArrayList<>();
     final Map<FileStatus, Long> commandBlocksCount = new HashMap<>();
diff --git 
a/hudi-client/src/test/java/org/apache/hudi/config/TestHoodieWriteConfig.java 
b/hudi-client/src/test/java/org/apache/hudi/config/TestHoodieWriteConfig.java
index 80b59a0..0d9c59d 100644
--- 
a/hudi-client/src/test/java/org/apache/hudi/config/TestHoodieWriteConfig.java
+++ 
b/hudi-client/src/test/java/org/apache/hudi/config/TestHoodieWriteConfig.java
@@ -37,7 +37,7 @@ public class TestHoodieWriteConfig {
   @Test
   public void testPropertyLoading() throws IOException {
     Builder builder = HoodieWriteConfig.newBuilder().withPath("/tmp");
-    Map<String, String> params = new HashMap<>();
+    Map<String, String> params = new HashMap<>(3);
     params.put(HoodieCompactionConfig.CLEANER_COMMITS_RETAINED_PROP, "1");
     params.put(HoodieCompactionConfig.MAX_COMMITS_TO_KEEP_PROP, "5");
     params.put(HoodieCompactionConfig.MIN_COMMITS_TO_KEEP_PROP, "2");
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/model/TimelineLayoutVersion.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/model/TimelineLayoutVersion.java
index cd45eb3..d05d843 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/model/TimelineLayoutVersion.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/model/TimelineLayoutVersion.java
@@ -18,7 +18,7 @@
 
 package org.apache.hudi.common.model;
 
-import com.google.common.base.Preconditions;
+import org.apache.hudi.common.util.ValidationUtils;
 
 import java.io.Serializable;
 import java.util.Objects;
@@ -37,8 +37,8 @@ public class TimelineLayoutVersion implements Serializable, 
Comparable<TimelineL
   private Integer version;
 
   public TimelineLayoutVersion(Integer version) {
-    Preconditions.checkArgument(version <= CURR_VERSION);
-    Preconditions.checkArgument(version >= VERSION_0);
+    ValidationUtils.checkArgument(version <= CURR_VERSION);
+    ValidationUtils.checkArgument(version >= VERSION_0);
     this.version = version;
   }
 
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java
index f66348e..5c56d17 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java
@@ -26,14 +26,14 @@ import 
org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
 import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.util.ConsistencyGuardConfig;
-import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.FailSafeConsistencyGuard;
+import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.NoOpConsistencyGuard;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.exception.TableNotFoundException;
 import org.apache.hudi.exception.HoodieException;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -119,7 +119,7 @@ public class HoodieTableMetaClient implements Serializable {
     Option<TimelineLayoutVersion> tableConfigVersion = 
tableConfig.getTimelineLayoutVersion();
     if (layoutVersion.isPresent() && tableConfigVersion.isPresent()) {
       // Ensure layout version passed in config is not lower than the one seen 
in hoodie.properties
-      
Preconditions.checkArgument(layoutVersion.get().compareTo(tableConfigVersion.get())
 >= 0,
+      
ValidationUtils.checkArgument(layoutVersion.get().compareTo(tableConfigVersion.get())
 >= 0,
           "Layout Version defined in hoodie properties has higher version (" + 
tableConfigVersion.get()
               + ") than the one passed in config (" + layoutVersion.get() + 
")");
     }
@@ -233,7 +233,7 @@ public class HoodieTableMetaClient implements Serializable {
   public HoodieWrapperFileSystem getFs() {
     if (fs == null) {
       FileSystem fileSystem = FSUtils.getFs(metaPath, hadoopConf.newCopy());
-      Preconditions.checkArgument(!(fileSystem instanceof 
HoodieWrapperFileSystem),
+      ValidationUtils.checkArgument(!(fileSystem instanceof 
HoodieWrapperFileSystem),
           "File System not expected to be that of HoodieWrapperFileSystem");
       fs = new HoodieWrapperFileSystem(fileSystem,
           consistencyGuardConfig.isConsistencyCheckEnabled()
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFileReader.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFileReader.java
index 40a5243..f6e9326 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFileReader.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFileReader.java
@@ -28,11 +28,11 @@ import 
org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType;
 import 
org.apache.hudi.common.table.log.block.HoodieLogBlock.HoodieLogBlockType;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.exception.CorruptedLogFileException;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.exception.HoodieNotSupportedException;
 
-import com.google.common.base.Preconditions;
 import org.apache.avro.Schema;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -61,7 +61,6 @@ class HoodieLogFileReader implements HoodieLogFormat.Reader {
   private final HoodieLogFile logFile;
   private static final byte[] MAGIC_BUFFER = new byte[6];
   private final Schema readerSchema;
-  private HoodieLogFormat.LogFormatVersion nextBlockVersion;
   private boolean readBlockLazily;
   private long reverseLogFilePosition;
   private long lastReverseLogFilePosition;
@@ -145,13 +144,13 @@ class HoodieLogFileReader implements 
HoodieLogFormat.Reader {
     }
 
     // 2. Read the version for this log format
-    this.nextBlockVersion = readVersion();
+    HoodieLogFormat.LogFormatVersion nextBlockVersion = readVersion();
 
     // 3. Read the block type for a log block
     if (nextBlockVersion.getVersion() != 
HoodieLogFormatVersion.DEFAULT_VERSION) {
       type = inputStream.readInt();
 
-      Preconditions.checkArgument(type < HoodieLogBlockType.values().length, 
"Invalid block byte type found " + type);
+      ValidationUtils.checkArgument(type < HoodieLogBlockType.values().length, 
"Invalid block byte type found " + type);
       blockType = HoodieLogBlockType.values()[type];
     }
 
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java
index e5829f8..389314f 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java
@@ -23,9 +23,9 @@ import org.apache.hudi.common.table.HoodieTimeline;
 import org.apache.hudi.common.table.timeline.HoodieInstant.State;
 import org.apache.hudi.common.util.FileIOUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.exception.HoodieIOException;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -140,7 +140,7 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
 
   public void saveAsComplete(HoodieInstant instant, Option<byte[]> data) {
     LOG.info("Marking instant complete " + instant);
-    Preconditions.checkArgument(instant.isInflight(),
+    ValidationUtils.checkArgument(instant.isInflight(),
         "Could not mark an already completed instant as complete again " + 
instant);
     transitionState(instant, HoodieTimeline.getCompletedInstant(instant), 
data);
     LOG.info("Completed " + instant);
@@ -155,18 +155,18 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
   }
 
   public void deleteInflight(HoodieInstant instant) {
-    Preconditions.checkArgument(instant.isInflight());
+    ValidationUtils.checkArgument(instant.isInflight());
     deleteInstantFile(instant);
   }
 
   public void deletePending(HoodieInstant instant) {
-    Preconditions.checkArgument(!instant.isCompleted());
+    ValidationUtils.checkArgument(!instant.isCompleted());
     deleteInstantFile(instant);
   }
 
   public void deleteCompactionRequested(HoodieInstant instant) {
-    Preconditions.checkArgument(instant.isRequested());
-    
Preconditions.checkArgument(instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
+    ValidationUtils.checkArgument(instant.isRequested());
+    
ValidationUtils.checkArgument(instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
     deleteInstantFile(instant);
   }
 
@@ -222,8 +222,8 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
    * @return requested instant
    */
   public HoodieInstant revertCompactionInflightToRequested(HoodieInstant 
inflightInstant) {
-    
Preconditions.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
-    Preconditions.checkArgument(inflightInstant.isInflight());
+    
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
+    ValidationUtils.checkArgument(inflightInstant.isInflight());
     HoodieInstant requestedInstant =
         new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, 
inflightInstant.getTimestamp());
     if (metaClient.getTimelineLayoutVersion().isNullVersion()) {
@@ -242,8 +242,8 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
    * @return inflight instant
    */
   public HoodieInstant transitionCompactionRequestedToInflight(HoodieInstant 
requestedInstant) {
-    
Preconditions.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
-    Preconditions.checkArgument(requestedInstant.isRequested());
+    
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
+    ValidationUtils.checkArgument(requestedInstant.isRequested());
     HoodieInstant inflightInstant =
         new HoodieInstant(State.INFLIGHT, COMPACTION_ACTION, 
requestedInstant.getTimestamp());
     transitionState(requestedInstant, inflightInstant, Option.empty());
@@ -258,8 +258,8 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
    * @return commit instant
    */
   public HoodieInstant transitionCompactionInflightToComplete(HoodieInstant 
inflightInstant, Option<byte[]> data) {
-    
Preconditions.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
-    Preconditions.checkArgument(inflightInstant.isInflight());
+    
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
+    ValidationUtils.checkArgument(inflightInstant.isInflight());
     HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, 
COMMIT_ACTION, inflightInstant.getTimestamp());
     transitionState(inflightInstant, commitInstant, data);
     return commitInstant;
@@ -283,8 +283,8 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
    * @return commit instant
    */
   public HoodieInstant transitionCleanInflightToComplete(HoodieInstant 
inflightInstant, Option<byte[]> data) {
-    
Preconditions.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
-    Preconditions.checkArgument(inflightInstant.isInflight());
+    
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
+    ValidationUtils.checkArgument(inflightInstant.isInflight());
     HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, 
CLEAN_ACTION, inflightInstant.getTimestamp());
     // Then write to timeline
     transitionState(inflightInstant, commitInstant, data);
@@ -299,15 +299,15 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
    * @return commit instant
    */
   public HoodieInstant transitionCleanRequestedToInflight(HoodieInstant 
requestedInstant, Option<byte[]> data) {
-    
Preconditions.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
-    Preconditions.checkArgument(requestedInstant.isRequested());
+    
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
+    ValidationUtils.checkArgument(requestedInstant.isRequested());
     HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, CLEAN_ACTION, 
requestedInstant.getTimestamp());
     transitionState(requestedInstant, inflight, data);
     return inflight;
   }
 
   private void transitionState(HoodieInstant fromInstant, HoodieInstant 
toInstant, Option<byte[]> data) {
-    
Preconditions.checkArgument(fromInstant.getTimestamp().equals(toInstant.getTimestamp()));
+    
ValidationUtils.checkArgument(fromInstant.getTimestamp().equals(toInstant.getTimestamp()));
     try {
       if (metaClient.getTimelineLayoutVersion().isNullVersion()) {
         // Re-create the .inflight file by opening a new file and write the 
commit metadata in
@@ -321,7 +321,7 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
       } else {
         // Ensures old state exists in timeline
         LOG.info("Checking for file exists ?" + new 
Path(metaClient.getMetaPath(), fromInstant.getFileName()));
-        Preconditions.checkArgument(metaClient.getFs().exists(new 
Path(metaClient.getMetaPath(),
+        ValidationUtils.checkArgument(metaClient.getFs().exists(new 
Path(metaClient.getMetaPath(),
             fromInstant.getFileName())));
         // Use Write Once to create Target File
         createImmutableFileInPath(new Path(metaClient.getMetaPath(), 
toInstant.getFileName()), data);
@@ -333,7 +333,7 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
   }
 
   private void revertCompleteToInflight(HoodieInstant completed, HoodieInstant 
inflight) {
-    
Preconditions.checkArgument(completed.getTimestamp().equals(inflight.getTimestamp()));
+    
ValidationUtils.checkArgument(completed.getTimestamp().equals(inflight.getTimestamp()));
     Path inFlightCommitFilePath = new Path(metaClient.getMetaPath(), 
inflight.getFileName());
     Path commitFilePath = new Path(metaClient.getMetaPath(), 
completed.getFileName());
     try {
@@ -359,7 +359,7 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
         }
 
         boolean success = metaClient.getFs().delete(commitFilePath, false);
-        Preconditions.checkArgument(success, "State Reverting failed");
+        ValidationUtils.checkArgument(success, "State Reverting failed");
       }
     } catch (IOException e) {
       throw new HoodieIOException("Could not complete revert " + completed, e);
@@ -368,7 +368,7 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
 
   public void transitionRequestedToInflight(HoodieInstant requested, 
Option<byte[]> content) {
     HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, 
requested.getAction(), requested.getTimestamp());
-    Preconditions.checkArgument(requested.isRequested(), "Instant " + 
requested + " in wrong state");
+    ValidationUtils.checkArgument(requested.isRequested(), "Instant " + 
requested + " in wrong state");
     transitionState(requested, inflight, content);
   }
 
@@ -377,15 +377,15 @@ public class HoodieActiveTimeline extends 
HoodieDefaultTimeline {
   }
 
   public void saveToCompactionRequested(HoodieInstant instant, Option<byte[]> 
content, boolean overwrite) {
-    
Preconditions.checkArgument(instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
+    
ValidationUtils.checkArgument(instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
     // Write workload to auxiliary folder
     createFileInAuxiliaryFolder(instant, content);
     createFileInMetaPath(instant.getFileName(), content, overwrite);
   }
 
   public void saveToCleanRequested(HoodieInstant instant, Option<byte[]> 
content) {
-    
Preconditions.checkArgument(instant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
-    Preconditions.checkArgument(instant.getState().equals(State.REQUESTED));
+    
ValidationUtils.checkArgument(instant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
+    ValidationUtils.checkArgument(instant.getState().equals(State.REQUESTED));
     // Plan is stored in meta path
     createFileInMetaPath(instant.getFileName(), content, false);
   }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
index ee28cdc..d8f77e4 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
@@ -32,10 +32,10 @@ import org.apache.hudi.common.util.CompactionUtils;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.HoodieTimer;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieIOException;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.LogManager;
@@ -116,10 +116,9 @@ public abstract class AbstractTableFileSystemView 
implements SyncableFileSystemV
     long fgBuildTimeTakenMs = timer.endTimer();
     timer.startTimer();
     // Group by partition for efficient updates for both InMemory and 
DiskBased stuctures.
-    
fileGroups.stream().collect(Collectors.groupingBy(HoodieFileGroup::getPartitionPath)).entrySet().forEach(entry
 -> {
-      String partition = entry.getKey();
+    
fileGroups.stream().collect(Collectors.groupingBy(HoodieFileGroup::getPartitionPath)).forEach((partition,
 value) -> {
       if (!isPartitionAvailableInStore(partition)) {
-        storePartitionView(partition, entry.getValue());
+        storePartitionView(partition, value);
       }
     });
     long storePartitionsTs = timer.endTimer();
@@ -209,7 +208,7 @@ public abstract class AbstractTableFileSystemView 
implements SyncableFileSystemV
    */
   private void ensurePartitionLoadedCorrectly(String partition) {
 
-    Preconditions.checkArgument(!isClosed(), "View is already closed");
+    ValidationUtils.checkArgument(!isClosed(), "View is already closed");
 
     // ensure we list files only once even in the face of concurrency
     addedPartitions.computeIfAbsent(partition, (partitionPathStr) -> {
@@ -397,11 +396,9 @@ public abstract class AbstractTableFileSystemView 
implements SyncableFileSystemV
   public final Stream<HoodieBaseFile> getLatestBaseFilesInRange(List<String> 
commitsToReturn) {
     try {
       readLock.lock();
-      return fetchAllStoredFileGroups().map(fileGroup -> {
-        return Option.fromJavaOptional(
-            fileGroup.getAllBaseFiles().filter(baseFile -> 
commitsToReturn.contains(baseFile.getCommitTime())
-                && !isBaseFileDueToPendingCompaction(baseFile)).findFirst());
-      }).filter(Option::isPresent).map(Option::get);
+      return fetchAllStoredFileGroups().map(fileGroup -> 
Option.fromJavaOptional(
+          fileGroup.getAllBaseFiles().filter(baseFile -> 
commitsToReturn.contains(baseFile.getCommitTime())
+              && 
!isBaseFileDueToPendingCompaction(baseFile)).findFirst())).filter(Option::isPresent).map(Option::get);
     } finally {
       readLock.unlock();
     }
@@ -443,7 +440,7 @@ public abstract class AbstractTableFileSystemView 
implements SyncableFileSystemV
       String partitionPath = formatPartitionKey(partitionStr);
       ensurePartitionLoadedCorrectly(partitionPath);
       Option<FileSlice> fs = fetchLatestFileSlice(partitionPath, fileId);
-      return fs.map(f -> filterBaseFileAfterPendingCompaction(f));
+      return fs.map(this::filterBaseFileAfterPendingCompaction);
     } finally {
       readLock.unlock();
     }
@@ -480,7 +477,7 @@ public abstract class AbstractTableFileSystemView 
implements SyncableFileSystemV
       ensurePartitionLoadedCorrectly(partitionPath);
       Stream<FileSlice> fileSliceStream = 
fetchLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime);
       if (includeFileSlicesInPendingCompaction) {
-        return fileSliceStream.map(fs -> 
filterBaseFileAfterPendingCompaction(fs));
+        return fileSliceStream.map(this::filterBaseFileAfterPendingCompaction);
       } else {
         return fileSliceStream.filter(fs -> 
!isPendingCompactionScheduledForFileId(fs.getFileGroupId()));
       }
@@ -815,7 +812,7 @@ public abstract class AbstractTableFileSystemView 
implements SyncableFileSystemV
   /**
    * Return Only Commits and Compaction timeline for building file-groups.
    * 
-   * @return
+   * @return {@code HoodieTimeline}
    */
   public HoodieTimeline getVisibleCommitsAndCompactionTimeline() {
     return visibleCommitsAndCompactionTimeline;
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
index 4cf6942..93c5507 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
@@ -18,10 +18,9 @@
 
 package org.apache.hudi.common.table.view;
 
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.config.DefaultHoodieConfig;
 
-import com.google.common.base.Preconditions;
-
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
@@ -192,7 +191,7 @@ public class FileSystemViewStorageConfig extends 
DefaultHoodieConfig {
       // Validations
       
FileSystemViewStorageType.valueOf(props.getProperty(FILESYSTEM_VIEW_STORAGE_TYPE));
       
FileSystemViewStorageType.valueOf(props.getProperty(FILESYSTEM_SECONDARY_VIEW_STORAGE_TYPE));
-      
Preconditions.checkArgument(Integer.parseInt(props.getProperty(FILESYSTEM_VIEW_REMOTE_PORT))
 > 0);
+      
ValidationUtils.checkArgument(Integer.parseInt(props.getProperty(FILESYSTEM_VIEW_REMOTE_PORT))
 > 0);
       return new FileSystemViewStorageConfig(props);
     }
   }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTableFileSystemView.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTableFileSystemView.java
index dd71124..1f7165b 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTableFileSystemView.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTableFileSystemView.java
@@ -25,15 +25,16 @@ import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.table.HoodieTimeline;
 import org.apache.hudi.common.table.TableFileSystemView;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -134,15 +135,14 @@ public class HoodieTableFileSystemView extends 
IncrementalTimelineSyncFileSystem
   @Override
   protected void resetPendingCompactionOperations(Stream<Pair<String, 
CompactionOperation>> operations) {
     // Build fileId to Pending Compaction Instants
-    this.fgIdToPendingCompaction = 
createFileIdToPendingCompactionMap(operations.map(entry -> {
-      return Pair.of(entry.getValue().getFileGroupId(), 
Pair.of(entry.getKey(), entry.getValue()));
-    }).collect(Collectors.toMap(Pair::getKey, Pair::getValue)));
+    this.fgIdToPendingCompaction = 
createFileIdToPendingCompactionMap(operations.map(entry ->
+      Pair.of(entry.getValue().getFileGroupId(), Pair.of(entry.getKey(), 
entry.getValue()))).collect(Collectors.toMap(Pair::getKey, Pair::getValue)));
   }
 
   @Override
   protected void addPendingCompactionOperations(Stream<Pair<String, 
CompactionOperation>> operations) {
     operations.forEach(opInstantPair -> {
-      
Preconditions.checkArgument(!fgIdToPendingCompaction.containsKey(opInstantPair.getValue().getFileGroupId()),
+      
ValidationUtils.checkArgument(!fgIdToPendingCompaction.containsKey(opInstantPair.getValue().getFileGroupId()),
           "Duplicate FileGroupId found in pending compaction operations. FgId 
:"
               + opInstantPair.getValue().getFileGroupId());
       fgIdToPendingCompaction.put(opInstantPair.getValue().getFileGroupId(),
@@ -153,7 +153,7 @@ public class HoodieTableFileSystemView extends 
IncrementalTimelineSyncFileSystem
   @Override
   protected void removePendingCompactionOperations(Stream<Pair<String, 
CompactionOperation>> operations) {
     operations.forEach(opInstantPair -> {
-      
Preconditions.checkArgument(fgIdToPendingCompaction.containsKey(opInstantPair.getValue().getFileGroupId()),
+      
ValidationUtils.checkArgument(fgIdToPendingCompaction.containsKey(opInstantPair.getValue().getFileGroupId()),
           "Trying to remove a FileGroupId which is not found in pending 
compaction operations. FgId :"
               + opInstantPair.getValue().getFileGroupId());
       
fgIdToPendingCompaction.remove(opInstantPair.getValue().getFileGroupId());
@@ -166,8 +166,7 @@ public class HoodieTableFileSystemView extends 
IncrementalTimelineSyncFileSystem
    */
   @Override
   Stream<HoodieFileGroup> fetchAllStoredFileGroups(String partition) {
-    final List<HoodieFileGroup> fileGroups = new ArrayList<>();
-    fileGroups.addAll(partitionToFileGroupsMap.get(partition));
+    final List<HoodieFileGroup> fileGroups = new 
ArrayList<>(partitionToFileGroupsMap.get(partition));
     return fileGroups.stream();
   }
 
@@ -200,9 +199,7 @@ public class HoodieTableFileSystemView extends 
IncrementalTimelineSyncFileSystem
 
   @Override
   public Stream<HoodieFileGroup> fetchAllStoredFileGroups() {
-    return partitionToFileGroupsMap.values().stream().flatMap(fg -> {
-      return fg.stream();
-    });
+    return 
partitionToFileGroupsMap.values().stream().flatMap(Collection::stream);
   }
 
   @Override
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java
index 48e1c25..01010a2 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java
@@ -34,12 +34,12 @@ import org.apache.hudi.common.table.timeline.dto.InstantDTO;
 import org.apache.hudi.common.table.timeline.dto.TimelineDTO;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieRemoteException;
 
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.base.Preconditions;
 import org.apache.http.client.fluent.Request;
 import org.apache.http.client.fluent.Response;
 import org.apache.http.client.utils.URIBuilder;
@@ -134,14 +134,12 @@ public class RemoteHoodieTableFileSystemView implements 
SyncableFileSystemView,
 
   private <T> T executeRequest(String requestPath, Map<String, String> 
queryParameters, TypeReference reference,
       RequestMethod method) throws IOException {
-    Preconditions.checkArgument(!closed, "View already closed");
+    ValidationUtils.checkArgument(!closed, "View already closed");
 
     URIBuilder builder =
         new 
URIBuilder().setHost(serverHost).setPort(serverPort).setPath(requestPath).setScheme("http");
 
-    queryParameters.entrySet().stream().forEach(entry -> {
-      builder.addParameter(entry.getKey(), entry.getValue());
-    });
+    queryParameters.forEach(builder::addParameter);
 
     // Adding mandatory parameters - Last instants affecting file-slice
     timeline.lastInstant().ifPresent(instant -> 
builder.addParameter(LAST_INSTANT_TS, instant.getTimestamp()));
@@ -149,7 +147,7 @@ public class RemoteHoodieTableFileSystemView implements 
SyncableFileSystemView,
 
     String url = builder.toString();
     LOG.info("Sending request : (" + url + ")");
-    Response response = null;
+    Response response;
     int timeout = 1000 * 300; // 5 min timeout
     switch (method) {
       case GET:
@@ -197,7 +195,7 @@ public class RemoteHoodieTableFileSystemView implements 
SyncableFileSystemView,
     Map<String, String> paramsMap = new HashMap<>();
     paramsMap.put(BASEPATH_PARAM, basePath);
     paramsMap.put(PARTITION_PARAM, partitionPath);
-    Preconditions.checkArgument(paramNames.length == paramVals.length);
+    ValidationUtils.checkArgument(paramNames.length == paramVals.length);
     for (int i = 0; i < paramNames.length; i++) {
       paramsMap.put(paramNames[i], paramVals[i]);
     }
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RocksDbBasedFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RocksDbBasedFileSystemView.java
index e258702..a89ac58 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RocksDbBasedFileSystemView.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RocksDbBasedFileSystemView.java
@@ -29,9 +29,9 @@ import org.apache.hudi.common.table.HoodieTimeline;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.RocksDBDAO;
 import org.apache.hudi.common.util.RocksDBSchemaHelper;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.LogManager;
@@ -110,7 +110,7 @@ public class RocksDbBasedFileSystemView extends 
IncrementalTimelineSyncFileSyste
   protected void addPendingCompactionOperations(Stream<Pair<String, 
CompactionOperation>> operations) {
     rocksDB.writeBatch(batch ->
         operations.forEach(opInstantPair -> {
-          
Preconditions.checkArgument(!isPendingCompactionScheduledForFileId(opInstantPair.getValue().getFileGroupId()),
+          
ValidationUtils.checkArgument(!isPendingCompactionScheduledForFileId(opInstantPair.getValue().getFileGroupId()),
               "Duplicate FileGroupId found in pending compaction operations. 
FgId :"
                   + opInstantPair.getValue().getFileGroupId());
           rocksDB.putInBatch(batch, 
schemaHelper.getColFamilyForPendingCompaction(),
@@ -123,7 +123,7 @@ public class RocksDbBasedFileSystemView extends 
IncrementalTimelineSyncFileSyste
   void removePendingCompactionOperations(Stream<Pair<String, 
CompactionOperation>> operations) {
     rocksDB.writeBatch(batch ->
         operations.forEach(opInstantPair -> {
-          Preconditions.checkArgument(
+          ValidationUtils.checkArgument(
               
getPendingCompactionOperationWithInstant(opInstantPair.getValue().getFileGroupId())
 != null,
               "Trying to remove a FileGroupId which is not found in pending 
compaction operations. FgId :"
                   + opInstantPair.getValue().getFileGroupId());
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/AvroUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/util/AvroUtils.java
index 1962894..683a4dd 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/AvroUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/AvroUtils.java
@@ -28,7 +28,6 @@ import org.apache.hudi.avro.model.HoodieSavepointMetadata;
 import org.apache.hudi.avro.model.HoodieSavepointPartitionMetadata;
 import org.apache.hudi.common.HoodieRollbackStat;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import org.apache.avro.file.DataFileReader;
 import org.apache.avro.file.DataFileWriter;
@@ -145,7 +144,7 @@ public class AvroUtils {
       throws IOException {
     DatumReader<T> reader = new SpecificDatumReader<>(clazz);
     FileReader<T> fileReader = DataFileReader.openReader(new 
SeekableByteArrayInput(bytes), reader);
-    Preconditions.checkArgument(fileReader.hasNext(), "Could not deserialize 
metadata of type " + clazz);
+    ValidationUtils.checkArgument(fileReader.hasNext(), "Could not deserialize 
metadata of type " + clazz);
     return fileReader.next();
   }
 }
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
index 9ffc38c..add03d5 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
@@ -28,7 +28,6 @@ import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.exception.InvalidHoodiePathException;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -115,11 +114,11 @@ public class FSUtils {
   }
 
   public static String translateMarkerToDataPath(String basePath, String 
markerPath, String instantTs) {
-    
Preconditions.checkArgument(markerPath.endsWith(HoodieTableMetaClient.MARKER_EXTN));
+    
ValidationUtils.checkArgument(markerPath.endsWith(HoodieTableMetaClient.MARKER_EXTN));
     String markerRootPath = Path.getPathWithoutSchemeAndAuthority(
         new Path(String.format("%s/%s/%s", basePath, 
HoodieTableMetaClient.TEMPFOLDER_NAME, instantTs))).toString();
     int begin = markerPath.indexOf(markerRootPath);
-    Preconditions.checkArgument(begin >= 0,
+    ValidationUtils.checkArgument(begin >= 0,
         "Not in marker dir. Marker Path=" + markerPath + ", Expected Marker 
Root=" + markerRootPath);
     String rPath = markerPath.substring(begin + markerRootPath.length() + 1);
     return String.format("%s/%s%s", basePath, 
rPath.replace(HoodieTableMetaClient.MARKER_EXTN, ""),
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/FailSafeConsistencyGuard.java b/hudi-common/src/main/java/org/apache/hudi/common/util/FailSafeConsistencyGuard.java
index 70ceed0..e7ce62d 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/FailSafeConsistencyGuard.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/FailSafeConsistencyGuard.java
@@ -18,7 +18,6 @@
 
 package org.apache.hudi.common.util;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -47,7 +46,7 @@ public class FailSafeConsistencyGuard implements 
ConsistencyGuard {
   public FailSafeConsistencyGuard(FileSystem fs, ConsistencyGuardConfig 
consistencyGuardConfig) {
     this.fs = fs;
     this.consistencyGuardConfig = consistencyGuardConfig;
-    
Preconditions.checkArgument(consistencyGuardConfig.isConsistencyCheckEnabled());
+    
ValidationUtils.checkArgument(consistencyGuardConfig.isConsistencyCheckEnabled());
   }
 
   @Override
@@ -115,8 +114,8 @@ public class FailSafeConsistencyGuard implements 
ConsistencyGuard {
    * 
    * @param filePath File Path
    * @param visibility Visibility
-   * @return
-   * @throws IOException
+   * @return true (if file visible in Path), false (otherwise)
+   * @throws IOException -
    */
   private boolean checkFileVisibility(Path filePath, FileVisibility 
visibility) throws IOException {
     try {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/NumericUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/util/NumericUtils.java
index bb22838..775c1f8 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/NumericUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/NumericUtils.java
@@ -18,6 +18,13 @@
 
 package org.apache.hudi.common.util;
 
+import org.apache.hudi.exception.HoodieException;
+
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.Objects;
+
 /**
  * A utility class for numeric.
  */
@@ -31,4 +38,27 @@ public class NumericUtils {
     String pre = "KMGTPE".charAt(exp - 1) + "";
     return String.format("%.1f %sB", bytes / Math.pow(1024, exp), pre);
   }
+
+  public static long getMessageDigestHash(final String algorithmName, final 
String string) {
+    MessageDigest md;
+    try {
+      md = MessageDigest.getInstance(algorithmName);
+    } catch (NoSuchAlgorithmException e) {
+      throw new HoodieException(e);
+    }
+    return 
asLong(Objects.requireNonNull(md).digest(string.getBytes(StandardCharsets.UTF_8)));
+  }
+
+  public static long asLong(byte[] bytes) {
+    ValidationUtils.checkState(bytes.length >= 8, "HashCode#asLong() requires 
>= 8 bytes.");
+    return padToLong(bytes);
+  }
+
+  public static long padToLong(byte[] bytes) {
+    long retVal = (bytes[0] & 0xFF);
+    for (int i = 1; i < Math.min(bytes.length, 8); i++) {
+      retVal |= (bytes[i] & 0xFFL) << (i * 8);
+    }
+    return retVal;
+  }
 }
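(For context, the methods added above are meant as a drop-in equivalent for the Guava hashing call this change removes. The following is a minimal, illustrative sketch of the intended before/after usage; the class name and the sample string are hypothetical, not taken from any file in this patch.)

    import org.apache.hudi.common.util.NumericUtils;

    public class HashExample {
      public static void main(String[] args) {
        // Previously this was done via Guava's Hashing (MD5 digest of the string,
        // first 8 bytes packed into a long). The new helper uses
        // java.security.MessageDigest instead, so no Guava dependency is needed.
        long hash = NumericUtils.getMessageDigestHash("MD5", "2020/03/13");
        System.out.println(hash);
      }
    }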
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java b/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
index 8884e9c..9296fa7 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
@@ -22,7 +22,6 @@ import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 
-import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.rocksdb.AbstractImmutableNativeReference;
@@ -105,7 +104,7 @@ public class RocksDBDAO {
       FileIOUtils.mkdir(new File(rocksDBBasePath));
       rocksDB = RocksDB.open(dbOptions, rocksDBBasePath, 
managedColumnFamilies, managedHandles);
 
-      Preconditions.checkArgument(managedHandles.size() == 
managedColumnFamilies.size(),
+      ValidationUtils.checkArgument(managedHandles.size() == 
managedColumnFamilies.size(),
           "Unexpected number of handles are returned");
       for (int index = 0; index < managedHandles.size(); index++) {
         ColumnFamilyHandle handle = managedHandles.get(index);
@@ -113,7 +112,7 @@ public class RocksDBDAO {
         String familyNameFromHandle = new String(handle.getName());
         String familyNameFromDescriptor = new String(descriptor.getName());
 
-        
Preconditions.checkArgument(familyNameFromDescriptor.equals(familyNameFromHandle),
+        
ValidationUtils.checkArgument(familyNameFromDescriptor.equals(familyNameFromHandle),
             "Family Handles not in order with descriptors");
         managedHandlesMap.put(familyNameFromHandle, handle);
         managedDescriptorMap.put(familyNameFromDescriptor, descriptor);
@@ -297,7 +296,7 @@ public class RocksDBDAO {
    * @param <T> Type of object stored.
    */
   public <T extends Serializable> T get(String columnFamilyName, String key) {
-    Preconditions.checkArgument(!closed);
+    ValidationUtils.checkArgument(!closed);
     try {
       byte[] val = getRocksDB().get(managedHandlesMap.get(columnFamilyName), 
key.getBytes());
       return val == null ? null : SerializationUtils.deserialize(val);
@@ -314,7 +313,7 @@ public class RocksDBDAO {
    * @param <T> Type of object stored.
    */
   public <K extends Serializable, T extends Serializable> T get(String 
columnFamilyName, K key) {
-    Preconditions.checkArgument(!closed);
+    ValidationUtils.checkArgument(!closed);
     try {
       byte[] val = getRocksDB().get(managedHandlesMap.get(columnFamilyName), 
SerializationUtils.serialize(key));
       return val == null ? null : SerializationUtils.deserialize(val);
@@ -331,7 +330,7 @@ public class RocksDBDAO {
    * @param <T> Type of value stored
    */
   public <T extends Serializable> Stream<Pair<String, T>> prefixSearch(String 
columnFamilyName, String prefix) {
-    Preconditions.checkArgument(!closed);
+    ValidationUtils.checkArgument(!closed);
     final HoodieTimer timer = new HoodieTimer();
     timer.startTimer();
     long timeTakenMicro = 0;
@@ -360,7 +359,7 @@ public class RocksDBDAO {
    * @param <T> Type of value stored
    */
   public <T extends Serializable> void prefixDelete(String columnFamilyName, 
String prefix) {
-    Preconditions.checkArgument(!closed);
+    ValidationUtils.checkArgument(!closed);
     LOG.info("Prefix DELETE (query=" + prefix + ") on " + columnFamilyName);
     final RocksIterator it = 
getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName));
     it.seek(prefix.getBytes());
@@ -396,7 +395,7 @@ public class RocksDBDAO {
    * @param columnFamilyName Column family name
    */
   public void addColumnFamily(String columnFamilyName) {
-    Preconditions.checkArgument(!closed);
+    ValidationUtils.checkArgument(!closed);
 
     managedDescriptorMap.computeIfAbsent(columnFamilyName, colFamilyName -> {
       try {
@@ -416,7 +415,7 @@ public class RocksDBDAO {
    * @param columnFamilyName Column Family Name
    */
   public void dropColumnFamily(String columnFamilyName) {
-    Preconditions.checkArgument(!closed);
+    ValidationUtils.checkArgument(!closed);
 
     managedDescriptorMap.computeIfPresent(columnFamilyName, (colFamilyName, 
descriptor) -> {
       ColumnFamilyHandle handle = managedHandlesMap.get(colFamilyName);
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/ValidationUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/util/ValidationUtils.java
new file mode 100644
index 0000000..0f7c43e
--- /dev/null
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/ValidationUtils.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.util;
+
+/*
+ * Simple utility to test validation conditions (to replace Guava's 
PreConditions)
+ */
+public class ValidationUtils {
+
+  /**
+   * Ensures the truth of an expression.
+   */
+  public static void checkArgument(final boolean expression) {
+    if (!expression) {
+      throw new IllegalArgumentException();
+    }
+  }
+
+  /**
+   * Ensures the truth of an expression, throwing the custom errorMessage 
otherwise.
+   */
+  public static void checkArgument(final boolean expression, final String 
errorMessage) {
+    if (!expression) {
+      throw new IllegalArgumentException(errorMessage);
+    }
+  }
+
+  /**
+   * Ensures the truth of an expression involving the state of the calling 
instance, but not
+   * involving any parameters to the calling method.
+   *
+   * @param expression a boolean expression
+   * @throws IllegalStateException if {@code expression} is false
+   */
+  public static void checkState(final boolean expression) {
+    if (!expression) {
+      throw new IllegalStateException();
+    }
+  }
+
+  /**
+   * Ensures the truth of an expression involving the state of the calling 
instance, but not
+   * involving any parameters to the calling method.
+   *
+   * @param expression a boolean expression
+   * @param errorMessage - error message
+   * @throws IllegalStateException if {@code expression} is false
+   */
+  public static void checkState(final boolean expression, String errorMessage) 
{
+    if (!expression) {
+      throw new IllegalStateException(errorMessage);
+    }
+  }
+}
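(The new ValidationUtils class mirrors the two Guava Preconditions methods used across the codebase, so call sites migrate mechanically. Below is a hedged before/after sketch; the class, method, and variable names are illustrative only and do not appear in this patch.)

    import org.apache.hudi.common.util.ValidationUtils;

    public class ValidationExample {
      static void openPartition(String partitionPath, boolean viewClosed) {
        // Was: com.google.common.base.Preconditions.checkArgument(...)
        // Throws IllegalArgumentException with the given message when false.
        ValidationUtils.checkArgument(partitionPath != null && !partitionPath.isEmpty(),
            "partitionPath must be non-empty");
        // Was: Preconditions.checkState(...)
        // Throws IllegalStateException when false.
        ValidationUtils.checkState(!viewClosed, "View already closed");
      }

      public static void main(String[] args) {
        openPartition("2020/03/13", false);
      }
    }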
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
index 356f000..2c2f919 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
@@ -21,9 +21,9 @@ package org.apache.hudi.common.util.queue;
 import org.apache.hudi.common.util.DefaultSizeEstimator;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.SizeEstimator;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.exception.HoodieException;
 
-import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 
@@ -262,7 +262,7 @@ public class BoundedInMemoryQueue<I, O> implements 
Iterable<O> {
 
     @Override
     public O next() {
-      Preconditions.checkState(hasNext() && this.nextRecord != null);
+      ValidationUtils.checkState(hasNext() && this.nextRecord != null);
       final O ret = this.nextRecord;
       this.nextRecord = null;
       return ret;
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/versioning/MetadataMigrator.java b/hudi-common/src/main/java/org/apache/hudi/common/versioning/MetadataMigrator.java
index 3b2bec8..f92dcea 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/versioning/MetadataMigrator.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/versioning/MetadataMigrator.java
@@ -19,10 +19,9 @@
 package org.apache.hudi.common.versioning;
 
 import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 
-import com.google.common.base.Preconditions;
-
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
@@ -75,8 +74,8 @@ public class MetadataMigrator<T> {
    * @return Metadata conforming to the target version
    */
   public T migrateToVersion(T metadata, int metadataVersion, int 
targetVersion) {
-    Preconditions.checkArgument(targetVersion >= oldestVersion);
-    Preconditions.checkArgument(targetVersion <= latestVersion);
+    ValidationUtils.checkArgument(targetVersion >= oldestVersion);
+    ValidationUtils.checkArgument(targetVersion <= latestVersion);
     if (metadataVersion == targetVersion) {
       return metadata;
     } else if (metadataVersion > targetVersion) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/versioning/clean/CleanV1MigrationHandler.java b/hudi-common/src/main/java/org/apache/hudi/common/versioning/clean/CleanV1MigrationHandler.java
index 998d4f0..a7434b5 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/versioning/clean/CleanV1MigrationHandler.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/versioning/clean/CleanV1MigrationHandler.java
@@ -22,10 +22,10 @@ import org.apache.hudi.avro.model.HoodieCleanMetadata;
 import org.apache.hudi.avro.model.HoodieCleanPartitionMetadata;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.util.FSUtils;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.common.versioning.AbstractMigratorBase;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.Path;
 
 import java.util.Map;
@@ -52,7 +52,7 @@ public class CleanV1MigrationHandler extends 
AbstractMigratorBase<HoodieCleanMet
 
   @Override
   public HoodieCleanMetadata downgradeFrom(HoodieCleanMetadata input) {
-    Preconditions.checkArgument(input.getVersion() == 2,
+    ValidationUtils.checkArgument(input.getVersion() == 2,
         "Input version is " + input.getVersion() + ". Must be 2");
     final Path basePath = new Path(metaClient.getBasePath());
 
@@ -81,16 +81,13 @@ public class CleanV1MigrationHandler extends 
AbstractMigratorBase<HoodieCleanMet
           return Pair.of(partitionPath, cleanPartitionMetadata);
         }).collect(Collectors.toMap(Pair::getKey, Pair::getValue));
 
-    HoodieCleanMetadata metadata = HoodieCleanMetadata.newBuilder()
+    return HoodieCleanMetadata.newBuilder()
         .setEarliestCommitToRetain(input.getEarliestCommitToRetain())
         .setStartCleanTime(input.getStartCleanTime())
         .setTimeTakenInMillis(input.getTimeTakenInMillis())
         .setTotalFilesDeleted(input.getTotalFilesDeleted())
         .setPartitionMetadata(partitionMetadataMap)
         .setVersion(getManagedVersion()).build();
-
-    return metadata;
-
   }
 
   private static String convertToV1Path(Path basePath, String partitionPath, 
String fileName) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/versioning/clean/CleanV2MigrationHandler.java b/hudi-common/src/main/java/org/apache/hudi/common/versioning/clean/CleanV2MigrationHandler.java
index 84896a0..14b2886 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/versioning/clean/CleanV2MigrationHandler.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/versioning/clean/CleanV2MigrationHandler.java
@@ -21,10 +21,10 @@ package org.apache.hudi.common.versioning.clean;
 import org.apache.hudi.avro.model.HoodieCleanMetadata;
 import org.apache.hudi.avro.model.HoodieCleanPartitionMetadata;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.common.versioning.AbstractMigratorBase;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.Path;
 
 import java.util.List;
@@ -46,7 +46,7 @@ public class CleanV2MigrationHandler extends 
AbstractMigratorBase<HoodieCleanMet
 
   @Override
   public HoodieCleanMetadata upgradeFrom(HoodieCleanMetadata input) {
-    Preconditions.checkArgument(input.getVersion() == 1,
+    ValidationUtils.checkArgument(input.getVersion() == 1,
         "Input version is " + input.getVersion() + ". Must be 1");
     HoodieCleanMetadata metadata = new HoodieCleanMetadata();
     metadata.setEarliestCommitToRetain(input.getEarliestCommitToRetain());
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/versioning/compaction/CompactionV1MigrationHandler.java b/hudi-common/src/main/java/org/apache/hudi/common/versioning/compaction/CompactionV1MigrationHandler.java
index 3abd952..9d23d61 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/versioning/compaction/CompactionV1MigrationHandler.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/versioning/compaction/CompactionV1MigrationHandler.java
@@ -22,9 +22,9 @@ import org.apache.hudi.avro.model.HoodieCompactionOperation;
 import org.apache.hudi.avro.model.HoodieCompactionPlan;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.util.FSUtils;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.versioning.AbstractMigratorBase;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.Path;
 
 import java.util.ArrayList;
@@ -54,19 +54,18 @@ public class CompactionV1MigrationHandler extends 
AbstractMigratorBase<HoodieCom
 
   @Override
   public HoodieCompactionPlan downgradeFrom(HoodieCompactionPlan input) {
-    Preconditions.checkArgument(input.getVersion() == 2, "Input version is " + 
input.getVersion() + ". Must be 2");
+    ValidationUtils.checkArgument(input.getVersion() == 2, "Input version is " 
+ input.getVersion() + ". Must be 2");
     HoodieCompactionPlan compactionPlan = new HoodieCompactionPlan();
     final Path basePath = new Path(metaClient.getBasePath());
     List<HoodieCompactionOperation> v1CompactionOperationList = new 
ArrayList<>();
     if (null != input.getOperations()) {
-      v1CompactionOperationList = input.getOperations().stream().map(inp -> {
-        return 
HoodieCompactionOperation.newBuilder().setBaseInstantTime(inp.getBaseInstantTime())
+      v1CompactionOperationList = input.getOperations().stream().map(inp ->
+        
HoodieCompactionOperation.newBuilder().setBaseInstantTime(inp.getBaseInstantTime())
             
.setFileId(inp.getFileId()).setPartitionPath(inp.getPartitionPath()).setMetrics(inp.getMetrics())
             .setDataFilePath(convertToV1Path(basePath, inp.getPartitionPath(), 
inp.getDataFilePath()))
             .setDeltaFilePaths(inp.getDeltaFilePaths().stream()
                 .map(s -> convertToV1Path(basePath, inp.getPartitionPath(), 
s)).collect(Collectors.toList()))
-            .build();
-      }).collect(Collectors.toList());
+        .build()).collect(Collectors.toList());
     }
     compactionPlan.setOperations(v1CompactionOperationList);
     compactionPlan.setExtraMetadata(input.getExtraMetadata());
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/versioning/compaction/CompactionV2MigrationHandler.java b/hudi-common/src/main/java/org/apache/hudi/common/versioning/compaction/CompactionV2MigrationHandler.java
index cc73a2f..26180e4 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/versioning/compaction/CompactionV2MigrationHandler.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/versioning/compaction/CompactionV2MigrationHandler.java
@@ -21,9 +21,9 @@ package org.apache.hudi.common.versioning.compaction;
 import org.apache.hudi.avro.model.HoodieCompactionOperation;
 import org.apache.hudi.avro.model.HoodieCompactionPlan;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.versioning.AbstractMigratorBase;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.Path;
 
 import java.util.ArrayList;
@@ -48,17 +48,16 @@ public class CompactionV2MigrationHandler extends 
AbstractMigratorBase<HoodieCom
 
   @Override
   public HoodieCompactionPlan upgradeFrom(HoodieCompactionPlan input) {
-    Preconditions.checkArgument(input.getVersion() == 1, "Input version is " + 
input.getVersion() + ". Must be 1");
+    ValidationUtils.checkArgument(input.getVersion() == 1, "Input version is " 
+ input.getVersion() + ". Must be 1");
     HoodieCompactionPlan compactionPlan = new HoodieCompactionPlan();
     List<HoodieCompactionOperation> v2CompactionOperationList = new 
ArrayList<>();
     if (null != input.getOperations()) {
-      v2CompactionOperationList = input.getOperations().stream().map(inp -> {
-        return 
HoodieCompactionOperation.newBuilder().setBaseInstantTime(inp.getBaseInstantTime())
+      v2CompactionOperationList = input.getOperations().stream().map(inp ->
+        
HoodieCompactionOperation.newBuilder().setBaseInstantTime(inp.getBaseInstantTime())
             
.setFileId(inp.getFileId()).setPartitionPath(inp.getPartitionPath()).setMetrics(inp.getMetrics())
             .setDataFilePath(new 
Path(inp.getDataFilePath()).getName()).setDeltaFilePaths(
                 inp.getDeltaFilePaths().stream().map(s -> new 
Path(s).getName()).collect(Collectors.toList()))
-            .build();
-      }).collect(Collectors.toList());
+        .build()).collect(Collectors.toList());
     }
     compactionPlan.setOperations(v2CompactionOperationList);
     compactionPlan.setExtraMetadata(input.getExtraMetadata());
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/minicluster/HdfsTestService.java b/hudi-common/src/test/java/org/apache/hudi/common/minicluster/HdfsTestService.java
index a35fe50..ed4750a 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/minicluster/HdfsTestService.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/minicluster/HdfsTestService.java
@@ -21,7 +21,6 @@ package org.apache.hudi.common.minicluster;
 import org.apache.hudi.common.model.HoodieTestUtils;
 import org.apache.hudi.common.util.FileIOUtils;
 
-import com.google.common.base.Preconditions;
 import com.google.common.io.Files;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -33,6 +32,7 @@ import org.apache.log4j.Logger;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Objects;
 import java.net.ServerSocket;
 
 /**
@@ -70,7 +70,7 @@ public class HdfsTestService {
   }
 
   public MiniDFSCluster start(boolean format) throws IOException {
-    Preconditions.checkState(workDir != null, "The work dir must be set before 
starting cluster.");
+    Objects.requireNonNull(workDir, "The work dir must be set before starting 
cluster.");
     hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
 
     // If clean, then remove the work dir so we can start fresh.
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/minicluster/ZookeeperTestService.java b/hudi-common/src/test/java/org/apache/hudi/common/minicluster/ZookeeperTestService.java
index 670be44..514e298 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/minicluster/ZookeeperTestService.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/minicluster/ZookeeperTestService.java
@@ -18,7 +18,6 @@
 
 package org.apache.hudi.common.minicluster;
 
-import com.google.common.base.Preconditions;
 import com.google.common.io.Files;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
@@ -36,6 +35,7 @@ import java.io.OutputStream;
 import java.io.Reader;
 import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.util.Objects;
 
 /**
  * A Zookeeper minicluster service implementation.
@@ -85,7 +85,7 @@ public class ZookeeperTestService {
   }
 
   public ZooKeeperServer start() throws IOException, InterruptedException {
-    Preconditions.checkState(workDir != null, "The localBaseFsLocation must be 
set before starting cluster.");
+    Objects.requireNonNull(workDir, "The localBaseFsLocation must be set 
before starting cluster.");
 
     setupTestEnv();
     stop();
@@ -171,13 +171,10 @@ public class ZookeeperTestService {
     long start = System.currentTimeMillis();
     while (true) {
       try {
-        Socket sock = new Socket("localhost", port);
-        try {
+        try (Socket sock = new Socket("localhost", port)) {
           OutputStream outstream = sock.getOutputStream();
           outstream.write("stat".getBytes());
           outstream.flush();
-        } finally {
-          sock.close();
         }
       } catch (IOException e) {
         return true;
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java
index b896d1f..3d47fed 100755
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java
@@ -57,8 +57,8 @@ import org.junit.runners.Parameterized;
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.net.URISyntaxException;
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
index e38625a..c00c6a1 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
@@ -44,10 +44,10 @@ import org.apache.hudi.common.util.CleanerUtils;
 import org.apache.hudi.common.util.CompactionUtils;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieException;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterators;
 import org.apache.hadoop.fs.Path;
@@ -201,7 +201,7 @@ public class TestIncrementalFSViewSync extends 
HoodieCommonTestHarness {
     view1.sync();
     Map<String, List<String>> instantsToFiles;
 
-    /**
+    /*
      * Case where incremental syncing is catching up on more than one 
ingestion at a time
      */
     // Run 1 ingestion on MOR table (1 delta commits). View1 is now sync up to 
this point
@@ -222,7 +222,7 @@ public class TestIncrementalFSViewSync extends 
HoodieCommonTestHarness {
     view3.sync();
     areViewsConsistent(view1, view2, partitions.size() * 
fileIdsPerPartition.size());
 
-    /**
+    /*
      * Case where a compaction is scheduled and then unscheduled
      */
     scheduleCompaction(view2, "15");
@@ -233,7 +233,7 @@ public class TestIncrementalFSViewSync extends 
HoodieCommonTestHarness {
         getFileSystemView(new 
HoodieTableMetaClient(metaClient.getHadoopConf(), metaClient.getBasePath()));
     view4.sync();
 
-    /**
+    /*
      * Case where a compaction is scheduled, 2 ingestion happens and then a 
compaction happens
      */
     scheduleCompaction(view2, "16");
@@ -247,7 +247,7 @@ public class TestIncrementalFSViewSync extends 
HoodieCommonTestHarness {
         getFileSystemView(new 
HoodieTableMetaClient(metaClient.getHadoopConf(), metaClient.getBasePath()));
     view5.sync();
 
-    /**
+    /*
      * Case where a clean happened and then rounds of ingestion and compaction 
happened
      */
     testCleans(view2, Collections.singletonList("19"),
@@ -266,7 +266,7 @@ public class TestIncrementalFSViewSync extends 
HoodieCommonTestHarness {
         getFileSystemView(new 
HoodieTableMetaClient(metaClient.getHadoopConf(), metaClient.getBasePath()));
     view6.sync();
 
-    /**
+    /*
      * Case where multiple restores and ingestions happened
      */
     testRestore(view2, Collections.singletonList("25"), true, new HashMap<>(), 
Collections.singletonList("24"), "29", true);
@@ -528,7 +528,7 @@ public class TestIncrementalFSViewSync extends 
HoodieCommonTestHarness {
       String newBaseInstant) throws IOException {
     HoodieInstant instant = new HoodieInstant(State.REQUESTED, 
COMPACTION_ACTION, compactionInstantTime);
     boolean deleted = metaClient.getFs().delete(new 
Path(metaClient.getMetaPath(), instant.getFileName()), false);
-    Preconditions.checkArgument(deleted, "Unable to delete compaction 
instant.");
+    ValidationUtils.checkArgument(deleted, "Unable to delete compaction 
instant.");
 
     view.sync();
     Assert.assertEquals(newLastInstant, 
view.getLastInstant().get().getTimestamp());
@@ -719,7 +719,7 @@ public class TestIncrementalFSViewSync extends 
HoodieCommonTestHarness {
     metaClient.getActiveTimeline().createNewInstant(inflightInstant);
     metaClient.getActiveTimeline().saveAsComplete(inflightInstant,
         Option.of(metadata.toJsonString().getBytes(StandardCharsets.UTF_8)));
-    /**
+    /*
     // Delete pending compaction if present
     metaClient.getFs().delete(new Path(metaClient.getMetaPath(),
         new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, 
instant).getFileName()));
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestNumericUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestNumericUtils.java
index 08cb952..cd76383 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestNumericUtils.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestNumericUtils.java
@@ -20,7 +20,10 @@ package org.apache.hudi.common.util;
 
 import org.junit.Test;
 
+import java.util.Arrays;
+
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 
 /**
  * Tests numeric utils.
@@ -37,6 +40,29 @@ public class TestNumericUtils {
     assertEquals("27.0 GB", NumericUtils.humanReadableByteCount(28991029248L));
     assertEquals("1.7 TB", 
NumericUtils.humanReadableByteCount(1855425871872L));
     assertEquals("8.0 EB", 
NumericUtils.humanReadableByteCount(9223372036854775807L));
+  }
+
+  @Test
+  public void testGetMessageDigestHash() {
+    assertEquals(6808551913422584641L, 
NumericUtils.getMessageDigestHash("MD5", "This is a string"));
+    assertEquals(2549749777095932358L, 
NumericUtils.getMessageDigestHash("MD5", "This is a test string"));
+    assertNotEquals(1L, NumericUtils.getMessageDigestHash("MD5", "This"));
+    assertNotEquals(6808551913422584641L, 
NumericUtils.getMessageDigestHash("SHA-256", "This is a string"));
+  }
+
+  private static byte[] byteArrayWithNum(int size, int num) {
+    byte[] bytez = new byte[size];
+    Arrays.fill(bytez, (byte) num);
+    return bytez;
+  }
 
+  @Test
+  public void testPadToLong() {
+    assertEquals(0x0000000099999999L, 
NumericUtils.padToLong(byteArrayWithNum(4, 0x99)));
+    assertEquals(0x0000999999999999L, 
NumericUtils.padToLong(byteArrayWithNum(6, 0x99)));
+    assertEquals(0x9999999999999999L, 
NumericUtils.padToLong(byteArrayWithNum(8, 0x99)));
+    assertEquals(0x1111111111111111L, 
NumericUtils.padToLong(byteArrayWithNum(8, 0x11)));
+    assertEquals(0x0000000011111111L, 
NumericUtils.padToLong(byteArrayWithNum(4, 0x11)));
+    assertEquals(0x0000181818181818L, 
NumericUtils.padToLong(byteArrayWithNum(6, 0x18)));
   }
 }
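(As the assertions above suggest, padToLong packs bytes little-endian into the low end of the long, leaving the high-order bytes zero when fewer than 8 bytes are supplied. A small worked sketch of that packing follows; the class name and input bytes are arbitrary illustrations, not part of the patch.)

    import org.apache.hudi.common.util.NumericUtils;

    public class PadToLongExample {
      public static void main(String[] args) {
        // byte[0] lands in the lowest-order byte, byte[1] in the next one, and so on.
        long packed = NumericUtils.padToLong(new byte[] {0x01, 0x02, 0x03});
        System.out.println(Long.toHexString(packed)); // prints "30201"
      }
    }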
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java
index bc717a9..4fe041a 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java
@@ -27,12 +27,12 @@ import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.table.view.HoodieTableFileSystemView;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.hadoop.HoodieParquetInputFormat;
 import org.apache.hudi.hadoop.UseFileSplitsFromInputFormat;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -244,7 +244,7 @@ public class HoodieParquetRealtimeInputFormat extends 
HoodieParquetInputFormat i
     LOG.info("Creating record reader with readCols :" + 
jobConf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR)
         + ", Ids :" + 
jobConf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
     // sanity check
-    Preconditions.checkArgument(split instanceof HoodieRealtimeFileSplit,
+    ValidationUtils.checkArgument(split instanceof HoodieRealtimeFileSplit,
         "HoodieRealtimeRecordReader can only work on HoodieRealtimeFileSplit 
and not with " + split);
 
     return new HoodieRealtimeRecordReader((HoodieRealtimeFileSplit) split, 
jobConf,
diff --git a/hudi-hive/pom.xml b/hudi-hive/pom.xml
index 7ff1b3a..01e6ff1 100644
--- a/hudi-hive/pom.xml
+++ b/hudi-hive/pom.xml
@@ -56,11 +56,6 @@
     </dependency>
 
     <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-
-    <dependency>
       <groupId>joda-time</groupId>
       <artifactId>joda-time</artifactId>
     </dependency>
diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java b/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java
index 8d8dd75..acbff7f 100644
--- a/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java
+++ b/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java
@@ -28,12 +28,12 @@ import org.apache.hudi.common.table.HoodieTimeline;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.exception.InvalidTableException;
 import org.apache.hudi.hive.util.SchemaUtil;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -179,7 +179,7 @@ public class HoodieHiveClient {
    */
   private String getPartitionClause(String partition) {
     List<String> partitionValues = 
partitionValueExtractor.extractPartitionValuesInPath(partition);
-    Preconditions.checkArgument(syncConfig.partitionFields.size() == 
partitionValues.size(),
+    ValidationUtils.checkArgument(syncConfig.partitionFields.size() == 
partitionValues.size(),
         "Partition key parts " + syncConfig.partitionFields + " does not match 
with partition values " + partitionValues
             + ". Check partition strategy. ");
     List<String> partBuilder = new ArrayList<>();
diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/MultiPartKeysValueExtractor.java b/hudi-hive/src/main/java/org/apache/hudi/hive/MultiPartKeysValueExtractor.java
index aa6ec30..ec37a82 100644
--- a/hudi-hive/src/main/java/org/apache/hudi/hive/MultiPartKeysValueExtractor.java
+++ b/hudi-hive/src/main/java/org/apache/hudi/hive/MultiPartKeysValueExtractor.java
@@ -18,7 +18,7 @@
 
 package org.apache.hudi.hive;
 
-import com.google.common.base.Preconditions;
+import org.apache.hudi.common.util.ValidationUtils;
 
 import java.util.Arrays;
 import java.util.List;
@@ -35,7 +35,7 @@ public class MultiPartKeysValueExtractor implements 
PartitionValueExtractor {
     return Arrays.stream(splits).map(s -> {
       if (s.contains("=")) {
         String[] moreSplit = s.split("=");
-        Preconditions.checkArgument(moreSplit.length == 2, "Partition Field (" 
+ s + ") not in expected format");
+        ValidationUtils.checkArgument(moreSplit.length == 2, "Partition Field 
(" + s + ") not in expected format");
         return moreSplit[1];
       }
       return s;
diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/SchemaDifference.java b/hudi-hive/src/main/java/org/apache/hudi/hive/SchemaDifference.java
index 57bab64..bef40d5 100644
--- a/hudi-hive/src/main/java/org/apache/hudi/hive/SchemaDifference.java
+++ b/hudi-hive/src/main/java/org/apache/hudi/hive/SchemaDifference.java
@@ -44,7 +44,7 @@ public class SchemaDifference {
     this.tableSchema = tableSchema;
     this.deleteColumns = Collections.unmodifiableList(deleteColumns);
     this.updateColumnTypes = Collections.unmodifiableMap(updateColumnTypes);
-    this.addColumnTypes =  Collections.unmodifiableMap(addColumnTypes);
+    this.addColumnTypes = Collections.unmodifiableMap(addColumnTypes);
   }
 
   public List<String> getDeleteColumns() {
diff --git a/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java b/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java
index fc7675f..0cef82b 100644
--- a/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java
+++ b/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java
@@ -21,8 +21,6 @@ package org.apache.hudi.hive.util;
 import org.apache.hudi.common.model.HoodieTestUtils;
 import org.apache.hudi.common.util.FileIOUtils;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
 import com.google.common.io.Files;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -53,7 +51,9 @@ import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketException;
+import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
@@ -73,7 +73,7 @@ public class HiveTestService {
   private int serverPort = 9999;
   private boolean clean = true;
 
-  private Map<String, String> sysProps = Maps.newHashMap();
+  private Map<String, String> sysProps = new HashMap<>();
   private ExecutorService executorService;
   private TServer tServer;
   private HiveServer2 hiveServer;
@@ -87,7 +87,7 @@ public class HiveTestService {
   }
 
   public HiveServer2 start() throws IOException {
-    Preconditions.checkState(workDir != null, "The work dir must be set before 
starting cluster.");
+    Objects.requireNonNull(workDir, "The work dir must be set before starting 
cluster.");
 
     if (hadoopConf == null) {
       hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
diff --git a/hudi-integ-test/pom.xml b/hudi-integ-test/pom.xml
index fe6f66d..e0ee9b4 100644
--- a/hudi-integ-test/pom.xml
+++ b/hudi-integ-test/pom.xml
@@ -115,13 +115,6 @@
     </dependency>
 
     <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <version>20.0</version>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <version>${junit.version}</version>
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
index a807e05..8c49394 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
@@ -28,13 +28,13 @@ import org.apache.hudi.common.table.timeline.dto.InstantDTO;
 import org.apache.hudi.common.table.timeline.dto.TimelineDTO;
 import org.apache.hudi.common.table.view.FileSystemViewManager;
 import org.apache.hudi.common.table.view.RemoteHoodieTableFileSystemView;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.timeline.service.handlers.BaseFileHandler;
 import org.apache.hudi.timeline.service.handlers.FileSliceHandler;
 import org.apache.hudi.timeline.service.handlers.TimelineHandler;
 
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.base.Preconditions;
 import io.javalin.Context;
 import io.javalin.Handler;
 import io.javalin.Javalin;
@@ -339,7 +339,7 @@ public class FileSystemViewHandler {
                   + " but server has the following timeline "
                   + 
viewManager.getFileSystemView(context.queryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM))
                       
.getTimeline().getInstants().collect(Collectors.toList());
-          Preconditions.checkArgument(!isLocalViewBehind(context), errMsg);
+          ValidationUtils.checkArgument(!isLocalViewBehind(context), errMsg);
           long endFinalCheck = System.currentTimeMillis();
           finalCheckTimeTaken = endFinalCheck - beginFinalCheck;
         }
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieWithTimelineServer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieWithTimelineServer.java
index e7e5cb8..b6fe64a 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieWithTimelineServer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieWithTimelineServer.java
@@ -20,12 +20,12 @@ package org.apache.hudi.utilities;
 
 import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
-import com.google.common.base.Preconditions;
 import io.javalin.Javalin;
 import org.apache.http.HttpResponse;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.spark.api.java.JavaSparkContext;
 
 import java.io.BufferedReader;
@@ -87,7 +87,7 @@ public class HoodieWithTimelineServer implements Serializable 
{
     IntStream.range(0, cfg.numPartitions).forEach(i -> messages.add("Hello 
World"));
     List<String> gotMessages = jsc.parallelize(messages).map(msg -> 
sendRequest(driverHost, cfg.serverPort)).collect();
     System.out.println("Got Messages :" + gotMessages);
-    Preconditions.checkArgument(gotMessages.equals(messages), "Got expected 
reply from Server");
+    ValidationUtils.checkArgument(gotMessages.equals(messages), "Got expected 
reply from Server");
   }
 
   public String sendRequest(String driverHost, int port) {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java
index a62c232..50254dd 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java
@@ -26,6 +26,7 @@ import org.apache.hudi.common.util.DFSPropertiesConfiguration;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.ReflectionUtils;
 import org.apache.hudi.common.util.TypedProperties;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.config.HoodieCompactionConfig;
 import org.apache.hudi.config.HoodieIndexConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
@@ -36,7 +37,6 @@ import org.apache.hudi.utilities.schema.SchemaProvider;
 import org.apache.hudi.utilities.sources.Source;
 import org.apache.hudi.utilities.transform.Transformer;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -137,7 +137,7 @@ public class UtilHelpers {
     TypedProperties properties = new TypedProperties();
     props.forEach(x -> {
       String[] kv = x.split("=");
-      Preconditions.checkArgument(kv.length == 2);
+      ValidationUtils.checkArgument(kv.length == 2);
       properties.setProperty(kv[0], kv[1]);
     });
     return properties;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java
index 4b69d22..3073dfa 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java
@@ -20,6 +20,7 @@ package org.apache.hudi.utilities.deltastreamer;
 
 import org.apache.hudi.AvroConversionUtils;
 import org.apache.hudi.DataSourceUtils;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.client.HoodieWriteClient;
 import org.apache.hudi.keygen.KeyGenerator;
 import org.apache.hudi.client.WriteStatus;
@@ -49,7 +50,6 @@ import org.apache.hudi.utilities.sources.InputBatch;
 import org.apache.hudi.utilities.transform.Transformer;
 
 import com.codahale.metrics.Timer;
-import com.google.common.base.Preconditions;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.hadoop.conf.Configuration;
@@ -502,10 +502,10 @@ public class DeltaSync implements Serializable {
     HoodieWriteConfig config = builder.build();
 
     // Validate what deltastreamer assumes of write-config to be really safe
-    Preconditions.checkArgument(config.isInlineCompaction() == 
cfg.isInlineCompactionEnabled());
-    Preconditions.checkArgument(!config.shouldAutoCommit());
-    Preconditions.checkArgument(config.shouldCombineBeforeInsert() == 
cfg.filterDupes);
-    Preconditions.checkArgument(config.shouldCombineBeforeUpsert());
+    ValidationUtils.checkArgument(config.isInlineCompaction() == 
cfg.isInlineCompactionEnabled());
+    ValidationUtils.checkArgument(!config.shouldAutoCommit());
+    ValidationUtils.checkArgument(config.shouldCombineBeforeInsert() == 
cfg.filterDupes);
+    ValidationUtils.checkArgument(config.shouldCombineBeforeUpsert());
 
     return config;
   }
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java
index b2aec69..01ab1cc 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java
@@ -29,6 +29,7 @@ import org.apache.hudi.common.util.CompactionUtils;
 import org.apache.hudi.common.util.FSUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.TypedProperties;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
@@ -41,7 +42,6 @@ import com.beust.jcommander.IStringConverter;
 import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
 import com.beust.jcommander.ParameterException;
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -353,7 +353,7 @@ public class HoodieDeltaStreamer implements Serializable {
             new HoodieTableMetaClient(new Configuration(fs.getConf()), 
cfg.targetBasePath, false);
         tableType = meta.getTableType();
         // This will guarantee there is no surprise with table type
-        
Preconditions.checkArgument(tableType.equals(HoodieTableType.valueOf(cfg.tableType)),
+        
ValidationUtils.checkArgument(tableType.equals(HoodieTableType.valueOf(cfg.tableType)),
             "Hoodie table is of type " + tableType + " but passed in CLI 
argument is " + cfg.tableType);
       } else {
         tableType = HoodieTableType.valueOf(cfg.tableType);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/IncrSourceHelper.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/IncrSourceHelper.java
index 9787bab..44fd50d 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/IncrSourceHelper.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/IncrSourceHelper.java
@@ -22,12 +22,14 @@ import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.table.HoodieTimeline;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 
-import com.google.common.base.Preconditions;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.sql.Row;
 
+import java.util.Objects;
+
 public class IncrSourceHelper {
 
   /**
@@ -37,7 +39,7 @@ public class IncrSourceHelper {
    */
   private static String getStrictlyLowerTimestamp(String timestamp) {
     long ts = Long.parseLong(timestamp);
-    Preconditions.checkArgument(ts > 0, "Timestamp must be positive");
+    ValidationUtils.checkArgument(ts > 0, "Timestamp must be positive");
     long lower = ts - 1;
     return "" + lower;
   }
@@ -54,7 +56,7 @@ public class IncrSourceHelper {
    */
   public static Pair<String, String> 
calculateBeginAndEndInstants(JavaSparkContext jssc, String srcBasePath,
       int numInstantsPerFetch, Option<String> beginInstant, boolean 
readLatestOnMissingBeginInstant) {
-    Preconditions.checkArgument(numInstantsPerFetch > 0,
+    ValidationUtils.checkArgument(numInstantsPerFetch > 0,
         "Make sure the config 
hoodie.deltastreamer.source.hoodieincr.num_instants is set to a positive 
value");
     HoodieTableMetaClient srcMetaClient = new 
HoodieTableMetaClient(jssc.hadoopConfiguration(), srcBasePath, true);
 
@@ -85,11 +87,11 @@ public class IncrSourceHelper {
    * @param endInstant end instant of the batch
    */
   public static void validateInstantTime(Row row, String instantTime, String 
sinceInstant, String endInstant) {
-    Preconditions.checkNotNull(instantTime);
-    Preconditions.checkArgument(HoodieTimeline.compareTimestamps(instantTime, 
sinceInstant, HoodieTimeline.GREATER),
+    Objects.requireNonNull(instantTime);
+    
ValidationUtils.checkArgument(HoodieTimeline.compareTimestamps(instantTime, 
sinceInstant, HoodieTimeline.GREATER),
         "Instant time(_hoodie_commit_time) in row (" + row + ") was : " + 
instantTime + "but expected to be between "
             + sinceInstant + "(excl) - " + endInstant + "(incl)");
-    Preconditions.checkArgument(
+    ValidationUtils.checkArgument(
         HoodieTimeline.compareTimestamps(instantTime, endInstant, 
HoodieTimeline.LESSER_OR_EQUAL),
         "Instant time(_hoodie_commit_time) in row (" + row + ") was : " + 
instantTime + "but expected to be between "
             + sinceInstant + "(excl) - " + endInstant + "(incl)");
