This is an automated email from the ASF dual-hosted git repository.

szita pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 5de00c7  HIVE-22821: Add necessary endpoints for proactive cache eviction (Adam Szita, reviewed by Peter Vary, Slim Bouguerra)
5de00c7 is described below

commit 5de00c789fbd071576d0d8d748dfbc78553d645f
Author: Adam Szita <sz...@cloudera.com>
AuthorDate: Mon Apr 20 15:31:42 2020 +0200

    HIVE-22821: Add necessary endpoints for proactive cache eviction (Adam Szita, reviewed by Peter Vary, Slim Bouguerra)
---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |    3 +
 .../org/apache/hadoop/hive/llap/io/api/LlapIo.java |    9 +
 .../llap/daemon/rpc/LlapDaemonProtocolProtos.java  | 2399 +++++++++++++++++++-
 .../impl/LlapManagementProtocolClientImpl.java     |   11 +
 llap-common/src/protobuf/LlapDaemonProtocol.proto  |   29 +-
 .../hive/llap/cache/CacheContentsTracker.java      |    5 +
 .../llap/cache/LowLevelCacheMemoryManager.java     |   18 +
 .../hive/llap/cache/LowLevelCachePolicy.java       |   10 +
 .../hive/llap/cache/LowLevelFifoCachePolicy.java   |    6 +
 .../hive/llap/cache/LowLevelLrfuCachePolicy.java   |    7 +
 .../llap/daemon/impl/LlapProtocolServerImpl.java   |   17 +
 .../hadoop/hive/llap/io/api/impl/LlapIoImpl.java   |   24 +
 .../hive/llap/cache/TestLowLevelCacheImpl.java     |    6 +
 .../hive/llap/cache/TestOrcMetadataCache.java      |    6 +
 .../org/apache/hadoop/hive/llap/LlapHiveUtils.java |    4 +
 .../apache/hadoop/hive/llap/ProactiveEviction.java |  346 +++
 .../ddl/database/drop/DropDatabaseOperation.java   |    9 +
 .../hive/ql/ddl/table/drop/DropTableOperation.java |   12 +
 .../drop/AlterTableDropPartitionOperation.java     |   14 +
 19 files changed, 2906 insertions(+), 29 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 9e46e7b..16bae92 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -4285,6 +4285,9 @@ public class HiveConf extends Configuration {
     LLAP_IO_CVB_BUFFERED_SIZE("hive.llap.io.cvb.memory.consumption.", 1L << 30,
         "The amount of bytes used to buffer CVB between IO and Processor 
Threads default to 1GB, "
             + "this will be used to compute a best effort queue size for VRBs 
produced by a LLAP IO thread."),
+    LLAP_IO_PROACTIVE_EVICTION_ENABLED("hive.llap.io.proactive.eviction.enabled", true,
+        "If true, proactive cache eviction is enabled: LLAP will proactively evict buffers" +
+         " that belong to dropped Hive entities (DBs, tables, partitions, or temp tables)."),
     LLAP_IO_SHARE_OBJECT_POOLS("hive.llap.io.share.object.pools", false,
         "Whether to used shared object pools in LLAP IO. A safety flag."),
     LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", false,
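
For context, the new flag is an ordinary HiveConf boolean and defaults to true. A minimal sketch of toggling and reading it programmatically; the standalone demo class and main method are illustrative, not part of this commit:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class ProactiveEvictionConfDemo {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // The flag defaults to true per the hunk above; set it explicitly here.
        conf.setBoolVar(HiveConf.ConfVars.LLAP_IO_PROACTIVE_EVICTION_ENABLED, true);
        System.out.println("proactive eviction enabled: "
            + conf.getBoolVar(HiveConf.ConfVars.LLAP_IO_PROACTIVE_EVICTION_ENABLED));
      }
    }
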
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
index e5c4a00..4a5bf73 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.llap.io.api;
 
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.InputFormat;
@@ -33,5 +34,13 @@ public interface LlapIo<T> {
    * called when the system is idle.
    */
   long purge();
+
+  /**
+   * Handles request to evict entities specified in the request object.
+   * @param protoRequest lists Hive entities (DB, table, etc.) whose LLAP buffers should be evicted.
+   * @return number of evicted bytes.
+   */
+  long evictEntity(LlapDaemonProtocolProtos.EvictEntityRequestProto protoRequest);
+
   void initCacheOnlyInputFormat(InputFormat<?, ?> inputFormat);
 }
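
A caller-side sketch of the new endpoint; the LlapIo handle and entity names below are assumed for illustration, and request construction uses the generated builders added in the protobuf diff further down:

    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto;
    import org.apache.hadoop.hive.llap.io.api.LlapIo;

    public class EvictEntityCaller {
      // llapIo is assumed to come from the running daemon (e.g. via LlapProxy.getIo()).
      static long evictDroppedTable(LlapIo<?> llapIo) {
        EvictEntityRequestProto request = EvictEntityRequestProto.newBuilder()
            .setDbName("default")                 // required db_name field
            .addTable(TableProto.newBuilder()
                .setTableName("dropped_tbl"))     // required table_name field
            .build();
        return llapIo.evictEntity(request);       // number of evicted bytes
      }
    }
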
diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index cb383ed..537a362 100644
--- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -22558,6 +22558,2244 @@ public final class LlapDaemonProtocolProtos {
     // @@protoc_insertion_point(class_scope:SetCapacityResponseProto)
   }
 
+  public interface EvictEntityRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string db_name = 1;
+    /**
+     * <code>required string db_name = 1;</code>
+     */
+    boolean hasDbName();
+    /**
+     * <code>required string db_name = 1;</code>
+     */
+    java.lang.String getDbName();
+    /**
+     * <code>required string db_name = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getDbNameBytes();
+
+    // repeated .TableProto table = 2;
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto>
+        getTableList();
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getTable(int index);
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    int getTableCount();
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder>
+        getTableOrBuilderList();
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder getTableOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code EvictEntityRequestProto}
+   */
+  public static final class EvictEntityRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements EvictEntityRequestProtoOrBuilder {
+    // Use EvictEntityRequestProto.newBuilder() to construct.
+    private EvictEntityRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private EvictEntityRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final EvictEntityRequestProto defaultInstance;
+    public static EvictEntityRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public EvictEntityRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private EvictEntityRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              dbName_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+                table_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto>();
+                mutable_bitField0_ |= 0x00000002;
+              }
+              table_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+          table_ = java.util.Collections.unmodifiableList(table_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<EvictEntityRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<EvictEntityRequestProto>() {
+      public EvictEntityRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new EvictEntityRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<EvictEntityRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string db_name = 1;
+    public static final int DB_NAME_FIELD_NUMBER = 1;
+    private java.lang.Object dbName_;
+    /**
+     * <code>required string db_name = 1;</code>
+     */
+    public boolean hasDbName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string db_name = 1;</code>
+     */
+    public java.lang.String getDbName() {
+      java.lang.Object ref = dbName_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          dbName_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string db_name = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getDbNameBytes() {
+      java.lang.Object ref = dbName_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        dbName_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // repeated .TableProto table = 2;
+    public static final int TABLE_FIELD_NUMBER = 2;
+    private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto> table_;
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto> getTableList() {
+      return table_;
+    }
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder>
+        getTableOrBuilderList() {
+      return table_;
+    }
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    public int getTableCount() {
+      return table_.size();
+    }
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getTable(int index) {
+      return table_.get(index);
+    }
+    /**
+     * <code>repeated .TableProto table = 2;</code>
+     */
+    public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder getTableOrBuilder(
+        int index) {
+      return table_.get(index);
+    }
+
+    private void initFields() {
+      dbName_ = "";
+      table_ = java.util.Collections.emptyList();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasDbName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      for (int i = 0; i < getTableCount(); i++) {
+        if (!getTable(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getDbNameBytes());
+      }
+      for (int i = 0; i < table_.size(); i++) {
+        output.writeMessage(2, table_.get(i));
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getDbNameBytes());
+      }
+      for (int i = 0; i < table_.size(); i++) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, table_.get(i));
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasDbName() == other.hasDbName());
+      if (hasDbName()) {
+        result = result && getDbName()
+            .equals(other.getDbName());
+      }
+      result = result && getTableList()
+          .equals(other.getTableList());
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasDbName()) {
+        hash = (37 * hash) + DB_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getDbName().hashCode();
+      }
+      if (getTableCount() > 0) {
+        hash = (37 * hash) + TABLE_FIELD_NUMBER;
+        hash = (53 * hash) + getTableList().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code EvictEntityRequestProto}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        dbName_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (tableBuilder_ == null) {
+          table_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000002);
+        } else {
+          tableBuilder_.clear();
+        }
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor;
+      }
+
+      public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto build() {
+        org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto buildPartial() {
+        org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.dbName_ = dbName_;
+        if (tableBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002)) {
+            table_ = java.util.Collections.unmodifiableList(table_);
+            bitField0_ = (bitField0_ & ~0x00000002);
+          }
+          result.table_ = table_;
+        } else {
+          result.table_ = tableBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) {
+          return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto other) {
+        if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance()) return this;
+        if (other.hasDbName()) {
+          bitField0_ |= 0x00000001;
+          dbName_ = other.dbName_;
+          onChanged();
+        }
+        if (tableBuilder_ == null) {
+          if (!other.table_.isEmpty()) {
+            if (table_.isEmpty()) {
+              table_ = other.table_;
+              bitField0_ = (bitField0_ & ~0x00000002);
+            } else {
+              ensureTableIsMutable();
+              table_.addAll(other.table_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.table_.isEmpty()) {
+            if (tableBuilder_.isEmpty()) {
+              tableBuilder_.dispose();
+              tableBuilder_ = null;
+              table_ = other.table_;
+              bitField0_ = (bitField0_ & ~0x00000002);
+              tableBuilder_ = 
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getTableFieldBuilder() : null;
+            } else {
+              tableBuilder_.addAllMessages(other.table_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasDbName()) {
+          
+          return false;
+        }
+        for (int i = 0; i < getTableCount(); i++) {
+          if (!getTable(i).isInitialized()) {
+            
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string db_name = 1;
+      private java.lang.Object dbName_ = "";
+      /**
+       * <code>required string db_name = 1;</code>
+       */
+      public boolean hasDbName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string db_name = 1;</code>
+       */
+      public java.lang.String getDbName() {
+        java.lang.Object ref = dbName_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          dbName_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string db_name = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getDbNameBytes() {
+        java.lang.Object ref = dbName_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          dbName_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string db_name = 1;</code>
+       */
+      public Builder setDbName(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        dbName_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string db_name = 1;</code>
+       */
+      public Builder clearDbName() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        dbName_ = getDefaultInstance().getDbName();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string db_name = 1;</code>
+       */
+      public Builder setDbNameBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        dbName_ = value;
+        onChanged();
+        return this;
+      }
+
+      // repeated .TableProto table = 2;
+      private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto> table_ =
+        java.util.Collections.emptyList();
+      private void ensureTableIsMutable() {
+        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+          table_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto>(table_);
+          bitField0_ |= 0x00000002;
+         }
+      }
+
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder> tableBuilder_;
+
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto> getTableList() {
+        if (tableBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(table_);
+        } else {
+          return tableBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public int getTableCount() {
+        if (tableBuilder_ == null) {
+          return table_.size();
+        } else {
+          return tableBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getTable(int index) {
+        if (tableBuilder_ == null) {
+          return table_.get(index);
+        } else {
+          return tableBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder setTable(
+          int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto value) {
+        if (tableBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTableIsMutable();
+          table_.set(index, value);
+          onChanged();
+        } else {
+          tableBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder setTable(
+          int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder builderForValue) {
+        if (tableBuilder_ == null) {
+          ensureTableIsMutable();
+          table_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          tableBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder addTable(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto value) {
+        if (tableBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTableIsMutable();
+          table_.add(value);
+          onChanged();
+        } else {
+          tableBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder addTable(
+          int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto value) {
+        if (tableBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTableIsMutable();
+          table_.add(index, value);
+          onChanged();
+        } else {
+          tableBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder addTable(
+          org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder builderForValue) {
+        if (tableBuilder_ == null) {
+          ensureTableIsMutable();
+          table_.add(builderForValue.build());
+          onChanged();
+        } else {
+          tableBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder addTable(
+          int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder builderForValue) {
+        if (tableBuilder_ == null) {
+          ensureTableIsMutable();
+          table_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          tableBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder addAllTable(
+          java.lang.Iterable<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto> values) {
+        if (tableBuilder_ == null) {
+          ensureTableIsMutable();
+          super.addAll(values, table_);
+          onChanged();
+        } else {
+          tableBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder clearTable() {
+        if (tableBuilder_ == null) {
+          table_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000002);
+          onChanged();
+        } else {
+          tableBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public Builder removeTable(int index) {
+        if (tableBuilder_ == null) {
+          ensureTableIsMutable();
+          table_.remove(index);
+          onChanged();
+        } else {
+          tableBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder getTableBuilder(
+          int index) {
+        return getTableFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder getTableOrBuilder(
+          int index) {
+        if (tableBuilder_ == null) {
+          return table_.get(index);  } else {
+          return tableBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder>
+           getTableOrBuilderList() {
+        if (tableBuilder_ != null) {
+          return tableBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(table_);
+        }
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder addTableBuilder() {
+        return getTableFieldBuilder().addBuilder(
+            org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder addTableBuilder(
+          int index) {
+        return getTableFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .TableProto table = 2;</code>
+       */
+      public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder>
+           getTableBuilderList() {
+        return getTableFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder>
+          getTableFieldBuilder() {
+        if (tableBuilder_ == null) {
+          tableBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder>(
+                  table_,
+                  ((bitField0_ & 0x00000002) == 0x00000002),
+                  getParentForChildren(),
+                  isClean());
+          table_ = null;
+        }
+        return tableBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:EvictEntityRequestProto)
+    }
+
+    static {
+      defaultInstance = new EvictEntityRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:EvictEntityRequestProto)
+  }
+
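
The two messages in this hunk carry the eviction scope: EvictEntityRequestProto holds a required database name plus a list of tables, and each TableProto (defined next) pairs parallel partition key/value lists with a required table name. A round-trip sketch over the generated API, with illustrative entity names:

    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto;

    public class EvictEntityRequestRoundTrip {
      public static void main(String[] args) throws Exception {
        EvictEntityRequestProto request = EvictEntityRequestProto.newBuilder()
            .setDbName("default")
            .addTable(TableProto.newBuilder()
                .setTableName("web_logs")
                .addPartKey("ds")                 // partition keys and values are
                .addPartVal("2020-04-20"))        // kept as parallel repeated fields
            .build();
        byte[] wire = request.toByteArray();      // uses writeTo()/getSerializedSize() above
        EvictEntityRequestProto parsed = EvictEntityRequestProto.parseFrom(wire);
        System.out.println(parsed.getDbName() + "." + parsed.getTable(0).getTableName());
      }
    }
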
+  public interface TableProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string table_name = 1;
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    boolean hasTableName();
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    java.lang.String getTableName();
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getTableNameBytes();
+
+    // repeated string part_key = 2;
+    /**
+     * <code>repeated string part_key = 2;</code>
+     */
+    java.util.List<java.lang.String>
+    getPartKeyList();
+    /**
+     * <code>repeated string part_key = 2;</code>
+     */
+    int getPartKeyCount();
+    /**
+     * <code>repeated string part_key = 2;</code>
+     */
+    java.lang.String getPartKey(int index);
+    /**
+     * <code>repeated string part_key = 2;</code>
+     */
+    com.google.protobuf.ByteString
+        getPartKeyBytes(int index);
+
+    // repeated string part_val = 3;
+    /**
+     * <code>repeated string part_val = 3;</code>
+     */
+    java.util.List<java.lang.String>
+    getPartValList();
+    /**
+     * <code>repeated string part_val = 3;</code>
+     */
+    int getPartValCount();
+    /**
+     * <code>repeated string part_val = 3;</code>
+     */
+    java.lang.String getPartVal(int index);
+    /**
+     * <code>repeated string part_val = 3;</code>
+     */
+    com.google.protobuf.ByteString
+        getPartValBytes(int index);
+  }
+  /**
+   * Protobuf type {@code TableProto}
+   */
+  public static final class TableProto extends
+      com.google.protobuf.GeneratedMessage
+      implements TableProtoOrBuilder {
+    // Use TableProto.newBuilder() to construct.
+    private TableProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TableProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final TableProto defaultInstance;
+    public static TableProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TableProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TableProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              tableName_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+                partKey_ = new com.google.protobuf.LazyStringArrayList();
+                mutable_bitField0_ |= 0x00000002;
+              }
+              partKey_.add(input.readBytes());
+              break;
+            }
+            case 26: {
+              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+                partVal_ = new com.google.protobuf.LazyStringArrayList();
+                mutable_bitField0_ |= 0x00000004;
+              }
+              partVal_.add(input.readBytes());
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+          partKey_ = new com.google.protobuf.UnmodifiableLazyStringList(partKey_);
+        }
+        if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+          partVal_ = new com.google.protobuf.UnmodifiableLazyStringList(partVal_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<TableProto> PARSER =
+        new com.google.protobuf.AbstractParser<TableProto>() {
+      public TableProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new TableProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<TableProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string table_name = 1;
+    public static final int TABLE_NAME_FIELD_NUMBER = 1;
+    private java.lang.Object tableName_;
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    public boolean hasTableName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    public java.lang.String getTableName() {
+      java.lang.Object ref = tableName_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          tableName_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getTableNameBytes() {
+      java.lang.Object ref = tableName_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        tableName_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // repeated string part_key = 2;
+    public static final int PART_KEY_FIELD_NUMBER = 2;
+    private com.google.protobuf.LazyStringList partKey_;
+    /**
+     * <code>repeated string part_key = 2;</code>
+     */
+    public java.util.List<java.lang.String>
+        getPartKeyList() {
+      return partKey_;
+    }
+    /**
+     * <code>repeated string part_key = 2;</code>
+     */
+    public int getPartKeyCount() {
+      return partKey_.size();
+    }
+    /**
+     * <code>repeated string part_key = 2;</code>
+     */
+    public java.lang.String getPartKey(int index) {
+      return partKey_.get(index);
+    }
+    /**
+     * <code>repeated string part_key = 2;</code>
+     */
+    public com.google.protobuf.ByteString
+        getPartKeyBytes(int index) {
+      return partKey_.getByteString(index);
+    }
+
+    // repeated string part_val = 3;
+    public static final int PART_VAL_FIELD_NUMBER = 3;
+    private com.google.protobuf.LazyStringList partVal_;
+    /**
+     * <code>repeated string part_val = 3;</code>
+     */
+    public java.util.List<java.lang.String>
+        getPartValList() {
+      return partVal_;
+    }
+    /**
+     * <code>repeated string part_val = 3;</code>
+     */
+    public int getPartValCount() {
+      return partVal_.size();
+    }
+    /**
+     * <code>repeated string part_val = 3;</code>
+     */
+    public java.lang.String getPartVal(int index) {
+      return partVal_.get(index);
+    }
+    /**
+     * <code>repeated string part_val = 3;</code>
+     */
+    public com.google.protobuf.ByteString
+        getPartValBytes(int index) {
+      return partVal_.getByteString(index);
+    }
+
+    private void initFields() {
+      tableName_ = "";
+      partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasTableName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getTableNameBytes());
+      }
+      for (int i = 0; i < partKey_.size(); i++) {
+        output.writeBytes(2, partKey_.getByteString(i));
+      }
+      for (int i = 0; i < partVal_.size(); i++) {
+        output.writeBytes(3, partVal_.getByteString(i));
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getTableNameBytes());
+      }
+      {
+        int dataSize = 0;
+        for (int i = 0; i < partKey_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(partKey_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getPartKeyList().size();
+      }
+      {
+        int dataSize = 0;
+        for (int i = 0; i < partVal_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(partVal_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getPartValList().size();
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) obj;
+
+      boolean result = true;
+      result = result && (hasTableName() == other.hasTableName());
+      if (hasTableName()) {
+        result = result && getTableName()
+            .equals(other.getTableName());
+      }
+      result = result && getPartKeyList()
+          .equals(other.getPartKeyList());
+      result = result && getPartValList()
+          .equals(other.getPartValList());
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasTableName()) {
+        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getTableName().hashCode();
+      }
+      if (getPartKeyCount() > 0) {
+        hash = (37 * hash) + PART_KEY_FIELD_NUMBER;
+        hash = (53 * hash) + getPartKeyList().hashCode();
+      }
+      if (getPartValCount() > 0) {
+        hash = (37 * hash) + PART_VAL_FIELD_NUMBER;
+        hash = (53 * hash) + getPartValList().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code TableProto}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder.class);
+      }
+
+      // Construct using 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        tableName_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor;
+      }
+
+      public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto 
getDefaultInstanceForType() {
+        return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance();
+      }
+
+      public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto 
build() {
+        
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto 
result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto 
buildPartial() {
+        
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto 
result = new 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.tableName_ = tableName_;
+        if (((bitField0_ & 0x00000002) == 0x00000002)) {
+          partKey_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              partKey_);
+          bitField0_ = (bitField0_ & ~0x00000002);
+        }
+        result.partKey_ = partKey_;
+        if (((bitField0_ & 0x00000004) == 0x00000004)) {
+          partVal_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              partVal_);
+          bitField0_ = (bitField0_ & ~0x00000004);
+        }
+        result.partVal_ = partVal_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) {
+          return 
mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder 
mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto
 other) {
+        if (other == 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance())
 return this;
+        if (other.hasTableName()) {
+          bitField0_ |= 0x00000001;
+          tableName_ = other.tableName_;
+          onChanged();
+        }
+        if (!other.partKey_.isEmpty()) {
+          if (partKey_.isEmpty()) {
+            partKey_ = other.partKey_;
+            bitField0_ = (bitField0_ & ~0x00000002);
+          } else {
+            ensurePartKeyIsMutable();
+            partKey_.addAll(other.partKey_);
+          }
+          onChanged();
+        }
+        if (!other.partVal_.isEmpty()) {
+          if (partVal_.isEmpty()) {
+            partVal_ = other.partVal_;
+            bitField0_ = (bitField0_ & ~0x00000004);
+          } else {
+            ensurePartValIsMutable();
+            partVal_.addAll(other.partVal_);
+          }
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasTableName()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto 
parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) 
e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string table_name = 1;
+      private java.lang.Object tableName_ = "";
+      /**
+       * <code>required string table_name = 1;</code>
+       */
+      public boolean hasTableName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string table_name = 1;</code>
+       */
+      public java.lang.String getTableName() {
+        java.lang.Object ref = tableName_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          tableName_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string table_name = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getTableNameBytes() {
+        java.lang.Object ref = tableName_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          tableName_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string table_name = 1;</code>
+       */
+      public Builder setTableName(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        tableName_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string table_name = 1;</code>
+       */
+      public Builder clearTableName() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        tableName_ = getDefaultInstance().getTableName();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string table_name = 1;</code>
+       */
+      public Builder setTableNameBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        tableName_ = value;
+        onChanged();
+        return this;
+      }
+
+      // repeated string part_key = 2;
+      private com.google.protobuf.LazyStringList partKey_ = 
com.google.protobuf.LazyStringArrayList.EMPTY;
+      private void ensurePartKeyIsMutable() {
+        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+          partKey_ = new com.google.protobuf.LazyStringArrayList(partKey_);
+          bitField0_ |= 0x00000002;
+        }
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public java.util.List<java.lang.String>
+          getPartKeyList() {
+        return java.util.Collections.unmodifiableList(partKey_);
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public int getPartKeyCount() {
+        return partKey_.size();
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public java.lang.String getPartKey(int index) {
+        return partKey_.get(index);
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public com.google.protobuf.ByteString
+          getPartKeyBytes(int index) {
+        return partKey_.getByteString(index);
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public Builder setPartKey(
+          int index, java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensurePartKeyIsMutable();
+        partKey_.set(index, value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public Builder addPartKey(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensurePartKeyIsMutable();
+        partKey_.add(value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public Builder addAllPartKey(
+          java.lang.Iterable<java.lang.String> values) {
+        ensurePartKeyIsMutable();
+        super.addAll(values, partKey_);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public Builder clearPartKey() {
+        partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string part_key = 2;</code>
+       */
+      public Builder addPartKeyBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensurePartKeyIsMutable();
+        partKey_.add(value);
+        onChanged();
+        return this;
+      }
+
+      // repeated string part_val = 3;
+      private com.google.protobuf.LazyStringList partVal_ = 
com.google.protobuf.LazyStringArrayList.EMPTY;
+      private void ensurePartValIsMutable() {
+        if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+          partVal_ = new com.google.protobuf.LazyStringArrayList(partVal_);
+          bitField0_ |= 0x00000004;
+        }
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public java.util.List<java.lang.String>
+          getPartValList() {
+        return java.util.Collections.unmodifiableList(partVal_);
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public int getPartValCount() {
+        return partVal_.size();
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public java.lang.String getPartVal(int index) {
+        return partVal_.get(index);
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public com.google.protobuf.ByteString
+          getPartValBytes(int index) {
+        return partVal_.getByteString(index);
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public Builder setPartVal(
+          int index, java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensurePartValIsMutable();
+        partVal_.set(index, value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public Builder addPartVal(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensurePartValIsMutable();
+        partVal_.add(value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public Builder addAllPartVal(
+          java.lang.Iterable<java.lang.String> values) {
+        ensurePartValIsMutable();
+        super.addAll(values, partVal_);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public Builder clearPartVal() {
+        partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string part_val = 3;</code>
+       */
+      public Builder addPartValBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensurePartValIsMutable();
+        partVal_.add(value);
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:TableProto)
+    }
+
+    static {
+      defaultInstance = new TableProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:TableProto)
+  }
+
+  public interface EvictEntityResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required int64 evicted_bytes = 1;
+    /**
+     * <code>required int64 evicted_bytes = 1;</code>
+     */
+    boolean hasEvictedBytes();
+    /**
+     * <code>required int64 evicted_bytes = 1;</code>
+     */
+    long getEvictedBytes();
+  }
+  /**
+   * Protobuf type {@code EvictEntityResponseProto}
+   */
+  public static final class EvictEntityResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements EvictEntityResponseProtoOrBuilder {
+    // Use EvictEntityResponseProto.newBuilder() to construct.
+    private 
EvictEntityResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> 
builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private EvictEntityResponseProto(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final EvictEntityResponseProto defaultInstance;
+    public static EvictEntityResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public EvictEntityResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private EvictEntityResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              evictedBytes_ = input.readInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.class,
 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<EvictEntityResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<EvictEntityResponseProto>() {
+      public EvictEntityResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new EvictEntityResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<EvictEntityResponseProto> 
getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required int64 evicted_bytes = 1;
+    public static final int EVICTED_BYTES_FIELD_NUMBER = 1;
+    private long evictedBytes_;
+    /**
+     * <code>required int64 evicted_bytes = 1;</code>
+     */
+    public boolean hasEvictedBytes() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required int64 evicted_bytes = 1;</code>
+     */
+    public long getEvictedBytes() {
+      return evictedBytes_;
+    }
+
+    private void initFields() {
+      evictedBytes_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasEvictedBytes()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeInt64(1, evictedBytes_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(1, evictedBytes_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto))
 {
+        return super.equals(obj);
+      }
+      
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 other = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto)
 obj;
+
+      boolean result = true;
+      result = result && (hasEvictedBytes() == other.hasEvictedBytes());
+      if (hasEvictedBytes()) {
+        result = result && (getEvictedBytes()
+            == other.getEvictedBytes());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasEvictedBytes()) {
+        hash = (37 * hash) + EVICTED_BYTES_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getEvictedBytes());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder 
newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code EvictEntityResponseProto}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProtoOrBuilder
 {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.class,
 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.Builder.class);
+      }
+
+      // Construct using 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        evictedBytes_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor;
+      }
+
+      public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 getDefaultInstanceForType() {
+        return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance();
+      }
+
+      public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 build() {
+        
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 buildPartial() {
+        
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 result = new 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.evictedBytes_ = evictedBytes_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto)
 {
+          return 
mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder 
mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 other) {
+        if (other == 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance())
 return this;
+        if (other.hasEvictedBytes()) {
+          setEvictedBytes(other.getEvictedBytes());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasEvictedBytes()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto)
 e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required int64 evicted_bytes = 1;
+      private long evictedBytes_;
+      /**
+       * <code>required int64 evicted_bytes = 1;</code>
+       */
+      public boolean hasEvictedBytes() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required int64 evicted_bytes = 1;</code>
+       */
+      public long getEvictedBytes() {
+        return evictedBytes_;
+      }
+      /**
+       * <code>required int64 evicted_bytes = 1;</code>
+       */
+      public Builder setEvictedBytes(long value) {
+        bitField0_ |= 0x00000001;
+        evictedBytes_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int64 evicted_bytes = 1;</code>
+       */
+      public Builder clearEvictedBytes() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        evictedBytes_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:EvictEntityResponseProto)
+    }
+
+    static {
+      defaultInstance = new EvictEntityResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:EvictEntityResponseProto)
+  }
+
   /**
    * Protobuf service {@code LlapDaemonProtocol}
    */
@@ -23185,6 +25423,14 @@ public final class LlapDaemonProtocolProtos {
           
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto
 request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto>
 done);
 
+      /**
+       * <code>rpc evictEntity(.EvictEntityRequestProto) returns 
(.EvictEntityResponseProto);</code>
+       */
+      public abstract void evictEntity(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto
 request,
+          
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto>
 done);
+
     }
 
     public static com.google.protobuf.Service newReflectiveService(
@@ -23222,6 +25468,14 @@ public final class LlapDaemonProtocolProtos {
           impl.setCapacity(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void evictEntity(
+            com.google.protobuf.RpcController controller,
+            
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto
 request,
+            
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto>
 done) {
+          impl.evictEntity(controller, request, done);
+        }
+
       };
     }
 
@@ -23252,6 +25506,8 @@ public final class LlapDaemonProtocolProtos {
               return impl.getDaemonMetrics(controller, 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto)request);
             case 3:
               return impl.setCapacity(controller, 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto)request);
+            case 4:
+              return impl.evictEntity(controller, 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -23274,6 +25530,8 @@ public final class LlapDaemonProtocolProtos {
               return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance();
             case 3:
               return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance();
+            case 4:
+              return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -23296,6 +25554,8 @@ public final class LlapDaemonProtocolProtos {
               return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance();
             case 3:
               return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance();
+            case 4:
+              return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -23336,6 +25596,14 @@ public final class LlapDaemonProtocolProtos {
         
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto
 request,
         
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto>
 done);
 
+    /**
+     * <code>rpc evictEntity(.EvictEntityRequestProto) returns 
(.EvictEntityResponseProto);</code>
+     */
+    public abstract void evictEntity(
+        com.google.protobuf.RpcController controller,
+        
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto
 request,
+        
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto>
 done);
+
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -23378,6 +25646,11 @@ public final class LlapDaemonProtocolProtos {
             
com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto>specializeCallback(
               done));
           return;
+        case 4:
+          this.evictEntity(controller, 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)request,
+            
com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -23400,6 +25673,8 @@ public final class LlapDaemonProtocolProtos {
           return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance();
         case 3:
           return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance();
+        case 4:
+          return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -23422,6 +25697,8 @@ public final class LlapDaemonProtocolProtos {
           return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance();
         case 3:
           return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance();
+        case 4:
+          return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -23502,6 +25779,21 @@ public final class LlapDaemonProtocolProtos {
             
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.class,
             
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance()));
       }
+
+      public  void evictEntity(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto
 request,
+          
com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto>
 done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(4),
+          controller,
+          request,
+          
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.class,
+            
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance()));
+      }
     }
 
     public static BlockingInterface newBlockingStub(
@@ -23529,6 +25821,11 @@ public final class LlapDaemonProtocolProtos {
           com.google.protobuf.RpcController controller,
           
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto
 request)
           throws com.google.protobuf.ServiceException;
+
+      public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 evictEntity(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto
 request)
+          throws com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
@@ -23585,6 +25882,18 @@ public final class LlapDaemonProtocolProtos {
           
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance());
       }
 
+
+      public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto
 evictEntity(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto
 request)
+          throws com.google.protobuf.ServiceException {
+        return 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto)
 channel.callBlockingMethod(
+          getDescriptor().getMethods().get(4),
+          controller,
+          request,
+          
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance());
+      }
+
     }
 
     // @@protoc_insertion_point(class_scope:LlapManagementProtocol)
@@ -23745,6 +26054,21 @@ public final class LlapDaemonProtocolProtos {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_SetCapacityResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_EvictEntityRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_EvictEntityRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_TableProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_TableProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_EvictEntityResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_EvictEntityResponseProto_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -23833,31 +26157,38 @@ public final class LlapDaemonProtocolProtos {
       "roto\022\032\n\007metrics\030\001 \003(\0132\t.MapEntry\"A\n\027SetC" +
       "apacityRequestProto\022\023\n\013executorNum\030\001 \001(\005" +
       "\022\021\n\tqueueSize\030\002 \001(\005\"\032\n\030SetCapacityRespon" +
-      "seProto*2\n\020SourceStateProto\022\017\n\013S_SUCCEED",
-      "ED\020\001\022\r\n\tS_RUNNING\020\002*E\n\024SubmissionStatePr" +
-      
"oto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICT"
 +
-      "ED_OTHER\020\0032\337\003\n\022LlapDaemonProtocol\022B\n\013reg" +
-      "isterDag\022\030.RegisterDagRequestProto\032\031.Reg" +
-      "isterDagResponseProto\022?\n\nsubmitWork\022\027.Su" +
-      "bmitWorkRequestProto\032\030.SubmitWorkRespons" +
-      "eProto\022W\n\022sourceStateUpdated\022\037.SourceSta" +
-      "teUpdatedRequestProto\032 .SourceStateUpdat" +
-      "edResponseProto\022H\n\rqueryComplete\022\032.Query" +
-      "CompleteRequestProto\032\033.QueryCompleteResp",
-      "onseProto\022T\n\021terminateFragment\022\036.Termina" +
-      "teFragmentRequestProto\032\037.TerminateFragme" +
-      "ntResponseProto\022K\n\016updateFragment\022\033.Upda" +
-      "teFragmentRequestProto\032\034.UpdateFragmentR" +
-      "esponseProto2\265\002\n\026LlapManagementProtocol\022" +
-      "C\n\022getDelegationToken\022\025.GetTokenRequestP" +
-      "roto\032\026.GetTokenResponseProto\022?\n\npurgeCac" +
-      "he\022\027.PurgeCacheRequestProto\032\030.PurgeCache" +
-      "ResponseProto\022Q\n\020getDaemonMetrics\022\035.GetD" +
-      "aemonMetricsRequestProto\032\036.GetDaemonMetr",
-      "icsResponseProto\022B\n\013setCapacity\022\030.SetCap" +
-      "acityRequestProto\032\031.SetCapacityResponseP" +
-      "rotoBH\n&org.apache.hadoop.hive.llap.daem" +
-      "on.rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001\001"
+      "seProto\"F\n\027EvictEntityRequestProto\022\017\n\007db",
+      "_name\030\001 \002(\t\022\032\n\005table\030\002 
\003(\0132\013.TableProto\"" +
+      "D\n\nTableProto\022\022\n\ntable_name\030\001 \002(\t\022\020\n\010par" 
+
+      "t_key\030\002 \003(\t\022\020\n\010part_val\030\003 
\003(\t\"1\n\030EvictEn" +
+      "tityResponseProto\022\025\n\revicted_bytes\030\001 \002(\003" +
+      "*2\n\020SourceStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n" +
+      "\tS_RUNNING\020\002*E\n\024SubmissionStateProto\022\014\n\010" +
+      "ACCEPTED\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHE" 
+
+      "R\020\0032\337\003\n\022LlapDaemonProtocol\022B\n\013registerDa" +
+      "g\022\030.RegisterDagRequestProto\032\031.RegisterDa" +
+      "gResponseProto\022?\n\nsubmitWork\022\027.SubmitWor",
+      "kRequestProto\032\030.SubmitWorkResponseProto\022" +
+      "W\n\022sourceStateUpdated\022\037.SourceStateUpdat" +
+      "edRequestProto\032 .SourceStateUpdatedRespo" +
+      "nseProto\022H\n\rqueryComplete\022\032.QueryComplet" +
+      "eRequestProto\032\033.QueryCompleteResponsePro" +
+      "to\022T\n\021terminateFragment\022\036.TerminateFragm" +
+      "entRequestProto\032\037.TerminateFragmentRespo" +
+      "nseProto\022K\n\016updateFragment\022\033.UpdateFragm" +
+      "entRequestProto\032\034.UpdateFragmentResponse" +
+      "Proto2\371\002\n\026LlapManagementProtocol\022C\n\022getD",
+      "elegationToken\022\025.GetTokenRequestProto\032\026." +
+      "GetTokenResponseProto\022?\n\npurgeCache\022\027.Pu" +
+      "rgeCacheRequestProto\032\030.PurgeCacheRespons" +
+      "eProto\022Q\n\020getDaemonMetrics\022\035.GetDaemonMe" +
+      "tricsRequestProto\032\036.GetDaemonMetricsResp" +
+      "onseProto\022B\n\013setCapacity\022\030.SetCapacityRe" +
+      "questProto\032\031.SetCapacityResponseProto\022B\n" +
+      "\013evictEntity\022\030.EvictEntityRequestProto\032\031" +
+      ".EvictEntityResponseProtoBH\n&org.apache." +
+      "hadoop.hive.llap.daemon.rpcB\030LlapDaemonP",
+      "rotocolProtos\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner 
assigner =
       new 
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -24050,6 +26381,24 @@ public final class LlapDaemonProtocolProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_SetCapacityResponseProto_descriptor,
               new java.lang.String[] { });
+          internal_static_EvictEntityRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(31);
+          internal_static_EvictEntityRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_EvictEntityRequestProto_descriptor,
+              new java.lang.String[] { "DbName", "Table", });
+          internal_static_TableProto_descriptor =
+            getDescriptor().getMessageTypes().get(32);
+          internal_static_TableProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_TableProto_descriptor,
+              new java.lang.String[] { "TableName", "PartKey", "PartVal", });
+          internal_static_EvictEntityResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(33);
+          internal_static_EvictEntityResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_EvictEntityResponseProto_descriptor,
+              new java.lang.String[] { "EvictedBytes", });
           return null;
         }
       };
diff --git 
a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
 
b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
index d4b8cce..86e3cb0 100644
--- 
a/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
+++ 
b/llap-common/src/java/org/apache/hadoop/hive/llap/impl/LlapManagementProtocolClientImpl.java
@@ -109,4 +109,15 @@ public class LlapManagementProtocolClientImpl implements 
LlapManagementProtocolP
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public LlapDaemonProtocolProtos.EvictEntityResponseProto evictEntity(
+      RpcController controller, 
LlapDaemonProtocolProtos.EvictEntityRequestProto request)
+      throws ServiceException {
+    try {
+      return getProxy().evictEntity(null, request);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }
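
As a usage sketch: the new endpoint can also be driven directly through the generated
blocking stub. The channel setup below is an assumption (whatever RPC engine supplies the
BlockingRpcChannel); only newBlockingStub, the request builder, and evictEntity come from
the generated code above. Passing a null controller mirrors the client implementation.

    // Sketch, not part of the patch: assumes an established channel to a daemon.
    static long evictDb(com.google.protobuf.BlockingRpcChannel channel)
        throws com.google.protobuf.ServiceException {
      LlapDaemonProtocolProtos.LlapManagementProtocol.BlockingInterface stub =
          LlapDaemonProtocolProtos.LlapManagementProtocol.newBlockingStub(channel);
      LlapDaemonProtocolProtos.EvictEntityRequestProto request =
          LlapDaemonProtocolProtos.EvictEntityRequestProto.newBuilder()
              .setDbName("default") // evict all cached buffers of this DB
              .build();
      // evicted_bytes comes back as -1 if the daemon runs without LLAP IO.
      return stub.evictEntity(null, request).getEvictedBytes();
    }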
diff --git a/llap-common/src/protobuf/LlapDaemonProtocol.proto 
b/llap-common/src/protobuf/LlapDaemonProtocol.proto
index ffc3c81..9f0d2f3 100644
--- a/llap-common/src/protobuf/LlapDaemonProtocol.proto
+++ b/llap-common/src/protobuf/LlapDaemonProtocol.proto
@@ -231,6 +231,26 @@ message SetCapacityRequestProto {
 message SetCapacityResponseProto {
 }
 
+// Used for a proactive eviction request. Must contain exactly one DB name, and optionally table information.
+message EvictEntityRequestProto {
+  required string db_name = 1;
+  repeated TableProto table = 2;
+}
+
+// Used in EvictEntityRequestProto; covers both non-partitioned and partitioned tables.
+// For the latter, part_key lists each partition key once, while part_val concatenates the
+// values of every partition across all keys:
+// e.g.: for partitions pk0=p00/pk1=p01/pk2=p02 and pk0=p10/pk1=p11/pk2=p12
+// part_key: [pk0, pk1, pk2], part_val: [p00, p01, p02, p10, p11, p12]
+message TableProto {
+  required string table_name = 1;
+  repeated string part_key = 2;
+  repeated string part_val = 3;
+}
+
+message EvictEntityResponseProto {
+  required int64 evicted_bytes = 1;
+}
+
 service LlapDaemonProtocol {
   rpc registerDag(RegisterDagRequestProto) returns (RegisterDagResponseProto);
   rpc submitWork(SubmitWorkRequestProto) returns (SubmitWorkResponseProto);
@@ -241,8 +261,9 @@ service LlapDaemonProtocol {
 }
 
 service LlapManagementProtocol {
-  rpc getDelegationToken(GetTokenRequestProto) returns (GetTokenResponseProto);
-  rpc purgeCache(PurgeCacheRequestProto) returns (PurgeCacheResponseProto);
-  rpc getDaemonMetrics(GetDaemonMetricsRequestProto) returns 
(GetDaemonMetricsResponseProto);
-  rpc setCapacity(SetCapacityRequestProto) returns (SetCapacityResponseProto);
+  rpc getDelegationToken (GetTokenRequestProto) returns 
(GetTokenResponseProto);
+  rpc purgeCache (PurgeCacheRequestProto) returns (PurgeCacheResponseProto);
+  rpc getDaemonMetrics (GetDaemonMetricsRequestProto) returns 
(GetDaemonMetricsResponseProto);
+  rpc setCapacity (SetCapacityRequestProto) returns (SetCapacityResponseProto);
+  rpc evictEntity (EvictEntityRequestProto) returns (EvictEntityResponseProto);
 }
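
To make the flattened partition encoding described in the TableProto comment concrete, this
is how it maps onto the generated builders; the DB, table, and partition names are made up
for illustration:

    // Evict partitions pk0=p00/pk1=p01 and pk0=p10/pk1=p11 of table db.t (hypothetical names).
    EvictEntityRequestProto request = EvictEntityRequestProto.newBuilder()
        .setDbName("db")
        .addTable(TableProto.newBuilder()
            .setTableName("t")
            .addAllPartKey(java.util.Arrays.asList("pk0", "pk1"))
            // part_val concatenates the values partition by partition, in part_key order.
            .addAllPartVal(java.util.Arrays.asList("p00", "p01", "p10", "p11")))
        .build();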
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
index 733b30c..d22c557 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
@@ -23,6 +23,7 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.function.Predicate;
 
 import org.apache.hadoop.hive.common.io.CacheTag;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
@@ -161,6 +162,10 @@ public class CacheContentsTracker implements 
LowLevelCachePolicy, EvictionListen
     return realPolicy.purge();
   }
 
+  @Override
+  public long evictEntity(Predicate<LlapCacheableBuffer> predicate) {
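+    // The tracker is a decorating policy: proactive eviction is delegated to the wrapped real policy.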
+    return realPolicy.evictEntity(predicate);
+  }
 
   @Override
   public long evictSomeBlocks(long memoryToReserve) {
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
index 689a5d5..afe1082 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.hive.llap.cache;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Predicate;
 
+import org.apache.hadoop.hive.llap.ProactiveEviction;
 import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
 
@@ -164,6 +166,22 @@ public class LowLevelCacheMemoryManager implements 
MemoryManager {
     return evicted;
   }
 
+  public long evictEntity(Predicate<LlapCacheableBuffer> predicate) {
+    if (evictor == null) {
+      return 0;
+    }
+    long evicted = evictor.evictEntity(predicate);
+    if (evicted == 0) {
+      return 0;
+    }
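+    // Deduct the freed bytes from the usage counter with a CAS retry loop, so concurrent reservations are not lost.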
+    long usedMem = -1;
+    do {
+      usedMem = usedMemory.get();
+    } while (!usedMemory.compareAndSet(usedMem, usedMem - evicted));
+    metrics.incrCacheCapacityUsed(-evicted);
+    return evicted;
+  }
+
   @VisibleForTesting
   public long getCurrentUsedSize() {
     return usedMemory.get();
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java
index aa5ad66..a4bc219 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hive.llap.cache;
 
+
+import java.util.function.Predicate;
+
 import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 
 /**
@@ -75,4 +78,11 @@ public interface LowLevelCachePolicy extends LlapIoDebugDump 
{
    * @return amount (bytes) of memory evicted.
    */
   long purge();
+
+  /**
+   * Evicts buffers for which the supplied predicate returns true.
+   * @param predicate the predicate each buffer is matched against.
+   * @return the number of bytes evicted.
+   */
+  long evictEntity(Predicate<LlapCacheableBuffer> predicate);
 }
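
To illustrate the contract, a deliberately naive, unsynchronized sketch of what an
implementation could look like; the buffers list and the getMemoryUsage() accessor are
assumptions made for this example. Note that in this patch the FIFO and LRFU policies below
still return 0, the LRFU implementation being deferred to HIVE-23197.

    // Illustration only -- a real policy must synchronize with its eviction structures.
    public long evictEntity(Predicate<LlapCacheableBuffer> predicate) {
      long evicted = 0;
      Iterator<LlapCacheableBuffer> it = buffers.iterator(); // assumed internal list
      while (it.hasNext()) {
        LlapCacheableBuffer buffer = it.next();
        if (predicate.test(buffer)) {
          it.remove();                        // drop the buffer from the policy
          evicted += buffer.getMemoryUsage(); // assumed per-buffer size accessor
        }
      }
      return evicted;
    }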
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
index bdc6721..92c0461 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
@@ -23,6 +23,7 @@ import java.util.LinkedList;
 
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Predicate;
 
 import org.apache.hadoop.hive.llap.LlapUtil;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
@@ -72,6 +73,11 @@ public class LowLevelFifoCachePolicy implements 
LowLevelCachePolicy {
   }
 
   @Override
+  public long evictEntity(Predicate<LlapCacheableBuffer> predicate) {
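+    // The FIFO policy does not implement proactive eviction; report 0 bytes evicted.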
+    return 0;
+  }
+
+  @Override
   public long evictSomeBlocks(long memoryToReserve) {
     return evictInternal(memoryToReserve, -1);
   }
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
index 2afb899..78c001e 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.llap.cache;
 
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Predicate;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -241,6 +242,12 @@ public final class LowLevelLrfuCachePolicy implements 
LowLevelCachePolicy {
   }
 
   @Override
+  public long evictEntity(Predicate<LlapCacheableBuffer> predicate) {
+    // TODO: HIVE-23197
+    return 0;
+  }
+
+  @Override
   public long purge() {
     long evicted = 0;
     LlapCacheableBuffer oldTail;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
index e7a4fdd..5509f8a5 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
@@ -333,6 +333,23 @@ public class LlapProtocolServerImpl extends AbstractService
     }
   }
 
+  @Override
+  public LlapDaemonProtocolProtos.EvictEntityResponseProto evictEntity(
+      RpcController controller, LlapDaemonProtocolProtos.EvictEntityRequestProto protoRequest)
+      throws ServiceException {
+    LlapDaemonProtocolProtos.EvictEntityResponseProto.Builder responseProtoBuilder =
+        LlapDaemonProtocolProtos.EvictEntityResponseProto.newBuilder();
+
+    LlapIo<?> llapIo = LlapProxy.getIo();
+    if (llapIo != null) {
+      long evicted = llapIo.evictEntity(protoRequest);
+      responseProtoBuilder.setEvictedBytes(evicted);
+    } else {
+      responseProtoBuilder.setEvictedBytes(-1L);
+    }
+    return responseProtoBuilder.build();
+  }
+
   private boolean determineIfSigningIsRequired(UserGroupInformation callingUser) {
     switch (isSigningRequiredConfig) {
     case FALSE: return false;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
index fadefa2..0c0e52e 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
@@ -25,10 +25,13 @@ import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
 
 import javax.management.ObjectName;
 
 import org.apache.hadoop.hive.common.io.CacheTag;
+import org.apache.hadoop.hive.llap.ProactiveEviction;
+import org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer;
 import org.apache.hadoop.hive.llap.daemon.impl.StatsRecordingThreadPool;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -58,6 +61,7 @@ import org.apache.hadoop.hive.llap.cache.SerDeLowLevelCacheImpl;
 import org.apache.hadoop.hive.llap.cache.SimpleAllocator;
 import org.apache.hadoop.hive.llap.cache.SimpleBufferManager;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
 import org.apache.hadoop.hive.llap.io.api.LlapIo;
 import org.apache.hadoop.hive.llap.io.decode.ColumnVectorProducer;
 import org.apache.hadoop.hive.llap.io.decode.GenericColumnVectorProducer;
@@ -245,6 +249,26 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch>, LlapIoDebugDump {
     return 0;
   }
 
+  public long evictEntity(LlapDaemonProtocolProtos.EvictEntityRequestProto protoRequest) {
+    if (memoryManager == null || !HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_IO_PROACTIVE_EVICTION_ENABLED)) {
+      return -1;
+    }
+    final ProactiveEviction.Request request = ProactiveEviction.Request.Builder.create()
+        .fromProtoRequest(protoRequest).build();
+    Predicate<LlapCacheableBuffer> predicate = buffer -> request.isTagMatch(buffer.getTag());
+    LOG.debug("Starting proactive eviction.");
+    long evictedBytes = memoryManager.evictEntity(predicate);
+    if (LOG.isDebugEnabled()) {
+      StringBuilder sb = new StringBuilder();
+      sb.append("Evicted ").append(evictedBytes).append(" bytes from LLAP cache buffers that belong to table(s): ");
+      for (String table : request.getEntities().get(request.getSingleDbName()).keySet()) {
+        sb.append(table).append(" ");
+      }
+      LOG.debug(sb.toString());
+    }
+    return evictedBytes;
+  }
+
   @Override
   public InputFormat<NullWritable, VectorizedRowBatch> getInputFormat(
       InputFormat<?, ?> sourceInputFormat, Deserializer sourceSerDe) {
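
    (Note that Request.isTagMatch() is still a stub at this point (HIVE-23198), so the predicate
    built above matches nothing yet. One plausible matching scheme is a lookup of the tag's fully
    qualified "db.table" name against the requested entities; the Tag interface and its accessor
    below are assumptions for illustration, not the real CacheTag API:

        import java.util.Set;
        import java.util.function.Predicate;

        class TagMatching {
          /** Hypothetical stand-in for CacheTag; the real accessor may differ. */
          interface Tag {
            String getQualifiedTableName(); // e.g. "testdb.testtab1"
          }

          /** Builds a predicate accepting buffers tagged with any of the given "db.table" names. */
          static Predicate<Tag> forTables(Set<String> qualifiedTableNames) {
            return tag -> tag != null && qualifiedTableNames.contains(tag.getQualifiedTableName());
          }
        }
    )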
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
index 1c2eef2..33afe86 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
@@ -30,6 +30,7 @@ import java.util.concurrent.Executor;
 import java.util.concurrent.Executors;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Predicate;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -121,6 +122,11 @@ public class TestLowLevelCacheImpl {
     }
 
     @Override
+    public long evictEntity(Predicate<LlapCacheableBuffer> predicate) {
+      return 0;
+    }
+
+    @Override
     public void debugDumpShort(StringBuilder sb) {
     }
   }
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
index e5953e2..d8632b0 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.*;
 import java.nio.ByteBuffer;
 import java.util.Random;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Predicate;
 
 import org.apache.hadoop.hive.common.io.DataCache;
 import org.apache.hadoop.hive.common.io.DiskRange;
@@ -66,6 +67,11 @@ public class TestOrcMetadataCache {
       return 0;
     }
 
+    @Override
+    public long evictEntity(Predicate<LlapCacheableBuffer> predicate) {
+      return 0;
+    }
+
     public void verifyEquals(int i) {
       assertEquals(i, lockCount);
       assertEquals(i, unlockCount);
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java
index 9ad1486..e998fa1 100644
--- a/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java
@@ -116,4 +116,8 @@ public final class LlapHiveUtils {
     }
   }
 
+  public static boolean isLlapMode(HiveConf conf) {
+    return "llap".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE));
+  }
+
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/ProactiveEviction.java b/ql/src/java/org/apache/hadoop/hive/llap/ProactiveEviction.java
new file mode 100644
index 0000000..ba6d33e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/llap/ProactiveEviction.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.llap;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import javax.net.SocketFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.io.CacheTag;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
+import org.apache.hadoop.hive.llap.impl.LlapManagementProtocolClientImpl;
+import org.apache.hadoop.hive.llap.registry.LlapServiceInstance;
+import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hive.common.util.ShutdownHookManager;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Through this class the caller (typically HS2) can request eviction of buffers from the LLAP cache by specifying
+ * a DB, table, or partition name/value. Request sending is implemented here.
+ */
+public final class ProactiveEviction {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ProactiveEviction.class);
+
+  static {
+    ShutdownHookManager.addShutdownHook(new Runnable() {
+      @Override
+      public void run() {
+        if (EXECUTOR != null) {
+          EXECUTOR.shutdownNow();
+        }
+      }
+    });
+  }
+
+  private static final ExecutorService EXECUTOR = Executors.newSingleThreadExecutor(
+      new ThreadFactoryBuilder().setNameFormat("Proactive-Eviction-Requester").setDaemon(true).build());
+
+  private ProactiveEviction() {
+    // Not to be instantiated.
+  }
+
+  /**
+   * Trigger LLAP cache eviction of buffers related to the entities given in the request parameter.
+   * @param conf
+   * @param request
+   */
+  public static void evict(Configuration conf, Request request) {
+    if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_IO_PROACTIVE_EVICTION_ENABLED)) {
+      return;
+    }
+
+    try {
+      LlapRegistryService llapRegistryService = LlapRegistryService.getClient(conf);
+      Collection<LlapServiceInstance> instances = llapRegistryService.getInstances().getAll();
+      if (instances.size() == 0) {
+        // Not in LLAP mode.
+        return;
+      }
+      LOG.info("Requesting proactive LLAP cache eviction.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(request.toString());
+      }
+      // Fire and forget: requests are enqueued on the single-threaded executor and this (caller) thread won't wait.
+      for (LlapServiceInstance instance : instances) {
+        EvictionRequestTask task = new EvictionRequestTask(conf, instance, request);
+        EXECUTOR.execute(task);
+      }
+
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * The executable task to carry out request sending.
+   */
+  public static class EvictionRequestTask implements Runnable {
+    private final Request request;
+    private Configuration conf;
+    private LlapServiceInstance instance;
+    private SocketFactory socketFactory;
+    private RetryPolicy retryPolicy;
+
+    EvictionRequestTask(Configuration conf, LlapServiceInstance llapServiceInstance, Request request) {
+      this.conf = conf;
+      this.instance = llapServiceInstance;
+      this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
+      // Not making this configurable; best effort.
+      this.retryPolicy = RetryPolicies.retryUpToMaximumTimeWithFixedSleep(
+          10000, 2000L, TimeUnit.MILLISECONDS);
+      this.request = request;
+    }
+
+    @Override
+    public void run() {
+      if (request.isEmpty()) {
+        throw new IllegalArgumentException("No entities set to trigger eviction on.");
+      }
+      try {
+        LlapManagementProtocolClientImpl client = new LlapManagementProtocolClientImpl(conf, instance.getHost(),
+            instance.getManagementPort(), retryPolicy, socketFactory);
+
+        List<LlapDaemonProtocolProtos.EvictEntityRequestProto> protoRequests = request.toProtoRequests();
+
+        long evictedBytes = 0;
+        for (LlapDaemonProtocolProtos.EvictEntityRequestProto protoRequest : protoRequests) {
+          LOG.debug("Requesting proactive eviction for entities in database {}", protoRequest.getDbName());
+          LlapDaemonProtocolProtos.EvictEntityResponseProto response = client.evictEntity(null, protoRequest);
+          evictedBytes += response.getEvictedBytes();
+          LOG.debug("Proactively evicted {} bytes", response.getEvictedBytes());
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Proactive eviction freed {} bytes on LLAP daemon {} in 
total", evictedBytes, instance.toString());
+        }
+      } catch (Exception e) {
+        LOG.warn("Exception while requesting proactive eviction.", e);
+      }
+    }
+  }
+
+  /**
+   * Holds information on entities: DB name(s), table name(s), partitions.
+   */
+  public static final class Request {
+
+    // Holds a hierarchical structure of DBs, tables and partitions such as:
+    // { testdb : { testtab0 : [], testtab1 : [ {pk0 : p0v0, pk1 : p0v1}, {pk0 
: p1v0, pk1 : p1v1} ] }, testdb2 : {} }
+    private final Map<String, Map<String, Set<LinkedHashMap<String, String>>>> 
entities;
+
+    private Request(Map<String, Map<String, Set<LinkedHashMap<String, String>>>> entities) {
+      this.entities = entities;
+    }
+
+    public Map<String, Map<String, Set<LinkedHashMap<String, String>>>> getEntities() {
+      return entities;
+    }
+
+    public boolean isEmpty() {
+      return entities.isEmpty();
+    }
+
+    /**
+     * A request often contains tables/partitions of a single DB only.
+     * @return the single DB name, or null if the number of DBs present is not exactly 1.
+     */
+    public String getSingleDbName() {
+      if (entities.size() == 1) {
+        return entities.keySet().stream().findFirst().get();
+      }
+      return null;
+    }
+
+    /**
+     * Translate to Protobuf requests.
+     * @return list of request instances ready to be sent over protobuf.
+     */
+    public List<LlapDaemonProtocolProtos.EvictEntityRequestProto> toProtoRequests() {
+
+      List<LlapDaemonProtocolProtos.EvictEntityRequestProto> protoRequests = new LinkedList<>();
+
+      for (Map.Entry<String, Map<String, Set<LinkedHashMap<String, String>>>> dbEntry : entities.entrySet()) {
+        String dbName = dbEntry.getKey();
+        Map<String, Set<LinkedHashMap<String, String>>> tables = dbEntry.getValue();
+
+        LlapDaemonProtocolProtos.EvictEntityRequestProto.Builder requestBuilder =
+            LlapDaemonProtocolProtos.EvictEntityRequestProto.newBuilder();
+        LlapDaemonProtocolProtos.TableProto.Builder tableBuilder = null;
+
+        requestBuilder.setDbName(dbName.toLowerCase());
+        for (Map.Entry<String, Set<LinkedHashMap<String, String>>> tableEntry : tables.entrySet()) {
+          String tableName = tableEntry.getKey();
+          tableBuilder = LlapDaemonProtocolProtos.TableProto.newBuilder();
+          tableBuilder.setTableName(tableName.toLowerCase());
+
+          Set<LinkedHashMap<String, String>> partitions = tableEntry.getValue();
+          Set<String> partitionKeys = null;
+
+          for (Map<String, String> partitionSpec : partitions) {
+            if (partitionKeys == null) {
+              // For a given table the set of partition columns (keys) should not change.
+              partitionKeys = new LinkedHashSet<>(partitionSpec.keySet());
+              tableBuilder.addAllPartKey(partitionKeys);
+            }
+            for (String partKey : tableBuilder.getPartKeyList()) {
+              tableBuilder.addPartVal(partitionSpec.get(partKey));
+            }
+          }
+          requestBuilder.addTable(tableBuilder.build());
+        }
+        protoRequests.add(requestBuilder.build());
+      }
+      return protoRequests;
+    }
+
+    /**
+     * Match a CacheTag to this eviction request.
+     * @param cacheTag
+     * @return true if cacheTag matches and the related buffer is eligible for proactive eviction, false otherwise.
+     */
+    public boolean isTagMatch(CacheTag cacheTag) {
+      // TODO: HIVE-23198
+      return false;
+    }
+
+    @Override
+    public String toString() {
+      return "Request { entities = " + entities + " }";
+    }
+
+    /**
+     * Lets callers specify what entities are requested to be evicted, and builds a Request instance accordingly.
+     */
+    public static final class Builder {
+
+      private final Map<String, Map<String, Set<LinkedHashMap<String, String>>>> entities;
+
+      private Builder() {
+        this.entities = new HashMap<>();
+      }
+
+      public static Builder create() {
+        return new Builder();
+      }
+
+      public Builder addPartitionOfATable(String db, String tableName, LinkedHashMap<String, String> partSpec) {
+        ensureDb(db);
+        ensureTable(db, tableName);
+        entities.get(db).get(tableName).add(partSpec);
+        return this;
+      }
+
+      public Builder addDb(String db) {
+        ensureDb(db);
+        return this;
+      }
+
+      public Builder addTable(String db, String table) {
+        ensureDb(db);
+        ensureTable(db, table);
+        return this;
+      }
+
+      public Request build() {
+        return new Request(entities);
+      }
+
+      private void ensureDb(String dbName) {
+        Map<String, Set<LinkedHashMap<String, String>>> tables = entities.get(dbName);
+        if (tables == null) {
+          tables = new HashMap<>();
+          entities.put(dbName, tables);
+        }
+      }
+
+      private void ensureTable(String dbName, String tableName) {
+        ensureDb(dbName);
+        Map<String, Set<LinkedHashMap<String, String>>> tables = entities.get(dbName);
+
+        Set<LinkedHashMap<String, String>> partitions = tables.get(tableName);
+        if (partitions == null) {
+          partitions = new HashSet<>();
+          tables.put(tableName, partitions);
+        }
+      }
+
+      /**
+       * Translate from Protobuf request.
+       * @param protoRequest
+       * @return the builder itself.
+       */
+      public Builder fromProtoRequest(LlapDaemonProtocolProtos.EvictEntityRequestProto protoRequest) {
+        entities.clear();
+        String dbName = protoRequest.getDbName().toLowerCase();
+
+        Map<String, Set<LinkedHashMap<String, String>>> entitiesInDb = new HashMap<>();
+        List<LlapDaemonProtocolProtos.TableProto> tables = protoRequest.getTableList();
+
+        if (tables != null && !tables.isEmpty()) {
+          for (LlapDaemonProtocolProtos.TableProto table : tables) {
+            String dbAndTableName =
+                (new StringBuilder().append(dbName).append('.').append(table.getTableName())).toString().toLowerCase();
+
+            if (table.getPartValCount() == 0) {
+              entitiesInDb.put(dbAndTableName, null);
+              continue;
+            }
+            Set<LinkedHashMap<String, String>> partitions = new HashSet<>();
+            LinkedHashMap<String, String> partDesc = new LinkedHashMap<>();
+
+            for (int valIx = 0; valIx < table.getPartValCount(); ++valIx) {
+              int keyIx = valIx % table.getPartKeyCount();
+
+              partDesc.put(table.getPartKey(keyIx).toLowerCase(), table.getPartVal(valIx));
+
+              if (keyIx == table.getPartKeyCount() - 1) {
+                partitions.add(partDesc);
+                partDesc = new LinkedHashMap<>();
+              }
+            }
+
+            entitiesInDb.put(dbAndTableName, partitions);
+          }
+        }
+        entities.put(dbName, entitiesInDb);
+        return this;
+      }
+    }
+  }
+
+}
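
    (The flat part key/value encoding produced by toProtoRequests() deserves a concrete example:
    partition keys are listed once per table, and the values of every partition are appended in
    key order. Assembling the equivalent proto by hand — the builder methods are the ones this
    patch already calls; the db/table/partition literals are illustrative:

        // Partitions (pk0=p0v0, pk1=p0v1) and (pk0=p1v0, pk1=p1v1) of testdb.testtab1
        // flatten to partKey = [pk0, pk1] and partVal = [p0v0, p0v1, p1v0, p1v1].
        LlapDaemonProtocolProtos.EvictEntityRequestProto request =
            LlapDaemonProtocolProtos.EvictEntityRequestProto.newBuilder()
                .setDbName("testdb")
                .addTable(LlapDaemonProtocolProtos.TableProto.newBuilder()
                    .setTableName("testtab1")
                    .addPartKey("pk0").addPartKey("pk1")
                    .addPartVal("p0v0").addPartVal("p0v1")
                    .addPartVal("p1v0").addPartVal("p1v1")
                    .build())
                .build();
    )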
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/DropDatabaseOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/DropDatabaseOperation.java
index 4ce89a9..a116a0e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/DropDatabaseOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/drop/DropDatabaseOperation.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.ql.ddl.database.drop;
 
+import org.apache.hadoop.hive.llap.LlapHiveUtils;
+import org.apache.hadoop.hive.llap.ProactiveEviction;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -48,6 +50,13 @@ public class DropDatabaseOperation extends DDLOperation<DropDatabaseDesc> {
       }
 
       context.getDb().dropDatabase(dbName, true, desc.getIfExists(), desc.isCasdade());
+
+      if (LlapHiveUtils.isLlapMode(context.getConf())) {
+        ProactiveEviction.Request.Builder llapEvictRequestBuilder =
+            ProactiveEviction.Request.Builder.create();
+        llapEvictRequestBuilder.addDb(dbName);
+        ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
+      }
       // Unregister the functions as well
       if (desc.isCasdade()) {
         FunctionRegistry.unregisterPermanentFunctions(dbName);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/DropTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/DropTableOperation.java
index 17458cf..72b694f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/DropTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/drop/DropTableOperation.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hive.ql.ddl.table.drop;
 
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.llap.LlapHiveUtils;
+import org.apache.hadoop.hive.llap.ProactiveEviction;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
@@ -28,6 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.HiveTableName;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 
 import com.google.common.collect.Iterables;
@@ -102,6 +106,14 @@ public class DropTableOperation extends DDLOperation<DropTableDesc> {
     context.getDb().dropTable(desc.getTableName(), desc.isPurge());
     DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context);
 
+    if (LlapHiveUtils.isLlapMode(context.getConf())) {
+      TableName tableName = HiveTableName.of(table);
+      ProactiveEviction.Request.Builder llapEvictRequestBuilder =
+              ProactiveEviction.Request.Builder.create();
+      llapEvictRequestBuilder.addTable(tableName.getDb(), tableName.getTable());
+      ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
+    }
+
     return 0;
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
index 5caa31c..ae2c341 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
@@ -23,6 +23,8 @@ import java.util.List;
 
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.llap.LlapHiveUtils;
+import org.apache.hadoop.hive.llap.ProactiveEviction;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
@@ -117,10 +119,22 @@ public class AlterTableDropPartitionOperation extends DDLOperation<AlterTableDropPartitionDesc> {
         PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(desc.getIfPurge());
     List<Partition> droppedPartitions = context.getDb().dropPartitions(tablenName.getDb(), tablenName.getTable(),
         partitionExpressions, options);
+
+    ProactiveEviction.Request.Builder llapEvictRequestBuilder =
+        LlapHiveUtils.isLlapMode(context.getConf()) ?
+            ProactiveEviction.Request.Builder.create() : null;
+
     for (Partition partition : droppedPartitions) {
       context.getConsole().printInfo("Dropped the partition " + partition.getName());
       // We have already locked the table, don't lock the partitions.
       DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context);
+
+      if (llapEvictRequestBuilder != null) {
+        llapEvictRequestBuilder.addPartitionOfATable(tablenName.getDb(), tablenName.getTable(), partition.getSpec());
+      }
+    }
+    if (llapEvictRequestBuilder != null) {
+      ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
     }
   }
 }
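
    (Taken together, the three DDL call sites above follow one pattern: check for LLAP execution
    mode, collect the dropped entities on a Builder, and fire the asynchronous eviction request.
    A condensed sketch of the flow, assuming conf is the session's HiveConf and with illustrative
    db/table/partition literals:

        if (LlapHiveUtils.isLlapMode(conf)) {
          LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
          partSpec.put("pk0", "p0v0"); // illustrative spec, as returned by Partition.getSpec()
          ProactiveEviction.Request request = ProactiveEviction.Request.Builder.create()
              .addTable("testdb", "testtab1")
              .addPartitionOfATable("testdb", "testtab1", partSpec)
              .build();
          // Fire and forget: the request is queued on ProactiveEviction's single-threaded executor.
          ProactiveEviction.evict(conf, request);
        }
    )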