This is an automated email from the ASF dual-hosted git repository.

sankarh pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/branch-3 by this push:
     new 5a9aa562581 HIVE-27611: Backport of HIVE-22168: Remove very expensive logging from the llap cache hotpath (Slim B via Jesus Camacho Rodriguez)
5a9aa562581 is described below

commit 5a9aa5625810296df84829be1261ec5d502ccc9e
Author: Aman Raj <104416558+amanraj2...@users.noreply.github.com>
AuthorDate: Mon Aug 28 20:46:20 2023 +0530

    HIVE-27611: Backport of HIVE-22168: Remove very expensive logging from the llap cache hotpath (Slim B via Jesus Camacho Rodriguez)
    
    Signed-off-by: Sankar Hariappan <sank...@apache.org>
    Closes (#4590)
---
 .../java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java   |  4 ++--
 .../hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java     | 12 ++++++------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
index f68ebd7c6d6..ea354683d8c 100644
--- a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
@@ -213,8 +213,8 @@ public class LlapCacheAwareFs extends FileSystem {
           return new CacheChunk(buffer, startOffset, endOffset);
         }
       }, gotAllData);
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Buffers after cache " + RecordReaderUtils.stringifyDiskRanges(drl));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Buffers after cache " + RecordReaderUtils.stringifyDiskRanges(drl));
       }
       if (gotAllData.value) {
         long sizeRead = 0;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 348f9df773f..91173818f55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -564,15 +564,15 @@ class EncodedReaderImpl implements EncodedReader {
      long stripeOffset, boolean hasFileId, IdentityHashMap<ByteBuffer, Boolean> toRelease)
          throws IOException {
     DiskRangeList.MutateHelper toRead = new DiskRangeList.MutateHelper(listToRead);
-    if (LOG.isInfoEnabled()) {
-      LOG.info("Resulting disk ranges to read (file " + fileKey + "): "
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Resulting disk ranges to read (file " + fileKey + "): "
           + RecordReaderUtils.stringifyDiskRanges(toRead.next));
     }
     BooleanRef isAllInCache = new BooleanRef();
     if (hasFileId) {
      cacheWrapper.getFileData(fileKey, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
             + fileKey + ", base offset " + stripeOffset  + "): "
             + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
@@ -2009,8 +2009,8 @@ class EncodedReaderImpl implements EncodedReader {
         releaseBuffers(toRelease.keySet(), true);
         toRelease.clear();
       }
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after pre-read (file " + fileKey + ", base offset "
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Disk ranges after pre-read (file " + fileKey + ", base offset "
            + stripeOffset + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
       iter = toRead.next; // Reset the iter to start.

Reply via email to