This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new a031388  HDDS-2266. Avoid evaluation of LOG.trace and LOG.debug 
statement in the read/write path. (#1633)
a031388 is described below

commit a031388a2e8b7ac60ebca5a08216e2dd19ea6933
Author: Siddharth <swa...@hortonworks.com>
AuthorDate: Thu Oct 10 03:00:11 2019 -0700

    HDDS-2266. Avoid evaluation of LOG.trace and LOG.debug statement in the 
read/write path. (#1633)
---
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |  3 +-
 .../client/io/BlockOutputStreamEntryPool.java      | 10 ++--
 .../hadoop/ozone/client/io/KeyInputStream.java     |  6 ++-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  | 10 ++--
 .../hadoop/ozone/om/S3SecretManagerImpl.java       |  4 +-
 .../ozone/om/ha/OMFailoverProxyProvider.java       |  6 ++-
 .../hadoop/ozone/om/helpers/OMRatisHelper.java     |  4 +-
 .../hadoop/ozone/om/lock/OzoneManagerLock.java     | 24 ++++++----
 .../security/OzoneBlockTokenSecretManager.java     |  2 +-
 .../OzoneDelegationTokenSecretManager.java         |  6 ++-
 .../security/OzoneDelegationTokenSelector.java     |  8 +++-
 .../hadoop/ozone/security/OzoneSecretManager.java  |  6 ++-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  6 ++-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 27 +++++++----
 .../hadoop/ozone/om/OpenKeyCleanupService.java     |  4 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 10 ++--
 .../apache/hadoop/ozone/om/PrefixManagerImpl.java  | 11 +++--
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  | 16 +++++--
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |  8 ++--
 .../ozone/om/ratis/OzoneManagerRatisClient.java    | 53 ++++++++++++----------
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |  6 ++-
 .../request/bucket/acl/OMBucketSetAclRequest.java  |  4 +-
 .../request/volume/acl/OMVolumeSetAclRequest.java  |  6 ++-
 .../OzoneManagerHARequestHandlerImpl.java          |  4 +-
 ...OzoneManagerProtocolServerSideTranslatorPB.java |  4 +-
 .../protocolPB/OzoneManagerRequestHandler.java     |  4 +-
 .../ozone/security/acl/OzoneNativeAuthorizer.java  |  8 ++--
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  4 +-
 .../apache/hadoop/ozone/s3/AWSV4AuthParser.java    | 10 ++--
 .../hadoop/ozone/s3/OzoneClientProducer.java       |  5 +-
 .../ozone/s3/exception/OS3ExceptionMapper.java     |  4 +-
 31 files changed, 182 insertions(+), 101 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index c62d9773..2828f6e 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -41,8 +41,7 @@ import java.util.stream.Collectors;
  */
 public final class Pipeline {
 
-  private static final Logger LOG = LoggerFactory
-      .getLogger(Pipeline.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class);
   private final PipelineID id;
   private final ReplicationType type;
   private final ReplicationFactor factor;
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
index 045997f..b179ca5 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
@@ -193,10 +193,12 @@ public class BlockOutputStreamEntryPool {
                 .setPipeline(streamEntry.getPipeline()).build();
         locationInfoList.add(info);
       }
-      LOG.debug(
-          "block written " + streamEntry.getBlockID() + ", length " + length
-              + " bcsID " + streamEntry.getBlockID()
-              .getBlockCommitSequenceId());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "block written " + streamEntry.getBlockID() + ", length " + length
+                + " bcsID " + streamEntry.getBlockID()
+                .getBlockCommitSequenceId());
+      }
     }
     return locationInfoList;
   }
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
index fa1672a..ecbb329 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
@@ -97,8 +97,10 @@ public class KeyInputStream extends InputStream implements 
Seekable {
     long keyLength = 0;
     for (int i = 0; i < blockInfos.size(); i++) {
       OmKeyLocationInfo omKeyLocationInfo = blockInfos.get(i);
-      LOG.debug("Adding stream for accessing {}. The stream will be " +
-          "initialized later.", omKeyLocationInfo);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adding stream for accessing {}. The stream will be " +
+            "initialized later.", omKeyLocationInfo);
+      }
 
       addStream(omKeyLocationInfo, xceiverClientManager,
           verifyChecksum);
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index d0dd124..06351ab 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -439,10 +439,14 @@ public class RpcClient implements ClientProtocol {
         ozoneManagerClient.getDelegationToken(renewer);
     if (token != null) {
       token.setService(dtService);
-      LOG.debug("Created token {} for dtService {}", token, dtService);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Created token {} for dtService {}", token, dtService);
+      }
     } else {
-      LOG.debug("Cannot get ozone delegation token for renewer {} to access " +
-          "service {}", renewer, dtService);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Cannot get ozone delegation token for renewer {} to " +
+            "access service {}", renewer, dtService);
+      }
     }
     return token;
   }
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
index 2fdf543..fb56658 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
@@ -75,7 +75,9 @@ public class S3SecretManagerImpl implements S3SecretManager {
     } finally {
       omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID);
     }
-    LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result);
+    }
     return result;
   }
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
index 62d8fdc..32684de 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
@@ -214,8 +214,10 @@ public class OMFailoverProxyProvider implements
   @Override
   public void performFailover(OzoneManagerProtocolPB currentProxy) {
     int newProxyIndex = incrementProxyIndex();
-    LOG.debug("Failing over OM proxy to index: {}, nodeId: {}",
-        newProxyIndex, omNodeIDList.get(newProxyIndex));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Failing over OM proxy to index: {}, nodeId: {}",
+          newProxyIndex, omNodeIDList.get(newProxyIndex));
+    }
   }
 
   /**
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
index bc64d6c..c1930c8 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
@@ -61,7 +61,9 @@ public final class OMRatisHelper {
    */
   public static RaftClient newRaftClient(RpcType rpcType, String omId, 
RaftGroup
       group, RetryPolicy retryPolicy, Configuration conf) {
-    LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, 
group);
+    }
     final RaftProperties properties = new RaftProperties();
     RaftConfigKeys.Rpc.setType(properties, rpcType);
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
index c6a99ac..a97a26c 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
@@ -168,8 +168,10 @@ public class OzoneManagerLock {
       throw new RuntimeException(errorMessage);
     } else {
       lockFn.accept(resourceName);
-      LOG.debug("Acquired {} {} lock on resource {}", lockType, resource.name,
-          resourceName);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Acquired {} {} lock on resource {}", lockType, 
resource.name,
+            resourceName);
+      }
       lockSet.set(resource.setLock(lockSet.get()));
       return true;
     }
@@ -264,8 +266,10 @@ public class OzoneManagerLock {
           throw ex;
         }
       }
-      LOG.debug("Acquired Write {} lock on resource {} and {}", resource.name,
-          firstUser, secondUser);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Acquired Write {} lock on resource {} and {}", 
resource.name,
+            firstUser, secondUser);
+      }
       lockSet.set(resource.setLock(lockSet.get()));
       return true;
     }
@@ -300,8 +304,10 @@ public class OzoneManagerLock {
       manager.writeUnlock(firstUser);
       manager.writeUnlock(secondUser);
     }
-    LOG.debug("Release Write {} lock on resource {} and {}", resource.name,
-        firstUser, secondUser);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Release Write {} lock on resource {} and {}", resource.name,
+          firstUser, secondUser);
+    }
     lockSet.set(resource.clearLock(lockSet.get()));
   }
 
@@ -352,8 +358,10 @@ public class OzoneManagerLock {
     // locks, as some locks support acquiring lock again.
     lockFn.accept(resourceName);
     // clear lock
-    LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name,
-        resourceName);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name,
+          resourceName);
+    }
     lockSet.set(resource.clearLock(lockSet.get()));
   }
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java
index b3f607a..5cc7823 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java
@@ -89,7 +89,7 @@ public class OzoneBlockTokenSecretManager extends
     if (LOG.isTraceEnabled()) {
       long expiryTime = tokenIdentifier.getExpiryDate();
       String tokenId = tokenIdentifier.toString();
-      LOG.trace("Issued delegation token -> expiryTime:{},tokenId:{}",
+      LOG.trace("Issued delegation token -> expiryTime:{}, tokenId:{}",
           expiryTime, tokenId);
     }
     // Pass blockId as service.
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
index 7e03095..0de8ac6 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
@@ -289,8 +289,10 @@ public class OzoneDelegationTokenSecretManager
       String canceller) throws IOException {
     OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf(
         token.getIdentifier());
-    LOG.debug("Token cancellation requested for identifier: {}",
-        formatTokenId(id));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Token cancellation requested for identifier: {}",
+          formatTokenId(id));
+    }
 
     if (id.getUser() == null) {
       throw new InvalidToken("Token with no owner " + formatTokenId(id));
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java
index dd2ab1f..68afaaf 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java
@@ -43,9 +43,13 @@ public class OzoneDelegationTokenSelector
   @Override
   public Token<OzoneTokenIdentifier> selectToken(Text service,
       Collection<Token<? extends TokenIdentifier>> tokens) {
-    LOG.trace("Getting token for service {}", service);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Getting token for service {}", service);
+    }
     Token token = getSelectedTokens(service, tokens);
-    LOG.debug("Got tokens: {} for service {}", token, service);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Got tokens: {} for service {}", token, service);
+    }
     return token;
   }
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
index 78f0565..06fc071 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
@@ -110,8 +110,10 @@ public abstract class OzoneSecretManager<T extends 
TokenIdentifier>
 
   @Override
   public byte[] createPassword(T identifier) {
-    logger.debug("Creating password for identifier: {}, currentKey: {}",
-        formatTokenId(identifier), currentKey.getKeyId());
+    if (logger.isDebugEnabled()) {
+      logger.debug("Creating password for identifier: {}, currentKey: {}",
+          formatTokenId(identifier), currentKey.getKeyId());
+    }
     byte[] password = null;
     try {
       password = createPassword(identifier.getBytes(),
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index 5404456..d64eae4 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -570,8 +570,10 @@ public class BucketManagerImpl implements BucketManager {
       }
       boolean hasAccess = OzoneAclUtil.checkAclRights(bucketInfo.getAcls(),
           context);
-      LOG.debug("user:{} has access rights for bucket:{} :{} ",
-          context.getClientUgi(), ozObject.getBucketName(), hasAccess);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("user:{} has access rights for bucket:{} :{} ",
+            context.getClientUgi(), ozObject.getBucketName(), hasAccess);
+      }
       return hasAccess;
     } catch (IOException ex) {
       if(ex instanceof OMException) {
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index f3ae9b1..20b7fdf 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1661,8 +1661,10 @@ public class KeyManagerImpl implements KeyManager {
         if (keyInfo == null) {
           // the key does not exist, but it is a parent "dir" of some key
           // let access be determined based on volume/bucket/prefix ACL
-          LOG.debug("key:{} is non-existent parent, permit access to user:{}",
-              keyName, context.getClientUgi());
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("key:{} is non-existent parent, permit access to 
user:{}",
+                keyName, context.getClientUgi());
+          }
           return true;
         }
       } catch (OMException e) {
@@ -1678,8 +1680,10 @@ public class KeyManagerImpl implements KeyManager {
 
       boolean hasAccess = OzoneAclUtil.checkAclRight(
           keyInfo.getAcls(), context);
-      LOG.debug("user:{} has access rights for key:{} :{} ",
-          context.getClientUgi(), ozObject.getKeyName(), hasAccess);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("user:{} has access rights for key:{} :{} ",
+            context.getClientUgi(), ozObject.getKeyName(), hasAccess);
+      }
       return hasAccess;
     } catch (IOException ex) {
       if(ex instanceof OMException) {
@@ -1766,10 +1770,11 @@ public class KeyManagerImpl implements KeyManager {
       if (keys.iterator().hasNext()) {
         return new OzoneFileStatus(keyName);
       }
-
-      LOG.debug("Unable to get file status for the key: volume:" + volumeName +
-          " bucket:" + bucketName + " key:" + keyName + " with error no " +
-          "such file exists:");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Unable to get file status for the key: volume: {}, bucket:" 
+
+                " {}, key: {}, with error: No such file exists.", volumeName,
+            bucketName, keyName);
+      }
       throw new OMException("Unable to get file status: volume: " +
           volumeName + " bucket: " + bucketName + " key: " + keyName,
           FILE_NOT_FOUND);
@@ -2132,8 +2137,10 @@ public class KeyManagerImpl implements KeyManager {
             List<DatanodeDetails> sortedNodes = scmClient.getBlockClient()
                 .sortDatanodes(nodeList, clientMachine);
             k.getPipeline().setNodesInOrder(sortedNodes);
-            LOG.debug("Sort datanodes {} for client {}, return {}", nodes,
-                clientMachine, sortedNodes);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Sort datanodes {} for client {}, return {}", nodes,
+                  clientMachine, sortedNodes);
+            }
           } catch (IOException e) {
             LOG.warn("Unable to sort datanodes based on distance to " +
                 "client, volume=" + keyInfo.getVolumeName() +
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
index fa4be65..79bc39f 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
@@ -88,7 +88,9 @@ public class OpenKeyCleanupService extends BackgroundService {
             if (result.isSuccess()) {
               try {
                 keyManager.deleteExpiredOpenKey(result.getObjectKey());
-                LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
+                if (LOG.isDebugEnabled()) {
+                  LOG.debug("Key {} deleted from OM DB", 
result.getObjectKey());
+                }
                 deletedSize += 1;
               } catch (IOException e) {
                 LOG.warn("Failed to delete hanging-open key {}",
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index a6503d7..0cd087e 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -734,10 +734,12 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
 
     if (SecurityUtil.getAuthenticationMethod(conf).equals(
         AuthenticationMethod.KERBEROS)) {
-      LOG.debug("Ozone security is enabled. Attempting login for OM user. "
-              + "Principal: {},keytab: {}", conf.get(
-          OZONE_OM_KERBEROS_PRINCIPAL_KEY),
-          conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Ozone security is enabled. Attempting login for OM user. "
+                + "Principal: {}, keytab: {}", conf.get(
+            OZONE_OM_KERBEROS_PRINCIPAL_KEY),
+            conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
+      }
 
       UserGroupInformation.setConfiguration(conf);
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
index 0eafff9..c89b32e 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
@@ -139,7 +139,10 @@ public class PrefixManagerImpl implements PrefixManager {
       OMPrefixAclOpResult omPrefixAclOpResult = removeAcl(obj, acl, 
prefixInfo);
 
       if (!omPrefixAclOpResult.isOperationsResult()) {
-        LOG.debug("acl {} does not exist for prefix path {} ", acl, 
prefixPath);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("acl {} does not exist for prefix path {} ",
+              acl, prefixPath);
+        }
         return false;
       }
 
@@ -236,8 +239,10 @@ public class PrefixManagerImpl implements PrefixManager {
         if (lastNode != null && lastNode.getValue() != null) {
           boolean hasAccess = OzoneAclUtil.checkAclRights(lastNode.getValue().
               getAcls(), context);
-          LOG.debug("user:{} has access rights for ozObj:{} ::{} ",
-              context.getClientUgi(), ozObject, hasAccess);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("user:{} has access rights for ozObj:{} ::{} ",
+                context.getClientUgi(), ozObject, hasAccess);
+          }
           return hasAccess;
         } else {
           return true;
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
index 04cf09e..7375eb8 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -108,7 +108,7 @@ public class VolumeManagerImpl implements VolumeManager {
     if (volumeList != null) {
       prevVolList.addAll(volumeList.getVolumeNamesList());
     } else {
-      LOG.debug("volume:{} not found for user:{}");
+      LOG.debug("volume:{} not found for user:{}", volume, owner);
       throw new OMException(ResultCodes.USER_NOT_FOUND);
     }
 
@@ -503,7 +503,9 @@ public class VolumeManagerImpl implements VolumeManager {
       try {
         volumeArgs.addAcl(acl);
       } catch (OMException ex) {
-        LOG.debug("Add acl failed.", ex);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Add acl failed.", ex);
+        }
         return false;
       }
       metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
@@ -553,7 +555,9 @@ public class VolumeManagerImpl implements VolumeManager {
       try {
         volumeArgs.removeAcl(acl);
       } catch (OMException ex) {
-        LOG.debug("Remove acl failed.", ex);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Remove acl failed.", ex);
+        }
         return false;
       }
       metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
@@ -685,8 +689,10 @@ public class VolumeManagerImpl implements VolumeManager {
       Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
       boolean hasAccess = volumeArgs.getAclMap().hasAccess(
           context.getAclRights(), context.getClientUgi());
-      LOG.debug("user:{} has access rights for volume:{} :{} ",
-          context.getClientUgi(), ozObject.getVolumeName(), hasAccess);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("user:{} has access rights for volume:{} :{} ",
+            context.getClientUgi(), ozObject.getVolumeName(), hasAccess);
+      }
       return hasAccess;
     } catch (IOException ex) {
       LOG.error("Check access operation failed for volume:{}", volume, ex);
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index b4f5b8d..e5cadff 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -148,9 +148,11 @@ public class OzoneManagerDoubleBuffer {
           flushedTransactionCount.addAndGet(flushedTransactionsSize);
           flushIterations.incrementAndGet();
 
-          LOG.debug("Sync Iteration {} flushed transactions in this " +
-                  "iteration{}", flushIterations.get(),
-              flushedTransactionsSize);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Sync Iteration {} flushed transactions in this " +
+                    "iteration{}", flushIterations.get(),
+                flushedTransactionsSize);
+          }
 
           long lastRatisTransactionIndex =
               readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex)
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
index 2cbef50..6f97f56 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java
@@ -99,8 +99,10 @@ public final class OzoneManagerRatisClient implements 
Closeable {
   }
 
   public void connect() {
-    LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}",
-        raftGroup.getGroupId().getUuid().toString(), omNodeID);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}",
+          raftGroup.getGroupId().getUuid().toString(), omNodeID);
+    }
 
     // TODO : XceiverClient ratis should pass the config value of
     // maxOutstandingRequests so as to set the upper bound on max no of async
@@ -147,8 +149,7 @@ public final class OzoneManagerRatisClient implements 
Closeable {
     if (message.contains(STATUS_CODE)) {
       String errorCode = message.substring(message.indexOf(STATUS_CODE) +
           STATUS_CODE.length());
-      LOG.debug("Parsing error message for error code " +
-          errorCode);
+      LOG.debug("Parsing error message for error code {}", errorCode);
       return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim());
     } else {
       return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR;
@@ -166,25 +167,27 @@ public final class OzoneManagerRatisClient implements 
Closeable {
     CompletableFuture<RaftClientReply> raftClientReply =
         sendRequestAsync(request);
 
-    return raftClientReply.whenComplete((reply, e) -> LOG.debug(
-        "received reply {} for request: cmdType={} traceID={} " +
-            "exception: {}", reply, request.getCmdType(),
-        request.getTraceID(), e))
-        .thenApply(reply -> {
-          try {
-            Preconditions.checkNotNull(reply);
-            if (!reply.isSuccess()) {
-              RaftException exception = reply.getException();
-              Preconditions.checkNotNull(exception, "Raft reply failure " +
-                  "but no exception propagated.");
-              throw new CompletionException(exception);
-            }
-            return OMRatisHelper.getOMResponseFromRaftClientReply(reply);
-
-          } catch (InvalidProtocolBufferException e) {
-            throw new CompletionException(e);
-          }
-        });
+    return raftClientReply.whenComplete((reply, e) -> {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("received reply {} for request: cmdType={} traceID={} " +
+                "exception: {}", reply, request.getCmdType(),
+            request.getTraceID(), e);
+      }
+    }).thenApply(reply -> {
+      try {
+        Preconditions.checkNotNull(reply);
+        if (!reply.isSuccess()) {
+          RaftException exception = reply.getException();
+          Preconditions.checkNotNull(exception, "Raft reply failure " +
+              "but no exception propagated.");
+          throw new CompletionException(exception);
+        }
+        return OMRatisHelper.getOMResponseFromRaftClientReply(reply);
+
+      } catch (InvalidProtocolBufferException e) {
+        throw new CompletionException(e);
+      }
+    });
   }
 
   /**
@@ -198,7 +201,9 @@ public final class OzoneManagerRatisClient implements 
Closeable {
       OMRequest request) {
     boolean isReadOnlyRequest = OmUtils.isReadOnly(request);
     ByteString byteString = OMRatisHelper.convertRequestToByteString(request);
-    LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request);
+    }
     return isReadOnlyRequest ? raftClient.sendReadOnlyAsync(() -> byteString) :
         raftClient.sendAsync(() -> byteString);
   }
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
index 69a7ae9..7cab9d2 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
@@ -169,8 +169,10 @@ public final class OzoneManagerRatisServer {
       omResponse.setMessage(stateMachineException.getCause().getMessage());
       omResponse.setStatus(parseErrorStatus(
           stateMachineException.getCause().getMessage()));
-      LOG.debug("Error while executing ratis request. " +
-          "stateMachineException: ", stateMachineException);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error while executing ratis request. " +
+            "stateMachineException: ", stateMachineException);
+      }
       return omResponse.build();
     }
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
index 46db75d..b97de95 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
@@ -103,7 +103,9 @@ public class OMBucketSetAclRequest extends 
OMBucketAclRequest {
   void onComplete(boolean operationResult, IOException exception,
       OMMetrics omMetrics) {
     if (operationResult) {
-      LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath());
+      }
     } else {
       omMetrics.incNumBucketUpdateFails();
       if (exception == null) {
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
index 01b5edc..a5abbcc 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
@@ -96,8 +96,10 @@ public class OMVolumeSetAclRequest extends 
OMVolumeAclRequest {
   @Override
   void onComplete(IOException ex) {
     if (ex == null) {
-      LOG.debug("Set acls: {} to volume: {} success!",
-          getAcls(), getVolumeName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set acls: {} to volume: {} success!",
+            getAcls(), getVolumeName());
+      }
     } else {
       LOG.error("Set acls {} to volume {} failed!",
           getAcls(), getVolumeName(), ex);
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java
index 66f4892..2d305d7 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java
@@ -48,7 +48,9 @@ public class OzoneManagerHARequestHandlerImpl
   @Override
   public OMResponse handleApplyTransaction(OMRequest omRequest,
       long transactionLogIndex) {
-    LOG.debug("Received OMRequest: {}, ", omRequest);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Received OMRequest: {}, ", omRequest);
+    }
     Type cmdType = omRequest.getCmdType();
     switch (cmdType) {
     case CreateVolume:
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index d4c029b..ff2c966 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -225,7 +225,9 @@ public class OzoneManagerProtocolServerSideTranslatorPB 
implements
     }
     try {
       omClientResponse.getFlushFuture().get();
-      LOG.trace("Future for {} is completed", request);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Future for {} is completed", request);
+      }
     } catch (ExecutionException | InterruptedException ex) {
       // terminate OM. As if we are in this stage means, while getting
       // response from flush future, we got an exception.
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 01e59b4..ef96e0c 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -149,7 +149,9 @@ public class OzoneManagerRequestHandler implements 
RequestHandler {
   @SuppressWarnings("methodlength")
   @Override
   public OMResponse handle(OMRequest request) {
-    LOG.debug("Received OMRequest: {}, ", request);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Received OMRequest: {}, ", request);
+    }
     Type cmdType = request.getCmdType();
     OMResponse.Builder responseBuilder = OMResponse.newBuilder()
         .setCmdType(cmdType)
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java
index 5acd37e..0b7c51a 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java
@@ -79,20 +79,20 @@ public class OzoneNativeAuthorizer implements 
IAccessAuthorizer {
 
     switch (objInfo.getResourceType()) {
     case VOLUME:
-      LOG.trace("Checking access for volume:" + objInfo);
+      LOG.trace("Checking access for volume: {}", objInfo);
       return volumeManager.checkAccess(objInfo, context);
     case BUCKET:
-      LOG.trace("Checking access for bucket:" + objInfo);
+      LOG.trace("Checking access for bucket: {}", objInfo);
       return (bucketManager.checkAccess(objInfo, context)
           && volumeManager.checkAccess(objInfo, context));
     case KEY:
-      LOG.trace("Checking access for Key:" + objInfo);
+      LOG.trace("Checking access for Key: {}", objInfo);
       return (keyManager.checkAccess(objInfo, context)
           && prefixManager.checkAccess(objInfo, context)
           && bucketManager.checkAccess(objInfo, context)
           && volumeManager.checkAccess(objInfo, context));
     case PREFIX:
-      LOG.trace("Checking access for Prefix:" + objInfo);
+      LOG.trace("Checking access for Prefix: {}", objInfo);
       return (prefixManager.checkAccess(objInfo, context)
           && bucketManager.checkAccess(objInfo, context)
           && volumeManager.checkAccess(objInfo, context));
diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 4147c8f..298fd2e 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -425,7 +425,9 @@ public class BasicOzoneFileSystem extends FileSystem {
       DeleteIterator iterator = new DeleteIterator(f, recursive);
       return iterator.iterate();
     } catch (FileNotFoundException e) {
-      LOG.debug("Couldn't delete {} - does not exist", f);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Couldn't delete {} - does not exist", f);
+      }
       return false;
     }
   }
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
index 9b65b38..82ffa0c 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java
@@ -110,10 +110,14 @@ public class AWSV4AuthParser implements AWSAuthParser {
 
     canonicalRequest = buildCanonicalRequest();
     strToSign.append(hash(canonicalRequest));
-    LOG.debug("canonicalRequest:[{}]", canonicalRequest);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("canonicalRequest:[{}]", canonicalRequest);
+    }
 
-    headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k,
-        headerMap.get(k)));
+    if (LOG.isTraceEnabled()) {
+      headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k,
+          headerMap.get(k)));
+    }
 
     LOG.debug("StringToSign:[{}]", strToSign);
     stringToSign = strToSign.toString();
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
index abaca03..d42c005 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
@@ -86,8 +86,9 @@ public class OzoneClientProducer {
             identifier.setSignature(v4RequestParser.getSignature());
             identifier.setAwsAccessId(v4RequestParser.getAwsAccessId());
             identifier.setOwner(new Text(v4RequestParser.getAwsAccessId()));
-
-            LOG.trace("Adding token for service:{}", omService);
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("Adding token for service:{}", omService);
+            }
             Token<OzoneTokenIdentifier> token = new 
Token(identifier.getBytes(),
                 identifier.getSignature().getBytes(UTF_8),
                 identifier.getKind(),
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
index 43f335e..588dafa 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java
@@ -42,7 +42,9 @@ public class OS3ExceptionMapper implements 
ExceptionMapper<OS3Exception> {
 
   @Override
   public Response toResponse(OS3Exception exception) {
-    LOG.debug("Returning exception. ex: {}", exception.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Returning exception. ex: {}", exception.toString());
+    }
     exception.setRequestId(requestIdentifier.getRequestId());
     return Response.status(exception.getHttpCode())
         .entity(exception.toXml()).build();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to