HDFS-13849. Migrate logging to slf4j in hadoop-hdfs-httpfs, hadoop-hdfs-nfs, hadoop-hdfs-rbf, hadoop-hdfs-native-client. Contributed by Ian Pickering.
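Across all twelve files the change is the same mechanical swap of the commons-logging API for slf4j. As a rough illustration of the pattern (the class name Foo below is a placeholder, not one of the files touched by this patch):

    // Before: commons-logging
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    private static final Log LOG = LogFactory.getLog(Foo.class);

    // After: slf4j
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    private static final Logger LOG = LoggerFactory.getLogger(Foo.class);

Where the new declaration would run past the line-length limit, it is wrapped onto two lines, which accounts for most of the extra inserted lines in the diffstat below.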
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b1fa569
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b1fa569
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b1fa569

Branch: refs/heads/HDFS-12943
Commit: 7b1fa5693efc687492776d43ab482601cbb30dfd
Parents: e8b063f
Author: Giovanni Matteo Fumarola <gif...@apache.com>
Authored: Mon Aug 27 10:18:05 2018 -0700
Committer: Giovanni Matteo Fumarola <gif...@apache.com>
Committed: Mon Aug 27 10:18:05 2018 -0700

----------------------------------------------------------------------
 .../src/main/native/fuse-dfs/test/TestFuseDFS.java      |  6 +++---
 .../apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java  |  7 ++++---
 .../apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java   |  6 +++---
 .../org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java    |  4 ++--
 .../apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java   | 13 +++++++------
 .../hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java      |  7 ++++---
 .../java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java  |  6 +++---
 .../org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java   |  6 +++---
 .../java/org/apache/hadoop/hdfs/nfs/TestMountd.java     |  6 +++---
 .../apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java     |  9 +++++----
 .../federation/router/RouterPermissionChecker.java      |  7 ++++---
 .../hdfs/server/federation/store/RecordStore.java       |  6 +++---
 12 files changed, 44 insertions(+), 39 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/TestFuseDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/TestFuseDFS.java b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/TestFuseDFS.java
index a5d9abd..dabbe00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/TestFuseDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/TestFuseDFS.java
@@ -22,8 +22,8 @@ import java.util.ArrayList;
 import java.util.concurrent.atomic.*;
 
 import org.apache.log4j.Level;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.*;
@@ -48,7 +48,7 @@ public class TestFuseDFS {
   private static Runtime r;
   private static String mountPoint;
 
-  private static final Log LOG = LogFactory.getLog(TestFuseDFS.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestFuseDFS.class);
   {
     GenericTestUtils.setLogLevel(LOG, Level.ALL);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
index 4ae51c6..2721395 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
@@ -26,8 +26,8 @@ import java.util.Collections;
 import java.util.List;
 import java.util.HashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
@@ -61,7 +61,8 @@ import com.google.common.annotations.VisibleForTesting;
 * RPC program corresponding to mountd daemon. See {@link Mountd}.
 */
 public class RpcProgramMountd extends RpcProgram implements MountInterface {
-  private static final Log LOG = LogFactory.getLog(RpcProgramMountd.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RpcProgramMountd.class);
   public static final int PROGRAM = 100005;
   public static final int VERSION_1 = 1;
   public static final int VERSION_2 = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
index ee3f90a..cbbcccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
@@ -22,8 +22,8 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
 * This class is a thread pool to easily schedule async data operations. Current
@@ -31,7 +31,7 @@ import org.apache.commons.logging.LogFactory;
 * for readahead operations too.
 */
 public class AsyncDataService {
-  static final Log LOG = LogFactory.getLog(AsyncDataService.class);
+  static final Logger LOG = LoggerFactory.getLogger(AsyncDataService.class);
 
   // ThreadPool core pool size
   private static final int CORE_THREADS_PER_VOLUME = 1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index 6067a5d..f25797e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -1211,11 +1211,11 @@ class OpenFileCtx {
       LOG.info("Clean up open file context for fileId: {}",
           latestAttr.getFileId());
-      cleanup();
+      cleanupWithLogger();
     }
   }
 
-  synchronized void cleanup() {
+  synchronized void cleanupWithLogger() {
     if (!activeState) {
       LOG.info("Current OpenFileCtx is already inactive, no need to cleanup.");
       return;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index e23e490..cb9e2c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -22,8 +22,8 @@ import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
@@ -39,7 +39,8 @@ import com.google.common.collect.Maps;
 * used to maintain the writing context for a single file.
 */
 class OpenFileCtxCache {
-  private static final Log LOG = LogFactory.getLog(OpenFileCtxCache.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpenFileCtxCache.class);
   // Insert and delete with openFileMap are synced
   private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps
       .newConcurrentMap();
@@ -138,7 +139,7 @@ class OpenFileCtxCache {
 
     // Cleanup the old stream outside the lock
     if (toEvict != null) {
-      toEvict.cleanup();
+      toEvict.cleanupWithLogger();
     }
     return true;
   }
@@ -178,7 +179,7 @@ class OpenFileCtxCache {
 
     // Invoke the cleanup outside the lock
     for (OpenFileCtx ofc : ctxToRemove) {
-      ofc.cleanup();
+      ofc.cleanupWithLogger();
     }
   }
@@ -214,7 +215,7 @@ class OpenFileCtxCache {
 
     // Invoke the cleanup outside the lock
     for (OpenFileCtx ofc : cleanedContext) {
-      ofc.cleanup();
+      ofc.cleanupWithLogger();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
index e495486..549f5be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
@@ -22,8 +22,8 @@ import java.net.SocketException;
 
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
@@ -37,7 +37,8 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 * Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594880
 */
 public class PrivilegedNfsGatewayStarter implements Daemon {
-  static final Log LOG = LogFactory.getLog(PrivilegedNfsGatewayStarter.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(PrivilegedNfsGatewayStarter.class);
   private String[] args = null;
   private DatagramSocket registrationSocket = null;
   private Nfs3 nfs3Server = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
index 5d66751..98f3d6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
@@ -22,8 +22,8 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
@@ -37,7 +37,7 @@ import com.google.common.base.Preconditions;
 * xid and reply status.
 */
 class WriteCtx {
-  public static final Log LOG = LogFactory.getLog(WriteCtx.class);
+  public static final Logger LOG = LoggerFactory.getLogger(WriteCtx.class);
 
   /**
   * In memory write data has 3 states. ALLOW_DUMP: not sequential write, still

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
index 0a3450d..3554239 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 import java.io.IOException;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -51,7 +51,7 @@ import com.google.common.annotations.VisibleForTesting;
 * Manage the writes and responds asynchronously.
 */
 public class WriteManager {
-  public static final Log LOG = LogFactory.getLog(WriteManager.class);
+  public static final Logger LOG = LoggerFactory.getLogger(WriteManager.class);
 
   private final NfsConfiguration config;
   private final IdMappingServiceProvider iug;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
index 32ed20f..fe92c90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.hdfs.nfs;
 import java.io.IOException;
 import java.net.InetAddress;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
@@ -35,7 +35,7 @@ import static org.junit.Assert.assertTrue;
 
 public class TestMountd {
 
-  public static final Log LOG = LogFactory.getLog(TestMountd.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestMountd.class);
 
   @Test
   public void testStart() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
index 1d152ce..4e53c72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.hdfs.nfs;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
@@ -51,7 +51,8 @@ import org.jboss.netty.channel.Channels;
 import org.jboss.netty.channel.MessageEvent;
 
 public class TestOutOfOrderWrite {
-  public final static Log LOG = LogFactory.getLog(TestOutOfOrderWrite.class);
+  public final static Logger LOG =
+      LoggerFactory.getLogger(TestOutOfOrderWrite.class);
 
   static FileHandle handle = null;
   static Channel channel;
@@ -179,4 +180,4 @@ public class TestOutOfOrderWrite {
 
   // TODO: convert to Junit test, and validate result automatically
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java
index 63d190c..cf660d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java
@@ -21,8 +21,8 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
@@ -35,7 +35,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 * Class that helps in checking permissions in Router-based federation.
 */
 public class RouterPermissionChecker extends FSPermissionChecker {
-  static final Log LOG = LogFactory.getLog(RouterPermissionChecker.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(RouterPermissionChecker.class);
 
   /** Mount table default permission. */
   public static final short MOUNT_TABLE_PERMISSION_DEFAULT = 00755;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1fa569/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
index 53a8b82..92aa584 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.federation.store;
 
 import java.lang.reflect.Constructor;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 @InterfaceStability.Evolving
 public abstract class RecordStore<R extends BaseRecord> {
 
-  private static final Log LOG = LogFactory.getLog(RecordStore.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RecordStore.class);
 
   /** Class of the record stored in this State Store. */
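A side benefit of the migration, already visible in the OpenFileCtx hunk above, is slf4j's parameterized message syntax: the "{}" placeholders defer string construction until the log level is actually enabled, so simple messages no longer need manual concatenation or an isDebugEnabled() guard. A minimal sketch (the handle and count variables are made up for illustration, not taken from this patch):

    // commons-logging style: the message string is built eagerly,
    // unless the call is wrapped in an explicit guard
    if (LOG.isDebugEnabled()) {
      LOG.debug("File handle " + handle + " has " + count + " pending writes");
    }

    // slf4j style: formatting happens only when DEBUG is enabled
    LOG.debug("File handle {} has {} pending writes", handle, count);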