shuzirra commented on a change in pull request #3259:
URL: https://github.com/apache/hadoop/pull/3259#discussion_r682942111
##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -290,6 +302,36 @@ public void operationComplete(ChannelFuture future) throws Exception {
         shuffleConnections.decr();
       }
     }
+
+  static class NettyChannelHelper {
+    static ChannelFuture writeToChannel(Channel ch, Object obj) {
+      LOG.debug("Writing {} to channel: {}", obj.getClass().getSimpleName(), ch.id());
+      return ch.writeAndFlush(obj);
+    }
+
+    static void writeToChannelAndClose(Channel ch, Object obj) {

Review comment:
   This should also return a ChannelFuture: the write operation might fail, and there is no way to subscribe to that event if we don't return the future. The ChannelFutureListener.CLOSE will handle the closing part, but we might want to handle the errors as well.
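   Something like this rough sketch, just to illustrate (the failure-handling caller below is hypothetical, not code from this PR):

       static ChannelFuture writeToChannelAndClose(Channel ch, Object obj) {
         // CLOSE still closes the channel once the write completes, but
         // returning the future lets callers subscribe to write failures too.
         return writeToChannel(ch, obj).addListener(ChannelFutureListener.CLOSE);
       }

       // A caller could then react to a failed write:
       writeToChannelAndClose(ch, response).addListener(future -> {
         if (!future.isSuccess()) {
           LOG.error("Failed to write to channel {}", ch.id(), future.cause());
         }
       });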
##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/resources/log4j.properties
##########
@@ -12,8 +12,10 @@
 # log4j configuration used during build and unit tests
-log4j.rootLogger=info,stdout
+log4j.rootLogger=debug,stdout
 log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
+log4j.logger.io.netty=DEBUG
+log4j.logger.org.apache.hadoop.mapred=DEBUG

Review comment:
   Before commit, please change the default back to the info log level :)

##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -785,37 +833,54 @@ private void removeJobShuffleInfo(JobID jobId) throws IOException {
     }
   }
 
-  static class TimeoutHandler extends IdleStateAwareChannelHandler {
+  @VisibleForTesting
+  public void setUseOutboundExceptionHandler(boolean useHandler) {
+    this.useOutboundExceptionHandler = useHandler;
+  }
+
+  static class TimeoutHandler extends IdleStateHandler {
+    private final int connectionKeepAliveTimeOut;
     private boolean enabledTimeout;
 
+    public TimeoutHandler(int connectionKeepAliveTimeOut) {
+      //disable reader timeout
+      //set writer timeout to configured timeout value
+      //disable all idle timeout
+      super(0, connectionKeepAliveTimeOut, 0);

Review comment:
   This was a bit confusing for me, and the Netty documentation didn't help, so I think it might be a good idea to specify the time unit here (SECONDS). The documentation does not say what the default time unit is, and my assumption was milliseconds. But based on the Netty code (https://github.com/netty/netty/blob/21df18deac2d94d8704d6deeb50e9f02f56e6cce/handler/src/main/java/io/netty/handler/timeout/IdleStateHandler.java#L156, which delegates to this(readerIdleTimeSeconds, writerIdleTimeSeconds, allIdleTimeSeconds, TimeUnit.SECONDS)), it is seconds. So perhaps we should specify it here to make it more consistent. It is really easy to mess up time units.
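   A rough sketch of what I mean, using the IdleStateHandler constructor that takes an explicit TimeUnit:

       import java.util.concurrent.TimeUnit;

       public TimeoutHandler(int connectionKeepAliveTimeOut) {
         // Reader timeout disabled, writer timeout set to the configured
         // keep-alive value, all-idle timeout disabled. The explicit
         // TimeUnit.SECONDS removes the ambiguity about the unit.
         super(0, connectionKeepAliveTimeOut, 0, TimeUnit.SECONDS);
       }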
##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -920,31 +1002,50 @@ public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt)
         // fetch failure.
         headers.put(RETRY_AFTER_HEADER, String.valueOf(FETCH_RETRY_DELAY));
         sendError(ctx, "", TOO_MANY_REQ_STATUS, headers);
-        return;
+      } else {
+        super.channelActive(ctx);
+        accepted.add(ctx.channel());
+        LOG.debug("Added channel: {}. Accepted number of connections={}",
+            ctx.channel(), acceptedConnections.get());
       }
-      accepted.add(evt.getChannel());
     }
 
     @Override
-    public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
+    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+      LOG.trace("Executing channelInactive");
+      super.channelInactive(ctx);
+      acceptedConnections.decrementAndGet();
+      LOG.debug("New value of Accepted number of connections={}",
+          acceptedConnections.get());
+    }
+
+    @Override
+    public void channelRead(ChannelHandlerContext ctx, Object msg)
         throws Exception {
-      HttpRequest request = (HttpRequest) evt.getMessage();
-      if (request.getMethod() != GET) {
+      LOG.trace("Executing channelRead");
+      HttpRequest request = (HttpRequest) msg;
+      LOG.debug("Received HTTP request: {}", request);
+      if (request.method() != GET) {
         sendError(ctx, METHOD_NOT_ALLOWED);
         return;
       }
       // Check whether the shuffle version is compatible
+      String shuffleVersion = ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION;
+      if (request.headers() != null) {
+        shuffleVersion = request.headers()
+            .get(ShuffleHeader.HTTP_HEADER_VERSION);
+      }
+      LOG.debug("Shuffle version: {}", shuffleVersion);
       if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME.equals(
           request.headers() != null ?
               request.headers().get(ShuffleHeader.HTTP_HEADER_NAME) : null)
           || !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION.equals(
               request.headers() != null ?
-                  request.headers()
-                      .get(ShuffleHeader.HTTP_HEADER_VERSION) : null)) {
+                  shuffleVersion : null)) {
         sendError(ctx, "Incompatible shuffle request version", BAD_REQUEST);
       }

Review comment:
   This condition is a bit complex; can we simplify it? For the header version we do the heavy lifting a few lines above, so we could move the check there. Also, we could add logging here so we know the nature of the shuffle request incompatibility.
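   One possible shape of the simplification, as a rough sketch (the debug log and the early return are my additions, not code from the PR):

       // Resolve both headers up front, then compare once.
       String headerName = request.headers() != null
           ? request.headers().get(ShuffleHeader.HTTP_HEADER_NAME) : null;
       String shuffleVersion = request.headers() != null
           ? request.headers().get(ShuffleHeader.HTTP_HEADER_VERSION) : null;
       if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME.equals(headerName)
           || !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION.equals(shuffleVersion)) {
         // Logging both values shows which header made the request incompatible.
         LOG.debug("Incompatible shuffle request: header name={}, version={}",
             headerName, shuffleVersion);
         sendError(ctx, "Incompatible shuffle request version", BAD_REQUEST);
         return;
       }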
##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -1322,18 +1441,16 @@ protected void sendError(ChannelHandlerContext ctx, String msg,
       for (Map.Entry<String, String> header : headers.entrySet()) {
         response.headers().set(header.getKey(), header.getValue());
       }
-      response.setContent(
-          ChannelBuffers.copiedBuffer(msg, CharsetUtil.UTF_8));
       // Close the connection as soon as the error message is sent.
-      ctx.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
+      writeToChannelAndClose(ctx.channel(), response);
     }
 
     @Override
-    public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
+    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
         throws Exception {
-      Channel ch = e.getChannel();
-      Throwable cause = e.getCause();
+      LOG.debug("Executing exceptionCaught");

Review comment:
   Log the exception here rather than in the if branches; that way, if we need to debug, we see what's really going on. Or just remove this line. The only case where we don't get anything in the log is when we hit the TooLongFrameException; all other branches already produce a debug or even an error message.
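   A sketch of the first option, logging the cause once, up front:

       @Override
       public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
           throws Exception {
         // One log statement covers every branch below, including the
         // TooLongFrameException branch, which otherwise logs nothing.
         LOG.debug("Executing exceptionCaught", cause);
         // ... existing per-exception handling continues here ...
       }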
##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/LoggingHttpResponseEncoder.java
##########
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import io.netty.handler.codec.http.HttpHeaders;
+import io.netty.handler.codec.http.HttpResponse;
+import io.netty.handler.codec.http.HttpResponseEncoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+
+class LoggingHttpResponseEncoder extends HttpResponseEncoder {
+  private static final Logger LOG = LoggerFactory.getLogger(LoggingHttpResponseEncoder.class);
+  private final boolean logStacktraceOfEncodingMethods;
+
+  public LoggingHttpResponseEncoder(boolean logStacktraceOfEncodingMethods) {
+    this.logStacktraceOfEncodingMethods = logStacktraceOfEncodingMethods;
+  }
+
+  @Override
+  public boolean acceptOutboundMessage(Object msg) throws Exception {
+    printExecutingMethod();
+    LOG.info("OUTBOUND MESSAGE: " + msg);
+    return super.acceptOutboundMessage(msg);
+  }
+
+  @Override
+  protected void encodeInitialLine(ByteBuf buf, HttpResponse response) throws Exception {
+    LOG.debug("Executing method: {}, response: {}",
+        getExecutingMethodName(), response);
+    logStacktraceIfRequired();
+    super.encodeInitialLine(buf, response);
+  }
+
+  @Override
+  protected void encode(ChannelHandlerContext ctx, Object msg,
+      List<Object> out) throws Exception {
+    LOG.debug("Encoding to channel {}: {}", ctx.channel(), msg);
+    printExecutingMethod();
+    logStacktraceIfRequired();
+    super.encode(ctx, msg, out);
+  }
+
+  @Override
+  protected void encodeHeaders(HttpHeaders headers, ByteBuf buf) {
+    printExecutingMethod();
+    super.encodeHeaders(headers, buf);
+  }
+
+  @Override
+  public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise
+      promise) throws Exception {
+    LOG.debug("Writing to channel {}: {}", ctx.channel(), msg);
+    printExecutingMethod();
+    super.write(ctx, msg, promise);
+  }
+
+  private void logStacktraceIfRequired() {
+    if (logStacktraceOfEncodingMethods) {
+      LOG.debug("Stacktrace: ", new Throwable());
+    }
+  }
+
+  private void printExecutingMethod() {
+    String methodName = getExecutingMethodName();
+    LOG.debug("Executing method: {}", methodName);
+  }
+
+  private String getExecutingMethodName() {
+    try {
+      StackTraceElement[] stackTrace = Thread.currentThread()
+          .getStackTrace();
+      // Array items (indices):
+      // 0: java.lang.Thread.getStackTrace(...)
+      // 1: TestShuffleHandler$LoggingHttpResponseEncoder
+      //    .getExecutingMethodName(...)
+      String methodName = stackTrace[2].getMethodName();
+      //If this method was called from printExecutingMethod,
+      // we have yet another stack frame
+      if (methodName.endsWith("printExecutingMethod")) {

Review comment:
   I don't think it's a good idea to hardcode a method name here; it's also a bit of a rigid solution. Other helper/dumper/analyser methods might need this, and the depth of the actual method call can vary. I'd recommend introducing an integer parameter that tells this method how many extra stack frames to omit (starting from 2); then you can call the method with (1) to omit one extra frame, no matter what the caller's name is. It is a bit more maintainable this way, I think.
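   A rough sketch of the parameterized variant (the framesToSkip name is just a suggestion):

       private String getExecutingMethodName(int framesToSkip) {
         StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
         // Frame 0 is Thread.getStackTrace and frame 1 is this method, so the
         // real caller starts at index 2; framesToSkip omits any additional
         // helper frames sitting on top of that.
         String methodName = stackTrace[2 + framesToSkip].getMethodName();
         return this.getClass().getSimpleName() + "#" + methodName;
       }

       private void printExecutingMethod() {
         // printExecutingMethod itself adds one helper frame, so skip 1.
         LOG.debug("Executing method: {}", getExecutingMethodName(1));
       }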
##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -904,11 +986,11 @@ public void setPort(int port) {
     }
 
     @Override
-    public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt)
+    public void channelActive(ChannelHandlerContext ctx)
         throws Exception {
-      super.channelOpen(ctx, evt);
-
-      if ((maxShuffleConnections > 0) && (accepted.size() >= maxShuffleConnections)) {
+      LOG.debug("channelActive");

Review comment:
   This is a really vague message: we don't know which channel it refers to, and under heavy load we would only see a lot of "channelActive" messages without any context. Netty is asynchronous (and multithreaded), so it would be practically impossible to attribute this log entry. Isn't there a way to add a channel ID or something similar? We could then use it for all messages generated by the channel.

##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -828,27 +893,44 @@ public void destroy() {
     }
   }
 
-  @Override
-  public ChannelPipeline getPipeline() throws Exception {
-    ChannelPipeline pipeline = Channels.pipeline();
+  @Override protected void initChannel(SocketChannel ch) throws Exception {
+    ChannelPipeline pipeline = ch.pipeline();
     if (sslFactory != null) {
       pipeline.addLast("ssl", new SslHandler(sslFactory.createSSLEngine()));
     }
     pipeline.addLast("decoder", new HttpRequestDecoder());
-    pipeline.addLast("aggregator", new HttpChunkAggregator(1 << 16));
-    pipeline.addLast("encoder", new HttpResponseEncoder());
+    pipeline.addLast("aggregator", new HttpObjectAggregator(1 << 16));
+    pipeline.addLast(ENCODER_HANDLER_NAME, new HttpResponseEncoder());
     pipeline.addLast("chunking", new ChunkedWriteHandler());
     pipeline.addLast("shuffle", SHUFFLE);
-    pipeline.addLast("idle", idleStateHandler);
-    pipeline.addLast(TIMEOUT_HANDLER, new TimeoutHandler());
-    return pipeline;
+    addOutboundHandlersIfRequired(pipeline);
+    pipeline.addLast(TIMEOUT_HANDLER, new TimeoutHandler(connectionKeepAliveTimeOut));
     // TODO factor security manager into pipeline
     // TODO factor out encode/decode to permit binary shuffle
     // TODO factor out decode of index to permit alt. models
   }
+
+  private void addOutboundHandlersIfRequired(ChannelPipeline pipeline) {
+    if (useOutboundExceptionHandler) {
+      //https://stackoverflow.com/questions/50612403/catch-all-exception-handling-for-outbound-channelhandler
+      pipeline.addLast("outboundExceptionHandler", new ChannelOutboundHandlerAdapter() {
+        @Override
+        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
+          promise.addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
+          super.write(ctx, msg, promise);
+        }
+      });
+    }
+    if (useOutboundLogger) {
+      //Replace HttpResponseEncoder with LoggingHttpResponseEncoder
+      //Need to use the same name as before, otherwise we would have 2 encoders
+      pipeline.replace(ENCODER_HANDLER_NAME, ENCODER_HANDLER_NAME, new LoggingHttpResponseEncoder(false));

Review comment:
   Why replace here, instead of changing the code at the point where we add the original HttpResponseEncoder? Also, the hardcoded "false" argument makes this constructor parameter kind of useless; I think it should go to config along with the other "to-be-moved-to-config" fields (useOutboundExceptionHandler and useOutboundLogger).
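   A sketch of what I mean, deciding on the encoder where the pipeline is assembled (logStacktraces standing in for the future config value):

       // Choose the encoder once, at pipeline construction time,
       // instead of replacing it afterwards.
       HttpResponseEncoder encoder = useOutboundLogger
           ? new LoggingHttpResponseEncoder(logStacktraces)
           : new HttpResponseEncoder();
       pipeline.addLast(ENCODER_HANDLER_NAME, encoder);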
##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -828,27 +893,44 @@ public void destroy() {
     }
   }
 
-  @Override
-  public ChannelPipeline getPipeline() throws Exception {
-    ChannelPipeline pipeline = Channels.pipeline();
+  @Override protected void initChannel(SocketChannel ch) throws Exception {
+    ChannelPipeline pipeline = ch.pipeline();
    if (sslFactory != null) {
      pipeline.addLast("ssl", new SslHandler(sslFactory.createSSLEngine()));
    }
    pipeline.addLast("decoder", new HttpRequestDecoder());
-    pipeline.addLast("aggregator", new HttpChunkAggregator(1 << 16));
-    pipeline.addLast("encoder", new HttpResponseEncoder());
+    pipeline.addLast("aggregator", new HttpObjectAggregator(1 << 16));

Review comment:
   Perhaps we should introduce a constant for the MAX_CONTENT_LENGTH instead of the inline 1 << 16 (65536).

##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -920,31 +1002,50 @@ public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt)
         // fetch failure.
         headers.put(RETRY_AFTER_HEADER, String.valueOf(FETCH_RETRY_DELAY));
         sendError(ctx, "", TOO_MANY_REQ_STATUS, headers);
-        return;
+      } else {
+        super.channelActive(ctx);
+        accepted.add(ctx.channel());
+        LOG.debug("Added channel: {}. Accepted number of connections={}",
+            ctx.channel(), acceptedConnections.get());
       }
-      accepted.add(evt.getChannel());
     }
 
     @Override
-    public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
+    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+      LOG.trace("Executing channelInactive");
+      super.channelInactive(ctx);
+      acceptedConnections.decrementAndGet();
+      LOG.debug("New value of Accepted number of connections={}",
+          acceptedConnections.get());
+    }
+
+    @Override
+    public void channelRead(ChannelHandlerContext ctx, Object msg)
        throws Exception {
-      HttpRequest request = (HttpRequest) evt.getMessage();
-      if (request.getMethod() != GET) {
+      LOG.trace("Executing channelRead");
+      HttpRequest request = (HttpRequest) msg;
+      LOG.debug("Received HTTP request: {}", request);
+      if (request.method() != GET) {
        sendError(ctx, METHOD_NOT_ALLOWED);
        return;
      }
      // Check whether the shuffle version is compatible
+      String shuffleVersion = ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION;
+      if (request.headers() != null) {
+        shuffleVersion = request.headers()
+            .get(ShuffleHeader.HTTP_HEADER_VERSION);
+      }
+      LOG.debug("Shuffle version: {}", shuffleVersion);

Review comment:
   If we really need to know the shuffle version, then we should also know its origin, i.e. whether it was sent with the headers or we just used the default. Or, even better, it's enough to log when we get it from the request headers, since we know what the default is.

##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/LoggingHttpResponseEncoder.java
##########
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import io.netty.handler.codec.http.HttpHeaders;
+import io.netty.handler.codec.http.HttpResponse;
+import io.netty.handler.codec.http.HttpResponseEncoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+
+class LoggingHttpResponseEncoder extends HttpResponseEncoder {
+  private static final Logger LOG = LoggerFactory.getLogger(LoggingHttpResponseEncoder.class);
+  private final boolean logStacktraceOfEncodingMethods;
+
+  public LoggingHttpResponseEncoder(boolean logStacktraceOfEncodingMethods) {
+    this.logStacktraceOfEncodingMethods = logStacktraceOfEncodingMethods;
+  }
+
+  @Override
+  public boolean acceptOutboundMessage(Object msg) throws Exception {
+    printExecutingMethod();
+    LOG.info("OUTBOUND MESSAGE: " + msg);
+    return super.acceptOutboundMessage(msg);
+  }
+
+  @Override
+  protected void encodeInitialLine(ByteBuf buf, HttpResponse response) throws Exception {
+    LOG.debug("Executing method: {}, response: {}",
+        getExecutingMethodName(), response);
+    logStacktraceIfRequired();
+    super.encodeInitialLine(buf, response);
+  }
+
+  @Override
+  protected void encode(ChannelHandlerContext ctx, Object msg,
+      List<Object> out) throws Exception {
+    LOG.debug("Encoding to channel {}: {}", ctx.channel(), msg);
+    printExecutingMethod();
+    logStacktraceIfRequired();
+    super.encode(ctx, msg, out);
+  }
+
+  @Override
+  protected void encodeHeaders(HttpHeaders headers, ByteBuf buf) {
+    printExecutingMethod();
+    super.encodeHeaders(headers, buf);
+  }
+
+  @Override
+  public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise
+      promise) throws Exception {
+    LOG.debug("Writing to channel {}: {}", ctx.channel(), msg);
+    printExecutingMethod();
+    super.write(ctx, msg, promise);
+  }
+
+  private void logStacktraceIfRequired() {
+    if (logStacktraceOfEncodingMethods) {
+      LOG.debug("Stacktrace: ", new Throwable());
+    }
+  }
+
+  private void printExecutingMethod() {
+    String methodName = getExecutingMethodName();
+    LOG.debug("Executing method: {}", methodName);
+  }
+
+  private String getExecutingMethodName() {
+    try {
+      StackTraceElement[] stackTrace = Thread.currentThread()
+          .getStackTrace();
+      // Array items (indices):
+      // 0: java.lang.Thread.getStackTrace(...)
+      // 1: TestShuffleHandler$LoggingHttpResponseEncoder
+      //    .getExecutingMethodName(...)
+      String methodName = stackTrace[2].getMethodName();
+      //If this method was called from printExecutingMethod,
+      // we have yet another stack frame
+      if (methodName.endsWith("printExecutingMethod")) {
+        methodName = stackTrace[3].getMethodName();
+      }
+      String className = this.getClass().getSimpleName();
+      return className + "#" + methodName;
+    } catch (Throwable t) {
+      LOG.error("Error while getting execution method name", t);
+      return null;

Review comment:
   We should either stop here and throw a runtime exception, or return a valid string. This is a helper method, so I'd recommend not stopping here; returning a string such as "unknown" or "undefined" would be better, because this null can sneak in somewhere and cause a NullPointerException. Either fail early or don't fail at all; here, the null can be dangerous.
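   A sketch of the second option (the sentinel value is just an example):

       } catch (Throwable t) {
         LOG.error("Error while getting execution method name", t);
         // A sentinel string stays safe to embed in log messages and cannot
         // cause a NullPointerException further downstream.
         return "UNKNOWN";
       }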
##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -920,31 +1002,50 @@ public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt)
         // fetch failure.
         headers.put(RETRY_AFTER_HEADER, String.valueOf(FETCH_RETRY_DELAY));
         sendError(ctx, "", TOO_MANY_REQ_STATUS, headers);
-        return;
+      } else {
+        super.channelActive(ctx);
+        accepted.add(ctx.channel());
+        LOG.debug("Added channel: {}. Accepted number of connections={}",
+            ctx.channel(), acceptedConnections.get());
       }
-      accepted.add(evt.getChannel());
     }
 
     @Override
-    public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
+    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+      LOG.trace("Executing channelInactive");
+      super.channelInactive(ctx);
+      acceptedConnections.decrementAndGet();
+      LOG.debug("New value of Accepted number of connections={}",
+          acceptedConnections.get());
+    }
+
+    @Override
+    public void channelRead(ChannelHandlerContext ctx, Object msg)
        throws Exception {
-      HttpRequest request = (HttpRequest) evt.getMessage();
-      if (request.getMethod() != GET) {
+      LOG.trace("Executing channelRead");

Review comment:
   Some channel identification would be nice here, and in fact in all channel-related messages if possible, so we can determine which channel a given message belongs to.

##########
File path: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
##########
@@ -920,31 +1002,50 @@ public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt)
         // fetch failure.
         headers.put(RETRY_AFTER_HEADER, String.valueOf(FETCH_RETRY_DELAY));
         sendError(ctx, "", TOO_MANY_REQ_STATUS, headers);
-        return;
+      } else {
+        super.channelActive(ctx);
+        accepted.add(ctx.channel());
+        LOG.debug("Added channel: {}. Accepted number of connections={}",
+            ctx.channel(), acceptedConnections.get());
       }
-      accepted.add(evt.getChannel());
     }
 
     @Override
-    public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
+    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+      LOG.trace("Executing channelInactive");
+      super.channelInactive(ctx);
+      acceptedConnections.decrementAndGet();
+      LOG.debug("New value of Accepted number of connections={}",
+          acceptedConnections.get());

Review comment:
   It's just a debug message, but this way we can report inconsistent values, which can lead to confusion. You should save the result of acceptedConnections.decrementAndGet() and log that, instead of reading the value again, since the value might change between the two lines.

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-issues-h...@hadoop.apache.org