szilard-nemeth commented on a change in pull request #3259:
URL: https://github.com/apache/hadoop/pull/3259#discussion_r684216985



##########
File path: 
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
##########
@@ -106,10 +129,584 @@
       LoggerFactory.getLogger(TestShuffleHandler.class);
   private static final File ABS_LOG_DIR = GenericTestUtils.getTestDir(
       TestShuffleHandler.class.getSimpleName() + "LocDir");
+  private static final long ATTEMPT_ID = 12345L;
+  private static final long ATTEMPT_ID_2 = 12346L;
+  
+
+  //Control test execution properties with these flags
+  private static final boolean DEBUG_MODE = false;
+  //WARNING: If this is set to true and proxy server is not running, tests will fail!
+  private static final boolean USE_PROXY = false;
+  private static final int HEADER_WRITE_COUNT = 100000;
+  private static TestExecution TEST_EXECUTION;
+
+  private static class TestExecution {
+    private static final int DEFAULT_KEEP_ALIVE_TIMEOUT = -100;
+    private static final int DEBUG_FRIENDLY_KEEP_ALIVE = 1000;
+    private static final int DEFAULT_PORT = 0; //random port
+    private static final int FIXED_PORT = 8088;
+    private static final String PROXY_HOST = "127.0.0.1";
+    private static final int PROXY_PORT = 8888;
+    private final boolean debugMode;
+    private final boolean useProxy;
+
+    public TestExecution(boolean debugMode, boolean useProxy) {
+      this.debugMode = debugMode;
+      this.useProxy = useProxy;
+    }
+
+    int getKeepAliveTimeout() {
+      if (debugMode) {
+        return DEBUG_FRIENDLY_KEEP_ALIVE;
+      }
+      return DEFAULT_KEEP_ALIVE_TIMEOUT;
+    }
+    
+    HttpURLConnection openConnection(URL url) throws IOException {
+      HttpURLConnection conn;
+      if (useProxy) {
+        Proxy proxy
+            = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(PROXY_HOST, PROXY_PORT));
+        conn = (HttpURLConnection) url.openConnection(proxy);
+      } else {
+        conn = (HttpURLConnection) url.openConnection();
+      }
+      return conn;
+    }
+    
+    int shuffleHandlerPort() {
+      if (debugMode) {
+        return FIXED_PORT;
+      } else {
+        return DEFAULT_PORT;
+      }
+    }
+    
+    void parameterizeConnection(URLConnection conn) {
+      if (debugMode) {
+        conn.setReadTimeout(1000000);
+        conn.setConnectTimeout(1000000);
+      }
+    }
+  }
+  
+  private static class ResponseConfig {
+    private static final int ONE_HEADER_DISPLACEMENT = 1;
+    
+    private final int headerWriteCount;
+    private final long actualHeaderWriteCount;
+    private final int mapOutputCount;
+    private final int contentLengthOfOneMapOutput;
+    private long headerSize;
+    public long contentLengthOfResponse;
+
+    public ResponseConfig(int headerWriteCount, int mapOutputCount, int contentLengthOfOneMapOutput) {
+      if (mapOutputCount <= 0 && contentLengthOfOneMapOutput > 0) {
+        throw new IllegalStateException("mapOutputCount should be at least 1");
+      }
+      this.headerWriteCount = headerWriteCount;
+      this.mapOutputCount = mapOutputCount;
+      this.contentLengthOfOneMapOutput = contentLengthOfOneMapOutput;
+      //MapOutputSender#send will send header N + 1 times
+      //So, (N + 1) * headerSize should be the Content-length header + the expected Content-length as well
+      this.actualHeaderWriteCount = headerWriteCount + ONE_HEADER_DISPLACEMENT;

Review comment:
       Modified the code to send the header only N times.
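
       For context, a minimal standalone sketch of the arithmetic behind this reply, assuming the header is now written exactly N times instead of N + 1. The field and constant names mirror the quoted test code; the header size value is an arbitrary assumption, and this is an illustration only, not the committed patch.

           // Standalone illustration (not part of the patch): shows how the expected
           // content length of the header portion changes once the header is written
           // exactly N times instead of N + 1. headerSize is an arbitrary assumption.
           public class HeaderWriteCountSketch {
             private static final int ONE_HEADER_DISPLACEMENT = 1;

             public static void main(String[] args) {
               int headerWriteCount = 100000; // mirrors HEADER_WRITE_COUNT in the test
               long headerSize = 30;          // assumed size of one serialized header

               // Old behaviour: MapOutputSender#send wrote the header N + 1 times.
               long oldHeaderBytes =
                   (headerWriteCount + ONE_HEADER_DISPLACEMENT) * headerSize;
               // Behaviour after the change described in this reply: exactly N writes.
               long newHeaderBytes = (long) headerWriteCount * headerSize;

               // The expected Content-Length of the headers shrinks by one headerSize.
               System.out.println("old=" + oldHeaderBytes + ", new=" + newHeaderBytes
                   + ", difference=" + (oldHeaderBytes - newHeaderBytes));
             }
           }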

##########
File path: 
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
##########
@@ -106,10 +129,584 @@
       LoggerFactory.getLogger(TestShuffleHandler.class);
   private static final File ABS_LOG_DIR = GenericTestUtils.getTestDir(
       TestShuffleHandler.class.getSimpleName() + "LocDir");
+  private static final long ATTEMPT_ID = 12345L;
+  private static final long ATTEMPT_ID_2 = 12346L;
+  
+
+  //Control test execution properties with these flags
+  private static final boolean DEBUG_MODE = false;
+  //WARNING: If this is set to true and proxy server is not running, tests will fail!
+  private static final boolean USE_PROXY = false;
+  private static final int HEADER_WRITE_COUNT = 100000;
+  private static TestExecution TEST_EXECUTION;
+
+  private static class TestExecution {
+    private static final int DEFAULT_KEEP_ALIVE_TIMEOUT = -100;
+    private static final int DEBUG_FRIENDLY_KEEP_ALIVE = 1000;
+    private static final int DEFAULT_PORT = 0; //random port
+    private static final int FIXED_PORT = 8088;
+    private static final String PROXY_HOST = "127.0.0.1";
+    private static final int PROXY_PORT = 8888;
+    private final boolean debugMode;
+    private final boolean useProxy;
+
+    public TestExecution(boolean debugMode, boolean useProxy) {
+      this.debugMode = debugMode;
+      this.useProxy = useProxy;
+    }
+
+    int getKeepAliveTimeout() {
+      if (debugMode) {
+        return DEBUG_FRIENDLY_KEEP_ALIVE;
+      }
+      return DEFAULT_KEEP_ALIVE_TIMEOUT;
+    }
+    
+    HttpURLConnection openConnection(URL url) throws IOException {
+      HttpURLConnection conn;
+      if (useProxy) {
+        Proxy proxy
+            = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(PROXY_HOST, PROXY_PORT));
+        conn = (HttpURLConnection) url.openConnection(proxy);
+      } else {
+        conn = (HttpURLConnection) url.openConnection();
+      }
+      return conn;
+    }
+    
+    int shuffleHandlerPort() {
+      if (debugMode) {
+        return FIXED_PORT;
+      } else {
+        return DEFAULT_PORT;
+      }
+    }
+    
+    void parameterizeConnection(URLConnection conn) {
+      if (debugMode) {
+        conn.setReadTimeout(1000000);
+        conn.setConnectTimeout(1000000);
+      }
+    }
+  }
+  
+  private static class ResponseConfig {
+    private static final int ONE_HEADER_DISPLACEMENT = 1;
+    
+    private final int headerWriteCount;
+    private final long actualHeaderWriteCount;
+    private final int mapOutputCount;
+    private final int contentLengthOfOneMapOutput;
+    private long headerSize;
+    public long contentLengthOfResponse;
+
+    public ResponseConfig(int headerWriteCount, int mapOutputCount, int contentLengthOfOneMapOutput) {
+      if (mapOutputCount <= 0 && contentLengthOfOneMapOutput > 0) {
+        throw new IllegalStateException("mapOutputCount should be at least 1");
+      }
+      this.headerWriteCount = headerWriteCount;
+      this.mapOutputCount = mapOutputCount;
+      this.contentLengthOfOneMapOutput = contentLengthOfOneMapOutput;
+      //MapOutputSender#send will send header N + 1 times
+      //So, (N + 1) * headerSize should be the Content-length header + the expected Content-length as well
+      this.actualHeaderWriteCount = headerWriteCount + ONE_HEADER_DISPLACEMENT;
+    }
+
+    private void setHeaderSize(long headerSize) {
+      this.headerSize = headerSize;
+      long contentLengthOfAllHeaders = actualHeaderWriteCount * headerSize;
+      this.contentLengthOfResponse = computeContentLengthOfResponse(contentLengthOfAllHeaders);
+      LOG.debug("Content-length of all headers: {}", contentLengthOfAllHeaders);
+      LOG.debug("Content-length of one MapOutput: {}", contentLengthOfOneMapOutput);
+      LOG.debug("Content-length of final HTTP response: {}", contentLengthOfResponse);
+    }
+
+    private long computeContentLengthOfResponse(long contentLengthOfAllHeaders) {
+      int mapOutputCountMultiplier = mapOutputCount;
+      if (mapOutputCount == 0) {
+        mapOutputCountMultiplier = 1;
+      }

Review comment:
       Modified the code to send the header only N times.
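
       The quoted hunk cuts off before the end of computeContentLengthOfResponse, so here is a minimal standalone sketch of how the multiplier guard could combine with the other fields into an expected response length. Only the guard on mapOutputCount mirrors the diff above; the combining formula (one block of headers plus one payload per map output, with at least one header block even when there is no map output) is an assumption made for illustration, not the committed code.

           // Standalone illustration (not the committed code): models how the expected
           // Content-Length of the whole response could be assembled. The combining
           // formula is an assumption; only the mapOutputCount guard mirrors the diff.
           public class ResponseLengthSketch {
             static long contentLengthOfResponse(long contentLengthOfAllHeaders,
                 int mapOutputCount, long contentLengthOfOneMapOutput) {
               int mapOutputCountMultiplier = mapOutputCount;
               if (mapOutputCount == 0) {
                 // A header-only response still carries one block of headers, so the
                 // multiplier must not collapse the expected length to zero.
                 mapOutputCountMultiplier = 1;
               }
               // Assumed combination: one header block plus one payload per map output.
               return (contentLengthOfAllHeaders + contentLengthOfOneMapOutput)
                   * mapOutputCountMultiplier;
             }

             public static void main(String[] args) {
               long headerSize = 30;                           // arbitrary assumed size
               long headersPerMapOutput = 100000 * headerSize; // N header writes
               System.out.println("two map outputs: "
                   + contentLengthOfResponse(headersPerMapOutput, 2, 1000));
               System.out.println("header-only response: "
                   + contentLengthOfResponse(headersPerMapOutput, 0, 0));
             }
           }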




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
