[hadoop] branch branch-3.3.5 updated: HADOOP-11245. Update NFS gateway to use Netty4 (#2832) (#4997)

2022-10-10 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3.5
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3.5 by this push:
 new 0d067f69da7 HADOOP-11245. Update NFS gateway to use Netty4 (#2832) 
(#4997)
0d067f69da7 is described below

commit 0d067f69da76efcb90a7bb3a1c003c9498811d24
Author: Ashutosh Gupta 
AuthorDate: Mon Oct 10 22:27:43 2022 +0100

HADOOP-11245. Update NFS gateway to use Netty4 (#2832) (#4997)

Reviewed-by: Tsz-Wo Nicholas Sze 

Co-authored-by: Wei-Chiu Chuang 
(cherry picked from commit 6847ec0647c3063bcbf9cf0315a77e247cae8534)
---
 hadoop-common-project/hadoop-nfs/pom.xml   |   2 +-
 .../java/org/apache/hadoop/mount/MountdBase.java   |  14 ++-
 .../java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java  |   7 +-
 .../apache/hadoop/oncrpc/RegistrationClient.java   |  13 +--
 .../java/org/apache/hadoop/oncrpc/RpcInfo.java |  12 +-
 .../java/org/apache/hadoop/oncrpc/RpcProgram.java  |  19 ++--
 .../java/org/apache/hadoop/oncrpc/RpcResponse.java |  23 ++--
 .../java/org/apache/hadoop/oncrpc/RpcUtil.java | 123 +++-
 .../org/apache/hadoop/oncrpc/SimpleTcpClient.java  |  78 -
 .../hadoop/oncrpc/SimpleTcpClientHandler.java  |  30 ++---
 .../org/apache/hadoop/oncrpc/SimpleTcpServer.java  |  76 +++--
 .../org/apache/hadoop/oncrpc/SimpleUdpServer.java  |  65 +++
 .../main/java/org/apache/hadoop/oncrpc/XDR.java|  12 +-
 .../java/org/apache/hadoop/portmap/Portmap.java| 126 +
 .../apache/hadoop/portmap/RpcProgramPortmap.java   |  46 
 .../org/apache/hadoop/oncrpc/TestFrameDecoder.java | 100 
 .../org/apache/hadoop/portmap/TestPortmap.java |   2 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml|   2 +-
 .../hadoop/hdfs/nfs/mount/RpcProgramMountd.java|  12 +-
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java |  12 +-
 .../apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   |   2 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java   |  14 ++-
 .../org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java  |   2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/WriteManager.java  |   2 +-
 .../hadoop/hdfs/nfs/TestOutOfOrderWrite.java   |  32 +++---
 .../hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java   |   2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/TestWrites.java|   2 +-
 27 files changed, 472 insertions(+), 358 deletions(-)

diff --git a/hadoop-common-project/hadoop-nfs/pom.xml 
b/hadoop-common-project/hadoop-nfs/pom.xml
index cdda01fc640..ca4b97a3f62 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -90,7 +90,7 @@
 
 
   io.netty
-  netty
+  netty-all
   compile
 
 
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 0ff3084bf3e..58d3e51f2bd 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -41,6 +41,8 @@ abstract public class MountdBase {
   private final RpcProgram rpcProgram;
   private int udpBoundPort; // Will set after server starts
   private int tcpBoundPort; // Will set after server starts
+  private SimpleUdpServer udpServer = null;
+  private SimpleTcpServer tcpServer = null;
 
   public RpcProgram getRpcProgram() {
 return rpcProgram;
@@ -57,7 +59,7 @@ abstract public class MountdBase {
 
   /* Start UDP server */
   private void startUDPServer() {
-SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
+udpServer = new SimpleUdpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
 try {
@@ -76,7 +78,7 @@ abstract public class MountdBase {
 
   /* Start TCP server */
   private void startTCPServer() {
-SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
+tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
 try {
@@ -118,6 +120,14 @@ abstract public class MountdBase {
   rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
   tcpBoundPort = 0;
 }
+if (udpServer != null) {
+  udpServer.shutdown();
+  udpServer = null;
+}
+if (tcpServer != null) {
+  tcpServer.shutdown();
+  tcpServer = null;
+}
   }
 
   /**
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index ff83a5f19be..e6ea29b42bf 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ 

[hadoop] branch branch-3.3 updated: HADOOP-11245. Update NFS gateway to use Netty4 (#2832) (#4997)

2022-10-10 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 6847ec0647c HADOOP-11245. Update NFS gateway to use Netty4 (#2832) 
(#4997)
6847ec0647c is described below

commit 6847ec0647c3063bcbf9cf0315a77e247cae8534
Author: Ashutosh Gupta 
AuthorDate: Mon Oct 10 22:27:43 2022 +0100

HADOOP-11245. Update NFS gateway to use Netty4 (#2832) (#4997)

Reviewed-by: Tsz-Wo Nicholas Sze 

Co-authored-by: Wei-Chiu Chuang 
---
 hadoop-common-project/hadoop-nfs/pom.xml   |   2 +-
 .../java/org/apache/hadoop/mount/MountdBase.java   |  14 ++-
 .../java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java  |   7 +-
 .../apache/hadoop/oncrpc/RegistrationClient.java   |  13 +--
 .../java/org/apache/hadoop/oncrpc/RpcInfo.java |  12 +-
 .../java/org/apache/hadoop/oncrpc/RpcProgram.java  |  19 ++--
 .../java/org/apache/hadoop/oncrpc/RpcResponse.java |  23 ++--
 .../java/org/apache/hadoop/oncrpc/RpcUtil.java | 123 +++-
 .../org/apache/hadoop/oncrpc/SimpleTcpClient.java  |  78 -
 .../hadoop/oncrpc/SimpleTcpClientHandler.java  |  30 ++---
 .../org/apache/hadoop/oncrpc/SimpleTcpServer.java  |  76 +++--
 .../org/apache/hadoop/oncrpc/SimpleUdpServer.java  |  65 +++
 .../main/java/org/apache/hadoop/oncrpc/XDR.java|  12 +-
 .../java/org/apache/hadoop/portmap/Portmap.java| 126 +
 .../apache/hadoop/portmap/RpcProgramPortmap.java   |  46 
 .../org/apache/hadoop/oncrpc/TestFrameDecoder.java | 100 
 .../org/apache/hadoop/portmap/TestPortmap.java |   2 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml|   2 +-
 .../hadoop/hdfs/nfs/mount/RpcProgramMountd.java|  12 +-
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java |  12 +-
 .../apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   |   2 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java   |  14 ++-
 .../org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java  |   2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/WriteManager.java  |   2 +-
 .../hadoop/hdfs/nfs/TestOutOfOrderWrite.java   |  32 +++---
 .../hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java   |   2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/TestWrites.java|   2 +-
 27 files changed, 472 insertions(+), 358 deletions(-)

diff --git a/hadoop-common-project/hadoop-nfs/pom.xml 
b/hadoop-common-project/hadoop-nfs/pom.xml
index c0b4e14177c..baabb0fdc6e 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -90,7 +90,7 @@
 
 
   io.netty
-  netty
+  netty-all
   compile
 
 
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 0ff3084bf3e..58d3e51f2bd 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -41,6 +41,8 @@ abstract public class MountdBase {
   private final RpcProgram rpcProgram;
   private int udpBoundPort; // Will set after server starts
   private int tcpBoundPort; // Will set after server starts
+  private SimpleUdpServer udpServer = null;
+  private SimpleTcpServer tcpServer = null;
 
   public RpcProgram getRpcProgram() {
 return rpcProgram;
@@ -57,7 +59,7 @@ abstract public class MountdBase {
 
   /* Start UDP server */
   private void startUDPServer() {
-SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
+udpServer = new SimpleUdpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
 try {
@@ -76,7 +78,7 @@ abstract public class MountdBase {
 
   /* Start TCP server */
   private void startTCPServer() {
-SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
+tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
 try {
@@ -118,6 +120,14 @@ abstract public class MountdBase {
   rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
   tcpBoundPort = 0;
 }
+if (udpServer != null) {
+  udpServer.shutdown();
+  udpServer = null;
+}
+if (tcpServer != null) {
+  tcpServer.shutdown();
+  tcpServer = null;
+}
   }
 
   /**
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index ff83a5f19be..e6ea29b42bf 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -35,6 +35,7 

[hadoop] branch trunk updated: HDFS-16795. Use secure XML parsers (#4979)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4fe079f85fb HDFS-16795. Use secure XML parsers (#4979)
4fe079f85fb is described below

commit 4fe079f85fb3d288f1b53a9f4668ad0025919e50
Author: PJ Fanning 
AuthorDate: Mon Oct 10 18:56:35 2022 +0100

HDFS-16795. Use secure XML parsers (#4979)


Contributed by P J Fanning
---
 .../java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java |  9 +++--
 .../hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java   |  2 +-
 .../tools/offlineImageViewer/OfflineImageReconstructor.java  |  3 +++
 .../java/org/apache/hadoop/hdfs/TestEncryptionZones.java |  4 ++--
 .../tools/offlineImageViewer/TestOfflineImageViewer.java | 12 ++--
 .../offlineImageViewer/TestOfflineImageViewerForAcl.java |  4 +++-
 6 files changed, 18 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java
index fcba618c94a..0d1be4b8e67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.util;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.util.XMLUtils;
+
 import org.w3c.dom.Node;
 import org.w3c.dom.Text;
 import org.w3c.dom.Element;
@@ -87,13 +89,8 @@ public class ECPolicyLoader {
 LOG.info("Loading EC policy file " + policyFile);
 
 // Read and parse the EC policy file.
-DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+DocumentBuilderFactory dbf = XMLUtils.newSecureDocumentBuilderFactory();
 dbf.setIgnoringComments(true);
-dbf.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
-dbf.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
-dbf.setFeature("http://xml.org/sax/features/external-general-entities", false);
-dbf.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
-dbf.setFeature("http://apache.org/xml/features/dom/create-entity-ref-nodes", false);
 DocumentBuilder builder = dbf.newDocumentBuilder();
 Document doc = builder.parse(policyFile);
 Element root = doc.getDocumentElement();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
index ddf7933f032..9fabd1887ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
@@ -60,8 +60,8 @@ public class XmlEditsVisitor implements OfflineEditsVisitor {
   public XmlEditsVisitor(OutputStream out)
   throws IOException {
 this.out = out;
-factory =(SAXTransformerFactory)SAXTransformerFactory.newInstance();
 try {
+  factory = 
org.apache.hadoop.util.XMLUtils.newSecureSAXTransformerFactory();
   TransformerHandler handler = factory.newTransformerHandler();
   handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "xml");
   handler.getTransformer().setOutputProperty(OutputKeys.ENCODING, "UTF-8");
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index 78a7301db04..6a2049acb4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.thirdparty.protobuf.TextFormat;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -147,6 +148,8 @@ class OfflineImageReconstructor {
   InputStreamReader reader) throws XMLStreamException {
 this.out = out;
 XMLInputFactory factory = XMLInputFactory.newInstance();
+

[hadoop] branch trunk updated: YARN-6766. Add helper method in FairSchedulerAppsBlock to print app info. Contributed by Riya Khandelwal

2022-10-10 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0c515b0ef09 YARN-6766. Add helper method in FairSchedulerAppsBlock to 
print app info. Contributed by Riya Khandelwal
0c515b0ef09 is described below

commit 0c515b0ef09b21fcd3e800bf54c1a7920ebf23fc
Author: Szilard Nemeth 
AuthorDate: Mon Oct 10 15:28:27 2022 +0200

YARN-6766. Add helper method in FairSchedulerAppsBlock to print app info. 
Contributed by Riya Khandelwal
---
 .../webapp/FairSchedulerAppsBlock.java | 26 +-
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
index f6202cbcc51..c74e2ae3e1b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
@@ -129,6 +129,12 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
 return true;
   }
 
+  private static String printAppInfo(long value) {
+if (value == -1) {
+  return "N/A";
+}
+return String.valueOf(value);
+  }
 
   @Override public void render(Block html) {
 TBODY> tbody = html.
@@ -193,16 +199,16 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
   .append(appInfo.getFinishTime()).append("\",\"")
   .append(appInfo.getState()).append("\",\"")
   .append(appInfo.getFinalStatus()).append("\",\"")
-  .append(appInfo.getRunningContainers() == -1 ? "N/A" : String
- .valueOf(appInfo.getRunningContainers())).append("\",\"")
-  .append(appInfo.getAllocatedVCores() == -1 ? "N/A" : String
-.valueOf(appInfo.getAllocatedVCores())).append("\",\"")
-  .append(appInfo.getAllocatedMB() == -1 ? "N/A" : String
-.valueOf(appInfo.getAllocatedMB())).append("\",\"")
-  .append(appInfo.getReservedVCores() == -1 ? "N/A" : String
-.valueOf(appInfo.getReservedVCores())).append("\",\"")
-  .append(appInfo.getReservedMB() == -1 ? "N/A" : String
-.valueOf(appInfo.getReservedMB())).append("\",\"")
+  .append(printAppInfo(appInfo.getRunningContainers()))
+  .append("\",\"")
+  .append(printAppInfo(appInfo.getAllocatedVCores()))
+  .append("\",\"")
+  .append(printAppInfo(appInfo.getAllocatedMB()))
+  .append("\",\"")
+  .append(printAppInfo(appInfo.getReservedVCores()))
+  .append("\",\"")
+  .append(printAppInfo(appInfo.getReservedMB()))
+  .append("\",\"")
   // Progress bar
   .append(" 

[hadoop] 02/02: HADOOP-18460. checkIfVectoredIOStopped before populating the buffers (#4986)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3.5
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2aa77a75f9ee59bdff0d03fc1eb65eaab72e353b
Author: Mukund Thakur 
AuthorDate: Mon Oct 10 15:47:45 2022 +0530

HADOOP-18460. checkIfVectoredIOStopped before populating the buffers (#4986)

Contributed by Mukund Thakur
---
 .../org/apache/hadoop/fs/s3a/S3AInputStream.java   | 43 ++
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 39d41f5ffd2..be5b1799b35 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -910,21 +910,15 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
   private void readCombinedRangeAndUpdateChildren(CombinedFileRange 
combinedFileRange,
   IntFunction 
allocate) {
 LOG.debug("Start reading combined range {} from path {} ", 
combinedFileRange, pathStr);
-// This reference is must be kept till all buffers are populated as this 
is a
+// This reference must be kept till all buffers are populated as this is a
 // finalizable object which closes the internal stream when gc triggers.
 S3Object objectRange = null;
 S3ObjectInputStream objectContent = null;
 try {
-  checkIfVectoredIOStopped();
-  final String operationName = "readCombinedFileRange";
-  objectRange = getS3Object(operationName,
+  objectRange = getS3ObjectAndValidateNotNull("readCombinedFileRange",
   combinedFileRange.getOffset(),
   combinedFileRange.getLength());
   objectContent = objectRange.getObjectContent();
-  if (objectContent == null) {
-throw new PathIOException(uri,
-"Null IO stream received during " + operationName);
-  }
   populateChildBuffers(combinedFileRange, objectContent, allocate);
 } catch (Exception ex) {
   LOG.debug("Exception while reading a range {} from path {} ", 
combinedFileRange, pathStr, ex);
@@ -1019,19 +1013,15 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
*/
   private void readSingleRange(FileRange range, ByteBuffer buffer) {
 LOG.debug("Start reading range {} from path {} ", range, pathStr);
+// This reference must be kept till all buffers are populated as this is a
+// finalizable object which closes the internal stream when gc triggers.
 S3Object objectRange = null;
 S3ObjectInputStream objectContent = null;
 try {
-  checkIfVectoredIOStopped();
   long position = range.getOffset();
   int length = range.getLength();
-  final String operationName = "readRange";
-  objectRange = getS3Object(operationName, position, length);
+  objectRange = getS3ObjectAndValidateNotNull("readSingleRange", position, 
length);
   objectContent = objectRange.getObjectContent();
-  if (objectContent == null) {
-throw new PathIOException(uri,
-"Null IO stream received during " + operationName);
-  }
   populateBuffer(length, buffer, objectContent);
   range.getData().complete(buffer);
 } catch (Exception ex) {
@@ -1043,6 +1033,29 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
 LOG.debug("Finished reading range {} from path {} ", range, pathStr);
   }
 
+  /**
+   * Get the s3 object for S3 server for a specified range.
+   * Also checks if the vectored io operation has been stopped before and after
+   * the http get request such that we don't waste time populating the buffers.
+   * @param operationName name of the operation for which get object on S3 is 
called.
+   * @param position position of the object to be read from S3.
+   * @param length length from position of the object to be read from S3.
+   * @return result s3 object.
+   * @throws IOException exception if any.
+   */
+  private S3Object getS3ObjectAndValidateNotNull(final String operationName,
+ final long position,
+ final int length) throws 
IOException {
+checkIfVectoredIOStopped();
+S3Object objectRange = getS3Object(operationName, position, length);
+if (objectRange.getObjectContent() == null) {
+  throw new PathIOException(uri,
+  "Null IO stream received during " + operationName);
+}
+checkIfVectoredIOStopped();
+return objectRange;
+  }
+
   /**
* Populates the buffer with data from objectContent
* till length. Handles both direct and heap byte buffers.



[hadoop] 01/02: HADOOP-18480. Upgrade aws sdk to 1.12.316 (#4972)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3.5
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit cdbfc7abd392da5f05e20c5549fc55450bae366b
Author: Steve Loughran 
AuthorDate: Mon Oct 10 10:23:50 2022 +0100

HADOOP-18480. Upgrade aws sdk to 1.12.316 (#4972)

Contributed by Steve Loughran
---
 LICENSE-binary | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 3ff026a0d6f..ba33689a8fa 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -214,7 +214,7 @@ com.aliyun:aliyun-java-sdk-ecs:4.2.0
 com.aliyun:aliyun-java-sdk-ram:3.0.0
 com.aliyun:aliyun-java-sdk-sts:3.0.0
 com.aliyun.oss:aliyun-sdk-oss:3.13.0
-com.amazonaws:aws-java-sdk-bundle:1.12.262
+com.amazonaws:aws-java-sdk-bundle:1.12.316
 com.cedarsoftware:java-util:1.9.0
 com.cedarsoftware:json-io:2.5.1
 com.fasterxml.jackson.core:jackson-annotations:2.12.7
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8c3a4b940cb..4bb8f9d7c3e 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -184,7 +184,7 @@
 1.3.1
 1.0-beta-1
 900
-1.12.262
+1.12.316
 2.3.4
 1.11.2
 2.1


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3.5 updated (cc5344e80fa -> 2aa77a75f9e)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a change to branch branch-3.3.5
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from cc5344e80fa HADOOP-18468: Upgrade jettison to 1.5.1 to fix 
CVE-2022-40149 (#4937)
 new cdbfc7abd39 HADOOP-18480. Upgrade aws sdk to 1.12.316 (#4972)
 new 2aa77a75f9e HADOOP-18460. checkIfVectoredIOStopped before populating 
the buffers (#4986)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 LICENSE-binary |  2 +-
 hadoop-project/pom.xml |  2 +-
 .../org/apache/hadoop/fs/s3a/S3AInputStream.java   | 43 ++
 3 files changed, 30 insertions(+), 17 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated (e360e7620c9 -> 77cb778a446)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a change to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from e360e7620c9 HADOOP-18468: Upgrade jettison to 1.5.1 to fix 
CVE-2022-40149 (#4937)
 new 80525615e57 HADOOP-18480. Upgrade aws sdk to 1.12.316 (#4972)
 new 77cb778a446 HADOOP-18460. checkIfVectoredIOStopped before populating 
the buffers (#4986)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 LICENSE-binary |  2 +-
 hadoop-project/pom.xml |  2 +-
 .../org/apache/hadoop/fs/s3a/S3AInputStream.java   | 43 ++
 3 files changed, 30 insertions(+), 17 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 02/02: HADOOP-18460. checkIfVectoredIOStopped before populating the buffers (#4986)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 77cb778a4462a2da26663b431e9e179e9f6b3208
Author: Mukund Thakur 
AuthorDate: Mon Oct 10 15:47:45 2022 +0530

HADOOP-18460. checkIfVectoredIOStopped before populating the buffers (#4986)

Contributed by Mukund Thakur
---
 .../org/apache/hadoop/fs/s3a/S3AInputStream.java   | 43 ++
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 39d41f5ffd2..be5b1799b35 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -910,21 +910,15 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
   private void readCombinedRangeAndUpdateChildren(CombinedFileRange 
combinedFileRange,
   IntFunction 
allocate) {
 LOG.debug("Start reading combined range {} from path {} ", 
combinedFileRange, pathStr);
-// This reference is must be kept till all buffers are populated as this 
is a
+// This reference must be kept till all buffers are populated as this is a
 // finalizable object which closes the internal stream when gc triggers.
 S3Object objectRange = null;
 S3ObjectInputStream objectContent = null;
 try {
-  checkIfVectoredIOStopped();
-  final String operationName = "readCombinedFileRange";
-  objectRange = getS3Object(operationName,
+  objectRange = getS3ObjectAndValidateNotNull("readCombinedFileRange",
   combinedFileRange.getOffset(),
   combinedFileRange.getLength());
   objectContent = objectRange.getObjectContent();
-  if (objectContent == null) {
-throw new PathIOException(uri,
-"Null IO stream received during " + operationName);
-  }
   populateChildBuffers(combinedFileRange, objectContent, allocate);
 } catch (Exception ex) {
   LOG.debug("Exception while reading a range {} from path {} ", 
combinedFileRange, pathStr, ex);
@@ -1019,19 +1013,15 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
*/
   private void readSingleRange(FileRange range, ByteBuffer buffer) {
 LOG.debug("Start reading range {} from path {} ", range, pathStr);
+// This reference must be kept till all buffers are populated as this is a
+// finalizable object which closes the internal stream when gc triggers.
 S3Object objectRange = null;
 S3ObjectInputStream objectContent = null;
 try {
-  checkIfVectoredIOStopped();
   long position = range.getOffset();
   int length = range.getLength();
-  final String operationName = "readRange";
-  objectRange = getS3Object(operationName, position, length);
+  objectRange = getS3ObjectAndValidateNotNull("readSingleRange", position, 
length);
   objectContent = objectRange.getObjectContent();
-  if (objectContent == null) {
-throw new PathIOException(uri,
-"Null IO stream received during " + operationName);
-  }
   populateBuffer(length, buffer, objectContent);
   range.getData().complete(buffer);
 } catch (Exception ex) {
@@ -1043,6 +1033,29 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
 LOG.debug("Finished reading range {} from path {} ", range, pathStr);
   }
 
+  /**
+   * Get the s3 object for S3 server for a specified range.
+   * Also checks if the vectored io operation has been stopped before and after
+   * the http get request such that we don't waste time populating the buffers.
+   * @param operationName name of the operation for which get object on S3 is 
called.
+   * @param position position of the object to be read from S3.
+   * @param length length from position of the object to be read from S3.
+   * @return result s3 object.
+   * @throws IOException exception if any.
+   */
+  private S3Object getS3ObjectAndValidateNotNull(final String operationName,
+ final long position,
+ final int length) throws 
IOException {
+checkIfVectoredIOStopped();
+S3Object objectRange = getS3Object(operationName, position, length);
+if (objectRange.getObjectContent() == null) {
+  throw new PathIOException(uri,
+  "Null IO stream received during " + operationName);
+}
+checkIfVectoredIOStopped();
+return objectRange;
+  }
+
   /**
* Populates the buffer with data from objectContent
* till length. Handles both direct and heap byte buffers.



[hadoop] 01/02: HADOOP-18480. Upgrade aws sdk to 1.12.316 (#4972)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 80525615e571d11dffdf14e68e2e1f1b689eeb86
Author: Steve Loughran 
AuthorDate: Mon Oct 10 10:23:50 2022 +0100

HADOOP-18480. Upgrade aws sdk to 1.12.316 (#4972)

Contributed by Steve Loughran
---
 LICENSE-binary | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 3ff026a0d6f..ba33689a8fa 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -214,7 +214,7 @@ com.aliyun:aliyun-java-sdk-ecs:4.2.0
 com.aliyun:aliyun-java-sdk-ram:3.0.0
 com.aliyun:aliyun-java-sdk-sts:3.0.0
 com.aliyun.oss:aliyun-sdk-oss:3.13.0
-com.amazonaws:aws-java-sdk-bundle:1.12.262
+com.amazonaws:aws-java-sdk-bundle:1.12.316
 com.cedarsoftware:java-util:1.9.0
 com.cedarsoftware:json-io:2.5.1
 com.fasterxml.jackson.core:jackson-annotations:2.12.7
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index adc82e4c5bf..8c7111855f5 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -184,7 +184,7 @@
 1.3.1
 1.0-beta-1
 900
-1.12.262
+1.12.316
 2.3.4
 1.11.2
 2.1


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (b0b2cb4a16d -> 62ff4e36cfa)

2022-10-10 Thread hexiaoqiao
This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from b0b2cb4a16d HDFS-16783. Remove the redundant lock in deepCopyReplica 
and getFinalizedBlocks (#4942).  Contributed by ZanderXu.
 add 62ff4e36cfa HDFS-16787. Remove the redundant lock in 
DataSetLockManager#removeLock (#4948). Contributed by ZanderXu.

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java  | 1 -
 1 file changed, 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (be70bbb4be0 -> b0b2cb4a16d)

2022-10-10 Thread hexiaoqiao
This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from be70bbb4be0 HADOOP-18460. checkIfVectoredIOStopped before populating 
the buffers (#4986)
 add b0b2cb4a16d HDFS-16783. Remove the redundant lock in deepCopyReplica 
and getFinalizedBlocks (#4942).  Contributed by ZanderXu.

No new revisions were added by this update.

Summary of changes:
 .../datanode/fsdataset/impl/FsDatasetImpl.java | 39 ++
 1 file changed, 17 insertions(+), 22 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-18460. checkIfVectoredIOStopped before populating the buffers (#4986)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new be70bbb4be0 HADOOP-18460. checkIfVectoredIOStopped before populating 
the buffers (#4986)
be70bbb4be0 is described below

commit be70bbb4be0c112c9032991342da1effcf74e00d
Author: Mukund Thakur 
AuthorDate: Mon Oct 10 15:47:45 2022 +0530

HADOOP-18460. checkIfVectoredIOStopped before populating the buffers (#4986)


Contributed by Mukund Thakur
---
 .../org/apache/hadoop/fs/s3a/S3AInputStream.java   | 43 ++
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 39d41f5ffd2..be5b1799b35 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -910,21 +910,15 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
   private void readCombinedRangeAndUpdateChildren(CombinedFileRange 
combinedFileRange,
   IntFunction 
allocate) {
 LOG.debug("Start reading combined range {} from path {} ", 
combinedFileRange, pathStr);
-// This reference is must be kept till all buffers are populated as this 
is a
+// This reference must be kept till all buffers are populated as this is a
 // finalizable object which closes the internal stream when gc triggers.
 S3Object objectRange = null;
 S3ObjectInputStream objectContent = null;
 try {
-  checkIfVectoredIOStopped();
-  final String operationName = "readCombinedFileRange";
-  objectRange = getS3Object(operationName,
+  objectRange = getS3ObjectAndValidateNotNull("readCombinedFileRange",
   combinedFileRange.getOffset(),
   combinedFileRange.getLength());
   objectContent = objectRange.getObjectContent();
-  if (objectContent == null) {
-throw new PathIOException(uri,
-"Null IO stream received during " + operationName);
-  }
   populateChildBuffers(combinedFileRange, objectContent, allocate);
 } catch (Exception ex) {
   LOG.debug("Exception while reading a range {} from path {} ", 
combinedFileRange, pathStr, ex);
@@ -1019,19 +1013,15 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
*/
   private void readSingleRange(FileRange range, ByteBuffer buffer) {
 LOG.debug("Start reading range {} from path {} ", range, pathStr);
+// This reference must be kept till all buffers are populated as this is a
+// finalizable object which closes the internal stream when gc triggers.
 S3Object objectRange = null;
 S3ObjectInputStream objectContent = null;
 try {
-  checkIfVectoredIOStopped();
   long position = range.getOffset();
   int length = range.getLength();
-  final String operationName = "readRange";
-  objectRange = getS3Object(operationName, position, length);
+  objectRange = getS3ObjectAndValidateNotNull("readSingleRange", position, 
length);
   objectContent = objectRange.getObjectContent();
-  if (objectContent == null) {
-throw new PathIOException(uri,
-"Null IO stream received during " + operationName);
-  }
   populateBuffer(length, buffer, objectContent);
   range.getData().complete(buffer);
 } catch (Exception ex) {
@@ -1043,6 +1033,29 @@ public class S3AInputStream extends FSInputStream 
implements  CanSetReadahead,
 LOG.debug("Finished reading range {} from path {} ", range, pathStr);
   }
 
+  /**
+   * Get the s3 object for S3 server for a specified range.
+   * Also checks if the vectored io operation has been stopped before and after
+   * the http get request such that we don't waste time populating the buffers.
+   * @param operationName name of the operation for which get object on S3 is 
called.
+   * @param position position of the object to be read from S3.
+   * @param length length from position of the object to be read from S3.
+   * @return result s3 object.
+   * @throws IOException exception if any.
+   */
+  private S3Object getS3ObjectAndValidateNotNull(final String operationName,
+ final long position,
+ final int length) throws 
IOException {
+checkIfVectoredIOStopped();
+S3Object objectRange = getS3Object(operationName, position, length);
+if (objectRange.getObjectContent() == null) {
+  throw new PathIOException(uri,
+  "Null IO stream received during " + operationName);
+}
+checkIfVectoredIOStopped();

[hadoop] branch trunk updated (9a7d0e7ed0f -> 540a660429b)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 9a7d0e7ed0f YARN-11260. Upgrade JUnit from 4 to 5 in 
hadoop-yarn-server-timelineservice (#4775)
 add 540a660429b HADOOP-18480. Upgrade aws sdk to 1.12.316 (#4972)

No new revisions were added by this update.

Summary of changes:
 LICENSE-binary | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 02/02: HADOOP-18468: Upgrade jettison to 1.5.1 to fix CVE-2022-40149 (#4937)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3.5
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit cc5344e80faaeb0f3a408c4950d462ea03d4959b
Author: Steve Loughran 
AuthorDate: Mon Oct 10 10:05:39 2022 +0100

HADOOP-18468: Upgrade jettison to 1.5.1 to fix CVE-2022-40149 (#4937)


Contributed by PJ Fanning
---
 LICENSE-binary | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index ff72d373475..3ff026a0d6f 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -351,7 +351,7 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
 org.codehaus.jackson:jackson-jaxrs:1.9.13
 org.codehaus.jackson:jackson-mapper-asl:1.9.13
 org.codehaus.jackson:jackson-xc:1.9.13
-org.codehaus.jettison:jettison:1.1
+org.codehaus.jettison:jettison:1.5.1
 org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
 org.eclipse.jetty:jetty-http:9.4.48.v20220622
 org.eclipse.jetty:jetty-io:9.4.48.v20220622
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b2fd463d0e8..8c3a4b940cb 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1514,7 +1514,7 @@
   
 org.codehaus.jettison
 jettison
-1.1
+1.5.1
 
   
 stax


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3.5 updated (8e8bc037aa6 -> cc5344e80fa)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a change to branch branch-3.3.5
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 8e8bc037aa6 HADOOP-18442. Remove openstack support (#4855)
 new 2a4d421c5c3 HADOOP-18401. No ARM binaries in branch-3.3.x releases. 
(#4953)
 new cc5344e80fa HADOOP-18468: Upgrade jettison to 1.5.1 to fix 
CVE-2022-40149 (#4937)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 LICENSE-binary |  2 +-
 dev-support/bin/create-release |  5 +++--
 dev-support/docker/Dockerfile_aarch64  | 23 --
 .../dev-support/jdiff-workaround.patch |  6 +++---
 hadoop-project/pom.xml |  2 +-
 5 files changed, 21 insertions(+), 17 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/02: HADOOP-18401. No ARM binaries in branch-3.3.x releases. (#4953)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3.5
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2a4d421c5c3192a9b542ad576c60533e0d0a2ba7
Author: Steve Loughran 
AuthorDate: Fri Oct 7 15:58:51 2022 +0100

HADOOP-18401. No ARM binaries in branch-3.3.x releases. (#4953)


Fix the branch-3.3 docker image and create-release scripts to work on arm 
64 and macbook m1

Contributed by Ayush Saxena and Steve Loughran
---
 dev-support/bin/create-release |  5 +++--
 dev-support/docker/Dockerfile_aarch64  | 23 --
 .../dev-support/jdiff-workaround.patch |  6 +++---
 3 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 19f17cb3666..fc0602ab186 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -205,7 +205,8 @@ function set_defaults
   DOCKERRAN=false
 
   CPU_ARCH=$(echo "$MACHTYPE" | cut -d- -f1)
-  if [ "$CPU_ARCH" = "aarch64" ]; then
+  if [[ "$CPU_ARCH" = "aarch64"  || "$CPU_ARCH" = "arm64" ]]; then
+echo "Using aarch64 docker file"
 DOCKERFILE="${BASEDIR}/dev-support/docker/Dockerfile_aarch64"
   fi
 
@@ -513,7 +514,7 @@ function dockermode
 
 # we always force build with the OpenJDK JDK
 # but with the correct version
-if [ "$CPU_ARCH" = "aarch64" ]; then
+if [[ "$CPU_ARCH" = "aarch64" || "$CPU_ARCH" = "arm64" ]]; then
   echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-arm64"
 else
   echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-amd64"
diff --git a/dev-support/docker/Dockerfile_aarch64 
b/dev-support/docker/Dockerfile_aarch64
index 80e813bb316..bd6e7acc463 100644
--- a/dev-support/docker/Dockerfile_aarch64
+++ b/dev-support/docker/Dockerfile_aarch64
@@ -17,7 +17,7 @@
 # Dockerfile for installing the necessary dependencies for building Hadoop.
 # See BUILDING.txt.
 
-FROM ubuntu:bionic
+FROM ubuntu:focal
 
 WORKDIR /root
 
@@ -53,6 +53,7 @@ RUN apt-get -q update \
 gcc \
 git \
 gnupg-agent \
+hugo \
 libbcprov-java \
 libbz2-dev \
 libcurl4-openssl-dev \
@@ -63,10 +64,13 @@ RUN apt-get -q update \
 libsnappy-dev \
 libssl-dev \
 libtool \
-libzstd1-dev \
+libzstd-dev \
 locales \
 make \
 maven \
+nodejs \
+node-yarn \
+npm \
 openjdk-11-jdk \
 openjdk-8-jdk \
 phantomjs \
@@ -74,7 +78,6 @@ RUN apt-get -q update \
 pkg-config \
 python2.7 \
 python3 \
-python3-dev \
 python3-pip \
 python3-pkg-resources \
 python3-setuptools \
@@ -110,7 +113,7 @@ RUN mkdir -p /opt/spotbugs \
 ENV SPOTBUGS_HOME /opt/spotbugs
 
 ##
-# Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic)
+# Install Google Protobuf 3.7.1 (3.6.1 ships with Focal)
 ##
 # hadolint ignore=DL3003
 RUN mkdir -p /opt/protobuf-src \
@@ -132,6 +135,12 @@ ENV PATH "${PATH}:/opt/protobuf/bin"
 
 RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1
 
+
+# Install bower
+
+# hadolint ignore=DL3008
+RUN npm install -g bower@1.8.8
+
 ###
 # Avoid out of memory errors in builds
 ###
@@ -149,12 +158,6 @@ ENV QT_QPA_PLATFORM offscreen
 # YETUS CUT HERE
 ###
 
-# Hugo static website generator (for new hadoop site docs)
-RUN curl -L -o hugo.deb 
https://github.com/gohugoio/hugo/releases/download/v0.58.3/hugo_0.58.3_Linux-ARM64.deb
 \
-&& dpkg --install hugo.deb \
-&& rm hugo.deb
-
-
 # Add a welcome message and environment checks.
 COPY hadoop_env_checks.sh /root/hadoop_env_checks.sh
 RUN chmod 755 /root/hadoop_env_checks.sh
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch 
b/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
index 2bd7b63f017..26eb53a990b 100644
--- a/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
+++ b/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
@@ -1,5 +1,5 @@
 diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
-index a277abd6e13..1d131d5db6e 100644
+index fef8c4b7e4b..8d2b9339706 100644
 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
 +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
 @@ -42,18 +42,6 @@
@@ -14,7 +14,7 @@ index a277abd6e13..1d131d5db6e 100644
 -   *  the annotations of the source object.)
 -   * @param desc  the description of the source (or null. See above.)
 -   * @return the source object
--   * @exception MetricsException
+-   * @exception 

[hadoop] branch branch-3.3 updated: HADOOP-18468: Upgrade jettison to 1.5.1 to fix CVE-2022-40149 (#4937)

2022-10-10 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new e360e7620c9 HADOOP-18468: Upgrade jettison to 1.5.1 to fix 
CVE-2022-40149 (#4937)
e360e7620c9 is described below

commit e360e7620c9a08c26b11237535f12904dc42762d
Author: Steve Loughran 
AuthorDate: Mon Oct 10 10:05:39 2022 +0100

HADOOP-18468: Upgrade jettison to 1.5.1 to fix CVE-2022-40149 (#4937)


Contributed by PJ Fanning
---
 LICENSE-binary | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index ff72d373475..3ff026a0d6f 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -351,7 +351,7 @@ org.codehaus.jackson:jackson-core-asl:1.9.13
 org.codehaus.jackson:jackson-jaxrs:1.9.13
 org.codehaus.jackson:jackson-mapper-asl:1.9.13
 org.codehaus.jackson:jackson-xc:1.9.13
-org.codehaus.jettison:jettison:1.1
+org.codehaus.jettison:jettison:1.5.1
 org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
 org.eclipse.jetty:jetty-http:9.4.48.v20220622
 org.eclipse.jetty:jetty-io:9.4.48.v20220622
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index feccfb12e5d..adc82e4c5bf 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1514,7 +1514,7 @@
   
 org.codehaus.jettison
 jettison
-1.1
+1.5.1
 
   
 stax


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org