This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/hadoop-release-support.git

commit 9f8b73566011f2b79ccc53ec44b46ebfc1c0dbce
Author: Steve Loughran <ste...@cloudera.com>
AuthorDate: Tue Mar 14 14:25:39 2023 +0000

    Hadoop 3.3.5 RC3 preparation
---
 README.md | 169 +++++++++++++++++++++++++++++++++++++++-----------------------
 build.xml | 102 ++++++++++++++++++++++++++++++++++---
 pom.xml   |   1 +
 3 files changed, 204 insertions(+), 68 deletions(-)

diff --git a/README.md b/README.md
index 82e92a8..1e47d66 100644
--- a/README.md
+++ b/README.md
@@ -5,51 +5,29 @@ This project helps validate hadoop release candidates
 It has an ant `build.xml` file to help with preparing the release,
 validating gpg signatures, creating release messages and other things.
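+
+To list the available targets and their descriptions, use ant's
+built-in project help (standard ant behaviour):
+
+```bash
+ant -p
+```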
 
-# ant builds
-
-see below
-
-# maven builds
-
-To build and test with the client API:
-
-```bash
-mvn clean test 
-```
-
-Compilation verifies the API is present; the
-test verifies that some shaded artifacts are present.
-
-If the hadoop artifacts are in staging/snapshot repositories,
-use the `staging` profile
-
-```bash
-mvn clean test -Pstaging
-```
-
-To force an update
-
-```bash
-mvn clean test -Pstaging -U
-```
-
-To purge all artifacts of the chosen hadoop version from your local maven repository.
-
-```bash
-ant purge
-```
-
 # workflow for preparing an RC
 
-Build the RC using the docker process on whichever host is set to do it
+Build the RC using the docker process on whichever host is set to do it.
 
-### set up build.properties
+### set up `build.properties`
 
 ```properties
+hadoop.version=3.3.5
+# RC for emails, staging dir names
+rc=0
+
+# id of commit built; used for email
+git.commit.id=3262495904d
+
+# info for copying down the RC from the build host
 scp.hostname=stevel-ubuntu
 scp.user=stevel
 scp.hadoop.dir=hadoop
+
+# SVN managed staging dir
 staging.dir=/Users/stevel/hadoop/release/staging
+
+# where various modules live for build and test
 spark.dir=/Users/stevel/Projects/sparkwork/spark
 cloud-examples.dir=/Users/stevel/Projects/sparkwork/cloud-integration/cloud-examples
 cloud.test.configuration.file=/Users/stevel/Projects/config/cloud-test-configs/s3a.xml
@@ -57,9 +35,7 @@ bigdata-interop.dir=/Users/stevel/Projects/gcs/bigdata-interop
 hboss.dir=/Users/stevel/Projects/hbasework/hbase-filesystem
 cloudstore.dir=/Users/stevel/Projects/cloudstore
 fs-api-shim.dir=/Users/stevel/Projects/Formats/fs-api-shim/
-hadoop.version=3.3.5
-git.commit.id=3262495904d
-rc=0
+
 ```
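+
+Any of these properties can be overridden for a single run with ant's
+standard `-D` command-line options, for example (illustrative values):
+
+```bash
+ant release.fetch.http -Dhadoop.version=3.3.5 -Drc=1
+```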
 
 ### Clean up first
@@ -97,24 +73,35 @@ The `release.dir.check` target just lists the directory.
 
 If arm64 binaries are being created then they must be
 built on an arm docker image.
-Do not do this at the same time as building the x86 binaries
-because both builds will generate staging repositories on
-nexus. Instead: run the arm one first and drop its artifacts
-on nexus before doing the x86 one. That will ensure that
-it is the JAR files created on the x86 build are the ones
-publised on maven.
+Do not use the `--asfrelease` option, as this stages the JARs.
+Instead, use the explicit `--deploy --native --sign` options.
 
 The arm process is:
 1. Create the full set of artifacts on an arm machine (macbook, cloud vm, ...)
-2. Drop any mvn repository from nexus
-3. Use the ant build to copy and rename the .tar.gz with the native binaries only
-4. Create a new .asc file. This is needed is without the `--asfrelease` option no signing takes place.
-5. Generate new sha512 checksum file containing the new name.
-6. Move these files into the `downloads/release/$RC` dir
+1. Use the ant build to copy and rename the `.tar.gz` with the native binaries only
+1. Create a new `.asc` file.
+1. Generate new sha512 checksum file containing the new name.
+1. Move these files into the `downloads/release/$RC` dir
 
-To perform stages 3-6:
 ```bash
-ant arm.copy.artifacts arm.release
+time dev-support/bin/create-release --docker --dockercache --mvnargs="-Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false" --deploy --native --sign
+```
+
+To perform these stages, you need a clean checkout of the same hadoop
+commit ID as was used for the x86 release.
+
+In `build.properties`, declare its location:
+
+```properties
+arm.hadoop.dir=/Users/stevel/hadoop/release/hadoop
+```
+```bash
+# create the release (slow)
+ant arm.create.release
+# copy the artifacts to this project's target/ dir, renaming
+ant arm.copy.artifacts
+# sign artifacts then move to the shared RC dir alongside the x86 artifacts
+ant arm.release
 ```
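+
+As a sanity check, the signature on the renamed arm artifact can be
+verified directly (filename illustrative for 3.3.5):
+
+```bash
+gpg --verify downloads/release/$RC/hadoop-arm64-3.3.5.tar.gz.asc
+```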
 
 
@@ -152,19 +139,20 @@ directly.
 
 Can take a while...exit any VPN for extra speed.
 
-#### Manual
+
 ```bash
-cd $staging-dir
+ant stage-to-svn
+```
+
+Or manually:
+```bash
+cd $stagingdir
 svn update
 svn add <RC directory name>
 svn commit
 ```
 
-#### Ant
 
-```bash
-ant stage-to-svn
-```
 
 
 
@@ -172,6 +160,9 @@ ant stage-to-svn
 
 This isn't automated as it needs to be done in the source tree.
 
+The ant `print-tag-command` target prints the command needed to create
+and sign a tag.
+
 ```bash
 ant print-tag-command
 ```
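+
+The printed command is a signed-tag operation of the general form below;
+this is illustrative only, use the exact command the target prints:
+
+```bash
+git tag -s release-3.3.5-RC3 -m "Hadoop 3.3.5-RC3 release candidate"
+git push origin release-3.3.5-RC3
+```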
@@ -206,11 +197,11 @@ In build properties, declare `hadoop.version`, `rc` and `http.source`
 
 ```properties
 hadoop.version=3.3.5
-rc=1
+rc=2
 http.source=https://dist.apache.org/repos/dist/dev/hadoop/hadoop-${hadoop.version}-RC${rc}/
 ```
 
-targets of relevance
+### Targets of Relevance
 
 | target               | action                     |
 |----------------------|----------------------------|
@@ -223,15 +214,16 @@ targets of relevance
 | `gpg.verify`         | verify the D/L'd artifacts |
 |                      |                            |
 
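+
+Targets can be chained in a single invocation; ant runs them in the
+order listed, for example:
+
+```bash
+ant release.fetch.http gpg.verify
+```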
-set `release.native.binaries` to false to skip native binary checks on platforms without them
+Set `check.native.binaries` to false to skip native binary checks on platforms without them.
 
 ### Download the RC files from the http server
 
+Downloads the RC files under `downloads/incoming`:
 ```bash
 ant release.fetch.http
 ```
 
-### untar and build.
+### untar source and build.
 
 This puts the built artifacts into the local maven repo so
 do not do this while building/testing downstream projects
@@ -241,6 +233,36 @@ do not do this while building/testing downstream projects
 ant release.src.untar release.src.build
 ```
 
+
+### untar site.
+
+```bash
+ant release.site.untar
+```
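+
+The expanded site can then be reviewed in a browser; python's built-in
+web server, run from the directory the target reports, is a simple way
+to do this:
+
+```bash
+python3 -m http.server 8000
+```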
+
+
+### untar binary release
+
+Untars the (already downloaded) binary tar to `target/bin-untar`
+
+```bash
+ant release.bin.untar
+```
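+
+A quick smoke test of the expanded distribution (path illustrative for
+version 3.3.5):
+
+```bash
+target/bin-untar/hadoop-3.3.5/bin/hadoop version
+```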
+
+Once expanded, the binary commands can be tested:
+
+```bash
+ant release.bin.commands
+```
+
+This will fail on a platform where the native binaries don't load; to skip the native binary checks:
+
+```bash
+ant release.bin.commands -Dcheck.native.binaries=false
+```
+
 # Building and testing projects from the staged maven artifacts
 
 A lot of the targets build maven projects from the staged maven artifacts.
@@ -441,3 +463,26 @@ For some unknown reason the parquet build doesn't seem to cope.
 </profile>
 
 ```
+
+### Rolling back an RC
+
+Drop the staged artifacts from nexus:
+[https://repository.apache.org/#stagingRepositories](https://repository.apache.org/#stagingRepositories)
+
+Delete the tag. Print out the delete command and then copy/paste it into a terminal in the hadoop repo:
+
+```bash
+ant print-tag-command
+```
+
+Remove downloaded files and maven artifacts:
+
+```bash
+ant clean purge-from-maven
+```
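+
+To confirm the purge, check that the version directory is gone from the
+local repository (path illustrative, for the hadoop-common artifact):
+
+```bash
+ls ~/.m2/repository/org/apache/hadoop/hadoop-common/3.3.5
+# expected to fail: "No such file or directory"
+```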
+
+Removing staged tar files is not yet automated (see the sketch after this list):
+
+1. Go to the svn staging dir
+2. `svn rm` the RC subdir
+3. `svn commit -m "rollback RC"`
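+
+A sketch of those steps (RC directory name illustrative):
+
+```bash
+cd $stagingdir
+svn rm hadoop-3.3.5-RC3
+svn commit -m "rollback RC"
+```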
diff --git a/build.xml b/build.xml
index 657f46f..12af262 100644
--- a/build.xml
+++ b/build.xml
@@ -84,11 +84,13 @@
   <property name="release.site.dir" location="${release.untar.dir}/site"/>
   <property name="site.dir" location="${release.untar.dir}/site/r${hadoop.version}"/>
   <property name="release.bin.dir" location="${release.untar.dir}/bin"/>
-  <property name="release.native.binaries" value="true"/>
+  <property name="check.native.binaries" value="true"/>
   <property name="arm.artifact.dir" location="${arm.hadoop.dir}/target/artifacts/" />
   <property name="arm.dir" location="${downloads.dir}/arm" />
   <property name="arm.binary.src" location="${arm.artifact.dir}/hadoop-${hadoop.version}.tar.gz" />
-  <property name="arm.binary" location="${arm.dir}/hadoop-arm64-${hadoop.version}.tar.gz" />
+  <property name="arm.binary.prefix" value="hadoop-arm64-${hadoop.version}" />
+  <property name="arm.binary.filename" value="${arm.binary.prefix}.tar.gz" />
+  <property name="arm.binary" location="${arm.dir}/${arm.binary.filename}" />
   <property name="arm.binary.sha512" location="${arm.binary}.sha512" />
   <property name="arm.binary.asc" location="${arm.binary}.asc" />
   <property name="staging.commit.msg" value="${jira.id}. Hadoop ${rc.name} built from ${git.commit.id}" />
@@ -289,6 +291,7 @@
     description="verify the downloaded artifacts">
 
 
+    <echo>Verifying GPG signatures of artifacts in ${release.dir}</echo>
     <gpgv>
       <arg value="--verify"/>
       <arg value="${release}-src.tar.gz.asc"/>
@@ -317,6 +320,11 @@
       <arg value="CHANGELOG.md.asc"/>
     </gpgv>
 
+    <gpgv>
+      <arg value="--verify"/>
+      <arg value="${arm.binary.filename}.asc"/>
+    </gpgv>
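+    <!--
+      Note: gpg verification requires the release manager signing keys
+      to be in the local keyring; import the hadoop KEYS file first if needed.
+    -->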
+
 
   </target>
 
@@ -689,15 +697,17 @@ Message is in file ${message.out}
 
     <gunzip src="${release.dir}/${release}-site.tar.gz" dest="target/untar"/>
     <untar src="target/untar/${release}-site.tar" dest="${release.site.dir}" />
+    <echo>site is under ${release.site.dir}</echo>
   </target>
 
   <target name="release.src.untar" depends="release.dir.check"
     description="untar the release source">
-    <echo>untarring source ${release.dir}/${release}-src.tar.gz</echo>
+    <echo>untarring source ${release.dir}/${release}-src.tar.gz to ${release.source.dir}</echo>
     <mkdir dir="target/untar"/>
 
     <gunzip src="${release.dir}/${release}-src.tar.gz" dest="target/untar"/>
     <untar src="target/untar/${release}-src.tar" dest="${release.source.dir}" />
+    <echo>Release source expanded to "${release.source.dir}/${release}-src"</echo>
   </target>
 
   <target name="release.src.build" depends="init"
@@ -718,7 +728,7 @@ Message is in file ${message.out}
   </target>
 
   <target name="release.bin.untar" depends="release.dir.check"
-    description="untar the binary release">
+    description="untar the x86 binary release">
 
     <mkdir dir="target/bin-untar" />
     <gunzip src="${release.dir}/${release}.tar.gz" dest="target/bin-untar"/>
@@ -729,7 +739,7 @@ Message is in file ${message.out}
       <arg value="${release}.tar" />
     </x>
     <echo>
-      Binary release expanded into target/untar/${release}
+      Binary release expanded into target/bin-untar/${release}
     </echo>
   </target>
 
@@ -767,12 +777,70 @@ Message is in file ${message.out}
 
     <echo>checknative</echo>
 
-    <hadoopq failonerror="${release.native.binaries}">
+    <hadoopq failonerror="${check.native.binaries}">
       <arg value="checknative" />
     </hadoopq>
 
   </target>
 
+
+  <target name="release.arm.untar" depends="release.dir.check"
+    description="untar the arm binary release">
+
+    <mkdir dir="target/arm-untar" />
+    <gunzip src="${release.dir}/${arm.binary.filename}" dest="target/arm-untar"/>
+
+    <!--  use the native tar command to preserve file permissions -->
+    <x executable="tar" dir="target/arm-untar" >
+      <arg value="-xf" />
+      <arg value="${arm.binary.prefix}.tar" />
+    </x>
+    <echo>
+      Binary release expanded into target/arm-untar/${release}
+    </echo>
+  </target>
+  
+  <target name="release.arm.commands" depends="init"
+    description="run test hadoop commands against the arm binaries">
+
+
+    <!--   hadoop with errors-->
+    <presetdef name="hadoop">
+      <exec failonerror="true"
+        executable="bin/hadoop"
+        dir="target/arm-untar/${release}" />
+    </presetdef>
+
+    <!--    quiet hadoop-->
+    <presetdef name="hadoopq">
+      <exec failonerror="false"
+        executable="bin/hadoop"
+        dir="target/arm-untar/${release}" />
+    </presetdef>
+    <echo>ls</echo>
+    <hadoop>
+      <arg value="fs" />
+      <arg value="-ls" />
+      <arg value="file://${target}" />
+    </hadoop>
+
+    <echo>du</echo>
+    <hadoop>
+      <arg value="fs" />
+      <arg value="-du" />
+      <arg value="-h" />
+      <arg value="file://${target}" />
+    </hadoop>
+
+    <echo>checknative</echo>
+
+    <hadoopq failonerror="${check.native.binaries}">
+      <arg value="checknative" />
+    </hadoopq>
+
+  </target>
+
+
   <target name="release.copy.init" depends="release.dir.check" >
     <!--    destination dir-->
     <require-dir path="${hadoop.source.dir}"/>
@@ -918,12 +986,34 @@ Message is in file ${message.out}
   </target>
 
 
+  <!--
+  create the arm distro
+  -->
+  <target name="arm.create.release" depends="init"
+      description="create an arm native distro (no asf staging)">
+    <delete dir="${arm.dir}" />
+    <mkdir dir="${arm.dir}" />
+    <echo>source artifact is ${arm.binary.src}</echo>
+    <copy file="${arm.binary.src}" tofile="${arm.binary}" />
+   <!-- <copy file="${arm.binary.src}.asc" tofile="${arm.binary.asc}" />-->
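+    <!-- run hadoop's create-release in the arm source tree, with the
+         explicit deploy/native/sign options instead of asfrelease -->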
+    <x executable="time" dir="${arm.hadoop.dir}">
+      <arg value="dev-support/bin/create-release"/>
+      <arg value="--docker"/>
+      <arg value="--dockercache"/>
+      <arg value="--deploy"/>
+      <arg value="--native"/>
+      <arg value="--sign"/>
+      <arg value="--mvnargs=-Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false"/>
+    </x>
+  </target>
 
   <!--  copy the arm binaries into downloads/arm with their final filenames -->
   <target name="arm.copy.artifacts" depends="init"
       description="copy the arm binary and .asc files">
     <delete dir="${arm.dir}" />
     <mkdir dir="${arm.dir}" />
+    <echo>source artifact is ${arm.binary.src}</echo>
     <copy file="${arm.binary.src}" tofile="${arm.binary}" />
    <!-- <copy file="${arm.binary.src}.asc" tofile="${arm.binary.asc}" />-->
     <x executable="ls">
diff --git a/pom.xml b/pom.xml
index d66bb02..d810f03 100644
--- a/pom.xml
+++ b/pom.xml
@@ -191,6 +191,7 @@
         </exclusion>
       </exclusions>
     </dependency>
+
   </dependencies>
   <!--
   This is a profile to enable the use of the ASF snapshot and staging repositories

