This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch HDDS-10656-atomic-key-overwrite
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to 
refs/heads/HDDS-10656-atomic-key-overwrite by this push:
     new 771c41895d HDDS-10872. Rewrite single key via CLI (#6691)
771c41895d is described below

commit 771c41895da3ed0e5103e3793dacf66fbf8e4c25
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Sat May 18 15:05:18 2024 +0200

    HDDS-10872. Rewrite single key via CLI (#6691)
---
 .../hadoop/ozone/shell/keys/KeyCommands.java       |  1 +
 .../hadoop/ozone/shell/keys/PutKeyHandler.java     | 26 ++++++--
 .../hadoop/ozone/shell/keys/RewriteKeyHandler.java | 77 ++++++++++++++++++++++
 3 files changed, 97 insertions(+), 7 deletions(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java
index 726ab8be4a..bbef584143 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java
@@ -44,6 +44,7 @@ import picocli.CommandLine.ParentCommand;
         CatKeyHandler.class,
         PutKeyHandler.class,
         RenameKeyHandler.class,
+        RewriteKeyHandler.class,
         CopyKeyHandler.class,
         DeleteKeyHandler.class,
         AddAclKeyHandler.class,
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
index 68beb69228..833f4f7e77 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
@@ -19,13 +19,13 @@
 package org.apache.hadoop.ozone.shell.keys;
 
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
+import java.nio.file.Files;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
 import org.apache.commons.codec.digest.DigestUtils;
@@ -68,6 +69,10 @@ public class PutKeyHandler extends KeyHandler {
   @Mixin
   private ShellReplicationOptions replication;
 
+  @Option(names = "--expectedGeneration",
+      description = "Store key only if it already exists and its generation matches the value provided")
+  private long expectedGeneration;
+
   @Override
   protected void execute(OzoneClient client, OzoneAddress address)
       throws IOException, OzoneClientException {
@@ -79,7 +84,7 @@ public class PutKeyHandler extends KeyHandler {
     File dataFile = new File(fileName);
 
     if (isVerbose()) {
-      try (InputStream stream = new FileInputStream(dataFile)) {
+      try (InputStream stream = Files.newInputStream(dataFile.toPath())) {
         String hash = DigestUtils.sha256Hex(stream);
         out().printf("File sha256 checksum : %s%n", hash);
       }
@@ -109,7 +114,7 @@ public class PutKeyHandler extends KeyHandler {
     }
   }
 
-  void async(
+  private void async(
       File dataFile, OzoneBucket bucket,
       String keyName, Map<String, String> keyMetadata,
       ReplicationConfig replicationConfig, int chunkSize)
@@ -117,14 +122,21 @@ public class PutKeyHandler extends KeyHandler {
     if (isVerbose()) {
       out().println("API: async");
     }
-    try (InputStream input = new FileInputStream(dataFile);
-         OutputStream output = bucket.createKey(keyName, dataFile.length(),
-             replicationConfig, keyMetadata)) {
+    try (InputStream input = Files.newInputStream(dataFile.toPath());
+         OutputStream output = createOrReplaceKey(bucket, keyName, dataFile.length(), keyMetadata, replicationConfig)) {
       IOUtils.copyBytes(input, output, chunkSize);
     }
   }
 
-  void stream(
+  private OzoneOutputStream createOrReplaceKey(OzoneBucket bucket, String keyName,
+      long size, Map<String, String> keyMetadata, ReplicationConfig replicationConfig
+  ) throws IOException {
+    return expectedGeneration > 0
+        ? bucket.rewriteKey(keyName, size, expectedGeneration, replicationConfig, keyMetadata)
+        : bucket.createKey(keyName, size, replicationConfig, keyMetadata);
+  }
+
+  private void stream(
       File dataFile, OzoneBucket bucket,
       String keyName, Map<String, String> keyMetadata,
       ReplicationConfig replicationConfig, int chunkSize)
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java
new file mode 100644
index 0000000000..c6f97c6528
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.shell.keys;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.shell.OzoneAddress;
+import org.apache.hadoop.ozone.shell.ShellReplicationOptions;
+import picocli.CommandLine;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import static org.apache.hadoop.ozone.OzoneConsts.MB;
+
+/**
+ * Rewrite a key with different replication.
+ */
[email protected](name = "rewrite",
+    description = "Rewrites the key with different replication")
+public class RewriteKeyHandler extends KeyHandler {
+
+  @CommandLine.Mixin
+  private ShellReplicationOptions replication;
+
+  @Override
+  protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
+    String keyName = address.getKeyName();
+
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+    OzoneBucket bucket = vol.getBucket(bucketName);
+    OzoneKeyDetails key = bucket.getKey(keyName);
+
+    ReplicationConfig newReplication = replication.fromParamsOrConfig(getConf());
+    if (newReplication == null) {
+      newReplication = key.getReplicationConfig().getReplicationType() == HddsProtos.ReplicationType.RATIS
+          ? new ECReplicationConfig(3, 2)
+          : RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
+    } else if (newReplication.equals(key.getReplicationConfig())) {
+      System.err.println("Replication unchanged: " + key.getReplicationConfig());
+      return;
+    }
+
+    try (
+        InputStream input = bucket.readKey(keyName);
+        OutputStream output = bucket.rewriteKey(keyName, key.getDataSize(), key.getGeneration(),
+            newReplication, key.getMetadata())) {
+      IOUtils.copyBytes(input, output, (int) Math.min(MB, key.getDataSize()));
+    }
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to