sadanand48 commented on code in PR #8477: URL: https://github.com/apache/ozone/pull/8477#discussion_r2101128768
########## hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckPointServletInodeBasedXfer.java: ########## @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om; + +import org.apache.commons.compress.archivers.ArchiveOutputStream; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.hadoop.hdds.utils.db.DBCheckpoint; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; +import org.apache.hadoop.util.Time; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import javax.servlet.http.HttpServletRequest; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.utils.Archiver.includeFile; +import static org.apache.hadoop.hdds.utils.Archiver.tar; +import static 
org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY; + +public class OMDBCheckPointServletInodeBasedXfer extends OMDBCheckpointServlet { + + @Override + public void writeDbDataToStream(DBCheckpoint checkpoint, HttpServletRequest request, OutputStream destination, + List<String> toExcludeList, List<String> excludedList, Path tmpdir) throws IOException, InterruptedException { + + // Key is the InodeID and path is the first encountered file path with this inodeID + // This will later be used while writing to the tar. + Map<String, Path> copyFiles = new HashMap<>(); + + try (ArchiveOutputStream<TarArchiveEntry> archiveOutputStream = tar(destination)) { + RocksDBCheckpointDiffer differ = + getDbStore().getRocksDBCheckpointDiffer(); + DirectoryData sstBackupDir = new DirectoryData(tmpdir, + differ.getSSTBackupDir()); + DirectoryData compactionLogDir = new DirectoryData(tmpdir, + differ.getCompactionLogDir()); + + Set<String> sstFilesToExclude = new HashSet<>(toExcludeList); + + Map<Object, Set<Path>> hardlinkFiles = new HashMap<>(); + + boolean completed = getFilesForArchive(checkpoint, copyFiles, hardlinkFiles, Review Comment: > We would need to take a hardlink of the file in iteration and then write it to the tarball. This is to ensure it doesn't get deleted. I have changed it to read from the DB now. But I'd rather take a checkpoint instead of this; I feel iterating through the DB doesn't give a consistent view, and we have to manually solve these issues. I already saw an intermittent tar-file-truncation error when I changed it to read from the DB. We can take a checkpoint at the last iteration again. -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
