http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java deleted file mode 100644 index da56385..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result; -import org.apache.hadoop.scm.container.common.helpers.DeleteBlockResult; - -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Result to delete a group of blocks. - */ -public class DeleteBlockGroupResult { - private String objectKey; - private List<DeleteBlockResult> blockResultList; - public DeleteBlockGroupResult(String objectKey, - List<DeleteBlockResult> blockResultList) { - this.objectKey = objectKey; - this.blockResultList = blockResultList; - } - - public String getObjectKey() { - return objectKey; - } - - public List<DeleteBlockResult> getBlockResultList() { - return blockResultList; - } - - public List<DeleteScmBlockResult> getBlockResultProtoList() { - List<DeleteScmBlockResult> resultProtoList = - new ArrayList<>(blockResultList.size()); - for (DeleteBlockResult result : blockResultList) { - DeleteScmBlockResult proto = DeleteScmBlockResult.newBuilder() - .setKey(result.getKey()) - .setResult(result.getResult()).build(); - resultProtoList.add(proto); - } - return resultProtoList; - } - - public static List<DeleteBlockResult> convertBlockResultProto( - List<DeleteScmBlockResult> results) { - List<DeleteBlockResult> protoResults = new ArrayList<>(results.size()); - for (DeleteScmBlockResult result : results) { - protoResults.add(new DeleteBlockResult(result.getKey(), - result.getResult())); - } - return protoResults; - } - - /** - * Only if all blocks are successfully deleted, this group is considered - * to be successfully executed. - * - * @return true if all blocks are successfully deleted, false otherwise. - */ - public boolean isSuccess() { - for (DeleteBlockResult result : blockResultList) { - if (result.getResult() != Result.success) { - return false; - } - } - return true; - } - - /** - * @return A list of deletion failed block IDs. 
- */ - public List<String> getFailedBlocks() { - List<String> failedBlocks = blockResultList.stream() - .filter(result -> result.getResult() != Result.success) - .map(DeleteBlockResult::getKey).collect(Collectors.toList()); - return failedBlocks; - } -}
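For reference, here is a minimal, hypothetical usage sketch of the DeleteBlockGroupResult helper whose removal is shown above. It is not part of the commit; it uses only the constructor and methods visible in the deleted file, assumes DeleteBlockResult and the DeleteScmBlockResult.Result proto enum are still on the classpath, and the object key and block names are invented for illustration.

// Illustrative caller, not part of the change above.
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import org.apache.hadoop.scm.container.common.helpers.DeleteBlockResult;

public class DeleteBlockGroupResultExample {
  public static void main(String[] args) {
    // Per-block deletion outcomes reported for a single object key.
    List<DeleteBlockResult> blockResults = Arrays.asList(
        new DeleteBlockResult("blk-1", Result.success),
        new DeleteBlockResult("blk-2", Result.success));

    DeleteBlockGroupResult group =
        new DeleteBlockGroupResult("vol/bucket/key-1", blockResults);

    // The whole group counts as deleted only if every block succeeded.
    if (group.isSuccess()) {
      System.out.println("Deleted " + group.getObjectKey());
    } else {
      System.out.println("Failed blocks: " + group.getFailedBlocks());
    }

    // Round-trip between the helper form and the protobuf wire form.
    List<DeleteBlockResult> roundTrip = DeleteBlockGroupResult
        .convertBlockResultProto(group.getBlockResultProtoList());
    System.out.println("Round-tripped " + roundTrip.size() + " results");
  }
}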
http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java deleted file mode 100644 index c3f9234..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * The exception is thrown when file system state is inconsistent - * and is not recoverable. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class InconsistentStorageStateException extends IOException { - private static final long serialVersionUID = 1L; - - public InconsistentStorageStateException(String descr) { - super(descr); - } - - public InconsistentStorageStateException(File dir, String descr) { - super("Directory " + getFilePath(dir) + " is in an inconsistent state: " - + descr); - } - - private static String getFilePath(File dir) { - try { - return dir.getCanonicalPath(); - } catch (IOException e) { - } - return dir.getPath(); - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java deleted file mode 100644 index 9df2ffa..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java +++ /dev/null @@ -1,249 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import java.io.File; -import java.io.IOException; -import java.nio.file.DirectoryStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Properties; - - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; -import org.apache.hadoop.util.Time; - -/** - * Storage information file. This Class defines the methods to check - * the consistency of the storage dir and the version file. - * <p> - * Local storage information is stored in a separate file VERSION. - * It contains type of the node, - * the storage layout version, the SCM id, and - * the KSM/SCM state creation time. - * - */ -@InterfaceAudience.Private -public abstract class Storage { - private static final Logger LOG = LoggerFactory.getLogger(Storage.class); - - protected static final String STORAGE_DIR_CURRENT = "current"; - protected static final String STORAGE_FILE_VERSION = "VERSION"; - - private final NodeType nodeType; - private final File root; - private final File storageDir; - - private StorageState state; - private StorageInfo storageInfo; - - - /** - * Determines the state of the Version file. - */ - public enum StorageState { - NON_EXISTENT, NOT_INITIALIZED, INITIALIZED - } - - public Storage(NodeType type, File root, String sdName) - throws IOException { - this.nodeType = type; - this.root = root; - this.storageDir = new File(root, sdName); - this.state = getStorageState(); - if (state == StorageState.INITIALIZED) { - this.storageInfo = new StorageInfo(type, getVersionFile()); - } else { - this.storageInfo = new StorageInfo( - nodeType, StorageInfo.newClusterID(), Time.now()); - setNodeProperties(); - } - } - - /** - * Gets the path of the Storage dir. - * @return Stoarge dir path - */ - public String getStorageDir() { - return storageDir.getAbsoluteFile().toString(); - } - - /** - * Gets the state of the version file. - * @return the state of the Version file - */ - public StorageState getState() { - return state; - } - - public NodeType getNodeType() { - return storageInfo.getNodeType(); - } - - public String getClusterID() { - return storageInfo.getClusterID(); - } - - public long getCreationTime() { - return storageInfo.getCreationTime(); - } - - public void setClusterId(String clusterId) throws IOException { - if (state == StorageState.INITIALIZED) { - throw new IOException( - "Storage directory " + storageDir + " already initialized."); - } else { - storageInfo.setClusterId(clusterId); - } - } - - /** - * Retreives the storageInfo instance to read/write the common - * version file properties. - * @return the instance of the storageInfo class - */ - protected StorageInfo getStorageInfo() { - return storageInfo; - } - - abstract protected Properties getNodeProperties(); - - /** - * Sets the Node properties spaecific to KSM/SCM. 
- */ - private void setNodeProperties() { - Properties nodeProperties = getNodeProperties(); - if (nodeProperties != null) { - for (String key : nodeProperties.stringPropertyNames()) { - storageInfo.setProperty(key, nodeProperties.getProperty(key)); - } - } - } - - /** - * Directory {@code current} contains latest files defining - * the file system meta-data. - * - * @return the directory path - */ - private File getCurrentDir() { - return new File(storageDir, STORAGE_DIR_CURRENT); - } - - /** - * File {@code VERSION} contains the following fields: - * <ol> - * <li>node type</li> - * <li>KSM/SCM state creation time</li> - * <li>other fields specific for this node type</li> - * </ol> - * The version file is always written last during storage directory updates. - * The existence of the version file indicates that all other files have - * been successfully written in the storage directory, the storage is valid - * and does not need to be recovered. - * - * @return the version file path - */ - private File getVersionFile() { - return new File(getCurrentDir(), STORAGE_FILE_VERSION); - } - - - /** - * Check to see if current/ directory is empty. This method is used - * before determining to format the directory. - * @throws IOException if unable to list files under the directory. - */ - private void checkEmptyCurrent() throws IOException { - File currentDir = getCurrentDir(); - if (!currentDir.exists()) { - // if current/ does not exist, it's safe to format it. - return; - } - try (DirectoryStream<Path> dirStream = Files - .newDirectoryStream(currentDir.toPath())) { - if (dirStream.iterator().hasNext()) { - throw new InconsistentStorageStateException(getCurrentDir(), - "Can't initialize the storage directory because the current " - + "it is not empty."); - } - } - } - - /** - * Check consistency of the storage directory. - * - * @return state {@link StorageState} of the storage directory - * @throws IOException - */ - private StorageState getStorageState() throws IOException { - assert root != null : "root is null"; - String rootPath = root.getCanonicalPath(); - try { // check that storage exists - if (!root.exists()) { - // storage directory does not exist - LOG.warn("Storage directory " + rootPath + " does not exist"); - return StorageState.NON_EXISTENT; - } - // or is inaccessible - if (!root.isDirectory()) { - LOG.warn(rootPath + "is not a directory"); - return StorageState.NON_EXISTENT; - } - if (!FileUtil.canWrite(root)) { - LOG.warn("Cannot access storage directory " + rootPath); - return StorageState.NON_EXISTENT; - } - } catch (SecurityException ex) { - LOG.warn("Cannot access storage directory " + rootPath, ex); - return StorageState.NON_EXISTENT; - } - - // check whether current directory is valid - File versionFile = getVersionFile(); - boolean hasCurrent = versionFile.exists(); - - if (hasCurrent) { - return StorageState.INITIALIZED; - } else { - checkEmptyCurrent(); - return StorageState.NOT_INITIALIZED; - } - } - - /** - * Creates the Version file if not present, - * otherwise returns with IOException. 
- * @throws IOException - */ - public void initialize() throws IOException { - if (state == StorageState.INITIALIZED) { - throw new IOException("Storage directory already initialized."); - } - if (!getCurrentDir().mkdirs()) { - throw new IOException("Cannot create directory " + getCurrentDir()); - } - storageInfo.writeTo(getVersionFile()); - } - -} - http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java deleted file mode 100644 index a79980a..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java +++ /dev/null @@ -1,184 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import java.io.IOException; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.File; -import java.io.RandomAccessFile; - -import java.util.Properties; -import java.util.UUID; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; - -/** - * Common class for storage information. This class defines the common - * properties and functions to set them , write them into the version file - * and read them from the version file. - * - */ -@InterfaceAudience.Private -public class StorageInfo { - - private Properties properties = new Properties(); - - /** - * Property to hold node type. - */ - private static final String NODE_TYPE = "nodeType"; - /** - * Property to hold ID of the cluster. - */ - private static final String CLUSTER_ID = "clusterID"; - /** - * Property to hold creation time of the storage. - */ - private static final String CREATION_TIME = "cTime"; - - /** - * Constructs StorageInfo instance. 
- * @param type - * Type of the node using the storage - * @param cid - * Cluster ID - * @param cT - * Cluster creation Time - - * @throws IOException - */ - public StorageInfo(NodeType type, String cid, long cT) - throws IOException { - Preconditions.checkNotNull(type); - Preconditions.checkNotNull(cid); - Preconditions.checkNotNull(cT); - properties.setProperty(NODE_TYPE, type.name()); - properties.setProperty(CLUSTER_ID, cid); - properties.setProperty(CREATION_TIME, String.valueOf(cT)); - } - - public StorageInfo(NodeType type, File propertiesFile) - throws IOException { - this.properties = readFrom(propertiesFile); - verifyNodeType(type); - verifyClusterId(); - verifyCreationTime(); - } - - public NodeType getNodeType() { - return NodeType.valueOf(properties.getProperty(NODE_TYPE)); - } - - public String getClusterID() { - return properties.getProperty(CLUSTER_ID); - } - - public Long getCreationTime() { - String creationTime = properties.getProperty(CREATION_TIME); - if(creationTime != null) { - return Long.parseLong(creationTime); - } - return null; - } - - public String getProperty(String key) { - return properties.getProperty(key); - } - - public void setProperty(String key, String value) { - properties.setProperty(key, value); - } - - public void setClusterId(String clusterId) { - properties.setProperty(CLUSTER_ID, clusterId); - } - - private void verifyNodeType(NodeType type) - throws InconsistentStorageStateException { - NodeType nodeType = getNodeType(); - Preconditions.checkNotNull(nodeType); - if(type != nodeType) { - throw new InconsistentStorageStateException("Expected NodeType: " + type + - ", but found: " + nodeType); - } - } - - private void verifyClusterId() - throws InconsistentStorageStateException { - String clusterId = getClusterID(); - Preconditions.checkNotNull(clusterId); - if(clusterId.isEmpty()) { - throw new InconsistentStorageStateException("Cluster ID not found"); - } - } - - private void verifyCreationTime() { - Long creationTime = getCreationTime(); - Preconditions.checkNotNull(creationTime); - } - - - public void writeTo(File to) - throws IOException { - try (RandomAccessFile file = new RandomAccessFile(to, "rws"); - FileOutputStream out = new FileOutputStream(file.getFD())) { - file.seek(0); - /* - * If server is interrupted before this line, - * the version file will remain unchanged. - */ - properties.store(out, null); - /* - * Now the new fields are flushed to the head of the file, but file - * length can still be larger then required and therefore the file can - * contain whole or corrupted fields from its old contents in the end. - * If server is interrupted here and restarted later these extra fields - * either should not effect server behavior or should be handled - * by the server correctly. - */ - file.setLength(out.getChannel().position()); - } - } - - private Properties readFrom(File from) throws IOException { - try (RandomAccessFile file = new RandomAccessFile(from, "rws"); - FileInputStream in = new FileInputStream(file.getFD())) { - Properties props = new Properties(); - file.seek(0); - props.load(in); - return props; - } - } - - /** - * Generate new clusterID. - * - * clusterID is a persistent attribute of the cluster. - * It is generated when the cluster is created and remains the same - * during the life cycle of the cluster. When a new SCM node is initialized, - * if this is a new cluster, a new clusterID is generated and stored. 
- * @return new clusterID - */ - public static String newClusterID() { - return "CID-" + UUID.randomUUID().toString(); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java deleted file mode 100644 index 6517e58..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java deleted file mode 100644 index 9aeff24..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.common.statemachine; - -/** - * Class wraps invalid state transition exception. 
- */ -public class InvalidStateTransitionException extends Exception { - private Enum<?> currentState; - private Enum<?> event; - - public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) { - super("Invalid event: " + event + " at " + currentState + " state."); - this.currentState = currentState; - this.event = event; - } - - public Enum<?> getCurrentState() { - return currentState; - } - - public Enum<?> getEvent() { - return event; - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java deleted file mode 100644 index bf8cbd5..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.common.statemachine; - -import com.google.common.base.Supplier; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -/** - * Template class that wraps simple event driven state machine. 
- * @param <STATE> states allowed - * @param <EVENT> events allowed - */ -public class StateMachine<STATE extends Enum<?>, EVENT extends Enum<?>> { - private STATE initialState; - private Set<STATE> finalStates; - - private final LoadingCache<EVENT, Map<STATE, STATE>> transitions = - CacheBuilder.newBuilder().build( - CacheLoader.from((Supplier<Map<STATE, STATE>>) () -> new HashMap())); - - public StateMachine(STATE initState, Set<STATE> finalStates) { - this.initialState = initState; - this.finalStates = finalStates; - } - - public STATE getInitialState() { - return initialState; - } - - public Set<STATE> getFinalStates() { - return finalStates; - } - - public STATE getNextState(STATE from, EVENT e) - throws InvalidStateTransitionException { - STATE target = transitions.getUnchecked(e).get(from); - if (target == null) { - throw new InvalidStateTransitionException(from, e); - } - return target; - } - - public void addTransition(STATE from, STATE to, EVENT e) { - transitions.getUnchecked(e).put(from, to); - } -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java deleted file mode 100644 index 045409e..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common.statemachine; -/** - state machine template class for ozone. - **/ \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java deleted file mode 100644 index 4aa36c3..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; - -import java.io.IOException; -import java.util.Map; -import java.util.TreeMap; - -/** - * Java class that represents ChunkInfo ProtoBuf class. This helper class allows - * us to convert to and from protobuf to normal java. - */ -public class ChunkInfo { - private final String chunkName; - private final long offset; - private final long len; - private String checksum; - private final Map<String, String> metadata; - - - /** - * Constructs a ChunkInfo. - * - * @param chunkName - File Name where chunk lives. - * @param offset - offset where Chunk Starts. - * @param len - Length of the Chunk. - */ - public ChunkInfo(String chunkName, long offset, long len) { - this.chunkName = chunkName; - this.offset = offset; - this.len = len; - this.metadata = new TreeMap<>(); - } - - /** - * Adds metadata. - * - * @param key - Key Name. - * @param value - Value. - * @throws IOException - */ - public void addMetadata(String key, String value) throws IOException { - synchronized (this.metadata) { - if (this.metadata.containsKey(key)) { - throw new IOException("This key already exists. Key " + key); - } - metadata.put(key, value); - } - } - - /** - * Gets a Chunkinfo class from the protobuf definitions. - * - * @param info - Protobuf class - * @return ChunkInfo - * @throws IOException - */ - public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info) - throws IOException { - Preconditions.checkNotNull(info); - - ChunkInfo chunkInfo = new ChunkInfo(info.getChunkName(), info.getOffset(), - info.getLen()); - - for (int x = 0; x < info.getMetadataCount(); x++) { - chunkInfo.addMetadata(info.getMetadata(x).getKey(), - info.getMetadata(x).getValue()); - } - - - if (info.hasChecksum()) { - chunkInfo.setChecksum(info.getChecksum()); - } - return chunkInfo; - } - - /** - * Returns a ProtoBuf Message from ChunkInfo. - * - * @return Protocol Buffer Message - */ - public ContainerProtos.ChunkInfo getProtoBufMessage() { - ContainerProtos.ChunkInfo.Builder builder = ContainerProtos - .ChunkInfo.newBuilder(); - - builder.setChunkName(this.getChunkName()); - builder.setOffset(this.getOffset()); - builder.setLen(this.getLen()); - if (this.getChecksum() != null && !this.getChecksum().isEmpty()) { - builder.setChecksum(this.getChecksum()); - } - - for (Map.Entry<String, String> entry : metadata.entrySet()) { - HdslProtos.KeyValue.Builder keyValBuilder = - HdslProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); - } - - return builder.build(); - } - - /** - * Returns the chunkName. 
- * - * @return - String - */ - public String getChunkName() { - return chunkName; - } - - /** - * Gets the start offset of the given chunk in physical file. - * - * @return - long - */ - public long getOffset() { - return offset; - } - - /** - * Returns the length of the Chunk. - * - * @return long - */ - public long getLen() { - return len; - } - - /** - * Returns the SHA256 value of this chunk. - * - * @return - Hash String - */ - public String getChecksum() { - return checksum; - } - - /** - * Sets the Hash value of this chunk. - * - * @param checksum - Hash String. - */ - public void setChecksum(String checksum) { - this.checksum = checksum; - } - - /** - * Returns Metadata associated with this Chunk. - * - * @return - Map of Key,values. - */ - public Map<String, String> getMetadata() { - return metadata; - } - - @Override - public String toString() { - return "ChunkInfo{" + - "chunkName='" + chunkName + - ", offset=" + offset + - ", len=" + len + - '}'; - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java deleted file mode 100644 index eb02152..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -/** - * Helper class to convert Protobuf to Java classes. - */ -public class KeyData { - private final String containerName; - private final String keyName; - private final Map<String, String> metadata; - - /** - * Please note : when we are working with keys, we don't care what they point - * to. So we We don't read chunkinfo nor validate them. It is responsibility - * of higher layer like ozone. We just read and write data from network. - */ - private List<ContainerProtos.ChunkInfo> chunks; - - /** - * Constructs a KeyData Object. - * - * @param containerName - * @param keyName - */ - public KeyData(String containerName, String keyName) { - this.containerName = containerName; - this.keyName = keyName; - this.metadata = new TreeMap<>(); - } - - /** - * Returns a keyData object from the protobuf data. 
- * - * @param data - Protobuf data. - * @return - KeyData - * @throws IOException - */ - public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws - IOException { - KeyData keyData = new KeyData(data.getContainerName(), data.getName()); - for (int x = 0; x < data.getMetadataCount(); x++) { - keyData.addMetadata(data.getMetadata(x).getKey(), - data.getMetadata(x).getValue()); - } - keyData.setChunks(data.getChunksList()); - return keyData; - } - - /** - * Returns a Protobuf message from KeyData. - * @return Proto Buf Message. - */ - public ContainerProtos.KeyData getProtoBufMessage() { - ContainerProtos.KeyData.Builder builder = - ContainerProtos.KeyData.newBuilder(); - builder.setContainerName(this.containerName); - builder.setName(this.getKeyName()); - builder.addAllChunks(this.chunks); - for (Map.Entry<String, String> entry : metadata.entrySet()) { - HdslProtos.KeyValue.Builder keyValBuilder = - HdslProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); - } - return builder.build(); - } - - /** - * Adds metadata. - * - * @param key - Key - * @param value - Value - * @throws IOException - */ - public synchronized void addMetadata(String key, String value) throws - IOException { - if (this.metadata.containsKey(key)) { - throw new IOException("This key already exists. Key " + key); - } - metadata.put(key, value); - } - - public synchronized Map<String, String> getMetadata() { - return Collections.unmodifiableMap(this.metadata); - } - - /** - * Returns value of a key. - */ - public synchronized String getValue(String key) { - return metadata.get(key); - } - - /** - * Deletes a metadata entry from the map. - * - * @param key - Key - */ - public synchronized void deleteKey(String key) { - metadata.remove(key); - } - - /** - * Returns chunks list. - * - * @return list of chunkinfo. - */ - public List<ContainerProtos.ChunkInfo> getChunks() { - return chunks; - } - - /** - * Returns container Name. - * @return String. - */ - public String getContainerName() { - return containerName; - } - - /** - * Returns KeyName. - * @return String. - */ - public String getKeyName() { - return keyName; - } - - /** - * Sets Chunk list. - * - * @param chunks - List of chunks. - */ - public void setChunks(List<ContainerProtos.ChunkInfo> chunks) { - this.chunks = chunks; - } - - /** - * Get the total size of chunks allocated for the key. - * @return total size of the key. - */ - public long getSize() { - return chunks.parallelStream().mapToLong(e->e.getLen()).sum(); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java deleted file mode 100644 index fa5df11..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -/** - * Helper classes for the container protocol communication. - */ \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java deleted file mode 100644 index dfa9315..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java +++ /dev/null @@ -1,189 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.apache.hadoop.util.Time; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Callable; - -/** - * This class represents the lease created on a resource. Callback can be - * registered on the lease which will be executed in case of timeout. - * - * @param <T> Resource type for which the lease can be associated - */ -public class Lease<T> { - - /** - * The resource for which this lease is created. - */ - private final T resource; - - private final long creationTime; - - /** - * Lease lifetime in milliseconds. - */ - private volatile long leaseTimeout; - - private boolean expired; - - /** - * Functions to be called in case of timeout. - */ - private List<Callable<Void>> callbacks; - - - /** - * Creates a lease on the specified resource with given timeout. - * - * @param resource - * Resource for which the lease has to be created - * @param timeout - * Lease lifetime in milliseconds - */ - public Lease(T resource, long timeout) { - this.resource = resource; - this.leaseTimeout = timeout; - this.callbacks = Collections.synchronizedList(new ArrayList<>()); - this.creationTime = Time.monotonicNow(); - this.expired = false; - } - - /** - * Returns true if the lease has expired, else false. 
- * - * @return true if expired, else false - */ - public boolean hasExpired() { - return expired; - } - - /** - * Registers a callback which will be executed in case of timeout. Callbacks - * are executed in a separate Thread. - * - * @param callback - * The Callable which has to be executed - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public void registerCallBack(Callable<Void> callback) - throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - callbacks.add(callback); - } - - /** - * Returns the time elapsed since the creation of lease. - * - * @return elapsed time in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getElapsedTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return Time.monotonicNow() - creationTime; - } - - /** - * Returns the time available before timeout. - * - * @return remaining time in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getRemainingTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return leaseTimeout - getElapsedTime(); - } - - /** - * Returns total lease lifetime. - * - * @return total lifetime of lease in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getLeaseLifeTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return leaseTimeout; - } - - /** - * Renews the lease timeout period. - * - * @param timeout - * Time to be added to the lease in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public void renew(long timeout) throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - leaseTimeout += timeout; - } - - @Override - public int hashCode() { - return resource.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if(obj instanceof Lease) { - return resource.equals(((Lease) obj).resource); - } - return false; - } - - @Override - public String toString() { - return "Lease<" + resource.toString() + ">"; - } - - /** - * Returns the callbacks to be executed for the lease in case of timeout. - * - * @return callbacks to be executed - */ - List<Callable<Void>> getCallbacks() { - return callbacks; - } - - /** - * Expires/Invalidates the lease. - */ - void invalidate() { - callbacks = null; - expired = true; - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java deleted file mode 100644 index a39ea22..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that there is already a lease acquired on the - * same resource. - */ -public class LeaseAlreadyExistException extends LeaseException { - - /** - * Constructs an {@code LeaseAlreadyExistException} with {@code null} - * as its error detail message. - */ - public LeaseAlreadyExistException() { - super(); - } - - /** - * Constructs an {@code LeaseAlreadyExistException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseAlreadyExistException(String message) { - super(message); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java deleted file mode 100644 index 1b7391b..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.Callable; - -/** - * This class is responsible for executing the callbacks of a lease in case of - * timeout. - */ -public class LeaseCallbackExecutor<T> implements Runnable { - - private static final Logger LOG = LoggerFactory.getLogger(Lease.class); - - private final T resource; - private final List<Callable<Void>> callbacks; - - /** - * Constructs LeaseCallbackExecutor instance with list of callbacks. 
- * - * @param resource - * The resource for which the callbacks are executed - * @param callbacks - * Callbacks to be executed by this executor - */ - public LeaseCallbackExecutor(T resource, List<Callable<Void>> callbacks) { - this.resource = resource; - this.callbacks = callbacks; - } - - @Override - public void run() { - if(LOG.isDebugEnabled()) { - LOG.debug("Executing callbacks for lease on {}", resource); - } - for(Callable<Void> callback : callbacks) { - try { - callback.call(); - } catch (Exception e) { - LOG.warn("Exception while executing callback for lease on {}", - resource, e); - } - } - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java deleted file mode 100644 index 418f412..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents all lease related exceptions. - */ -public class LeaseException extends Exception { - - /** - * Constructs an {@code LeaseException} with {@code null} - * as its error detail message. - */ - public LeaseException() { - super(); - } - - /** - * Constructs an {@code LeaseException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseException(String message) { - super(message); - } - -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java deleted file mode 100644 index 440a023..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that the lease that is being accessed has expired. - */ -public class LeaseExpiredException extends LeaseException { - - /** - * Constructs an {@code LeaseExpiredException} with {@code null} - * as its error detail message. - */ - public LeaseExpiredException() { - super(); - } - - /** - * Constructs an {@code LeaseExpiredException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseExpiredException(String message) { - super(message); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java deleted file mode 100644 index b8390dd..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java +++ /dev/null @@ -1,247 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -/** - * LeaseManager is someone who can provide you leases based on your - * requirement. If you want to return the lease back before it expires, - * you can give it back to Lease Manager. He is the one responsible for - * the lifecycle of leases. The resource for which lease is created - * should have proper {@code equals} method implementation, resource - * equality is checked while the lease is created. 
- * - * @param <T> Type of leases that this lease manager can create - */ -public class LeaseManager<T> { - - private static final Logger LOG = - LoggerFactory.getLogger(LeaseManager.class); - - private final long defaultTimeout; - private Map<T, Lease<T>> activeLeases; - private LeaseMonitor leaseMonitor; - private Thread leaseMonitorThread; - private boolean isRunning; - - /** - * Creates an instance of lease manager. - * - * @param defaultTimeout - * Default timeout in milliseconds to be used for lease creation. - */ - public LeaseManager(long defaultTimeout) { - this.defaultTimeout = defaultTimeout; - } - - /** - * Starts the lease manager service. - */ - public void start() { - LOG.debug("Starting LeaseManager service"); - activeLeases = new ConcurrentHashMap<>(); - leaseMonitor = new LeaseMonitor(); - leaseMonitorThread = new Thread(leaseMonitor); - leaseMonitorThread.setName("LeaseManager#LeaseMonitor"); - leaseMonitorThread.setDaemon(true); - leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> { - // Let us just restart this thread after logging an error. - // if this thread is not running we cannot handle Lease expiry. - LOG.error("LeaseMonitor thread encountered an error. Thread: {}", - thread.toString(), throwable); - leaseMonitorThread.start(); - }); - LOG.debug("Starting LeaseManager#LeaseMonitor Thread"); - leaseMonitorThread.start(); - isRunning = true; - } - - /** - * Returns a lease for the specified resource with default timeout. - * - * @param resource - * Resource for which lease has to be created - * @throws LeaseAlreadyExistException - * If there is already a lease on the resource - */ - public synchronized Lease<T> acquire(T resource) - throws LeaseAlreadyExistException { - return acquire(resource, defaultTimeout); - } - - /** - * Returns a lease for the specified resource with the timeout provided. - * - * @param resource - * Resource for which lease has to be created - * @param timeout - * The timeout in milliseconds which has to be set on the lease - * @throws LeaseAlreadyExistException - * If there is already a lease on the resource - */ - public synchronized Lease<T> acquire(T resource, long timeout) - throws LeaseAlreadyExistException { - checkStatus(); - if(LOG.isDebugEnabled()) { - LOG.debug("Acquiring lease on {} for {} milliseconds", resource, timeout); - } - if(activeLeases.containsKey(resource)) { - throw new LeaseAlreadyExistException("Resource: " + resource); - } - Lease<T> lease = new Lease<>(resource, timeout); - activeLeases.put(resource, lease); - leaseMonitorThread.interrupt(); - return lease; - } - - /** - * Returns a lease associated with the specified resource. - * - * @param resource - * Resource for which the lease has to be returned - * @throws LeaseNotFoundException - * If there is no active lease on the resource - */ - public Lease<T> get(T resource) throws LeaseNotFoundException { - checkStatus(); - Lease<T> lease = activeLeases.get(resource); - if(lease != null) { - return lease; - } - throw new LeaseNotFoundException("Resource: " + resource); - } - - /** - * Releases the lease associated with the specified resource. 
- * - * @param resource - * The resource for which the lease has to be released - * @throws LeaseNotFoundException - * If there is no active lease on the resource - */ - public synchronized void release(T resource) - throws LeaseNotFoundException { - checkStatus(); - if(LOG.isDebugEnabled()) { - LOG.debug("Releasing lease on {}", resource); - } - Lease<T> lease = activeLeases.remove(resource); - if(lease == null) { - throw new LeaseNotFoundException("Resource: " + resource); - } - lease.invalidate(); - } - - /** - * Shuts down the LeaseManager and releases its resources. All active - * {@link Lease}s will be released (callbacks on leases will not be - * executed). - */ - public void shutdown() { - checkStatus(); - LOG.debug("Shutting down LeaseManager service"); - leaseMonitor.disable(); - leaseMonitorThread.interrupt(); - for(T resource : activeLeases.keySet()) { - try { - release(resource); - } catch(LeaseNotFoundException ex) { - //Ignore the exception, someone might have released the lease - } - } - isRunning = false; - } - - /** - * Throws {@link LeaseManagerNotRunningException} if the service is not - * running. - */ - private void checkStatus() { - if(!isRunning) { - throw new LeaseManagerNotRunningException("LeaseManager not running."); - } - } - - /** - * Monitors the leases and expires them based on the timeout; it is also - * responsible for executing the callbacks of expired leases. - */ - private final class LeaseMonitor implements Runnable { - - private boolean monitor = true; - private ExecutorService executorService; - - private LeaseMonitor() { - this.monitor = true; - this.executorService = Executors.newCachedThreadPool(); - } - - @Override - public void run() { - while(monitor) { - LOG.debug("LeaseMonitor: checking for lease expiry"); - long sleepTime = Long.MAX_VALUE; - - for (T resource : activeLeases.keySet()) { - try { - Lease<T> lease = get(resource); - long remainingTime = lease.getRemainingTime(); - if (remainingTime <= 0) { - //Lease has timed out - List<Callable<Void>> leaseCallbacks = lease.getCallbacks(); - release(resource); - executorService.execute( - new LeaseCallbackExecutor(resource, leaseCallbacks)); - } else { - sleepTime = remainingTime > sleepTime ? - sleepTime : remainingTime; - } - } catch (LeaseNotFoundException | LeaseExpiredException ex) { - //Ignore the exception, someone might have released the lease - } - } - - try { - if(!Thread.interrupted()) { - Thread.sleep(sleepTime); - } - } catch (InterruptedException ignored) { - // This means a new lease is added to activeLeases. - } - } - } - - /** - * Disables the lease monitor; the next interrupt call on the thread - * will stop the lease monitor. - */ - public void disable() { - monitor = false; - } - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java deleted file mode 100644 index ced31de..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements.
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that the LeaseManager service is not running. - */ -public class LeaseManagerNotRunningException extends RuntimeException { - - /** - * Constructs a {@code LeaseManagerNotRunningException} with {@code null} - * as its error detail message. - */ - public LeaseManagerNotRunningException() { - super(); - } - - /** - * Constructs a {@code LeaseManagerNotRunningException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseManagerNotRunningException(String message) { - super(message); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java deleted file mode 100644 index c292d33..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that the lease that is being accessed does not - * exist. - */ -public class LeaseNotFoundException extends LeaseException { - - /** - * Constructs a {@code LeaseNotFoundException} with {@code null} - * as its error detail message. - */ - public LeaseNotFoundException() { - super(); - } - - /** - * Constructs a {@code LeaseNotFoundException} with the specified - * detail message.
- * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseNotFoundException(String message) { - super(message); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java deleted file mode 100644 index 48ee2e1..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * A generic lease management API that can be used whenever a service - * needs lease management. - */ - -package org.apache.hadoop.ozone.lease; -/* - This package contains lease management related classes. - */ \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index db399db..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -/** - This package contains classes that support the ozone implementation on - the datanode side. - - The main parts of ozone on the datanode are: - - 1. REST Interface - This code lives under the web directory and listens to the - WebHDFS port. - - 2. Datanode container classes: These support persistence of ozone objects - on the datanode.
They live under the container directory. - - 3. Client and Shell: We also provide an ozone REST client library; it lives - under web/client and web/ozShell. - - */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java deleted file mode 100644 index c211bd5..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ /dev/null @@ -1,169 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import com.google.common.collect.Sets; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteKeyBlocksResultProto; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.scm.ScmInfo; -import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.GetScmBlockLocationsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.GetScmBlockLocationsResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.ScmLocatedBlockProto; - -import java.io.IOException; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * This class is the server-side translator that
forwards requests received on - * {@link ScmBlockLocationProtocolPB} to the - * {@link ScmBlockLocationProtocol} server implementation. - */ -@InterfaceAudience.Private -public final class ScmBlockLocationProtocolServerSideTranslatorPB - implements ScmBlockLocationProtocolPB { - - private final ScmBlockLocationProtocol impl; - - /** - * Creates a new ScmBlockLocationProtocolServerSideTranslatorPB. - * - * @param impl {@link ScmBlockLocationProtocol} server implementation - */ - public ScmBlockLocationProtocolServerSideTranslatorPB( - ScmBlockLocationProtocol impl) throws IOException { - this.impl = impl; - } - - - @Override - public GetScmBlockLocationsResponseProto getScmBlockLocations( - RpcController controller, GetScmBlockLocationsRequestProto req) - throws ServiceException { - Set<String> keys = Sets.newLinkedHashSetWithExpectedSize( - req.getKeysCount()); - for (String key : req.getKeysList()) { - keys.add(key); - } - final Set<AllocatedBlock> blocks; - try { - blocks = impl.getBlockLocations(keys); - } catch (IOException ex) { - throw new ServiceException(ex); - } - GetScmBlockLocationsResponseProto.Builder resp = - GetScmBlockLocationsResponseProto.newBuilder(); - for (AllocatedBlock block: blocks) { - ScmLocatedBlockProto.Builder locatedBlock = - ScmLocatedBlockProto.newBuilder() - .setKey(block.getKey()) - .setPipeline(block.getPipeline().getProtobufMessage()); - resp.addLocatedBlocks(locatedBlock.build()); - } - return resp.build(); - } - - @Override - public AllocateScmBlockResponseProto allocateScmBlock( - RpcController controller, AllocateScmBlockRequestProto request) - throws ServiceException { - try { - AllocatedBlock allocatedBlock = - impl.allocateBlock(request.getSize(), request.getType(), - request.getFactor(), request.getOwner()); - if (allocatedBlock != null) { - return - AllocateScmBlockResponseProto.newBuilder() - .setKey(allocatedBlock.getKey()) - .setPipeline(allocatedBlock.getPipeline().getProtobufMessage()) - .setCreateContainer(allocatedBlock.getCreateContainer()) - .setErrorCode(AllocateScmBlockResponseProto.Error.success) - .build(); - } else { - return AllocateScmBlockResponseProto.newBuilder() - .setErrorCode(AllocateScmBlockResponseProto.Error.unknownFailure) - .build(); - } - } catch (IOException e) { - throw new ServiceException(e); - } - } - - @Override - public DeleteScmKeyBlocksResponseProto deleteScmKeyBlocks( - RpcController controller, DeleteScmKeyBlocksRequestProto req) - throws ServiceException { - DeleteScmKeyBlocksResponseProto.Builder resp = - DeleteScmKeyBlocksResponseProto.newBuilder(); - try { - List<BlockGroup> infoList = req.getKeyBlocksList().stream() - .map(BlockGroup::getFromProto).collect(Collectors.toList()); - final List<DeleteBlockGroupResult> results = - impl.deleteKeyBlocks(infoList); - for (DeleteBlockGroupResult result: results) { - DeleteKeyBlocksResultProto.Builder deleteResult = - DeleteKeyBlocksResultProto - .newBuilder() - .setObjectKey(result.getObjectKey()) - .addAllBlockResults(result.getBlockResultProtoList()); - resp.addResults(deleteResult.build()); - } - } catch (IOException ex) { - throw new ServiceException(ex); - } - return resp.build(); - } - - @Override - public HdslProtos.GetScmInfoRespsonseProto getScmInfo( - RpcController controller, HdslProtos.GetScmInfoRequestProto req) - throws ServiceException { - ScmInfo scmInfo; - try { - scmInfo = impl.getScmInfo(); - } catch (IOException ex) { - throw new ServiceException(ex); - } - return HdslProtos.GetScmInfoRespsonseProto.newBuilder() -
.setClusterId(scmInfo.getClusterId()) - .setScmId(scmInfo.getScmId()) - .build(); - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java ---------------------------------------------------------------------- diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java deleted file mode 100644 index be19c68..0000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ /dev/null @@ -1,202 +0,0 @@ - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import java.io.IOException; -import java.util.EnumSet; -import java.util.List; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerLocationProtocolProtos; -import org.apache.hadoop.scm.ScmInfo; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol; - -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto; -import 
org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; - -/** - * This class is the server-side translator that forwards requests received on - * {@link StorageContainerLocationProtocolPB} to the - * {@link StorageContainerLocationProtocol} server implementation. - */ -@InterfaceAudience.Private -public final class StorageContainerLocationProtocolServerSideTranslatorPB - implements StorageContainerLocationProtocolPB { - - private final StorageContainerLocationProtocol impl; - - /** - * Creates a new StorageContainerLocationProtocolServerSideTranslatorPB. - * - * @param impl {@link StorageContainerLocationProtocol} server implementation - */ - public StorageContainerLocationProtocolServerSideTranslatorPB( - StorageContainerLocationProtocol impl) throws IOException { - this.impl = impl; - } - - @Override - public ContainerResponseProto allocateContainer(RpcController unused, - ContainerRequestProto request) throws ServiceException { - try { - Pipeline pipeline = impl.allocateContainer(request.getReplicationType(), - request.getReplicationFactor(), request.getContainerName(), - request.getOwner()); - return ContainerResponseProto.newBuilder() - .setPipeline(pipeline.getProtobufMessage()) - .setErrorCode(ContainerResponseProto.Error.success) - .build(); - - } catch (IOException e) { - throw new ServiceException(e); - } - } - - @Override - public GetContainerResponseProto getContainer( - RpcController controller, GetContainerRequestProto request) - throws ServiceException { - try { - Pipeline pipeline = impl.getContainer(request.getContainerName()); - return GetContainerResponseProto.newBuilder() - .setPipeline(pipeline.getProtobufMessage()) - .build(); - } catch (IOException e) { - throw new ServiceException(e); - } - } - - @Override - public SCMListContainerResponseProto listContainer(RpcController controller, - SCMListContainerRequestProto request) throws ServiceException { - try { - String startName = null; - String prefixName = null; - int count = -1; - - // Arguments check. - if (request.hasPrefixName()) { - // Prefix name is given. - prefixName = request.getPrefixName(); - } - if (request.hasStartName()) { - // Start container name is given.
- startName = request.getStartName(); - } - - count = request.getCount(); - List<ContainerInfo> containerList = - impl.listContainer(startName, prefixName, count); - SCMListContainerResponseProto.Builder builder = - SCMListContainerResponseProto.newBuilder(); - for (ContainerInfo container : containerList) { - builder.addContainers(container.getProtobuf()); - } - return builder.build(); - } catch (IOException e) { - throw new ServiceException(e); - } - } - - @Override - public SCMDeleteContainerResponseProto deleteContainer( - RpcController controller, SCMDeleteContainerRequestProto request) - throws ServiceException { - try { - impl.deleteContainer(request.getContainerName()); - return SCMDeleteContainerResponseProto.newBuilder().build(); - } catch (IOException e) { - throw new ServiceException(e); - } - } - - @Override - public StorageContainerLocationProtocolProtos.NodeQueryResponseProto - queryNode(RpcController controller, - StorageContainerLocationProtocolProtos.NodeQueryRequestProto request) - throws ServiceException { - try { - EnumSet<HdslProtos.NodeState> nodeStateEnumSet = EnumSet.copyOf(request - .getQueryList()); - HdslProtos.NodePool datanodes = impl.queryNode(nodeStateEnumSet, - request.getScope(), request.getPoolName()); - return StorageContainerLocationProtocolProtos - .NodeQueryResponseProto.newBuilder() - .setDatanodes(datanodes) - .build(); - } catch (Exception e) { - throw new ServiceException(e); - } - } - - @Override - public ObjectStageChangeResponseProto notifyObjectStageChange( - RpcController controller, ObjectStageChangeRequestProto request) - throws ServiceException { - try { - impl.notifyObjectStageChange(request.getType(), request.getName(), - request.getOp(), request.getStage()); - return ObjectStageChangeResponseProto.newBuilder().build(); - } catch (IOException e) { - throw new ServiceException(e); - } - } - - @Override - public PipelineResponseProto allocatePipeline( - RpcController controller, PipelineRequestProto request) - throws ServiceException { - // TODO : Wiring this up requires one more patch. - return null; - } - - @Override - public HdslProtos.GetScmInfoRespsonseProto getScmInfo( - RpcController controller, HdslProtos.GetScmInfoRequestProto req) - throws ServiceException { - try { - ScmInfo scmInfo = impl.getScmInfo(); - return HdslProtos.GetScmInfoRespsonseProto.newBuilder() - .setClusterId(scmInfo.getClusterId()) - .setScmId(scmInfo.getScmId()) - .build(); - } catch (IOException ex) { - throw new ServiceException(ex); - } - - } -}
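For context on the lease classes removed earlier in this commit, a minimal usage sketch of that API follows. It is illustrative only: the class and method names (LeaseManager, Lease, acquire, release, shutdown, and the exception types) are taken from the deleted LeaseManager.java shown above, the resource keys are invented for the example, and callback registration on Lease is omitted because that class is not part of this excerpt.

    // Start a manager whose leases default to a 10 second timeout.
    LeaseManager<String> leaseManager = new LeaseManager<>(10000);
    leaseManager.start();
    try {
      // Acquire one lease with the default timeout and one with a custom timeout.
      Lease<String> lease = leaseManager.acquire("container-1");
      Lease<String> shortLease = leaseManager.acquire("container-2", 500L);
      // ... work with the leased resources while the leases are held ...
      // Return the leases before they expire; expired leases are instead
      // released by the LeaseMonitor thread, which runs their callbacks.
      leaseManager.release("container-1");
      leaseManager.release("container-2");
    } catch (LeaseAlreadyExistException | LeaseNotFoundException e) {
      // acquire() rejects a resource that already holds a lease;
      // release() fails if no active lease exists for the resource.
    } finally {
      // Invalidates any remaining leases without executing their callbacks.
      leaseManager.shutdown();
    }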