http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java index ed8b1e3..03c99ef 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.hdds.scm.container; -import com.google.common.primitives.Longs; import java.util.Set; import java.util.UUID; import org.apache.commons.lang3.RandomUtils; @@ -24,26 +23,22 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.NavigableSet; -import java.util.Random; +import java.util.concurrent.TimeoutException; + import org.slf4j.event.Level; /** @@ -57,7 +52,6 @@ public class TestContainerStateManagerIntegration { private StorageContainerManager scm; private ContainerManager containerManager; private ContainerStateManager containerStateManager; - private PipelineSelector selector; private String containerOwner = "OZONE"; @@ -70,8 +64,8 @@ public class TestContainerStateManagerIntegration { xceiverClientManager = new XceiverClientManager(conf); scm = cluster.getStorageContainerManager(); containerManager = scm.getContainerManager(); - containerStateManager = containerManager.getStateManager(); - selector = containerManager.getPipelineSelector(); + containerStateManager = ((SCMContainerManager)containerManager) + .getContainerStateManager(); } @After @@ -88,13 +82,13 @@ public class TestContainerStateManagerIntegration { .allocateContainer( xceiverClientManager.getType(), xceiverClientManager.getFactor(), containerOwner); + ContainerStateManager stateManager = new ContainerStateManager(conf); ContainerInfo info = containerStateManager .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), HddsProtos.LifeCycleState.ALLOCATED); Assert.assertEquals(container1.getContainerInfo().getContainerID(), info.getContainerID()); - Assert.assertEquals(OzoneConsts.GB * 3, info.getAllocatedBytes()); Assert.assertEquals(containerOwner, info.getOwner()); 
Assert.assertEquals(xceiverClientManager.getType(), info.getReplicationType()); @@ -117,35 +111,49 @@ public class TestContainerStateManagerIntegration { } @Test - public void testContainerStateManagerRestart() throws IOException { + public void testContainerStateManagerRestart() + throws IOException, TimeoutException, InterruptedException { // Allocate 5 containers in ALLOCATED state and 5 in CREATING state - List<ContainerInfo> containers = new ArrayList<>(); for (int i = 0; i < 10; i++) { + ContainerWithPipeline container = scm.getClientProtocolServer() .allocateContainer( xceiverClientManager.getType(), xceiverClientManager.getFactor(), containerOwner); - containers.add(container.getContainerInfo()); if (i >= 5) { scm.getContainerManager().updateContainerState(container - .getContainerInfo().getContainerID(), + .getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATE); } } - // New instance of ContainerStateManager should load all the containers in - // container store. - ContainerStateManager stateManager = - new ContainerStateManager(conf, containerManager, selector); - int matchCount = stateManager - .getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.ALLOCATED).size(); + cluster.restartStorageContainerManager(); + + List<ContainerInfo> result = cluster.getStorageContainerManager() + .getContainerManager().listContainer(null, 100); + + long matchCount = result.stream() + .filter(info -> + info.getOwner().equals(containerOwner)) + .filter(info -> + info.getReplicationType() == xceiverClientManager.getType()) + .filter(info -> + info.getReplicationFactor() == xceiverClientManager.getFactor()) + .filter(info -> + info.getState() == HddsProtos.LifeCycleState.ALLOCATED) + .count(); Assert.assertEquals(5, matchCount); - matchCount = stateManager.getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.CREATING).size(); + matchCount = result.stream() + .filter(info -> + info.getOwner().equals(containerOwner)) + .filter(info -> + info.getReplicationType() == xceiverClientManager.getType()) + .filter(info -> + info.getReplicationFactor() == xceiverClientManager.getFactor()) + .filter(info -> + info.getState() == HddsProtos.LifeCycleState.CREATING) + .count(); Assert.assertEquals(5, matchCount); } @@ -155,10 +163,10 @@ public class TestContainerStateManagerIntegration { allocateContainer(xceiverClientManager.getType(), xceiverClientManager.getFactor(), containerOwner); containerManager - .updateContainerState(container1.getContainerInfo().getContainerID(), + .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATE); containerManager - .updateContainerState(container1.getContainerInfo().getContainerID(), + .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATED); ContainerWithPipeline container2 = scm.getClientProtocolServer(). @@ -176,23 +184,24 @@ public class TestContainerStateManagerIntegration { .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), HddsProtos.LifeCycleState.ALLOCATED); + // space has already been allocated in container1, now container 2 should + // be chosen. 
Assert.assertEquals(container2.getContainerInfo().getContainerID(), info.getContainerID()); containerManager - .updateContainerState(container2.getContainerInfo().getContainerID(), + .updateContainerState(container2.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATE); containerManager - .updateContainerState(container2.getContainerInfo().getContainerID(), + .updateContainerState(container2.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATED); - // space has already been allocated in container1, now container 2 should - // be chosen. + // now we have to get container1 info = containerStateManager .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), HddsProtos.LifeCycleState.OPEN); - Assert.assertEquals(container2.getContainerInfo().getContainerID(), + Assert.assertEquals(container1.getContainerInfo().getContainerID(), info.getContainerID()); } @@ -217,7 +226,7 @@ public class TestContainerStateManagerIntegration { Assert.assertEquals(1, containers); containerManager - .updateContainerState(container1.getContainerInfo().getContainerID(), + .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATE); containers = containerStateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), @@ -225,7 +234,7 @@ public class TestContainerStateManagerIntegration { Assert.assertEquals(1, containers); containerManager - .updateContainerState(container1.getContainerInfo().getContainerID(), + .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATED); containers = containerStateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), @@ -233,7 +242,7 @@ public class TestContainerStateManagerIntegration { Assert.assertEquals(1, containers); containerManager - .updateContainerState(container1.getContainerInfo().getContainerID(), + .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.FINALIZE); containers = containerStateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), @@ -241,7 +250,7 @@ public class TestContainerStateManagerIntegration { Assert.assertEquals(1, containers); containerManager - .updateContainerState(container1.getContainerInfo().getContainerID(), + .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CLOSE); containers = containerStateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), @@ -249,7 +258,7 @@ public class TestContainerStateManagerIntegration { Assert.assertEquals(1, containers); containerManager - .updateContainerState(container1.getContainerInfo().getContainerID(), + .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.DELETE); containers = containerStateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), @@ -257,7 +266,7 @@ public class TestContainerStateManagerIntegration { Assert.assertEquals(1, containers); containerManager - .updateContainerState(container1.getContainerInfo().getContainerID(), + .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CLEANUP); containers = containerStateManager.getMatchingContainerIDs(containerOwner, 
xceiverClientManager.getType(), xceiverClientManager.getFactor(), @@ -271,10 +280,10 @@ public class TestContainerStateManagerIntegration { xceiverClientManager.getType(), xceiverClientManager.getFactor(), containerOwner); containerManager - .updateContainerState(container2.getContainerInfo().getContainerID(), + .updateContainerState(container2.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATE); containerManager - .updateContainerState(container2.getContainerInfo().getContainerID(), + .updateContainerState(container2.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.TIMEOUT); containers = containerStateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), @@ -288,16 +297,16 @@ public class TestContainerStateManagerIntegration { xceiverClientManager.getType(), xceiverClientManager.getFactor(), containerOwner); containerManager - .updateContainerState(container3.getContainerInfo().getContainerID(), + .updateContainerState(container3.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATE); containerManager - .updateContainerState(container3.getContainerInfo().getContainerID(), + .updateContainerState(container3.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CREATED); containerManager - .updateContainerState(container3.getContainerInfo().getContainerID(), + .updateContainerState(container3.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.FINALIZE); containerManager - .updateContainerState(container3.getContainerInfo().getContainerID(), + .updateContainerState(container3.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CLOSE); containers = containerStateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), @@ -305,46 +314,6 @@ public class TestContainerStateManagerIntegration { Assert.assertEquals(1, containers); } - @Test - public void testUpdatingAllocatedBytes() throws Exception { - ContainerWithPipeline container1 = scm.getClientProtocolServer() - .allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - containerManager.updateContainerState(container1 - .getContainerInfo().getContainerID(), - HddsProtos.LifeCycleEvent.CREATE); - containerManager.updateContainerState(container1 - .getContainerInfo().getContainerID(), - HddsProtos.LifeCycleEvent.CREATED); - - Random ran = new Random(); - long allocatedSize = 0; - for (int i = 0; i<5; i++) { - long size = Math.abs(ran.nextLong() % OzoneConsts.GB); - allocatedSize += size; - // trigger allocating bytes by calling getMatchingContainer - ContainerInfo info = containerStateManager - .getMatchingContainer(size, containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.OPEN); - Assert.assertEquals(container1.getContainerInfo().getContainerID(), - info.getContainerID()); - - SCMContainerManager containerMapping = - (SCMContainerManager) containerManager; - // manually trigger a flush, this will persist the allocated bytes value - // to disk - containerMapping.flushContainerInfo(); - - // the persisted value should always be equal to allocated size. 
- byte[] containerBytes = containerMapping.getContainerStore().get( - Longs.toByteArray(container1.getContainerInfo().getContainerID())); - HddsProtos.SCMContainerInfo infoProto = - HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes); - ContainerInfo currentInfo = ContainerInfo.fromProtobuf(infoProto); - Assert.assertEquals(allocatedSize, currentInfo.getAllocatedBytes()); - } - } @Test public void testReplicaMap() throws Exception { @@ -360,59 +329,71 @@ public class TestContainerStateManagerIntegration { // Test 1: no replica's exist ContainerID containerID = ContainerID.valueof(RandomUtils.nextLong()); - Set<DatanodeDetails> replicaSet; - LambdaTestUtils.intercept(SCMException.class, "", () -> { + Set<ContainerReplica> replicaSet; + try { containerStateManager.getContainerReplicas(containerID); - }); + Assert.fail(); + } catch (ContainerNotFoundException ex) { + // expected. + } + + ContainerWithPipeline container = scm.getClientProtocolServer() + .allocateContainer( + xceiverClientManager.getType(), + xceiverClientManager.getFactor(), containerOwner); + + ContainerID id = container.getContainerInfo().containerID(); // Test 2: Add replica nodes and then test - containerStateManager.addContainerReplica(containerID, dn1); - containerStateManager.addContainerReplica(containerID, dn2); - replicaSet = containerStateManager.getContainerReplicas(containerID); + ContainerReplica replicaOne = ContainerReplica.newBuilder() + .setContainerID(id) + .setDatanodeDetails(dn1) + .build(); + ContainerReplica replicaTwo = ContainerReplica.newBuilder() + .setContainerID(id) + .setDatanodeDetails(dn2) + .build(); + containerStateManager.updateContainerReplica(id, replicaOne); + containerStateManager.updateContainerReplica(id, replicaTwo); + replicaSet = containerStateManager.getContainerReplicas(id); Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(dn1)); - Assert.assertTrue(replicaSet.contains(dn2)); + Assert.assertTrue(replicaSet.contains(replicaOne)); + Assert.assertTrue(replicaSet.contains(replicaTwo)); // Test 3: Remove one replica node and then test - containerStateManager.removeContainerReplica(containerID, dn1); - replicaSet = containerStateManager.getContainerReplicas(containerID); + containerStateManager.removeContainerReplica(id, replicaOne); + replicaSet = containerStateManager.getContainerReplicas(id); Assert.assertEquals(1, replicaSet.size()); - Assert.assertFalse(replicaSet.contains(dn1)); - Assert.assertTrue(replicaSet.contains(dn2)); + Assert.assertFalse(replicaSet.contains(replicaOne)); + Assert.assertTrue(replicaSet.contains(replicaTwo)); // Test 3: Remove second replica node and then test - containerStateManager.removeContainerReplica(containerID, dn2); - replicaSet = containerStateManager.getContainerReplicas(containerID); + containerStateManager.removeContainerReplica(id, replicaTwo); + replicaSet = containerStateManager.getContainerReplicas(id); Assert.assertEquals(0, replicaSet.size()); - Assert.assertFalse(replicaSet.contains(dn1)); - Assert.assertFalse(replicaSet.contains(dn2)); + Assert.assertFalse(replicaSet.contains(replicaOne)); + Assert.assertFalse(replicaSet.contains(replicaTwo)); // Test 4: Re-insert dn1 - containerStateManager.addContainerReplica(containerID, dn1); - replicaSet = containerStateManager.getContainerReplicas(containerID); + containerStateManager.updateContainerReplica(id, replicaOne); + replicaSet = containerStateManager.getContainerReplicas(id); Assert.assertEquals(1, replicaSet.size()); - 
Assert.assertTrue(replicaSet.contains(dn1)); - Assert.assertFalse(replicaSet.contains(dn2)); + Assert.assertTrue(replicaSet.contains(replicaOne)); + Assert.assertFalse(replicaSet.contains(replicaTwo)); // Re-insert dn2 - containerStateManager.addContainerReplica(containerID, dn2); - replicaSet = containerStateManager.getContainerReplicas(containerID); + containerStateManager.updateContainerReplica(id, replicaTwo); + replicaSet = containerStateManager.getContainerReplicas(id); Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(dn1)); - Assert.assertTrue(replicaSet.contains(dn2)); + Assert.assertTrue(replicaSet.contains(replicaOne)); + Assert.assertTrue(replicaSet.contains(replicaTwo)); - Assert.assertFalse(logCapturer.getOutput().contains( - "ReplicaMap already contains entry for container Id: " + containerID - .toString() + ",DataNode: " + dn1.toString())); // Re-insert dn1 - containerStateManager.addContainerReplica(containerID, dn1); - replicaSet = containerStateManager.getContainerReplicas(containerID); + containerStateManager.updateContainerReplica(id, replicaOne); + replicaSet = containerStateManager.getContainerReplicas(id); Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(dn1)); - Assert.assertTrue(replicaSet.contains(dn2)); - Assert.assertTrue(logCapturer.getOutput().contains( - "ReplicaMap already contains entry for container Id: " + containerID - .toString() + ",DataNode: " + dn1.toString())); + Assert.assertTrue(replicaSet.contains(replicaOne)); + Assert.assertTrue(replicaSet.contains(replicaTwo)); } }
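The replica-map hunks above replace the old addContainerReplica(containerID, datanodeDetails) calls with ContainerReplica value objects registered through updateContainerReplica, and a lookup for an unknown container now surfaces ContainerNotFoundException instead of SCMException. The snippet below is a minimal sketch of that usage pattern, not part of the patch: it only uses calls and types visible in the diff, the package names are assumed from the imports shown there, and the helper name trackSingleReplica is illustrative.

import java.util.Set;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ContainerStateManager;

/** Illustrative only: condenses the replica calls exercised by the test above. */
public final class ReplicaTrackingSketch {

  private ReplicaTrackingSketch() {
  }

  static void trackSingleReplica(ContainerStateManager stateManager,
      ContainerID id, DatanodeDetails datanode) throws Exception {
    // Unknown containers now fail with ContainerNotFoundException
    // (the old API threw SCMException here).
    try {
      stateManager.getContainerReplicas(ContainerID.valueof(Long.MAX_VALUE));
    } catch (ContainerNotFoundException expected) {
      // expected for a container SCM has never seen
    }

    // Replicas are value objects built per (container, datanode) pair,
    // not bare DatanodeDetails entries.
    ContainerReplica replica = ContainerReplica.newBuilder()
        .setContainerID(id)
        .setDatanodeDetails(datanode)
        .build();

    // updateContainerReplica adds a new replica or refreshes an existing
    // one, so re-registering the same datanode is not an error.
    stateManager.updateContainerReplica(id, replica);

    Set<ContainerReplica> replicas = stateManager.getContainerReplicas(id);
    assert replicas.contains(replica);

    stateManager.removeContainerReplica(id, replica);
  }
}

The update semantics appear to be why the old "ReplicaMap already contains entry" log assertion was dropped from the test: re-inserting dn1 is now an ordinary refresh rather than a logged duplicate.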
http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java index c0a6989..7a0fa5c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -50,7 +49,6 @@ public class TestNode2PipelineMap { private static OzoneConfiguration conf; private static StorageContainerManager scm; private static ContainerWithPipeline ratisContainer; - private static ContainerStateMap stateMap; private static ContainerManager containerManager; private static PipelineSelector pipelineSelector; @@ -66,7 +64,6 @@ public class TestNode2PipelineMap { cluster.waitForClusterToBeReady(); scm = cluster.getStorageContainerManager(); containerManager = scm.getContainerManager(); - stateMap = containerManager.getStateManager().getContainerStateMap(); ratisContainer = containerManager.allocateContainer( RATIS, THREE, "testOwner"); pipelineSelector = containerManager.getPipelineSelector(); @@ -89,10 +86,10 @@ public class TestNode2PipelineMap { Set<ContainerID> set = pipelineSelector.getOpenContainerIDsByPipeline( ratisContainer.getPipeline().getId()); - long cId = ratisContainer.getContainerInfo().getContainerID(); + ContainerID cId = ratisContainer.getContainerInfo().containerID(); Assert.assertEquals(1, set.size()); set.forEach(containerID -> - Assert.assertEquals(containerID, ContainerID.valueof(cId))); + Assert.assertEquals(containerID, cId)); List<DatanodeDetails> dns = ratisContainer.getPipeline().getMachines(); Assert.assertEquals(3, dns.size()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index 7e6d5b4..52a493d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; import 
org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -50,7 +49,6 @@ public class TestPipelineClose { private static StorageContainerManager scm; private static ContainerWithPipeline ratisContainer1; private static ContainerWithPipeline ratisContainer2; - private static ContainerStateMap stateMap; private static ContainerManager containerManager; private static PipelineSelector pipelineSelector; @@ -66,7 +64,6 @@ public class TestPipelineClose { cluster.waitForClusterToBeReady(); scm = cluster.getStorageContainerManager(); containerManager = scm.getContainerManager(); - stateMap = containerManager.getStateManager().getContainerStateMap(); ratisContainer1 = containerManager .allocateContainer(RATIS, THREE, "testOwner"); ratisContainer2 = containerManager @@ -93,10 +90,9 @@ public class TestPipelineClose { Set<ContainerID> set = pipelineSelector.getOpenContainerIDsByPipeline( ratisContainer1.getPipeline().getId()); - long cId = ratisContainer1.getContainerInfo().getContainerID(); + ContainerID cId = ratisContainer1.getContainerInfo().containerID(); Assert.assertEquals(1, set.size()); - set.forEach(containerID -> - Assert.assertEquals(containerID, ContainerID.valueof(cId))); + set.forEach(containerID -> Assert.assertEquals(containerID, cId)); // Now close the container and it should not show up while fetching // containers by pipeline @@ -133,7 +129,7 @@ public class TestPipelineClose { ratisContainer2.getPipeline().getId()); Assert.assertEquals(1, setOpen.size()); - long cId2 = ratisContainer2.getContainerInfo().getContainerID(); + ContainerID cId2 = ratisContainer2.getContainerInfo().containerID(); containerManager .updateContainerState(cId2, HddsProtos.LifeCycleEvent.CREATE); containerManager http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index 16e66ba..d7e5360 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -45,14 +46,15 @@ public class OzoneTestUtils { return performOperationOnKeyContainers((blockID) -> { try { scm.getContainerManager() - .updateContainerState(blockID.getContainerID(), + .updateContainerState(ContainerID.valueof(blockID.getContainerID()), HddsProtos.LifeCycleEvent.FINALIZE); scm.getContainerManager() - .updateContainerState(blockID.getContainerID(), + .updateContainerState(ContainerID.valueof(blockID.getContainerID()), HddsProtos.LifeCycleEvent.CLOSE); Assert.assertFalse(scm.getContainerManager() - .getContainerWithPipeline(blockID.getContainerID()) - .getContainerInfo().isContainerOpen()); + .getContainerWithPipeline(ContainerID.valueof( + 
blockID.getContainerID())) + .getContainerInfo().isOpen()); } catch (IOException e) { e.printStackTrace(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index c3c5d04..e260924 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.block.DeletedBlockLog; import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -168,8 +169,7 @@ public class TestStorageContainerManager { } else { // If passes permission check, it should fail with // key not exist exception. - Assert.assertTrue(e.getMessage() - .contains("Specified key does not exist")); + Assert.assertTrue(e instanceof ContainerNotFoundException); } } } finally { http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java index 1daf8e1..26ece8b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.ozone.*; import org.apache.hadoop.ozone.client.*; @@ -445,7 +446,8 @@ public class TestOzoneRestClient { // Sum the data size from chunks in Container via containerID // and localID, make sure the size equals to the actually value size. 
Pipeline pipeline = cluster.getStorageContainerManager() - .getContainerManager().getContainerWithPipeline(containerID) + .getContainerManager().getContainerWithPipeline( + ContainerID.valueof(containerID)) .getPipeline(); List<DatanodeDetails> datanodes = pipeline.getMachines(); Assert.assertEquals(datanodes.size(), 1); http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java index da8d334..ee9919d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.hdds.scm.container.common.helpers. StorageContainerException; @@ -302,7 +303,8 @@ public class TestCloseContainerHandlingByClient { for (long containerID : containerIdList) { Pipeline pipeline = cluster.getStorageContainerManager().getContainerManager() - .getContainerWithPipeline(containerID).getPipeline(); + .getContainerWithPipeline(ContainerID.valueof(containerID)) + .getPipeline(); pipelineList.add(pipeline); List<DatanodeDetails> datanodes = pipeline.getMachines(); for (DatanodeDetails details : datanodes) { @@ -349,7 +351,8 @@ public class TestCloseContainerHandlingByClient { long containerID = locationInfos.get(0).getContainerID(); List<DatanodeDetails> datanodes = cluster.getStorageContainerManager().getContainerManager() - .getContainerWithPipeline(containerID).getPipeline().getMachines(); + .getContainerWithPipeline(ContainerID.valueof(containerID)) + .getPipeline().getMachines(); Assert.assertEquals(1, datanodes.size()); waitForContainerClose(keyName, key, HddsProtos.ReplicationType.STAND_ALONE); dataString = fixedLengthString(keyString, (1 * blockSize)); @@ -451,7 +454,8 @@ public class TestCloseContainerHandlingByClient { long containerID = locationInfos.get(0).getContainerID(); List<DatanodeDetails> datanodes = cluster.getStorageContainerManager().getContainerManager() - .getContainerWithPipeline(containerID).getPipeline().getMachines(); + .getContainerWithPipeline(ContainerID.valueof(containerID)) + .getPipeline().getMachines(); Assert.assertEquals(1, datanodes.size()); // move the container on the datanode to Closing state, this will ensure // closing the key will hit BLOCK_NOT_COMMITTED_EXCEPTION while trying http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index b60343a..881c827 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -23,7 +23,8 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.ozone.*; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -642,7 +643,8 @@ public class TestOzoneRpcClient { // Second, sum the data size from chunks in Container via containerID // and localID, make sure the size equals to the size from keyDetails. Pipeline pipeline = cluster.getStorageContainerManager() - .getContainerManager().getContainerWithPipeline(containerID) + .getContainerManager().getContainerWithPipeline( + ContainerID.valueof(containerID)) .getPipeline(); List<DatanodeDetails> datanodes = pipeline.getMachines(); Assert.assertEquals(datanodes.size(), 1); http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index fc361b8..e4cbad5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -242,8 +242,7 @@ public class TestBlockDeletion { logCapturer.clearOutput(); scm.getContainerManager().processContainerReports( - cluster.getHddsDatanodes().get(0).getDatanodeDetails(), dummyReport, - false); + cluster.getHddsDatanodes().get(0).getDatanodeDetails(), dummyReport); // wait for event to be handled by event handler Thread.sleep(1000); String output = logCapturer.getOutput(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index 9602207..62cc5b2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -102,7 +103,8 @@ public class TestCloseContainerByPipeline { long containerID = omKeyLocationInfo.getContainerID(); Pipeline pipeline = cluster.getStorageContainerManager() - .getContainerManager().getContainerWithPipeline(containerID) + .getContainerManager().getContainerWithPipeline( + ContainerID.valueof(containerID)) .getPipeline(); List<DatanodeDetails> datanodes = pipeline.getMachines(); Assert.assertEquals(datanodes.size(), 1); @@ -157,7 +159,8 @@ public class TestCloseContainerByPipeline { long containerID = omKeyLocationInfo.getContainerID(); Pipeline pipeline = cluster.getStorageContainerManager() - .getContainerManager().getContainerWithPipeline(containerID) + .getContainerManager().getContainerWithPipeline( + ContainerID.valueof(containerID)) .getPipeline(); List<DatanodeDetails> datanodes = pipeline.getMachines(); Assert.assertEquals(datanodes.size(), 1); @@ -214,7 +217,8 @@ public class TestCloseContainerByPipeline { long containerID = omKeyLocationInfo.getContainerID(); Pipeline pipeline = cluster.getStorageContainerManager() - .getContainerManager().getContainerWithPipeline(containerID) + .getContainerManager().getContainerWithPipeline( + ContainerID.valueof(containerID)) .getPipeline(); List<DatanodeDetails> datanodes = pipeline.getMachines(); Assert.assertEquals(3, datanodes.size()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index 0137a40..4cd42ab 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.ObjectStore; @@ -81,7 +82,8 @@ public class TestCloseContainerHandler { long containerID = omKeyLocationInfo.getContainerID(); Pipeline pipeline = cluster.getStorageContainerManager() - .getContainerManager().getContainerWithPipeline(containerID) + 
.getContainerManager().getContainerWithPipeline( + ContainerID.valueof(containerID)) .getPipeline(); Assert.assertFalse(isContainerClosed(cluster, containerID)); http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index c24cfbf..8b464bd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.*; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java index ef84b0e..ed50a9f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; @@ -127,8 +127,7 @@ public class TestScmChillMode { new TestStorageContainerManagerHelper(cluster, conf); Map<String, OmKeyInfo> keyLocations = helper.createKeys(100, 4096); final List<ContainerInfo> containers = cluster - .getStorageContainerManager() - .getContainerManager().getStateManager().getAllContainers(); + .getStorageContainerManager().getContainerManager().getContainers(); GenericTestUtils.waitFor(() -> { return containers.size() > 10; }, 100, 1000); @@ -251,8 +250,7 @@ public class TestScmChillMode { new TestStorageContainerManagerHelper(miniCluster, conf); Map<String, OmKeyInfo> keyLocations = helper.createKeys(100 * 2, 4096); final List<ContainerInfo> containers = miniCluster - .getStorageContainerManager().getContainerManager() - .getStateManager().getAllContainers(); + 
.getStorageContainerManager().getContainerManager().getContainers(); GenericTestUtils.waitFor(() -> { return containers.size() > 10; }, 100, 1000 * 2); @@ -268,9 +266,9 @@ public class TestScmChillMode { .getStorageContainerManager().getContainerManager(); containers.forEach(c -> { try { - mapping.updateContainerState(c.getContainerID(), + mapping.updateContainerState(c.containerID(), HddsProtos.LifeCycleEvent.FINALIZE); - mapping.updateContainerState(c.getContainerID(), + mapping.updateContainerState(c.containerID(), LifeCycleEvent.CLOSE); } catch (IOException e) { LOG.info("Failed to change state of open containers.", e); @@ -348,7 +346,7 @@ public class TestScmChillMode { .getStorageContainerManager().getClientProtocolServer(); assertFalse((scm.getClientProtocolServer()).getChillModeStatus()); final List<ContainerInfo> containers = scm.getContainerManager() - .getStateManager().getAllContainers(); + .getContainers(); scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, true); GenericTestUtils.waitFor(() -> { return clientProtocolServer.getChillModeStatus(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index e035eb2..af813ed 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -411,10 +411,6 @@ public class KeyManagerImpl implements KeyManager { // A rename is a no-op if the target and source name is same. // TODO: Discuss if we need to throw?. - // TODO: Define the semantics of rename more clearly. Today this code - // will allow rename of a Key across volumes. This should *not* be - // allowed. The documentation of Ozone says that rename is permitted only - // within a volume. 
if (fromKeyName.equals(toKeyName)) { return; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java index e680dd2..f29a5e6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java @@ -22,7 +22,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; @@ -70,9 +70,6 @@ public class BenchMarkContainerStateMap { .setPipelineID(pipeline.getId()) .setReplicationType(pipeline.getType()) .setReplicationFactor(pipeline.getFactor()) - // This is bytes allocated for blocks inside container, not the - // container size - .setAllocatedBytes(0) .setUsedBytes(0) .setNumberOfKeys(0) .setStateEnterTime(Time.monotonicNow()) @@ -93,9 +90,6 @@ public class BenchMarkContainerStateMap { .setPipelineID(pipeline.getId()) .setReplicationType(pipeline.getType()) .setReplicationFactor(pipeline.getFactor()) - // This is bytes allocated for blocks inside container, not the - // container size - .setAllocatedBytes(0) .setUsedBytes(0) .setNumberOfKeys(0) .setStateEnterTime(Time.monotonicNow()) @@ -115,9 +109,6 @@ public class BenchMarkContainerStateMap { .setPipelineID(pipeline.getId()) .setReplicationType(pipeline.getType()) .setReplicationFactor(pipeline.getFactor()) - // This is bytes allocated for blocks inside container, not the - // container size - .setAllocatedBytes(0) .setUsedBytes(0) .setNumberOfKeys(0) .setStateEnterTime(Time.monotonicNow()) @@ -188,9 +179,6 @@ public class BenchMarkContainerStateMap { .setPipelineID(pipeline.getId()) .setReplicationType(pipeline.getType()) .setReplicationFactor(pipeline.getFactor()) - // This is bytes allocated for blocks inside container, not the - // container size - .setAllocatedBytes(0) .setUsedBytes(0) .setNumberOfKeys(0) .setStateEnterTime(Time.monotonicNow()) http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index 522fea9..c03128d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; 
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.utils.MetadataStore; @@ -83,7 +83,6 @@ public class SQLCLI extends Configured implements Tool { "replicationType TEXT NOT NULL," + "replicationFactor TEXT NOT NULL," + "usedBytes LONG NOT NULL," + - "allocatedBytes LONG NOT NULL," + "owner TEXT," + "numberOfKeys LONG)"; private static final String CREATE_DATANODE_INFO = @@ -94,8 +93,8 @@ public class SQLCLI extends Configured implements Tool { "containerPort INTEGER NOT NULL);"; private static final String INSERT_CONTAINER_INFO = "INSERT INTO containerInfo (containerID, replicationType, " - + "replicationFactor, usedBytes, allocatedBytes, owner, " - + "numberOfKeys) VALUES (\"%d\", \"%s\", \"%s\", \"%d\", \"%d\", " + + "replicationFactor, usedBytes, owner, " + + "numberOfKeys) VALUES (\"%d\", \"%s\", \"%s\", \"%d\", " + "\"%s\", \"%d\")"; private static final String INSERT_DATANODE_INFO = "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " + @@ -498,7 +497,6 @@ public class SQLCLI extends Configured implements Tool { containerInfo.getReplicationType(), containerInfo.getReplicationFactor(), containerInfo.getUsedBytes(), - containerInfo.getAllocatedBytes(), containerInfo.getOwner(), containerInfo.getNumberOfKeys()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java ---------------------------------------------------------------------- diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index a476583..922856b 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -120,8 +120,7 @@ public class TestContainerSQLCli { cluster.getStorageContainerManager().stop(); eventQueue = new EventQueue(); nodeManager = cluster.getStorageContainerManager().getScmNodeManager(); - containerManager = new SCMContainerManager(conf, nodeManager, 128, - eventQueue); + containerManager = new SCMContainerManager(conf, nodeManager, eventQueue); blockManager = new BlockManagerImpl( conf, nodeManager, containerManager, eventQueue); eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
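Most of the remaining hunks are mechanical call-site updates: ContainerManager lookups and state transitions now take the typed ContainerID instead of a raw long, ContainerInfo moved to the org.apache.hadoop.hdds.scm.container package, and callers use containerID()/isOpen() in place of getContainerID()/isContainerOpen(). Below is a rough sketch of the adapted call pattern, assembled only from methods visible in the diff; the helper name and the finalize-then-close flow are illustrative (modeled on the OzoneTestUtils hunk), not an API defined by this patch.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

/** Illustrative only: the ContainerID.valueof(...) adaptation used throughout the patch. */
public final class ContainerIdCallSiteSketch {

  private ContainerIdCallSiteSketch() {
  }

  static List<DatanodeDetails> closeContainerAndListNodes(
      ContainerManager containerManager, long rawContainerId)
      throws IOException {
    // Wrap the raw long id (e.g. from BlockID or OmKeyLocationInfo) once,
    // then reuse the typed ContainerID for every manager call.
    ContainerID id = ContainerID.valueof(rawContainerId);

    containerManager.updateContainerState(id,
        HddsProtos.LifeCycleEvent.FINALIZE);
    containerManager.updateContainerState(id,
        HddsProtos.LifeCycleEvent.CLOSE);

    // getContainerWithPipeline also takes the typed id now.
    Pipeline pipeline =
        containerManager.getContainerWithPipeline(id).getPipeline();
    return pipeline.getMachines();
  }
}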