http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java index 6023351..e70c3e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java @@ -60,6 +60,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey; + + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity + .TestUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent; import org.apache.hadoop.yarn.util.Records; @@ -78,25 +84,21 @@ public class Application { final private ApplicationAttemptId applicationAttemptId; final private ResourceManager resourceManager; private final static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); - - final private Map<Priority, Resource> requestSpec = - new TreeMap<Priority, Resource>( - new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator()); - - final private Map<Priority, Map<String, ResourceRequest>> requests = - new TreeMap<Priority, Map<String, ResourceRequest>>( - new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator()); - - final Map<Priority, Set<Task>> tasks = - new TreeMap<Priority, Set<Task>>( - new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator()); - - final private Set<ResourceRequest> ask = - new TreeSet<ResourceRequest>( - new org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator()); - final private Map<String, NodeManager> nodes = - new HashMap<String, NodeManager>(); + final private Map<SchedulerRequestKey, Resource> requestSpec = + new TreeMap<>(); + + final private Map<SchedulerRequestKey, Map<String, ResourceRequest>> + requests = new TreeMap<>(); + + final Map<SchedulerRequestKey, Set<Task>> tasks = new TreeMap<>(); + + final private Set<ResourceRequest> ask = + new TreeSet<>( + new org.apache.hadoop.yarn.api.records.ResourceRequest + .ResourceRequestComparator()); + + final private Map<String, NodeManager> nodes = new HashMap<>(); Resource used = recordFactory.newRecordInstance(Resource.class); @@ -188,13 +190,19 @@ public class Application { new AppAttemptAddedSchedulerEvent(this.applicationAttemptId, false); scheduler.handle(addAttemptEvent); } - + public synchronized void addResourceRequestSpec( Priority 
priority, Resource capability) { - Resource currentSpec = requestSpec.put(priority, capability); + addResourceRequestSpec(TestUtils.toSchedulerKey(priority.getPriority()), + capability); + } + public synchronized void addResourceRequestSpec( + SchedulerRequestKey schedulerKey, Resource capability) { + Resource currentSpec = requestSpec.put(schedulerKey, capability); if (currentSpec != null) { throw new IllegalStateException("Resource spec already exists for " + - "priority " + priority.getPriority() + " - " + currentSpec.getMemorySize()); + "priority " + schedulerKey.getPriority().getPriority() + + " - " + currentSpec.getMemorySize()); } } @@ -208,29 +216,29 @@ public class Application { } public synchronized void addTask(Task task) { - Priority priority = task.getPriority(); - Map<String, ResourceRequest> requests = this.requests.get(priority); + SchedulerRequestKey schedulerKey = task.getSchedulerKey(); + Map<String, ResourceRequest> requests = this.requests.get(schedulerKey); if (requests == null) { requests = new HashMap<String, ResourceRequest>(); - this.requests.put(priority, requests); + this.requests.put(schedulerKey, requests); if(LOG.isDebugEnabled()) { - LOG.debug("Added priority=" + priority + " application=" - + applicationId); + LOG.debug("Added priority=" + schedulerKey.getPriority() + + " application="+ applicationId); } } - final Resource capability = requestSpec.get(priority); + final Resource capability = requestSpec.get(schedulerKey); // Note down the task - Set<Task> tasks = this.tasks.get(priority); + Set<Task> tasks = this.tasks.get(schedulerKey); if (tasks == null) { tasks = new HashSet<Task>(); - this.tasks.put(priority, tasks); + this.tasks.put(schedulerKey, tasks); } tasks.add(task); LOG.info("Added task " + task.getTaskId() + " to application " + - applicationId + " at priority " + priority); + applicationId + " at priority " + schedulerKey.getPriority()); if(LOG.isDebugEnabled()) { LOG.debug("addTask: application=" + applicationId @@ -240,21 +248,21 @@ public class Application { // Create resource requests for (String host : task.getHosts()) { // Data-local - addResourceRequest(priority, requests, host, capability); + addResourceRequest(schedulerKey, requests, host, capability); } // Rack-local for (String rack : task.getRacks()) { - addResourceRequest(priority, requests, rack, capability); + addResourceRequest(schedulerKey, requests, rack, capability); } // Off-switch - addResourceRequest(priority, requests, ResourceRequest.ANY, capability); + addResourceRequest(schedulerKey, requests, ResourceRequest.ANY, capability); } public synchronized void finishTask(Task task) throws IOException, YarnException { - Set<Task> tasks = this.tasks.get(task.getPriority()); + Set<Task> tasks = this.tasks.get(task.getSchedulerKey()); if (!tasks.remove(task)) { throw new IllegalStateException( "Finishing unknown task " + task.getTaskId() + @@ -270,7 +278,7 @@ public class Application { StopContainersRequest.newInstance(containerIds); nodeManager.stopContainers(stopRequest); - Resources.subtractFrom(used, requestSpec.get(task.getPriority())); + Resources.subtractFrom(used, requestSpec.get(task.getSchedulerKey())); LOG.info("Finished task " + task.getTaskId() + " of application " + applicationId + @@ -279,13 +287,13 @@ public class Application { } private synchronized void addResourceRequest( - Priority priority, Map<String, ResourceRequest> requests, + SchedulerRequestKey schedulerKey, Map<String, ResourceRequest> requests, String resourceName, Resource capability) { 
ResourceRequest request = requests.get(resourceName); if (request == null) { request = org.apache.hadoop.yarn.server.utils.BuilderUtils.newResourceRequest( - priority, resourceName, capability, 1); + schedulerKey.getPriority(), resourceName, capability, 1); requests.put(resourceName, request); } else { request.setNumContainers(request.getNumContainers() + 1); @@ -299,13 +307,13 @@ public class Application { ask.add( org.apache.hadoop.yarn.server.utils.BuilderUtils.newResourceRequest( request)); // clone to ensure the RM doesn't manipulate the same obj - - if(LOG.isDebugEnabled()) { + + if (LOG.isDebugEnabled()) { LOG.debug("addResourceRequest: applicationId=" + applicationId.getId() - + " priority=" + priority.getPriority() - + " resourceName=" + resourceName + " capability=" + capability - + " numContainers=" + request.getNumContainers() - + " #asks=" + ask.size()); + + " priority=" + schedulerKey.getPriority().getPriority() + + " resourceName=" + resourceName + " capability=" + capability + + " numContainers=" + request.getNumContainers() + + " #asks=" + ask.size()); } } @@ -349,10 +357,10 @@ public class Application { int numContainers = containers.size(); // Schedule in priority order - for (Priority priority : requests.keySet()) { - assign(priority, NodeType.NODE_LOCAL, containers); - assign(priority, NodeType.RACK_LOCAL, containers); - assign(priority, NodeType.OFF_SWITCH, containers); + for (SchedulerRequestKey schedulerKey: requests.keySet()) { + assign(schedulerKey, NodeType.NODE_LOCAL, containers); + assign(schedulerKey, NodeType.RACK_LOCAL, containers); + assign(schedulerKey, NodeType.OFF_SWITCH, containers); if (containers.isEmpty()) { break; @@ -368,15 +376,18 @@ public class Application { assign(getResources()); } - private synchronized void assign(Priority priority, NodeType type, - List<Container> containers) throws IOException, YarnException { + private synchronized void assign(SchedulerRequestKey schedulerKey, + NodeType type, List<Container> containers) + throws IOException, YarnException { for (Iterator<Container> i=containers.iterator(); i.hasNext();) { Container container = i.next(); String host = container.getNodeId().toString(); - if (Resources.equals(requestSpec.get(priority), container.getResource())) { + if (Resources.equals(requestSpec.get(schedulerKey), + container.getResource())) { // See which task can use this container - for (Iterator<Task> t=tasks.get(priority).iterator(); t.hasNext();) { + for (Iterator<Task> t=tasks.get(schedulerKey).iterator(); + t.hasNext();) { Task task = t.next(); if (task.getState() == State.PENDING && task.canSchedule(type, host)) { NodeManager nodeManager = getNodeManager(host); @@ -386,14 +397,15 @@ public class Application { // Track application resource usage Resources.addTo(used, container.getResource()); - + LOG.info("Assigned container (" + container + ") of type " + type + - " to task " + task.getTaskId() + " at priority " + priority + + " to task " + task.getTaskId() + " at priority " + + schedulerKey.getPriority() + " on node " + nodeManager.getHostName() + ", currently using " + used + " resources"); // Update resource requests - updateResourceRequests(requests.get(priority), type, task); + updateResourceRequests(requests.get(schedulerKey), type, task); // Launch the container StartContainerRequest scRequest =
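For reference, the pattern the Application test helper above switches to: its request-bookkeeping maps are now keyed by SchedulerRequestKey instead of Priority, relying on the key's natural ordering (so the old resource.Priority.Comparator is dropped), and the Priority-based addResourceRequestSpec overload simply wraps the priority via TestUtils.toSchedulerKey and delegates. A minimal, self-contained sketch of that pattern — hypothetical class name, not the committed code:

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;

public class RequestSpecSketch {
  // No explicit comparator: entries sort by SchedulerRequestKey's natural
  // ordering (priority-based), which is what the new TreeMap<>() relies on.
  private final Map<SchedulerRequestKey, Resource> requestSpec = new TreeMap<>();

  public synchronized void addResourceRequestSpec(Priority priority,
      Resource capability) {
    // The old Priority entry point delegates after converting to a scheduler key.
    addResourceRequestSpec(TestUtils.toSchedulerKey(priority.getPriority()),
        capability);
  }

  public synchronized void addResourceRequestSpec(SchedulerRequestKey key,
      Resource capability) {
    Resource currentSpec = requestSpec.put(key, capability);
    if (currentSpec != null) {
      throw new IllegalStateException("Resource spec already exists for priority "
          + key.getPriority().getPriority() + " - " + currentSpec.getMemorySize());
    }
  }
}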
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java index eebfa1d..35218bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java @@ -31,6 +31,9 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity + .TestUtils; public class Task { private static final Log LOG = LogFactory.getLog(Task.class); @@ -40,6 +43,7 @@ public class Task { final private ApplicationId applicationId; final private int taskId; final private Priority priority; + final private SchedulerRequestKey schedulerKey; final private Set<String> hosts = new HashSet<String>(); final private Set<String> racks = new HashSet<String>(); @@ -48,7 +52,7 @@ public class Task { private org.apache.hadoop.yarn.server.resourcemanager.NodeManager nodeManager; private State state; - + public Task(Application application, Priority priority, String[] hosts) { this.applicationId = application.getApplicationId(); this.priority = priority; @@ -64,6 +68,7 @@ public class Task { this.racks.add(Application.resolve(host)); } } + this.schedulerKey = TestUtils.toSchedulerKey(priority.getPriority()); LOG.info("Task " + taskId + " added to application " + this.applicationId + " with " + this.hosts.size() + " hosts, " + racks.size() + " racks"); } @@ -75,6 +80,10 @@ public class Task { public Priority getPriority() { return priority; } + + public SchedulerRequestKey getSchedulerKey() { + return schedulerKey; + } public org.apache.hadoop.yarn.server.resourcemanager.NodeManager getNodeManager() { return nodeManager; http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java index 7c8fb2a..3d3f1ea 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java @@ -36,6 +36,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler + .SchedulerRequestKey; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; @@ -205,7 +207,9 @@ public class ProportionalCapacityPreemptionPolicyMockFramework { Container c = mock(Container.class); when(c.getResource()).thenReturn(res); when(c.getPriority()).thenReturn(pri); + SchedulerRequestKey sk = SchedulerRequestKey.extractFrom(c); RMContainerImpl rmc = mock(RMContainerImpl.class); + when(rmc.getAllocatedSchedulerKey()).thenReturn(sk); when(rmc.getAllocatedNode()).thenReturn(host); when(rmc.getNodeLabelExpression()).thenReturn(exp); when(rmc.getAllocatedResource()).thenReturn(res); http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java index e3ef8c2..a115aac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java @@ -36,6 +36,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.resource.Priority; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler + .SchedulerRequestKey; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; @@ -1318,8 +1320,10 @@ public class TestProportionalCapacityPreemptionPolicy { Container c = mock(Container.class); when(c.getResource()).thenReturn(r); when(c.getPriority()).thenReturn(Priority.create(cpriority)); + SchedulerRequestKey sk = SchedulerRequestKey.extractFrom(c); RMContainer mC = mock(RMContainer.class); when(mC.getContainerId()).thenReturn(cId); + when(mC.getAllocatedSchedulerKey()).thenReturn(sk); when(mC.getContainer()).thenReturn(c); when(mC.getApplicationAttemptId()).thenReturn(appAttId); when(mC.getAllocatedResource()).thenReturn(r); http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java index ed8d56f..e737a84 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java @@ -120,7 +120,8 @@ public class TestRMContainerImpl { assertEquals(RMContainerState.NEW, rmContainer.getState()); assertEquals(resource, rmContainer.getAllocatedResource()); assertEquals(nodeId, rmContainer.getAllocatedNode()); - assertEquals(priority, rmContainer.getAllocatedPriority()); + assertEquals(priority, + rmContainer.getAllocatedSchedulerKey().getPriority()); verify(writer).containerStarted(any(RMContainer.class)); verify(publisher).containerCreated(any(RMContainer.class), anyLong()); @@ -221,7 +222,8 @@ public class TestRMContainerImpl { assertEquals(RMContainerState.NEW, rmContainer.getState()); assertEquals(resource, rmContainer.getAllocatedResource()); assertEquals(nodeId, rmContainer.getAllocatedNode()); - assertEquals(priority, rmContainer.getAllocatedPriority()); + assertEquals(priority, + rmContainer.getAllocatedSchedulerKey().getPriority()); verify(writer).containerStarted(any(RMContainer.class)); verify(publisher).containerCreated(any(RMContainer.class), anyLong()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index 54166c0..3cb668c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -45,6 +45,8 @@ import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.junit.After; import org.junit.Test; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils.toSchedulerKey; + public class TestSchedulerApplicationAttempt { private static final NodeId nodeId = NodeId.newInstance("somehost", 5); @@ -88,7 +90,8 @@ public class TestSchedulerApplicationAttempt { RMContainer container1 = createRMContainer(appAttId, 1, requestedResource); app.liveContainers.put(container1.getContainerId(), container1); SchedulerNode node = createNode(); - app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH, node, requestedPriority, + app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH, node, + toSchedulerKey(requestedPriority), request, container1.getContainer()); // Reserved container @@ -98,7 +101,7 @@ public class TestSchedulerApplicationAttempt { node.getNodeID(), prio1); Map<NodeId, RMContainer> reservations = new HashMap<NodeId, RMContainer>(); reservations.put(node.getNodeID(), container2); - app.reservedContainers.put(prio1, reservations); + app.reservedContainers.put(toSchedulerKey(prio1), reservations); oldMetrics.reserveResource(user, reservedResource); checkQueueMetrics(oldMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4); @@ -137,7 +140,8 @@ public class TestSchedulerApplicationAttempt { int id, Resource resource, NodeId nodeId, Priority reservedPriority) { RMContainer container = createRMContainer(appAttId, id, resource); when(container.getReservedResource()).thenReturn(resource); - when(container.getReservedPriority()).thenReturn(reservedPriority); + when(container.getReservedSchedulerKey()) + .thenReturn(toSchedulerKey(reservedPriority)); when(container.getReservedNode()).thenReturn(nodeId); return container; } @@ -260,16 +264,19 @@ public class TestSchedulerApplicationAttempt { SchedulerApplicationAttempt app = new SchedulerApplicationAttempt( attemptId, "user", queue, queue.getActiveUsersManager(), rmContext); Priority priority = Priority.newInstance(1); - assertEquals(0, app.getSchedulingOpportunities(priority)); - app.addSchedulingOpportunity(priority); - assertEquals(1, app.getSchedulingOpportunities(priority)); + SchedulerRequestKey schedulerKey = toSchedulerKey(priority); + assertEquals(0, app.getSchedulingOpportunities(schedulerKey)); + app.addSchedulingOpportunity(schedulerKey); + assertEquals(1, app.getSchedulingOpportunities(schedulerKey)); // verify the count is capped at MAX_VALUE and does not overflow - app.setSchedulingOpportunities(priority, Integer.MAX_VALUE - 1); + app.setSchedulingOpportunities(schedulerKey, Integer.MAX_VALUE - 1); assertEquals(Integer.MAX_VALUE - 1, - app.getSchedulingOpportunities(priority)); - app.addSchedulingOpportunity(priority); - assertEquals(Integer.MAX_VALUE, app.getSchedulingOpportunities(priority)); - app.addSchedulingOpportunity(priority); - assertEquals(Integer.MAX_VALUE, app.getSchedulingOpportunities(priority)); + app.getSchedulingOpportunities(schedulerKey)); + app.addSchedulingOpportunity(schedulerKey); + assertEquals(Integer.MAX_VALUE, + app.getSchedulingOpportunities(schedulerKey)); + 
app.addSchedulingOpportunity(schedulerKey); + assertEquals(Integer.MAX_VALUE, + app.getSchedulingOpportunities(schedulerKey)); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 7c34292..fb021c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -116,6 +116,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils; @@ -1244,7 +1246,7 @@ public class TestCapacityScheduler { rm1.stop(); } - @Test(timeout = 30000) + @Test(timeout = 300000) public void testRecoverRequestAfterPreemption() throws Exception { Configuration conf = new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, @@ -1277,8 +1279,8 @@ public class TestCapacityScheduler { // Already the node local resource request is cleared from RM after // allocation. - Assert.assertNull(app.getResourceRequest(request.getPriority(), - request.getResourceName())); + Assert.assertNull(app.getResourceRequest( + SchedulerRequestKey.create(request), request.getResourceName())); } // Call killContainer to preempt the container @@ -1290,7 +1292,7 @@ public class TestCapacityScheduler { // handling. 
Assert.assertEquals( 1, - app.getResourceRequest(request.getPriority(), + app.getResourceRequest(SchedulerRequestKey.create(request), request.getResourceName()).getNumContainers()); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 48e6f0e..274c063 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -68,6 +68,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey; + + import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; @@ -89,6 +92,8 @@ import org.junit.Test; import org.mockito.Matchers; import org.mockito.Mockito; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils.toSchedulerKey; + public class TestLeafQueue { private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); @@ -731,6 +736,7 @@ public class TestLeafQueue { qb.getActiveUsersManager(), spyRMContext); qb.submitApplicationAttempt(app_0, user_0); Priority u0Priority = TestUtils.createMockPriority(1); + SchedulerRequestKey u0SchedKey = toSchedulerKey(u0Priority); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 1, true, u0Priority, recordFactory))); @@ -753,6 +759,7 @@ public class TestLeafQueue { new FiCaSchedulerApp(appAttemptId_2, user_1, qb, qb.getActiveUsersManager(), spyRMContext); Priority u1Priority = TestUtils.createMockPriority(2); + SchedulerRequestKey u1SchedKey = toSchedulerKey(u1Priority); app_2.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 1, true, u1Priority, recordFactory))); @@ -773,9 +780,9 @@ public class TestLeafQueue { //test case 3 qb.finishApplication(app_0.getApplicationId(), user_0); qb.finishApplication(app_2.getApplicationId(), user_1); - qb.releaseResource(clusterResource, app_0, app_0.getResource(u0Priority), + qb.releaseResource(clusterResource, app_0, app_0.getResource(u0SchedKey), null, null, false); - qb.releaseResource(clusterResource, app_2, 
app_2.getResource(u1Priority), + qb.releaseResource(clusterResource, app_2, app_2.getResource(u1SchedKey), null, null, false); qb.setUserLimit(50); @@ -1452,7 +1459,7 @@ public class TestLeafQueue { assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(4*GB, app_1.getCurrentReservation().getMemorySize()); assertEquals(1*GB, node_0.getAllocatedResource().getMemorySize()); - assertEquals(1, app_1.getReReservations(priority)); + assertEquals(1, app_1.getReReservations(toSchedulerKey(priority))); // Re-reserve a.assignContainers(clusterResource, node_0, @@ -1462,7 +1469,7 @@ public class TestLeafQueue { assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(4*GB, app_1.getCurrentReservation().getMemorySize()); assertEquals(1*GB, node_0.getAllocatedResource().getMemorySize()); - assertEquals(2, app_1.getReReservations(priority)); + assertEquals(2, app_1.getReReservations(toSchedulerKey(priority))); // Try to schedule on node_1 now, should *move* the reservation a.assignContainers(clusterResource, node_1, @@ -1474,7 +1481,7 @@ public class TestLeafQueue { assertEquals(4*GB, node_1.getAllocatedResource().getMemorySize()); // Doesn't change yet... only when reservation is cancelled or a different // container is reserved - assertEquals(2, app_1.getReReservations(priority)); + assertEquals(2, app_1.getReReservations(toSchedulerKey(priority))); // Now finish another container from app_0 and see the reservation cancelled rmContainer = app_0.getLiveContainers().iterator().next(); @@ -1564,29 +1571,30 @@ public class TestLeafQueue { // Start testing... CSAssignment assignment = null; - + + SchedulerRequestKey schedulerKey = toSchedulerKey(priority); // Start with off switch, shouldn't allocate due to delay scheduling assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(1, app_0.getSchedulingOpportunities(priority)); - assertEquals(3, app_0.getTotalRequiredResources(priority)); + assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(3, app_0.getTotalRequiredResources(schedulerKey)); assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL // Another off switch, shouldn't allocate due to delay scheduling assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(2, app_0.getSchedulingOpportunities(priority)); - assertEquals(3, app_0.getTotalRequiredResources(priority)); + assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(3, app_0.getTotalRequiredResources(schedulerKey)); assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL // Another off switch, shouldn't allocate due to delay scheduling assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(3, app_0.getSchedulingOpportunities(priority)); - assertEquals(3, app_0.getTotalRequiredResources(priority)); + assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(3, app_0.getTotalRequiredResources(schedulerKey)); assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL // Another off switch, now we should allocate @@ -1594,22 +1602,25 @@ public class 
TestLeafQueue { assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.OFF_SWITCH); - assertEquals(4, app_0.getSchedulingOpportunities(priority)); // should NOT reset - assertEquals(2, app_0.getTotalRequiredResources(priority)); + // should NOT reset + assertEquals(4, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(2, app_0.getTotalRequiredResources(schedulerKey)); // NODE_LOCAL - node_0 assignment = a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.NODE_LOCAL); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset - assertEquals(1, app_0.getTotalRequiredResources(priority)); + // should reset + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey)); // NODE_LOCAL - node_1 assignment = a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.NODE_LOCAL); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset - assertEquals(0, app_0.getTotalRequiredResources(priority)); + // should reset + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(0, app_0.getTotalRequiredResources(schedulerKey)); assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // Add 1 more request to check for RACK_LOCAL @@ -1624,7 +1635,7 @@ public class TestLeafQueue { TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 4, // one extra true, priority, recordFactory)); app_0.updateResourceRequests(app_0_requests_0); - assertEquals(4, app_0.getTotalRequiredResources(priority)); + assertEquals(4, app_0.getTotalRequiredResources(schedulerKey)); String host_3 = "127.0.0.4"; // on rack_1 FiCaSchedulerNode node_3 = TestUtils.getMockNode(host_3, rack_1, 0, 8*GB); @@ -1636,21 +1647,22 @@ public class TestLeafQueue { // Shouldn't assign RACK_LOCAL yet assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(1, app_0.getSchedulingOpportunities(priority)); - assertEquals(4, app_0.getTotalRequiredResources(priority)); + assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(4, app_0.getTotalRequiredResources(schedulerKey)); // Should assign RACK_LOCAL now assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.RACK_LOCAL); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset - assertEquals(3, app_0.getTotalRequiredResources(priority)); + // should reset + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(3, app_0.getTotalRequiredResources(schedulerKey)); // Shouldn't assign RACK_LOCAL because schedulingOpportunities should have gotten reset. 
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(1, app_0.getSchedulingOpportunities(priority)); - assertEquals(3, app_0.getTotalRequiredResources(priority)); + assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(3, app_0.getTotalRequiredResources(schedulerKey)); // Next time we schedule RACK_LOCAL, don't reset doReturn(false).when(a).getRackLocalityFullReset(); @@ -1659,19 +1671,21 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.RACK_LOCAL); - assertEquals(2, app_0.getSchedulingOpportunities(priority)); // should NOT reset - assertEquals(2, app_0.getTotalRequiredResources(priority)); + // should NOT reset + assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(2, app_0.getTotalRequiredResources(schedulerKey)); // Another RACK_LOCAL since schedulingOpportunities not reset assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.RACK_LOCAL); - assertEquals(3, app_0.getSchedulingOpportunities(priority)); // should NOT reset - assertEquals(1, app_0.getTotalRequiredResources(priority)); + // should NOT reset + assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey)); // Add a request larger than cluster size to verify // OFF_SWITCH delay is capped by cluster size - app_0.resetSchedulingOpportunities(priority); + app_0.resetSchedulingOpportunities(schedulerKey); app_0_requests_0.clear(); app_0_requests_0.add( TestUtils.createResourceRequest(host_0, 1*GB, 100, @@ -1690,13 +1704,13 @@ public class TestLeafQueue { a.assignContainers(clusterResource, node_2, new ResourceLimits( clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(i+1, app_0.getSchedulingOpportunities(priority)); + assertEquals(i+1, app_0.getSchedulingOpportunities(schedulerKey)); } // delay should be capped at numNodes so next one should allocate assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.OFF_SWITCH); - assertEquals(numNodes+1, app_0.getSchedulingOpportunities(priority)); + assertEquals(numNodes+1, app_0.getSchedulingOpportunities(schedulerKey)); } @Test @@ -1738,6 +1752,7 @@ public class TestLeafQueue { // P1 Priority priority_1 = TestUtils.createMockPriority(1); + SchedulerRequestKey schedulerKey1 = toSchedulerKey(priority_1); app_0_requests_0.add( TestUtils.createResourceRequest(host_0, 1*GB, 1, true, priority_1, recordFactory)); @@ -1756,6 +1771,7 @@ public class TestLeafQueue { // P2 Priority priority_2 = TestUtils.createMockPriority(2); + SchedulerRequestKey schedulerKey2 = toSchedulerKey(priority_2); app_0_requests_0.add( TestUtils.createResourceRequest(host_2, 2*GB, 1, true, priority_2, recordFactory)); @@ -1775,47 +1791,47 @@ public class TestLeafQueue { CSAssignment assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(1, 
app_0.getSchedulingOpportunities(priority_1)); - assertEquals(2, app_0.getTotalRequiredResources(priority_1)); - assertEquals(0, app_0.getSchedulingOpportunities(priority_2)); - assertEquals(1, app_0.getTotalRequiredResources(priority_2)); + assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey1)); + assertEquals(2, app_0.getTotalRequiredResources(schedulerKey1)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2)); // Another off-switch, shouldn't allocate P1 due to delay scheduling // thus, no P2 either! assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(2, app_0.getSchedulingOpportunities(priority_1)); - assertEquals(2, app_0.getTotalRequiredResources(priority_1)); - assertEquals(0, app_0.getSchedulingOpportunities(priority_2)); - assertEquals(1, app_0.getTotalRequiredResources(priority_2)); + assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey1)); + assertEquals(2, app_0.getTotalRequiredResources(schedulerKey1)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2)); // Another off-switch, shouldn't allocate OFF_SWITCH P1 assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.OFF_SWITCH); - assertEquals(3, app_0.getSchedulingOpportunities(priority_1)); - assertEquals(1, app_0.getTotalRequiredResources(priority_1)); - assertEquals(0, app_0.getSchedulingOpportunities(priority_2)); - assertEquals(1, app_0.getTotalRequiredResources(priority_2)); + assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey1)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey1)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2)); // Now, DATA_LOCAL for P1 assignment = a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.NODE_LOCAL); - assertEquals(0, app_0.getSchedulingOpportunities(priority_1)); - assertEquals(0, app_0.getTotalRequiredResources(priority_1)); - assertEquals(0, app_0.getSchedulingOpportunities(priority_2)); - assertEquals(1, app_0.getTotalRequiredResources(priority_2)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1)); + assertEquals(0, app_0.getTotalRequiredResources(schedulerKey1)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2)); // Now, OFF_SWITCH for P2 assignment = a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.OFF_SWITCH); - assertEquals(0, app_0.getSchedulingOpportunities(priority_1)); - assertEquals(0, app_0.getTotalRequiredResources(priority_1)); - assertEquals(1, app_0.getSchedulingOpportunities(priority_2)); - assertEquals(0, app_0.getTotalRequiredResources(priority_2)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1)); + assertEquals(0, app_0.getTotalRequiredResources(schedulerKey1)); + assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey2)); + assertEquals(0, 
app_0.getTotalRequiredResources(schedulerKey2)); } @@ -1855,6 +1871,7 @@ public class TestLeafQueue { // Setup resource-requests and submit Priority priority = TestUtils.createMockPriority(1); + SchedulerRequestKey schedulerKey = toSchedulerKey(priority); List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>(); app_0_requests_0.add( TestUtils.createResourceRequest(host_0_0, 1*GB, 1, @@ -1878,7 +1895,7 @@ public class TestLeafQueue { // Add one request app_0_requests_0.clear(); app_0_requests_0.add( - TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, // only one + TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, // only 1 true, priority, recordFactory)); app_0.updateResourceRequests(app_0_requests_0); @@ -1886,17 +1903,19 @@ public class TestLeafQueue { CSAssignment assignment = a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.NODE_LOCAL); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset - assertEquals(0, app_0.getTotalRequiredResources(priority)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); + // should reset + assertEquals(0, app_0.getTotalRequiredResources(schedulerKey)); // No allocation on node_1_0 even though it's node/rack local since // required(ANY) == 0 assignment = a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // Still zero - // since #req=0 - assertEquals(0, app_0.getTotalRequiredResources(priority)); + // Still zero + // since #req=0 + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(0, app_0.getTotalRequiredResources(schedulerKey)); // Add one request app_0_requests_0.clear(); @@ -1910,15 +1929,16 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_0_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(1, app_0.getSchedulingOpportunities(priority)); - assertEquals(1, app_0.getTotalRequiredResources(priority)); + assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey)); // NODE_LOCAL - node_1 assignment = a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.NODE_LOCAL); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset - assertEquals(0, app_0.getTotalRequiredResources(priority)); + // should reset + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(0, app_0.getTotalRequiredResources(schedulerKey)); } @Test (timeout = 30000) @@ -2142,6 +2162,7 @@ public class TestLeafQueue { // host_1_1: 8G // Blacklist: <host_0_0> Priority priority = TestUtils.createMockPriority(1); + SchedulerRequestKey schedulerKey = toSchedulerKey(priority); List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>(); app_0_requests_0.add( TestUtils.createResourceRequest(host_0_0, 1*GB, 1, @@ -2169,7 +2190,8 @@ public class TestLeafQueue { a.assignContainers(clusterResource, node_0_1, new ResourceLimits( clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); 
verifyNoContainerAllocated(assignment); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 + // should be 0 + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); // resourceName: <priority, memory, #containers, relaxLocality> // host_0_0: < 1, 1GB, 1, true > @@ -2191,7 +2213,8 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 + // should be 0 + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); // Allow rack-locality for rack_1, but blacklist node_1_1 app_0_requests_0.add( @@ -2221,7 +2244,8 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 + // should be 0 + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); // Now, remove node_1_1 from blacklist, but add rack_1 to blacklist app_0.updateResourceRequests(app_0_requests_0); @@ -2249,7 +2273,8 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 + // should be 0 + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); // Now remove rack_1 from blacklist app_0.updateResourceRequests(app_0_requests_0); @@ -2275,8 +2300,8 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyNoContainerAllocated(assignment); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); - assertEquals(1, app_0.getTotalRequiredResources(priority)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(1, app_0.getTotalRequiredResources(schedulerKey)); // Now sanity-check node_local app_0_requests_0.add( @@ -2305,8 +2330,8 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.NODE_LOCAL); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); - assertEquals(0, app_0.getTotalRequiredResources(priority)); + assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(0, app_0.getTotalRequiredResources(schedulerKey)); } @@ -2667,6 +2692,7 @@ public class TestLeafQueue { // App0 has node local request for host_0/host_1, and app1 has node local // request for host2. 
Priority priority = TestUtils.createMockPriority(1); + SchedulerRequestKey schedulerKey = toSchedulerKey(priority); List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>(); app_0_requests_0.add( TestUtils.createResourceRequest(host_0, 1*GB, 1, @@ -2706,8 +2732,8 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyContainerAllocated(assignment, NodeType.NODE_LOCAL); - assertEquals(1, app_0.getSchedulingOpportunities(priority)); - assertEquals(3, app_0.getTotalRequiredResources(priority)); + assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey)); + assertEquals(3, app_0.getTotalRequiredResources(schedulerKey)); assertEquals(0, app_0.getLiveContainers().size()); assertEquals(1, app_1.getLiveContainers().size()); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java index 47fd534..df2c9ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java @@ -32,7 +32,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; -import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; @@ -48,6 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; + import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; @@ -549,7 +549,7 @@ public class TestNodeLabelContainerAllocation { FiCaSchedulerApp app = cs.getApplicationAttempt(attemptId); ResourceRequest rr = app.getAppSchedulingInfo().getResourceRequest( - Priority.newInstance(priority), "*"); + TestUtils.toSchedulerKey(priority), "*"); Assert.assertEquals(memory, rr.getCapability().getMemorySize() * rr.getNumContainers()); } 
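The TestLeafQueue and TestNodeLabelContainerAllocation changes above all follow one call-site pattern: per-priority look-ups on the application attempt (scheduling opportunities, total required resources, re-reservations, resource requests) now take the SchedulerRequestKey derived from the priority rather than the Priority itself. A short illustrative fragment of that pattern — hypothetical test helper, not from the patch:

import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils.toSchedulerKey;
import static org.junit.Assert.assertEquals;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;

public class SchedulerKeyAssertSketch {
  // Formerly: app.getSchedulingOpportunities(priority) and
  // app.getTotalRequiredResources(priority); both are now keyed by the
  // SchedulerRequestKey wrapping that priority.
  static void assertNothingPendingAtPriority(FiCaSchedulerApp app, int pri) {
    SchedulerRequestKey schedulerKey = toSchedulerKey(Priority.newInstance(pri));
    assertEquals(0, app.getSchedulingOpportunities(schedulerKey));
    assertEquals(0, app.getTotalRequiredResources(schedulerKey));
  }
}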
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index e8ac804..5e2007c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -56,6 +56,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManage
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.AMState;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
@@ -71,6 +73,8 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils.toSchedulerKey;
+
 public class TestReservations {
 
   private static final Log LOG = LogFactory.getLog(TestReservations.class);
@@ -300,7 +304,8 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(2, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
 
     // try to assign reducer (5G on node 0 and should reserve)
     a.assignContainers(clusterResource, node_0,
@@ -316,7 +321,8 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(2, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
 
     // assign reducer to node 2
     a.assignContainers(clusterResource, node_2,
@@ -332,7 +338,8 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(1, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
 
     // node_1 heartbeat and unreserves from node_0 in order to allocate
     // on node_1
@@ -348,7 +355,8 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(0, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(0, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
   }
 
   // Test that hitting a reservation limit and needing to unreserve
@@ -597,7 +605,8 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(2, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
 
     // try to assign reducer (5G on node 0 and should reserve)
     a.assignContainers(clusterResource, node_0,
@@ -613,7 +622,8 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(2, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
 
     // assign reducer to node 2
     a.assignContainers(clusterResource, node_2,
@@ -629,7 +639,8 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(1, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
 
     // node_1 heartbeat and won't unreserve from node_0, potentially stuck
     // if AM doesn't handle
@@ -646,7 +657,8 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(1, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
   }
 
   @Test
@@ -754,7 +766,8 @@ public class TestReservations {
     assertEquals(null, node_0.getReservedContainer());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(2, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
 
     // try to assign reducer (5G on node 0 and should reserve)
     a.assignContainers(clusterResource, node_0,
@@ -769,7 +782,8 @@ public class TestReservations {
         .getMemorySize());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(2, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
 
     // could allocate but told need to unreserve first
     a.assignContainers(clusterResource, node_1,
@@ -783,7 +797,8 @@ public class TestReservations {
     assertEquals(null, node_0.getReservedContainer());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(priorityReduce));
+    assertEquals(1, app_0.getTotalRequiredResources(
+        toSchedulerKey(priorityReduce)));
   }
 
   @Test
@@ -808,7 +823,8 @@ public class TestReservations {
     Resource clusterResource = Resources.createResource(2 * 8 * GB);
 
     // Setup resource-requests
-    Priority priorityMap = TestUtils.createMockPriority(5);
+    Priority p = TestUtils.createMockPriority(5);
+    SchedulerRequestKey priorityMap = toSchedulerKey(p);
     Resource capability = Resources.createResource(2*GB, 0);
 
     RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
@@ -826,12 +842,14 @@ public class TestReservations {
         app_0.getApplicationId(), 1);
     ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
     Container container = TestUtils.getMockContainer(containerId,
-        node_1.getNodeID(), Resources.createResource(2*GB), priorityMap);
+        node_1.getNodeID(), Resources.createResource(2*GB),
+        priorityMap.getPriority());
     RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
         node_1.getNodeID(), "user", rmContext);
 
     Container container_1 = TestUtils.getMockContainer(containerId,
-        node_0.getNodeID(), Resources.createResource(1*GB), priorityMap);
+        node_0.getNodeID(), Resources.createResource(1*GB),
+        priorityMap.getPriority());
     RMContainer rmContainer_1 = new RMContainerImpl(container_1, appAttemptId,
         node_0.getNodeID(), "user", rmContext);
@@ -878,7 +896,8 @@ public class TestReservations {
         8 * GB);
 
     // Setup resource-requests
-    Priority priorityMap = TestUtils.createMockPriority(5);
+    Priority p = TestUtils.createMockPriority(5);
+    SchedulerRequestKey priorityMap = toSchedulerKey(p);
     Resource capability = Resources.createResource(2 * GB, 0);
 
     RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
@@ -896,7 +915,8 @@ public class TestReservations {
         app_0.getApplicationId(), 1);
     ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
     Container container = TestUtils.getMockContainer(containerId,
-        node_1.getNodeID(), Resources.createResource(2*GB), priorityMap);
+        node_1.getNodeID(), Resources.createResource(2*GB),
+        priorityMap.getPriority());
     RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
         node_1.getNodeID(), "user", rmContext);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index 621c5c5..c808b5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -51,6 +51,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
@@ -400,4 +402,14 @@ public class TestUtils {
     return conf;
   }
+
+  public static SchedulerRequestKey toSchedulerKey(Priority pri) {
+    return SchedulerRequestKey.create(
+        ResourceRequest.newInstance(pri, null, null, 0));
+  }
+
+  public static SchedulerRequestKey toSchedulerKey(int pri) {
+    return SchedulerRequestKey.create(ResourceRequest.newInstance(
+        Priority.newInstance(pri), null, null, 0));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
index af1dc62..61c5743 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
@@ -38,7 +38,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
+    .TestUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy;
@@ -63,8 +66,9 @@ public class TestFSAppAttempt extends FairSchedulerTestBase {
   @Test
   public void testDelayScheduling() {
     FSLeafQueue queue = Mockito.mock(FSLeafQueue.class);
-    Priority prio = Mockito.mock(Priority.class);
-    Mockito.when(prio.getPriority()).thenReturn(1);
+    Priority pri = Mockito.mock(Priority.class);
+    SchedulerRequestKey prio = TestUtils.toSchedulerKey(pri);
+    Mockito.when(pri.getPriority()).thenReturn(1);
     double nodeLocalityThreshold = .5;
     double rackLocalityThreshold = .6;
@@ -122,8 +126,9 @@ public class TestFSAppAttempt extends FairSchedulerTestBase {
   public void testDelaySchedulingForContinuousScheduling()
       throws InterruptedException {
     FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue("queue", true);
-    Priority prio = Mockito.mock(Priority.class);
-    Mockito.when(prio.getPriority()).thenReturn(1);
+    Priority pri = Mockito.mock(Priority.class);
+    SchedulerRequestKey prio = TestUtils.toSchedulerKey(pri);
+    Mockito.when(pri.getPriority()).thenReturn(1);
 
     ControlledClock clock = new ControlledClock();
     scheduler.setClock(clock);
@@ -180,8 +185,9 @@ public class TestFSAppAttempt extends FairSchedulerTestBase {
    */
   public void testLocalityLevelWithoutDelays() {
     FSLeafQueue queue = Mockito.mock(FSLeafQueue.class);
-    Priority prio = Mockito.mock(Priority.class);
-    Mockito.when(prio.getPriority()).thenReturn(1);
+    Priority pri = Mockito.mock(Priority.class);
+    SchedulerRequestKey prio = TestUtils.toSchedulerKey(pri);
+    Mockito.when(pri.getPriority()).thenReturn(1);
 
     RMContext rmContext = resourceManager.getRMContext();
     ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index f92af35..dab7312 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -92,8 +92,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdate
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
+    .TestUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
@@ -2316,7 +2321,8 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     assertEquals(1, app.getLiveContainers().size());
     // Reserved container should still be at lower priority
     for (RMContainer container : app.getReservedContainers()) {
-      assertEquals(2, container.getReservedPriority().getPriority());
+      assertEquals(2,
+          container.getReservedSchedulerKey().getPriority().getPriority());
     }
 
     // Complete container
@@ -2817,7 +2823,8 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     scheduler.handle(node1UpdateEvent);
     assertEquals(1, app.getLiveContainers().size());
   }
-
+
   @Test
   public void testCancelStrictLocality() throws IOException {
     scheduler.init(conf);
@@ -4485,9 +4491,10 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     // time
     clock.tickSec(DELAY_THRESHOLD_TIME_MS / 1000);
     scheduler.attemptScheduling(node);
-    Map<Priority, Long> lastScheduledContainer =
+    Map<SchedulerRequestKey, Long> lastScheduledContainer =
         fsAppAttempt.getLastScheduledContainer();
-    long initSchedulerTime = lastScheduledContainer.get(priority);
+    long initSchedulerTime =
+        lastScheduledContainer.get(TestUtils.toSchedulerKey(priority));
     assertEquals(DELAY_THRESHOLD_TIME_MS, initSchedulerTime);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aace38b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
index 07a2dca..79f4601 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -34,6 +33,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
+    .TestUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -375,13 +378,15 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     Set<RMContainer> set = new HashSet<RMContainer>();
     for (RMContainer container :
         scheduler.getSchedulerApp(app2).getLiveContainers()) {
-      if (container.getAllocatedPriority().getPriority() == 4) {
+      if (container.getAllocatedSchedulerKey().getPriority().getPriority() ==
+          4) {
         set.add(container);
       }
     }
     for (RMContainer container :
         scheduler.getSchedulerApp(app4).getLiveContainers()) {
-      if (container.getAllocatedPriority().getPriority() == 4) {
+      if (container.getAllocatedSchedulerKey().getPriority().getPriority() ==
+          4) {
         set.add(container);
       }
     }
@@ -1399,7 +1404,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     scheduler.start();
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
-    Priority priority = Priority.newInstance(20);
+    SchedulerRequestKey schedulerKey = TestUtils.toSchedulerKey(20);
     String host = "127.0.0.1";
     int GB = 1024;
@@ -1412,11 +1417,12 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     // Create 3 container requests and place it in ask
     List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
     ResourceRequest nodeLocalRequest = createResourceRequest(GB, 1, host,
-        priority.getPriority(), 1, true);
+        schedulerKey.getPriority().getPriority(), 1, true);
     ResourceRequest rackLocalRequest = createResourceRequest(GB, 1,
-        node.getRackName(), priority.getPriority(), 1, true);
+        node.getRackName(), schedulerKey.getPriority().getPriority(), 1,
+        true);
     ResourceRequest offRackRequest = createResourceRequest(GB, 1,
-        ResourceRequest.ANY, priority.getPriority(), 1, true);
+        ResourceRequest.ANY, schedulerKey.getPriority().getPriority(), 1, true);
     ask.add(nodeLocalRequest);
     ask.add(rackLocalRequest);
     ask.add(offRackRequest);
@@ -1435,7 +1441,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     SchedulerApplicationAttempt app = scheduler.getSchedulerApp(appAttemptId);
 
     // ResourceRequest will be empty once NodeUpdate is completed
-    Assert.assertNull(app.getResourceRequest(priority, host));
+    Assert.assertNull(app.getResourceRequest(schedulerKey, host));
 
     ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
     RMContainer rmContainer = app.getRMContainer(containerId1);
@@ -1458,7 +1464,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     Assert.assertEquals(3, requests.size());
     for (ResourceRequest request : requests) {
       Assert.assertEquals(1,
-          app.getResourceRequest(priority, request.getResourceName())
+          app.getResourceRequest(schedulerKey, request.getResourceName())
              .getNumContainers());
     }
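On the read side, the same indirection appears when tests inspect containers: getAllocatedPriority() and getReservedPriority() are gone, and the priority is recovered through the container's SchedulerRequestKey. The sketch below illustrates that access pattern; only the RMContainer and SchedulerRequestKey accessors shown in the diff are taken as given, while the helper class and method names are hypothetical.

import java.util.Collection;

import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;

public final class SchedulerKeyAssertions {

  private SchedulerKeyAssertions() {
  }

  // Counts live containers allocated at the given integer priority, the way
  // the TestFairSchedulerPreemption loops above filter on
  // getAllocatedSchedulerKey().getPriority().getPriority() == 4.
  public static int countAtPriority(Collection<RMContainer> containers,
      int priority) {
    int count = 0;
    for (RMContainer container : containers) {
      SchedulerRequestKey key = container.getAllocatedSchedulerKey();
      if (key.getPriority().getPriority() == priority) {
        count++;
      }
    }
    return count;
  }
}

A test could call countAtPriority(scheduler.getSchedulerApp(app2).getLiveContainers(), 4) in place of the inline loop above; the behavior is the same, only the key unwrapping is factored out.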