Modified: hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java Mon Jul 21 21:44:50 2014
@@ -147,11 +147,11 @@ public class FairSchedulerTestBase {
       int memory, int vcores, String queueId, String userId, int numContainers,
       int priority) {
     ApplicationAttemptId id = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
-    scheduler.addApplication(id.getApplicationId(), queueId, userId);
+    scheduler.addApplication(id.getApplicationId(), queueId, userId, false);
     // This conditional is for testAclSubmitApplication where app is rejected
     // and no app is added.
     if (scheduler.getSchedulerApplications().containsKey(id.getApplicationId())) {
-      scheduler.addApplicationAttempt(id, false, true);
+      scheduler.addApplicationAttempt(id, false, false);
     }
     List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
     ResourceRequest request = createResourceRequest(memory, vcores, ResourceRequest.ANY,
@@ -167,6 +167,27 @@ public class FairSchedulerTestBase {
         .put(id.getApplicationId(), rmApp);
     return id;
   }
+
+  protected ApplicationAttemptId createSchedulingRequest(String queueId,
+      String userId, List<ResourceRequest> ask) {
+    ApplicationAttemptId id = createAppAttemptId(this.APP_ID++,
+        this.ATTEMPT_ID++);
+    scheduler.addApplication(id.getApplicationId(), queueId, userId, false);
+    // This conditional is for testAclSubmitApplication where app is rejected
+    // and no app is added.
+    if (scheduler.getSchedulerApplications().containsKey(id.getApplicationId())) {
+      scheduler.addApplicationAttempt(id, false, false);
+    }
+    scheduler.allocate(id, ask, new ArrayList<ContainerId>(), null, null);
+    RMApp rmApp = mock(RMApp.class);
+    RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
+    when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
+    when(rmAppAttempt.getRMAppAttemptMetrics()).thenReturn(
+        new RMAppAttemptMetrics(id));
+    resourceManager.getRMContext().getRMApps()
+        .put(id.getApplicationId(), rmApp);
+    return id;
+  }
 
   protected void createSchedulingRequestExistingApplication(
       int memory, int priority, ApplicationAttemptId attId) {
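The new createSchedulingRequest(String, String, List<ResourceRequest>) overload above lets a test supply its own ask list instead of having the helper synthesize a single request. A minimal sketch of how a FairScheduler test might call it; the test name, queue, host, rack, and priority values are illustrative and not part of this change:

  // Hypothetical test in a subclass of FairSchedulerTestBase.
  @Test
  public void testExplicitAskSubmission() throws Exception {
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());

    // Node-local, rack-local and off-switch requests at the same priority.
    List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
    ask.add(createResourceRequest(1024, 1, "127.0.0.1", 20, 1, true));
    ask.add(createResourceRequest(1024, 1, "/default-rack", 20, 1, true));
    ask.add(createResourceRequest(1024, 1, ResourceRequest.ANY, 20, 1, true));

    // The new overload registers the app and attempt, then feeds the ask
    // straight into scheduler.allocate().
    ApplicationAttemptId attemptId =
        createSchedulingRequest("root.queueA", "user1", ask);
    scheduler.update();
  }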
Modified: hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java Mon Jul 21 21:44:50 2014
@@ -53,10 +53,13 @@ import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
@@ -77,11 +80,13 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
@@ -788,14 +793,14 @@ public class TestFairScheduler extends F
     scheduler.reinitialize(conf, resourceManager.getRMContext());
     ApplicationAttemptId id11 = createAppAttemptId(1, 1);
-    scheduler.addApplication(id11.getApplicationId(), "root.queue1", "user1");
-    scheduler.addApplicationAttempt(id11, false, true);
+    scheduler.addApplication(id11.getApplicationId(), "root.queue1", "user1", false);
+    scheduler.addApplicationAttempt(id11, false, false);
 
     ApplicationAttemptId id21 = createAppAttemptId(2, 1);
-    scheduler.addApplication(id21.getApplicationId(), "root.queue2", "user1");
-    scheduler.addApplicationAttempt(id21, false, true);
+    scheduler.addApplication(id21.getApplicationId(), "root.queue2", "user1", false);
+    scheduler.addApplicationAttempt(id21, false, false);
     ApplicationAttemptId id22 = createAppAttemptId(2, 2);
-    scheduler.addApplication(id22.getApplicationId(), "root.queue2", "user1");
-    scheduler.addApplicationAttempt(id22, false, true);
+    scheduler.addApplication(id22.getApplicationId(), "root.queue2", "user1", false);
+    scheduler.addApplicationAttempt(id22, false, false);
 
     int minReqSize =
         FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB;
@@ -1556,8 +1561,8 @@ public class TestFairScheduler extends F
     scheduler.handle(nodeEvent2);
 
     ApplicationAttemptId appId = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
-    scheduler.addApplication(appId.getApplicationId(), "queue1", "user1");
-    scheduler.addApplicationAttempt(appId, false, true);
+    scheduler.addApplication(appId.getApplicationId(), "queue1", "user1", false);
+    scheduler.addApplicationAttempt(appId, false, false);
 
     // 1 request with 2 nodes on the same rack. another request with 1 node on
     // a different rack
@@ -1838,7 +1843,7 @@ public class TestFairScheduler extends F
 
     ApplicationAttemptId attId =
         ApplicationAttemptId.newInstance(applicationId, this.ATTEMPT_ID++);
-    scheduler.addApplication(attId.getApplicationId(), queue, user);
+    scheduler.addApplication(attId.getApplicationId(), queue, user, false);
 
     numTries = 0;
     while (application.getFinishTime() == 0 && numTries < MAX_TRIES) {
@@ -2715,8 +2720,8 @@ public class TestFairScheduler extends F
     // send application request
     ApplicationAttemptId appAttemptId =
         createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
-    fs.addApplication(appAttemptId.getApplicationId(), "queue11", "user11");
-    fs.addApplicationAttempt(appAttemptId, false, true);
+    fs.addApplication(appAttemptId.getApplicationId(), "queue11", "user11", false);
+    fs.addApplicationAttempt(appAttemptId, false, false);
     List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
     ResourceRequest request =
         createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true);
@@ -2831,6 +2836,87 @@ public class TestFairScheduler extends F
       }
     }
   }
+
+  @Test(timeout=5000)
+  public void testRecoverRequestAfterPreemption() throws Exception {
+    conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10);
+
+    MockClock clock = new MockClock();
+    scheduler.setClock(clock);
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    Priority priority = Priority.newInstance(20);
+    String host = "127.0.0.1";
+    int GB = 1024;
+
+    // Create a node and raise a node-added event
+    RMNode node = MockNodes.newNodeInfo(1,
+        Resources.createResource(16 * 1024, 4), 0, host);
+    NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
+    scheduler.handle(nodeEvent);
+
+    // Create 3 container requests and place them in ask
+    List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
+    ResourceRequest nodeLocalRequest = createResourceRequest(GB, 1, host,
+        priority.getPriority(), 1, true);
+    ResourceRequest rackLocalRequest = createResourceRequest(GB, 1,
+        node.getRackName(), priority.getPriority(), 1, true);
+    ResourceRequest offRackRequest = createResourceRequest(GB, 1,
+        ResourceRequest.ANY, priority.getPriority(), 1, true);
+    ask.add(nodeLocalRequest);
+    ask.add(rackLocalRequest);
+    ask.add(offRackRequest);
+
+    // Create the request and update
+    ApplicationAttemptId appAttemptId = createSchedulingRequest("queueA",
+        "user1", ask);
+    scheduler.update();
+
+    // Sufficient node check-ins to fully schedule containers
+    NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node);
+    scheduler.handle(nodeUpdate);
+
+    assertEquals(1, scheduler.getSchedulerApp(appAttemptId).getLiveContainers()
+        .size());
+    FSSchedulerApp app = scheduler.getSchedulerApp(appAttemptId);
+
+    // ResourceRequest will be empty once NodeUpdate is completed
+    Assert.assertNull(app.getResourceRequest(priority, host));
+
+    ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 1);
+    RMContainer rmContainer = app.getRMContainer(containerId1);
+
+    // Create a preempt event and register for preemption
+    scheduler.warnOrKillContainer(rmContainer);
+
+    // Wait for a few clock ticks
+    clock.tick(5);
+
+    // Preempt now
+    scheduler.warnOrKillContainer(rmContainer);
+
+    List<ResourceRequest> requests = rmContainer.getResourceRequests();
+    // Once recovered, the resource requests will be present again in the app
+    Assert.assertEquals(3, requests.size());
+    for (ResourceRequest request : requests) {
+      Assert.assertEquals(1,
+          app.getResourceRequest(priority, request.getResourceName())
+              .getNumContainers());
+    }
+
+    // Send node heartbeat
+    scheduler.update();
+    scheduler.handle(nodeUpdate);
+
+    List<Container> containers = scheduler.allocate(appAttemptId,
+        Collections.<ResourceRequest> emptyList(),
+        Collections.<ContainerId> emptyList(), null, null).getContainers();
+
+    // Now, with the updated ResourceRequests, a container is allocated for the AM.
+    Assert.assertTrue(containers.size() == 1);
+  }
 
   @SuppressWarnings("resource")
   @Test

Modified: hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java Mon Jul 21 21:44:50 2014
@@ -23,13 +23,12 @@ import java.security.PrivilegedAction;
 import java.util.Arrays;
 import java.util.Collection;
 
-import javax.crypto.SecretKey;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -41,7 +40,9 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMWithAMS;
@@ -50,6 +51,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.security.MasterKeyData;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
@@ -65,6 +67,8 @@ public class TestAMRMTokens {
 
   private final Configuration conf;
   private static final int maxWaitAttempts = 50;
+  private static final int rolling_interval_sec = 13;
+  private static final long am_expire_ms = 4000;
 
   @Parameters
   public static Collection<Object[]> configs() {
@@ -201,15 +205,22 @@ public class TestAMRMTokens {
 
   @Test
   public void testMasterKeyRollOver() throws Exception {
+    conf.setLong(
+        YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
+        rolling_interval_sec);
+    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
     MyContainerManager containerManager = new MyContainerManager();
     final MockRMWithAMS rm = new MockRMWithAMS(conf, containerManager);
     rm.start();
-
+    Long startTime = System.currentTimeMillis();
     final Configuration conf = rm.getConfig();
    final YarnRPC rpc = YarnRPC.create(conf);
     ApplicationMasterProtocol rmClient = null;
-
+    AMRMTokenSecretManager appTokenSecretManager =
+        rm.getRMContext().getAMRMTokenSecretManager();
+    MasterKeyData oldKey = appTokenSecretManager.getMasterKey();
+    Assert.assertNotNull(oldKey);
     try {
       MockNM nm1 = rm.registerNode("localhost:1234", 5120);
@@ -218,7 +229,7 @@ public class TestAMRMTokens {
       nm1.nodeHeartbeat(true);
 
       int waitCount = 0;
-      while (containerManager.containerTokens == null && waitCount++ < 20) {
+      while (containerManager.containerTokens == null && waitCount++ < maxWaitAttempts) {
         LOG.info("Waiting for AM Launch to happen..");
         Thread.sleep(1000);
       }
@@ -250,21 +261,65 @@ public class TestAMRMTokens {
       Assert.assertTrue(
           rmClient.allocate(allocateRequest).getAMCommand() == null);
 
-      // Simulate a master-key-roll-over
-      AMRMTokenSecretManager appTokenSecretManager =
-          rm.getRMContext().getAMRMTokenSecretManager();
-      SecretKey oldKey = appTokenSecretManager.getMasterKey();
-      appTokenSecretManager.rollMasterKey();
-      SecretKey newKey = appTokenSecretManager.getMasterKey();
+      // Wait long enough to make sure the rollover happens.
+      // In the meantime, the old AMRMToken should continue to work.
+      while(System.currentTimeMillis() - startTime < rolling_interval_sec*1000) {
+        rmClient.allocate(allocateRequest);
+        Thread.sleep(500);
+      }
+
+      MasterKeyData newKey = appTokenSecretManager.getMasterKey();
+      Assert.assertNotNull(newKey);
       Assert.assertFalse("Master key should have changed!",
           oldKey.equals(newKey));
 
+      // Another allocate call with the old AMRMToken. Should continue to work.
+      rpc.stopProxy(rmClient, conf); // To avoid using cached client
+      rmClient = createRMClient(rm, conf, rpc, currentUser);
+      Assert
+        .assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
+
+      waitCount = 0;
+      while(waitCount++ <= maxWaitAttempts) {
+        if (appTokenSecretManager.getCurrnetMasterKeyData() != oldKey) {
+          break;
+        }
+        try {
+          rmClient.allocate(allocateRequest);
+        } catch (Exception ex) {
+          break;
+        }
+        Thread.sleep(200);
+      }
+      // Activate the nextMasterKey and replace the currentMasterKey
+      Assert.assertTrue(appTokenSecretManager.getCurrnetMasterKeyData().equals(newKey));
+      Assert.assertTrue(appTokenSecretManager.getMasterKey().equals(newKey));
+      Assert.assertTrue(appTokenSecretManager.getNextMasterKeyData() == null);
+
+      // Create a new Token
+      Token<AMRMTokenIdentifier> newToken =
+          appTokenSecretManager.createAndGetAMRMToken(applicationAttemptId);
+      SecurityUtil.setTokenService(newToken, rmBindAddress);
+      currentUser.addToken(newToken);
 
       // Another allocate call. Should continue to work.
       rpc.stopProxy(rmClient, conf); // To avoid using cached client
       rmClient = createRMClient(rm, conf, rpc, currentUser);
       allocateRequest = Records.newRecord(AllocateRequest.class);
-      Assert.assertTrue(
-          rmClient.allocate(allocateRequest).getAMCommand() == null);
+      Assert
+        .assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
+
+      // Using the old AMRMToken should no longer work.
+      rpc.stopProxy(rmClient, conf); // To avoid using cached client
+      try {
+        currentUser.addToken(amRMToken);
+        rmClient = createRMClient(rm, conf, rpc, currentUser);
+        allocateRequest = Records.newRecord(AllocateRequest.class);
+        Assert
+          .assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
+        Assert.fail("The old Token should not work");
+      } catch (Exception ex) {
+        // expected
+      }
     } finally {
       rm.stop();
       if (rmClient != null) {

Modified: hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm Mon Jul 21 21:44:50 2014
@@ -2707,3 +2707,223 @@ Server: Jetty(6.1.26)
 +---+
 
+* Cluster {Delegation Tokens API}
+
+  The Delegation Tokens API can be used to create, renew and cancel YARN ResourceManager delegation tokens. All delegation token requests must be carried out on a Kerberos-authenticated connection (using SPNEGO). Carrying out operations on a non-Kerberos connection will result in a FORBIDDEN response. When renewing a token, only the renewer specified when the token was created can renew it. Other users (including the owner) are forbidden from renewing tokens. Note that when cancelling or renewing a token, the token to be cancelled or renewed is specified in a request header.
+
+  This feature is currently in the alpha stage and may change in the future.
+
+** URI
+
+  Use the following URI to create and cancel delegation tokens.
+
+------
+  * http://<rm http address:port>/ws/v1/cluster/delegation-token
+------
+
+  Use the following URI to renew delegation tokens.
+
+------
+  * http://<rm http address:port>/ws/v1/cluster/delegation-token/expiration
+------
+
+** HTTP Operations Supported
+
+------
+  * POST
+  * DELETE
+------
+
+** Query Parameters Supported
+
+------
+  None
+------
+
+** Elements of the <delegation-token> object
+
+  The response from the delegation tokens API contains one or more of the fields listed below, depending on the operation.
+
+*---------------+--------------+-------------------------------+
+|| Item         || Data Type   || Description                   |
+*---------------+--------------+-------------------------------+
+| token | string | The delegation token |
+*---------------+--------------+-------------------------------+
+| renewer | string | The user who is allowed to renew the delegation token |
+*---------------+--------------+-------------------------------+
+| owner | string | The owner of the delegation token |
+*---------------+--------------+-------------------------------+
+| kind | string | The kind of delegation token |
+*---------------+--------------+-------------------------------+
+| expiration-time | long | The expiration time of the token |
+*---------------+--------------+-------------------------------+
+| max-validity | long | The maximum validity of the token |
+*---------------+--------------+-------------------------------+
+
+** Response Examples
+
+*** Creating a token
+
+  <<JSON response>>
+
+  HTTP Request:
+
+------
+  POST http://<rm http address:port>/ws/v1/cluster/delegation-token
+  Accept: application/json
+  Content-Type: application/json
+  {
+    "renewer" : "test-renewer"
+  }
+------
+
+  Response Header
+
++---+
+  HTTP/1.1 200 OK
+  WWW-Authenticate: Negotiate ...
+  Date: Sat, 28 Jun 2014 18:08:11 GMT
+  Server: Jetty(6.1.26)
+  Set-Cookie: ...
+  Content-Type: application/json
++---+
+
+  Response body
+
++---+
+  {
+    "token":"MgASY2xpZW50QEVYQU1QTEUuQ09NDHRlc3QtcmVuZXdlcgCKAUckiEZpigFHSJTKaQECFN9EMM9BzfPoDxu572EVUpzqhnSGE1JNX0RFTEVHQVRJT05fVE9LRU4A",
+    "renewer":"test-renewer",
+    "owner":"client@EXAMPLE.COM",
+    "kind":"RM_DELEGATION_TOKEN",
+    "expiration-time":"1405153616489",
+    "max-validity":"1405672016489"
+  }
++---+
+
+  <<XML response>>
+
+  HTTP Request
+
+------
+  POST http://<rm http address:port>/ws/v1/cluster/delegation-token
+  Accept: application/xml
+  Content-Type: application/xml
+  <delegation-token>
+    <renewer>test-renewer</renewer>
+  </delegation-token>
+------
+
+  Response Header
+
++---+
+  HTTP/1.1 200 OK
+  WWW-Authenticate: Negotiate ...
+  Date: Sat, 28 Jun 2014 18:08:11 GMT
+  Content-Length: 423
+  Server: Jetty(6.1.26)
+  Set-Cookie: ...
+  Content-Type: application/xml
++---+
+
+  Response Body
+
++---+
+  <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+  <delegation-token>
+    <token>MgASY2xpZW50QEVYQU1QTEUuQ09NDHRlc3QtcmVuZXdlcgCKAUckgZ8yigFHSI4jMgcCFDTG8X6XFFn2udQngzSXQL8vWaKIE1JNX0RFTEVHQVRJT05fVE9LRU4A</token>
+    <renewer>test-renewer</renewer>
+    <owner>client@EXAMPLE.COM</owner>
+    <kind>RM_DELEGATION_TOKEN</kind>
+    <expiration-time>1405153180466</expiration-time>
+    <max-validity>1405671580466</max-validity>
+  </delegation-token>
++---+
+
+*** Renewing a token
+
+  <<JSON response>>
+
+  HTTP Request:
+
+------
+  POST http://<rm http address:port>/ws/v1/cluster/delegation-token/expiration
+  Accept: application/json
+  Hadoop-YARN-RM-Delegation-Token: MgASY2xpZW50QEVYQU1QTEUuQ09NDHRlc3QtcmVuZXdlcgCKAUbjqcHHigFHB7ZFxwQCFKWD3znCkDSy6SQIjRCLDydxbxvgE1JNX0RFTEVHQVRJT05fVE9LRU4A
+  Content-Type: application/json
+------
+
+  Response Header
+
++---+
+  HTTP/1.1 200 OK
+  WWW-Authenticate: Negotiate ...
+  Date: Sat, 28 Jun 2014 18:08:11 GMT
+  Server: Jetty(6.1.26)
+  Set-Cookie: ...
+  Content-Type: application/json
++---+
+
+  Response body
+
++---+
+  {
+    "expiration-time":"1404112520402"
+  }
++---+
+
+  <<XML response>>
+
+  HTTP Request
+
+------
+  POST http://<rm http address:port>/ws/v1/cluster/delegation-token/expiration
+  Accept: application/xml
+  Content-Type: application/xml
+  Hadoop-YARN-RM-Delegation-Token: MgASY2xpZW50QEVYQU1QTEUuQ09NDHRlc3QtcmVuZXdlcgCKAUbjqcHHigFHB7ZFxwQCFKWD3znCkDSy6SQIjRCLDydxbxvgE1JNX0RFTEVHQVRJT05fVE9LRU4A
+------
+
+  Response Header
+
++---+
+  HTTP/1.1 200 OK
+  WWW-Authenticate: Negotiate ...
+  Date: Sat, 28 Jun 2014 18:08:11 GMT
+  Content-Length: 423
+  Server: Jetty(6.1.26)
+  Set-Cookie: ...
+  Content-Type: application/xml
++---+
+
+  Response Body
+
++---+
+  <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+  <delegation-token>
+    <expiration-time>1404112520402</expiration-time>
+  </delegation-token>
++---+
+
+*** Cancelling a token
+
+  HTTP Request
+
+------
+  DELETE http://<rm http address:port>/ws/v1/cluster/delegation-token
+  Hadoop-YARN-RM-Delegation-Token: MgASY2xpZW50QEVYQU1QTEUuQ09NDHRlc3QtcmVuZXdlcgCKAUbjqcHHigFHB7ZFxwQCFKWD3znCkDSy6SQIjRCLDydxbxvgE1JNX0RFTEVHQVRJT05fVE9LRU4A
+  Accept: application/xml
+------
+
+  Response Header
+
++---+
+  HTTP/1.1 200 OK
+  WWW-Authenticate: Negotiate ...
+  Date: Sun, 29 Jun 2014 07:25:18 GMT
+  Transfer-Encoding: chunked
+  Server: Jetty(6.1.26)
+  Set-Cookie: ...
+  Content-Type: application/xml
++---+
+
+  No response body.
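To make the request/response examples above concrete, here is a minimal Java sketch that creates and then renews a delegation token through this API. The RM address is a placeholder, and the sketch assumes the HTTP connection is already SPNEGO-authenticated (for example via the hadoop-auth AuthenticatedURL client); the Kerberos negotiation itself and the JSON parsing of the response are left out.

  import java.io.BufferedReader;
  import java.io.InputStreamReader;
  import java.io.OutputStream;
  import java.net.HttpURLConnection;
  import java.net.URL;
  import java.nio.charset.StandardCharsets;

  public class DelegationTokenRestSketch {
    // Placeholder RM web address; on a secure cluster the connection below
    // must additionally carry SPNEGO authentication, which is omitted here.
    private static final String RM = "http://rmhost:8088";

    public static void main(String[] args) throws Exception {
      // Create a token owned by the authenticated caller, renewable by "test-renewer".
      String created = post(RM + "/ws/v1/cluster/delegation-token",
          "{\"renewer\":\"test-renewer\"}", null);
      System.out.println("create response: " + created);

      // Renew the token: the token itself goes in a request header, not the body.
      String token = "...";  // the "token" field parsed from the create response
      String renewed = post(RM + "/ws/v1/cluster/delegation-token/expiration",
          "", token);
      System.out.println("renew response: " + renewed);
    }

    private static String post(String url, String json, String token) throws Exception {
      HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
      conn.setRequestMethod("POST");
      conn.setRequestProperty("Content-Type", "application/json");
      conn.setRequestProperty("Accept", "application/json");
      if (token != null) {
        // Header used by the renew and cancel operations described above.
        conn.setRequestProperty("Hadoop-YARN-RM-Delegation-Token", token);
      }
      conn.setDoOutput(true);
      try (OutputStream out = conn.getOutputStream()) {
        out.write(json.getBytes(StandardCharsets.UTF_8));
      }
      StringBuilder sb = new StringBuilder();
      try (BufferedReader in = new BufferedReader(
          new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
        String line;
        while ((line = in.readLine()) != null) {
          sb.append(line);
        }
      }
      return sb.toString();
    }
  }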
