Author: rvs
Date: Tue Jun 19 16:37:07 2012
New Revision: 1351770
URL: http://svn.apache.org/viewvc?rev=1351770&view=rev
Log:
BIGTOP-623. Add test for HDFS Quota Testing (Sujay Rau via rvs)
Added:
incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/
incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy
Added:
incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy
URL:
http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy?rev=1351770&view=auto
==============================================================================
---
incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy
(added)
+++
incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy
Tue Jun 19 16:37:07 2012
@@ -0,0 +1,312 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Package must match the source path shown in this commit
+// (org/apache/bigtop/itest/hdfstests/); com.cloudera.itest.hadoop appears to
+// be a stale copy/paste and would keep the test runner from locating the class.
+package org.apache.bigtop.itest.hdfstests;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.bigtop.itest.JarContent;
+import org.apache.bigtop.itest.shell.Shell;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestHDFSQuota {
+
+ // Shell that runs every command as the "hdfs" user, so the dfsadmin quota
+ // commands issued by these tests are permitted.
+ private static Shell shHDFS = new Shell("/bin/bash", "hdfs");
+ // One below Long.MAX_VALUE; used by testInputValues as the largest quota
+ // value dfsadmin accepts (MAX_VALUE + 1 is expected to be rejected there).
+ private static final long LARGE = Long.MAX_VALUE - 1;
+ private static final String USERNAME = System.getProperty("user.name");
+ // Current timestamp with whitespace and colons stripped, used to make the
+ // per-run test directory names unique.
+ private static String quotaDate =
shHDFS.exec("date").getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+ // Base name for the test directories; suffixes "1", "2", "3" are appended.
+ private static String testQuotaFolder = "testQuotaFolder" + quotaDate;
+
+ @BeforeClass
+ public static void setUp() {
+   // Unpack the test resources bundled in the jar onto the local FS.
+   JarContent.unpackJarContainer(TestHDFSQuota.class, "." , null);
+
+   // Create the primary directory the quota tests operate on (owned by the
+   // hdfs user, so admin quota operations against it succeed).
+   shHDFS.exec("hadoop fs -mkdir $testQuotaFolder" + "1");
+   assertTrue("Could not create input directory", shHDFS.getRet() == 0);
+
+   System.out.println("Running Quota Tests:");
+ }
+
+ @AfterClass
+ public static void tearDown() {
+   // Clean up the test directories. testQuotaFolder1 may have been renamed
+   // to testQuotaFolder3 by testRename(), so probe for both before deleting.
+   shHDFS.exec("hadoop fs -test -e $testQuotaFolder" + "1");
+   if (shHDFS.getRet() == 0) {
+     shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder" + "1");
+     assertTrue("Deletion of previous testQuotaFolder1 from HDFS failed",
+         shHDFS.getRet() == 0);
+   }
+   shHDFS.exec("hadoop fs -test -e $testQuotaFolder" + "3");
+   if (shHDFS.getRet() == 0) {
+     shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder" + "3");
+     assertTrue("Deletion of previous testQuotaFolder3 from HDFS failed",
+         shHDFS.getRet() == 0);
+   }
+   shHDFS.exec("hadoop fs -test -e /user/$USERNAME/$testQuotaFolder" + "2");
+   if (shHDFS.getRet() == 0) {
+     shHDFS.exec("hadoop fs -rmr -skipTrash /user/$USERNAME/$testQuotaFolder"
+         + "2");
+     assertTrue("Deletion of previous testQuotaFolder2 from HDFS failed",
+         shHDFS.getRet() == 0);
+   }
+   // NOTE(review): recreating testQuotaFolder2 in tearDown (rather than
+   // setUp) looks inverted -- testAdminPermissions() expects it to exist
+   // beforehand; confirm the intended ordering.
+   shHDFS.exec("hadoop fs -mkdir /user/$USERNAME/$testQuotaFolder" + "2");
+ }
+
+ @Test
+ public void testNewlyCreatedDir() {
+   // A freshly created directory must report no quotas: the first four
+   // columns of "count -q" are QUOTA, REMAINING_QUOTA, SPACE_QUOTA and
+   // REMAINING_SPACE_QUOTA.
+   String freshDir = testQuotaFolder + "1";
+   shHDFS.exec("hadoop fs -count -q $freshDir");
+   assertTrue("Could not use count command", shHDFS.getRet() == 0);
+   String[] counts = shHDFS.getOut().get(0).trim().split();
+   assertTrue("Newly created directory had a set name quota",
+       counts[0].equals("none"));
+   assertTrue("Newly created directory had a set name quota left",
+       counts[1].equals("inf"));
+   assertTrue("Newly created directory had a set space quota",
+       counts[2].equals("none"));
+   assertTrue("Newly created directory had a set space quota left",
+       counts[3].equals("inf"));
+ }
+
+ @Test
+ public void testAdminPermissions() {
+   String adminDir = testQuotaFolder + "1";
+   String otherDir = testQuotaFolder + "2";
+
+   // Setting quotas as the admin (hdfs) user on its own directory succeeds.
+   shHDFS.exec("hdfs dfsadmin -setQuota 10 $adminDir");
+   assertTrue("setQuota failed", shHDFS.getRet() == 0);
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000000 $adminDir");
+   assertTrue("setSpaceQuota failed", shHDFS.getRet() == 0);
+
+   // Setting or clearing quotas on the non-admin directory must fail.
+   shHDFS.exec("hdfs dfsadmin -setQuota 10 $otherDir");
+   assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000000 $otherDir");
+   assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);
+   shHDFS.exec("hdfs dfsadmin -clrQuota $otherDir");
+   assertTrue("clrQuota should not have worked", shHDFS.getRet() != 0);
+   shHDFS.exec("hdfs dfsadmin -clrSpaceQuota $otherDir");
+   assertTrue("clrSpaceQuota should not have worked", shHDFS.getRet() != 0);
+
+   // Clearing the quotas set above succeeds for the admin.
+   shHDFS.exec("hdfs dfsadmin -clrQuota $adminDir");
+   assertTrue("clrQuota failed", shHDFS.getRet() == 0);
+   shHDFS.exec("hdfs dfsadmin -clrSpaceQuota $adminDir");
+   assertTrue("clrSpaceQuota failed", shHDFS.getRet() == 0);
+ }
+
+ @Test
+ public void testRename() {
+   // Name and space quotas must follow a directory through a rename.
+   String src = testQuotaFolder + "1";
+   String dst = testQuotaFolder + "3";
+   shHDFS.exec("hadoop fs -count -q $src");
+   assertTrue("Could not use count command", shHDFS.getRet() == 0);
+   String[] before = shHDFS.getOut().get(0).trim().split();
+   shHDFS.exec("hadoop fs -mv $src /user/hdfs/$dst");
+   assertTrue("Could not use move command", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -count -q $dst");
+   assertTrue("Could not use count command", shHDFS.getRet() == 0);
+   String[] after = shHDFS.getOut().get(0).trim().split();
+   // Compare every column except the last one (the pathname, which differs).
+   int col = 0;
+   while (col < before.length - 1) {
+     assertTrue("quotas changed after folder rename",
+         before[col].equals(after[col]));
+     col++;
+   }
+   // Move the directory back so later tests keep using testQuotaFolder1.
+   shHDFS.exec("hadoop fs -mv $dst /user/hdfs/$src");
+   assertTrue("Could not use move command", shHDFS.getRet() == 0);
+ }
+
+ @Test
+ public void testInputValues() {
+   // Quota values must be positive integers no larger than Long.MAX_VALUE;
+   // negative, fractional and zero values are all rejected.
+   String dir = testQuotaFolder + "1";
+   for (String bogus : ["-1", "1.04", "0"]) {
+     shHDFS.exec("hdfs dfsadmin -setQuota $bogus $dir");
+     assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
+     shHDFS.exec("hdfs dfsadmin -setSpaceQuota $bogus $dir");
+     assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);
+   }
+   // The largest legal value (Long.MAX_VALUE - 1) is accepted ...
+   shHDFS.exec("hdfs dfsadmin -setQuota $LARGE $dir");
+   assertTrue("setQuota failed", shHDFS.getRet() == 0);
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota $LARGE $dir");
+   assertTrue("setSpaceQuota failed", shHDFS.getRet() == 0);
+   // ... but Long.MAX_VALUE + 1 does not fit in a long and is rejected.
+   shHDFS.exec("hdfs dfsadmin -setQuota 9223372036854775808 $dir");
+   assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 9223372036854775808 $dir");
+   assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);
+ }
+
+ @Test
+ public void testForceDirEmpty() {
+   // With a name quota of 1 the directory itself consumes the entire quota,
+   // so it can never hold a child entry.
+   String dir = testQuotaFolder + "1";
+   shHDFS.exec("hdfs dfsadmin -setQuota 1 $dir");
+   assertTrue("Could not setQuota", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -mkdir $dir/sample1");
+   assertTrue("mkdir should not have worked due to quota of 1",
+       shHDFS.getRet() != 0);
+ }
+
+ @Test
+ public void testQuotasPostViolation() {
+   // A quota may be set even when current usage already violates it; the
+   // quota only restricts future growth.
+   String dir = testQuotaFolder + "1";
+   shHDFS.exec("hdfs dfsadmin -setQuota $LARGE $dir");
+   assertTrue("Could not setQuota", shHDFS.getRet() == 0);
+   // Seed the directory with one file and two subdirectories.
+   shHDFS.exec("hadoop fs -put - $dir", "-------TEST STRING--------");
+   assertTrue("Could not use put command", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -mkdir $dir/sample1");
+   assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -mkdir $dir/sample2");
+   assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+   // Both new quotas are below the directory's current usage -- still OK.
+   shHDFS.exec("hdfs dfsadmin -setQuota 2 $dir");
+   assertTrue("setQuota should have worked", shHDFS.getRet() == 0);
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1 $dir");
+   assertTrue("setSpaceQuota should have worked", shHDFS.getRet() == 0);
+ }
+
+ @Test
+ public void testQuotas() {
+   String dir = testQuotaFolder + "1";
+
+   // dir creation should fail - name quota (space quota made generous so
+   // only the name quota can be the cause)
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 10000000000 $dir");
+   assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -mkdir $dir/sample3");
+   assertTrue("mkdir should not have worked", shHDFS.getRet() != 0);
+
+   // file creation should fail - name quota
+   shHDFS.exec("hadoop fs -rmr $dir/-");
+   shHDFS.exec("hadoop fs -put - $dir", "-------TEST STRING--------");
+   assertTrue("put should not have worked", shHDFS.getRet() != 0);
+
+   // file creation should fail - space quota (name quota made generous so
+   // only the space quota can be the cause)
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 10 $dir");
+   assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0);
+   shHDFS.exec("hdfs dfsadmin -setQuota 1000 $dir");
+   assertTrue("Could not setQuota", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -put - $dir", "-------TEST STRING--------");
+   assertTrue("put should not have worked", shHDFS.getRet() != 0);
+ }
+
+ @Test
+ public void testLogEntries() {
+   // The namenode should log a "Quota violation in image" entry for each
+   // directory whose quota is violated when it restarts and reloads the
+   // image -- one directory violating the name quota, one the space quota.
+   shHDFS.exec("date");
+   String date = "logTest" +
+       shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+   shHDFS.exec("hadoop fs -mkdir $date");
+   assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -put - $date", "-------TEST STRING--------");
+   assertTrue("Could not use put command", shHDFS.getRet() == 0);
+   // Name quota of 1 is already violated by the file just written.
+   shHDFS.exec("hdfs dfsadmin -setQuota 1 $date");
+   assertTrue("Could not setQuota", shHDFS.getRet() == 0);
+   shHDFS.exec("date");
+   String date1 = "logTest" +
+       shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+   shHDFS.exec("hadoop fs -mkdir $date1");
+   assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -put - $date1", "-------TEST STRING--------");
+   assertTrue("Could not use put command", shHDFS.getRet() == 0);
+   // Space quota of 1 byte is already violated as well.
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1 $date1");
+   assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0);
+   // Bounce all HDFS daemons so the namenode reloads the image and logs the
+   // quota violations.
+   shHDFS.exec("for service in /etc/init.d/hadoop-hdfs-*; do sudo \$service stop; done");
+   shHDFS.exec("for service in /etc/init.d/hadoop-hdfs-*; do sudo \$service start; done");
+   shHDFS.exec("grep \"Quota violation in image for //user/hdfs/$date\" /var/log/hadoop-hdfs/hadoop-hdfs-namenode*.log");
+   if (shHDFS.getOut().isEmpty()) {
+     fail("Log was not written");
+   } else {
+     assertTrue(shHDFS.getOut().get(0).contains(date));
+   }
+   shHDFS.exec("grep \"Quota violation in image for //user/hdfs/$date1\" /var/log/hadoop-hdfs/hadoop-hdfs-namenode*.log");
+   if (shHDFS.getOut().isEmpty()) {
+     fail("Log was not written");
+   } else {
+     assertTrue(shHDFS.getOut().get(0).contains(date1));
+   }
+
+   // The namenode stays in safe mode for roughly 15 seconds after a restart;
+   // keep retrying the delete until it stops failing with a safe-mode error.
+   // Guard against an empty stderr list: the original code called
+   // getErr().get(0) unconditionally and would throw once rmr succeeded.
+   shHDFS.exec("hadoop fs -rmr $date1");
+   while (!shHDFS.getErr().isEmpty() &&
+       (shHDFS.getErr().get(0).contains("safe mode") ||
+        (shHDFS.getErr().size() > 1 && shHDFS.getErr().get(1).contains("safe mode")))) {
+     shHDFS.exec("hadoop fs -rmr $date1");
+   }
+   // NOTE(review): the $date directory is never removed here -- confirm
+   // whether leaving it behind is intentional.
+ }
+
+ @Test
+ public void testQuotasShouldFail() {
+   // Quota operations on files (rather than directories) and on directories
+   // that do not exist must all be rejected.
+   shHDFS.exec("date");
+   String date = "failTest" +
+       shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+   shHDFS.exec("hadoop fs -mkdir $date");
+   assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -put - $date", "-------TEST STRING--------");
+   assertTrue("Could not use put command", shHDFS.getRet() == 0);
+   // Errors when setting quotas on a file
+   shHDFS.exec("hdfs dfsadmin -setQuota 1000 $date/-");
+   assertTrue("setting quota on a file should not have worked",
+       shHDFS.getRet() != 0);
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000 $date/-");
+   assertTrue("setting quota on a file should not have worked",
+       shHDFS.getRet() != 0);
+
+   // Errors when clearing quotas on a file (the exit code is apparently not
+   // reliable for clr* here, so stderr is inspected instead)
+   shHDFS.exec("hdfs dfsadmin -clrQuota $date/-");
+   assertTrue("clearing quota on a file should not have worked",
+       shHDFS.getErr().size() > 0 && shHDFS.getErr().get(0).contains("on a file"));
+   shHDFS.exec("hdfs dfsadmin -clrSpaceQuota $date/-");
+   assertTrue("clearing quota on a file should not have worked",
+       shHDFS.getErr().size() > 0 && shHDFS.getErr().get(0).contains("on a file"));
+
+   // set/clr quota on nonexistent directory
+   shHDFS.exec("hdfs dfsadmin -setQuota 100 DIRECTORYDOESNOTEXIST" + date);
+   assertTrue("setting quota on non-existent directory should not have worked",
+       shHDFS.getRet() != 0);
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 100 DIRECTORYDOESNOTEXIST" + date);
+   assertTrue("setting quota on non-existent directory should not have worked",
+       shHDFS.getRet() != 0);
+   shHDFS.exec("hdfs dfsadmin -clrQuota DIRECTORYDOESNOTEXIST" + date);
+   assertTrue("clearing quota on non-existent directory should not have worked",
+       shHDFS.getRet() != 0);
+   shHDFS.exec("hdfs dfsadmin -clrSpaceQuota DIRECTORYDOESNOTEXIST" + date);
+   assertTrue("clearing quota on non-existent directory should not have worked",
+       shHDFS.getRet() != 0);
+
+   shHDFS.exec("hadoop fs -rmr $date");
+ }
+
+ @Test
+ public void testReplicationFactor() {
+   // Raising or lowering a file's replication factor should debit or credit
+   // the space quota proportionally.
+   shHDFS.exec("date");
+   String repFolder = "repFactorTest" +
+       shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+   shHDFS.exec("hadoop fs -mkdir $repFolder");
+   assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+   shHDFS.exec("hadoop fs -put - $repFolder", "-------TEST STRING--------");
+   assertTrue("Could not use put command", shHDFS.getRet() == 0);
+   shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000 $repFolder");
+   assertTrue("Could not setQuota", shHDFS.getRet() == 0);
+   // Space consumed = space quota (column 2) minus remaining quota (column 3)
+   // of the "count -q" output.
+   def spaceUsed = { ->
+     shHDFS.exec("hadoop fs -count -q $repFolder");
+     assertTrue("Could not use count command", shHDFS.getRet() == 0);
+     String[] cols = shHDFS.getOut().get(0).trim().split();
+     return Integer.parseInt(cols[2]) - Integer.parseInt(cols[3]);
+   }
+   shHDFS.exec("hadoop fs -setrep 1 $repFolder/-");
+   int usedAtRep1 = spaceUsed();
+   shHDFS.exec("hadoop fs -setrep 5 $repFolder/-");
+   int usedAtRep5 = spaceUsed();
+   assertTrue("Quota not debited correctly", usedAtRep1 * 5 == usedAtRep5);
+   shHDFS.exec("hadoop fs -setrep 3 $repFolder/-");
+   int usedAtRep3 = spaceUsed();
+   assertTrue("Quota not credited correctly", usedAtRep1 * 3 == usedAtRep3);
+   shHDFS.exec("hadoop fs -rmr $repFolder");
+ }
+
+}
+
+