Repository: hadoop Updated Branches: refs/heads/branch-2 a7c5456c7 -> c28449d4b
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java new file mode 100644 index 0000000..5583fec --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java @@ -0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.OutputStream; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.HashMap; +import java.util.Iterator; +import java.util.concurrent.ConcurrentLinkedQueue; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestNativeAzureFileSystemConcurrency { + private AzureBlobStorageTestAccount testAccount; + private FileSystem fs; + private InMemoryBlockBlobStore backingStore; + + @Before + public void setUp() throws Exception { + testAccount = AzureBlobStorageTestAccount.createMock(); + fs = testAccount.getFileSystem(); + backingStore = testAccount.getMockStorage().getBackingStore(); + } + + @After + public void tearDown() throws Exception { + testAccount.cleanup(); + fs = null; + backingStore = null; + } + + @Test + public void testLinkBlobs() throws Exception { + Path filePath = new Path("/inProgress"); + FSDataOutputStream outputStream = fs.create(filePath); + // Since the stream is still open, we should see an empty link + // blob in the backing store linking to the temporary file. + HashMap<String, String> metadata = backingStore + .getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath)); + assertNotNull(metadata); + String linkValue = metadata + .get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY); + assertNotNull(linkValue); + assertTrue(backingStore.exists(AzureBlobStorageTestAccount + .toMockUri(linkValue))); + // Also, WASB should say the file exists now even before we close the + // stream. 
+ assertTrue(fs.exists(filePath)); + outputStream.close(); + // Now there should be no link metadata on the final file. + metadata = backingStore.getMetadata(AzureBlobStorageTestAccount + .toMockUri(filePath)); + assertNull(metadata + .get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY)); + } + + private static String toString(FileStatus[] list) { + String[] asStrings = new String[list.length]; + for (int i = 0; i < list.length; i++) { + asStrings[i] = list[i].getPath().toString(); + } + return StringUtils.join(",", asStrings); + } + + /** + * Test to make sure that we don't expose the temporary upload folder when + * listing at the root. + */ + @Test + public void testNoTempBlobsVisible() throws Exception { + Path filePath = new Path("/inProgress"); + FSDataOutputStream outputStream = fs.create(filePath); + // Make sure I can't see the temporary blob if I ask for a listing + FileStatus[] listOfRoot = fs.listStatus(new Path("/")); + assertEquals("Expected one file listed, instead got: " + + toString(listOfRoot), 1, listOfRoot.length); + assertEquals(fs.makeQualified(filePath), listOfRoot[0].getPath()); + outputStream.close(); + } + + /** + * Converts a collection of exceptions to a collection of strings by getting + * the stack trace on every exception. 
+ */ + private static Iterable<String> selectToString( + final Iterable<Throwable> collection) { + return new Iterable<String>() { + @Override + public Iterator<String> iterator() { + final Iterator<Throwable> exceptionIterator = collection.iterator(); + return new Iterator<String>() { + @Override + public boolean hasNext() { + return exceptionIterator.hasNext(); + } + + @Override + public String next() { + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stringWriter); + exceptionIterator.next().printStackTrace(printWriter); + printWriter.close(); + return stringWriter.toString(); + } + + @Override + public void remove() { + exceptionIterator.remove(); + } + }; + } + }; + } + + /** + * Tests running starting multiple threads all doing various File system + * operations against the same FS. + */ + @Test + public void testMultiThreadedOperation() throws Exception { + for (int iter = 0; iter < 10; iter++) { + final int numThreads = 20; + Thread[] threads = new Thread[numThreads]; + final ConcurrentLinkedQueue<Throwable> exceptionsEncountered = new ConcurrentLinkedQueue<Throwable>(); + for (int i = 0; i < numThreads; i++) { + final Path threadLocalFile = new Path("/myFile" + i); + threads[i] = new Thread(new Runnable() { + @Override + public void run() { + try { + assertTrue(!fs.exists(threadLocalFile)); + OutputStream output = fs.create(threadLocalFile); + output.write(5); + output.close(); + assertTrue(fs.exists(threadLocalFile)); + assertTrue(fs.listStatus(new Path("/")).length > 0); + } catch (Throwable ex) { + exceptionsEncountered.add(ex); + } + } + }); + } + for (Thread t : threads) { + t.start(); + } + for (Thread t : threads) { + t.join(); + } + assertTrue( + "Encountered exceptions: " + + StringUtils.join("\r\n", selectToString(exceptionsEncountered)), + exceptionsEncountered.isEmpty()); + tearDown(); + setUp(); + } + } +} 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java new file mode 100644 index 0000000..b4a71f6 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azure; + +import org.apache.hadoop.fs.FileSystemContractBaseTest; + +public class TestNativeAzureFileSystemContractEmulator extends + FileSystemContractBaseTest { + private AzureBlobStorageTestAccount testAccount; + + @Override + protected void setUp() throws Exception { + testAccount = AzureBlobStorageTestAccount.createForEmulator(); + if (testAccount != null) { + fs = testAccount.getFileSystem(); + } + } + + @Override + protected void tearDown() throws Exception { + if (testAccount != null) { + testAccount.cleanup(); + testAccount = null; + fs = null; + } + } + + @Override + protected void runTest() throws Throwable { + if (testAccount != null) { + super.runTest(); + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java new file mode 100644 index 0000000..03292f3 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +import org.apache.hadoop.fs.FileSystemContractBaseTest; + +public class TestNativeAzureFileSystemContractLive extends + FileSystemContractBaseTest { + private AzureBlobStorageTestAccount testAccount; + + @Override + protected void setUp() throws Exception { + testAccount = AzureBlobStorageTestAccount.create(); + if (testAccount != null) { + fs = testAccount.getFileSystem(); + } + } + + @Override + protected void tearDown() throws Exception { + if (testAccount != null) { + testAccount.cleanup(); + testAccount = null; + fs = null; + } + } + + @Override + protected void runTest() throws Throwable { + if (testAccount != null) { + super.runTest(); + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java new file mode 100644 index 0000000..f25055b --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +import org.apache.hadoop.fs.FileSystemContractBaseTest; + +public class TestNativeAzureFileSystemContractMocked extends + FileSystemContractBaseTest { + + @Override + protected void setUp() throws Exception { + fs = AzureBlobStorageTestAccount.createMock().getFileSystem(); + } + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java new file mode 100644 index 0000000..d7ff0c7 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.util.HashMap; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests the scenario where a colon is included in the file/directory name. + * + * NativeAzureFileSystem#create(), #mkdir(), and #rename() disallow the + * creation/rename of files/directories through WASB that have colons in the + * names. 
+ */ +public class TestNativeAzureFileSystemFileNameCheck { + private FileSystem fs = null; + private AzureBlobStorageTestAccount testAccount = null; + private String root = null; + + @Before + public void setUp() throws Exception { + testAccount = AzureBlobStorageTestAccount.createMock(); + fs = testAccount.getFileSystem(); + root = fs.getUri().toString(); + } + + @After + public void tearDown() throws Exception { + testAccount.cleanup(); + root = null; + fs = null; + testAccount = null; + } + + @Test + public void testCreate() throws Exception { + // positive test + Path testFile1 = new Path(root + "/testFile1"); + assertTrue(fs.createNewFile(testFile1)); + + // negative test + Path testFile2 = new Path(root + "/testFile2:2"); + try { + fs.createNewFile(testFile2); + fail("Should've thrown."); + } catch (IOException e) { // ignore + } + } + + @Test + public void testRename() throws Exception { + // positive test + Path testFile1 = new Path(root + "/testFile1"); + assertTrue(fs.createNewFile(testFile1)); + Path testFile2 = new Path(root + "/testFile2"); + fs.rename(testFile1, testFile2); + assertTrue(!fs.exists(testFile1) && fs.exists(testFile2)); + + // negative test + Path testFile3 = new Path(root + "/testFile3:3"); + try { + fs.rename(testFile2, testFile3); + fail("Should've thrown."); + } catch (IOException e) { // ignore + } + assertTrue(fs.exists(testFile2)); + } + + @Test + public void testMkdirs() throws Exception { + // positive test + Path testFolder1 = new Path(root + "/testFolder1"); + assertTrue(fs.mkdirs(testFolder1)); + + // negative test + Path testFolder2 = new Path(root + "/testFolder2:2"); + try { + assertTrue(fs.mkdirs(testFolder2)); + fail("Should've thrown."); + } catch (IOException e) { // ignore + } + } + + @Test + public void testWasbFsck() throws Exception { + // positive test + Path testFolder1 = new Path(root + "/testFolder1"); + assertTrue(fs.mkdirs(testFolder1)); + Path testFolder2 = new Path(testFolder1, "testFolder2"); + 
assertTrue(fs.mkdirs(testFolder2)); + Path testFolder3 = new Path(testFolder1, "testFolder3"); + assertTrue(fs.mkdirs(testFolder3)); + Path testFile1 = new Path(testFolder2, "testFile1"); + assertTrue(fs.createNewFile(testFile1)); + Path testFile2 = new Path(testFolder1, "testFile2"); + assertTrue(fs.createNewFile(testFile2)); + assertFalse(runWasbFsck(testFolder1)); + + // negative test + InMemoryBlockBlobStore backingStore = testAccount.getMockStorage() + .getBackingStore(); + backingStore.setContent(AzureBlobStorageTestAccount + .toMockUri("testFolder1/testFolder2/test2:2"), new byte[] { 1, 2 }, + new HashMap<String, String>()); + assertTrue(runWasbFsck(testFolder1)); + } + + private boolean runWasbFsck(Path p) throws Exception { + WasbFsck fsck = new WasbFsck(fs.getConf()); + fsck.setMockFileSystemForTesting(fs); + fsck.run(new String[] { p.toString() }); + return fsck.getPathNameWarning(); + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java new file mode 100644 index 0000000..c82cee3 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +/* + * Tests the Native Azure file system (WASB) against an actual blob store if + * provided in the environment. + */ +public class TestNativeAzureFileSystemLive extends + NativeAzureFileSystemBaseTest { + + @Override + protected AzureBlobStorageTestAccount createTestAccount() throws Exception { + return AzureBlobStorageTestAccount.create(); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java new file mode 100644 index 0000000..9819c18 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +public class TestNativeAzureFileSystemMocked extends + NativeAzureFileSystemBaseTest { + + @Override + protected AzureBlobStorageTestAccount createTestAccount() throws Exception { + return AzureBlobStorageTestAccount.createMock(); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemOperationsMocked.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemOperationsMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemOperationsMocked.java new file mode 100644 index 0000000..a6e782a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemOperationsMocked.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.azure;

import static org.junit.Assume.assumeTrue;

import org.apache.hadoop.fs.FSMainOperationsBaseTest;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Runs the main FileSystem operations test suite against the in-memory mock
 * Azure storage back-end.
 */
public class TestNativeAzureFileSystemOperationsMocked extends
    FSMainOperationsBaseTest {

  public TestNativeAzureFileSystemOperationsMocked() {
    // The base class uses this path as the root directory for its tests.
    super("/tmp/TestNativeAzureFileSystemOperationsMocked");
  }

  @Override
  protected FileSystem createFileSystem() throws Exception {
    return AzureBlobStorageTestAccount.createMock().getFileSystem();
  }

  // Overrides the inherited test with an empty body to disable it: WASB does
  // not honor directory permissions, so the base implementation would fail.
  public void testListStatusThrowsExceptionForUnreadableDir() throws Exception {
    System.out
        .println("Skipping testListStatusThrowsExceptionForUnreadableDir since WASB"
            + " doesn't honor directory permissions.");
    // NOTE(review): this assumption only aborts the (already empty) test on
    // Windows, so it looks like a leftover -- presumably it can be removed;
    // confirm against the JUnit runner's skip-vs-pass reporting before doing
    // so.
    assumeTrue(!Path.WINDOWS);
  }
}
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.util.HashMap; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests that WASB handles things gracefully when users add blobs to the Azure + * Storage container from outside WASB's control. 
+ */ +public class TestOutOfBandAzureBlobOperations { + private AzureBlobStorageTestAccount testAccount; + private FileSystem fs; + private InMemoryBlockBlobStore backingStore; + + @Before + public void setUp() throws Exception { + testAccount = AzureBlobStorageTestAccount.createMock(); + fs = testAccount.getFileSystem(); + backingStore = testAccount.getMockStorage().getBackingStore(); + } + + @After + public void tearDown() throws Exception { + testAccount.cleanup(); + fs = null; + backingStore = null; + } + + private void createEmptyBlobOutOfBand(String path) { + backingStore.setContent(AzureBlobStorageTestAccount.toMockUri(path), + new byte[] { 1, 2 }, new HashMap<String, String>()); + } + + @Test + public void testImplicitFolderListed() throws Exception { + createEmptyBlobOutOfBand("root/b"); + + // List the blob itself. + FileStatus[] obtained = fs.listStatus(new Path("/root/b")); + assertNotNull(obtained); + assertEquals(1, obtained.length); + assertFalse(obtained[0].isDirectory()); + assertEquals("/root/b", obtained[0].getPath().toUri().getPath()); + + // List the directory + obtained = fs.listStatus(new Path("/root")); + assertNotNull(obtained); + assertEquals(1, obtained.length); + assertFalse(obtained[0].isDirectory()); + assertEquals("/root/b", obtained[0].getPath().toUri().getPath()); + + // Get the directory's file status + FileStatus dirStatus = fs.getFileStatus(new Path("/root")); + assertNotNull(dirStatus); + assertTrue(dirStatus.isDirectory()); + assertEquals("/root", dirStatus.getPath().toUri().getPath()); + } + + @Test + public void testImplicitFolderDeleted() throws Exception { + createEmptyBlobOutOfBand("root/b"); + assertTrue(fs.exists(new Path("/root"))); + assertTrue(fs.delete(new Path("/root"), true)); + assertFalse(fs.exists(new Path("/root"))); + } + + @Test + public void testFileInImplicitFolderDeleted() throws Exception { + createEmptyBlobOutOfBand("root/b"); + assertTrue(fs.exists(new Path("/root"))); + assertTrue(fs.delete(new 
Path("/root/b"), true)); + assertTrue(fs.exists(new Path("/root"))); + } + + @Test + public void testFileAndImplicitFolderSameName() throws Exception { + createEmptyBlobOutOfBand("root/b"); + createEmptyBlobOutOfBand("root/b/c"); + FileStatus[] listResult = fs.listStatus(new Path("/root/b")); + // File should win. + assertEquals(1, listResult.length); + assertFalse(listResult[0].isDirectory()); + try { + // Trying to delete root/b/c would cause a dilemma for WASB, so + // it should throw. + fs.delete(new Path("/root/b/c"), true); + assertTrue("Should've thrown.", false); + } catch (AzureException e) { + assertEquals("File /root/b/c has a parent directory /root/b" + + " which is also a file. Can't resolve.", e.getMessage()); + } + } + + private static enum DeepCreateTestVariation { + File, Folder + }; + + /** + * Tests that when we create the file (or folder) x/y/z, we also create + * explicit folder blobs for x and x/y + */ + @Test + public void testCreatingDeepFileCreatesExplicitFolder() throws Exception { + for (DeepCreateTestVariation variation : DeepCreateTestVariation.values()) { + switch (variation) { + case File: + assertTrue(fs.createNewFile(new Path("/x/y/z"))); + break; + case Folder: + assertTrue(fs.mkdirs(new Path("/x/y/z"))); + break; + } + assertTrue(backingStore + .exists(AzureBlobStorageTestAccount.toMockUri("x"))); + assertTrue(backingStore.exists(AzureBlobStorageTestAccount + .toMockUri("x/y"))); + fs.delete(new Path("/x"), true); + } + } + + @Test + public void testSetPermissionOnImplicitFolder() throws Exception { + createEmptyBlobOutOfBand("root/b"); + FsPermission newPermission = new FsPermission((short) 0600); + fs.setPermission(new Path("/root"), newPermission); + FileStatus newStatus = fs.getFileStatus(new Path("/root")); + assertNotNull(newStatus); + assertEquals(newPermission, newStatus.getPermission()); + } + + @Test + public void testSetOwnerOnImplicitFolder() throws Exception { + createEmptyBlobOutOfBand("root/b"); + fs.setOwner(new 
Path("/root"), "newOwner", null); + FileStatus newStatus = fs.getFileStatus(new Path("/root")); + assertNotNull(newStatus); + assertEquals("newOwner", newStatus.getOwner()); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java new file mode 100644 index 0000000..1855c3b --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azure; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeNotNull; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.microsoft.windowsazure.storage.blob.BlobOutputStream; +import com.microsoft.windowsazure.storage.blob.CloudBlockBlob; + +public class TestOutOfBandAzureBlobOperationsLive { + private FileSystem fs; + private AzureBlobStorageTestAccount testAccount; + + @Before + public void setUp() throws Exception { + testAccount = AzureBlobStorageTestAccount.create(); + if (testAccount != null) { + fs = testAccount.getFileSystem(); + } + assumeNotNull(testAccount); + } + + @After + public void tearDown() throws Exception { + if (testAccount != null) { + testAccount.cleanup(); + testAccount = null; + fs = null; + } + } + + // scenario for this particular test described at MONARCH-HADOOP-764 + // creating a file out-of-band would confuse mkdirs("<oobfilesUncleFolder>") + // eg oob creation of "user/<name>/testFolder/a/input/file" + // Then wasb creation of "user/<name>/testFolder/a/output" fails + @Test + public void outOfBandFolder_uncleMkdirs() throws Exception { + + // NOTE: manual use of CloubBlockBlob targets working directory explicitly. + // WASB driver methods prepend working directory implicitly. 
+ String workingDir = "user/" + + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; + + CloudBlockBlob blob = testAccount.getBlobReference(workingDir + + "testFolder1/a/input/file"); + BlobOutputStream s = blob.openOutputStream(); + s.close(); + assertTrue(fs.exists(new Path("testFolder1/a/input/file"))); + + Path targetFolder = new Path("testFolder1/a/output"); + assertTrue(fs.mkdirs(targetFolder)); + } + + // scenario for this particular test described at MONARCH-HADOOP-764 + @Test + public void outOfBandFolder_parentDelete() throws Exception { + + // NOTE: manual use of CloubBlockBlob targets working directory explicitly. + // WASB driver methods prepend working directory implicitly. + String workingDir = "user/" + + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; + CloudBlockBlob blob = testAccount.getBlobReference(workingDir + + "testFolder2/a/input/file"); + BlobOutputStream s = blob.openOutputStream(); + s.close(); + assertTrue(fs.exists(new Path("testFolder2/a/input/file"))); + + Path targetFolder = new Path("testFolder2/a/input"); + assertTrue(fs.delete(targetFolder, true)); + } + + @Test + public void outOfBandFolder_rootFileDelete() throws Exception { + + CloudBlockBlob blob = testAccount.getBlobReference("fileY"); + BlobOutputStream s = blob.openOutputStream(); + s.close(); + assertTrue(fs.exists(new Path("/fileY"))); + assertTrue(fs.delete(new Path("/fileY"), true)); + } + + @Test + public void outOfBandFolder_firstLevelFolderDelete() throws Exception { + + CloudBlockBlob blob = testAccount.getBlobReference("folderW/file"); + BlobOutputStream s = blob.openOutputStream(); + s.close(); + assertTrue(fs.exists(new Path("/folderW"))); + assertTrue(fs.exists(new Path("/folderW/file"))); + assertTrue(fs.delete(new Path("/folderW"), true)); + } + + // scenario for this particular test described at MONARCH-HADOOP-764 + @Test + public void outOfBandFolder_siblingCreate() throws Exception { + + // NOTE: manual use of CloubBlockBlob 
targets working directory explicitly. + // WASB driver methods prepend working directory implicitly. + String workingDir = "user/" + + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; + CloudBlockBlob blob = testAccount.getBlobReference(workingDir + + "testFolder3/a/input/file"); + BlobOutputStream s = blob.openOutputStream(); + s.close(); + assertTrue(fs.exists(new Path("testFolder3/a/input/file"))); + + Path targetFile = new Path("testFolder3/a/input/file2"); + FSDataOutputStream s2 = fs.create(targetFile); + s2.close(); + } + + // scenario for this particular test described at MONARCH-HADOOP-764 + // creating a new file in the root folder + @Test + public void outOfBandFolder_create_rootDir() throws Exception { + Path targetFile = new Path("/newInRoot"); + FSDataOutputStream s2 = fs.create(targetFile); + s2.close(); + } + + // scenario for this particular test described at MONARCH-HADOOP-764 + @Test + public void outOfBandFolder_rename() throws Exception { + + // NOTE: manual use of CloubBlockBlob targets working directory explicitly. + // WASB driver methods prepend working directory implicitly. + String workingDir = "user/" + + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; + CloudBlockBlob blob = testAccount.getBlobReference(workingDir + + "testFolder4/a/input/file"); + BlobOutputStream s = blob.openOutputStream(); + s.close(); + + Path srcFilePath = new Path("testFolder4/a/input/file"); + assertTrue(fs.exists(srcFilePath)); + + Path destFilePath = new Path("testFolder4/a/input/file2"); + fs.rename(srcFilePath, destFilePath); + } + + // scenario for this particular test described at MONARCH-HADOOP-764 + @Test + public void outOfBandFolder_rename_rootLevelFiles() throws Exception { + + // NOTE: manual use of CloubBlockBlob targets working directory explicitly. + // WASB driver methods prepend working directory implicitly. 
+ CloudBlockBlob blob = testAccount.getBlobReference("fileX"); + BlobOutputStream s = blob.openOutputStream(); + s.close(); + + Path srcFilePath = new Path("/fileX"); + assertTrue(fs.exists(srcFilePath)); + + Path destFilePath = new Path("/fileXrename"); + fs.rename(srcFilePath, destFilePath); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java new file mode 100644 index 0000000..2284d1f --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azure; + +import static org.junit.Assert.assertEquals; + +import java.io.File; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Shell; +import org.junit.Assert; +import org.junit.Test; + +public class TestShellDecryptionKeyProvider { + public static final Log LOG = LogFactory + .getLog(TestShellDecryptionKeyProvider.class); + private static File TEST_ROOT_DIR = new File(System.getProperty( + "test.build.data", "/tmp"), "TestShellDecryptionKeyProvider"); + + @Test + public void testScriptPathNotSpecified() throws Exception { + if (!Shell.WINDOWS) { + return; + } + ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider(); + Configuration conf = new Configuration(); + String account = "testacct"; + String key = "key"; + + conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key); + try { + provider.getStorageAccountKey(account, conf); + Assert + .fail("fs.azure.shellkeyprovider.script is not specified, we should throw"); + } catch (KeyProviderException e) { + LOG.info("Received an expected exception: " + e.getMessage()); + } + } + + @Test + public void testValidScript() throws Exception { + if (!Shell.WINDOWS) { + return; + } + String expectedResult = "decretedKey"; + + // Create a simple script which echoes the given key plus the given + // expected result (so that we validate both script input and output) + File scriptFile = new File(TEST_ROOT_DIR, "testScript.cmd"); + FileUtils.writeStringToFile(scriptFile, "@echo %1 " + expectedResult); + + ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider(); + Configuration conf = new Configuration(); + String account = "testacct"; + String key = "key1"; + conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key); + conf.set(ShellDecryptionKeyProvider.KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT, + 
"cmd /c " + scriptFile.getAbsolutePath()); + + String result = provider.getStorageAccountKey(account, conf); + assertEquals(key + " " + expectedResult, result); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java new file mode 100644 index 0000000..a6c3f39 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestWasbFsck { + private AzureBlobStorageTestAccount testAccount; + private FileSystem fs; + private InMemoryBlockBlobStore backingStore; + + @Before + public void setUp() throws Exception { + testAccount = AzureBlobStorageTestAccount.createMock(); + fs = testAccount.getFileSystem(); + backingStore = testAccount.getMockStorage().getBackingStore(); + } + + @After + public void tearDown() throws Exception { + testAccount.cleanup(); + fs = null; + backingStore = null; + } + + /** + * Counts the number of temporary blobs in the backing store. + */ + private int getNumTempBlobs() { + int count = 0; + for (String key : backingStore.getKeys()) { + if (key.contains(NativeAzureFileSystem.AZURE_TEMP_FOLDER)) { + count++; + } + } + return count; + } + + private void runFsck(String command) throws Exception { + Configuration conf = fs.getConf(); + // Set the dangling cutoff to zero, so every temp blob is considered + // dangling. + conf.setInt(NativeAzureFileSystem.AZURE_TEMP_EXPIRY_PROPERTY_NAME, 0); + WasbFsck fsck = new WasbFsck(conf); + fsck.setMockFileSystemForTesting(fs); + fsck.run(new String[] { AzureBlobStorageTestAccount.MOCK_WASB_URI, command }); + } + + /** + * Tests that we delete dangling files properly + */ + @Test + public void testDelete() throws Exception { + Path danglingFile = new Path("/crashedInTheMiddle"); + + // Create a file and leave it dangling and try to delete it. 
+ FSDataOutputStream stream = fs.create(danglingFile); + stream.write(new byte[] { 1, 2, 3 }); + stream.flush(); + + // Now we should still only see a zero-byte file in this place + FileStatus fileStatus = fs.getFileStatus(danglingFile); + assertNotNull(fileStatus); + assertEquals(0, fileStatus.getLen()); + assertEquals(1, getNumTempBlobs()); + + // Run WasbFsck -delete to delete the file. + runFsck("-delete"); + + // Now we should see no trace of the file. + assertEquals(0, getNumTempBlobs()); + assertFalse(fs.exists(danglingFile)); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java new file mode 100644 index 0000000..ea24c59 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java @@ -0,0 +1,392 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeNotNull; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.util.Date; +import java.util.EnumSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +import com.microsoft.windowsazure.storage.blob.CloudBlobContainer; +import com.microsoft.windowsazure.storage.blob.CloudBlockBlob; + +public class TestWasbUriAndConfiguration { + + private static final int FILE_SIZE = 4096; + private static final String PATH_DELIMITER = "/"; + + protected String accountName; + protected String accountKey; + protected static Configuration conf = null; + + private AzureBlobStorageTestAccount testAccount; + + @After + public void tearDown() throws Exception { + if (testAccount != null) { + testAccount.cleanup(); + testAccount = null; + } + } + + private boolean validateIOStreams(Path filePath) throws IOException { + // Capture the file system from the test account. + FileSystem fs = testAccount.getFileSystem(); + return validateIOStreams(fs, filePath); + } + + private boolean validateIOStreams(FileSystem fs, Path filePath) + throws IOException { + + // Create and write a file + OutputStream outputStream = fs.create(filePath); + outputStream.write(new byte[FILE_SIZE]); + outputStream.close(); + + // Return true if the the count is equivalent to the file size. 
+ return (FILE_SIZE == readInputStream(fs, filePath)); + } + + private int readInputStream(Path filePath) throws IOException { + // Capture the file system from the test account. + FileSystem fs = testAccount.getFileSystem(); + return readInputStream(fs, filePath); + } + + private int readInputStream(FileSystem fs, Path filePath) throws IOException { + // Read the file + InputStream inputStream = fs.open(filePath); + int count = 0; + while (inputStream.read() >= 0) { + count++; + } + inputStream.close(); + + // Return true if the the count is equivalent to the file size. + return count; + } + + // Positive tests to exercise making a connection with to Azure account using + // account key. + @Test + public void testConnectUsingKey() throws Exception { + + testAccount = AzureBlobStorageTestAccount.create(); + assumeNotNull(testAccount); + + // Validate input and output on the connection. + assertTrue(validateIOStreams(new Path("/wasb_scheme"))); + } + + @Test + public void testConnectUsingSAS() throws Exception { + // Create the test account with SAS credentials. + testAccount = AzureBlobStorageTestAccount.create("", + EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer)); + assumeNotNull(testAccount); + // Validate input and output on the connection. + // NOTE: As of 4/15/2013, Azure Storage has a deficiency that prevents the + // full scenario from working (CopyFromBlob doesn't work with SAS), so + // just do a minor check until that is corrected. + assertFalse(testAccount.getFileSystem().exists(new Path("/IDontExist"))); + //assertTrue(validateIOStreams(new Path("/sastest.txt"))); + } + + @Test + public void testConnectUsingSASReadonly() throws Exception { + // Create the test account with SAS credentials. 
+ testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of( + CreateOptions.UseSas, CreateOptions.CreateContainer, + CreateOptions.Readonly)); + assumeNotNull(testAccount); + + // Create a blob in there + final String blobKey = "blobForReadonly"; + CloudBlobContainer container = testAccount.getRealContainer(); + CloudBlockBlob blob = container.getBlockBlobReference(blobKey); + ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[] { 1, + 2, 3 }); + blob.upload(inputStream, 3); + inputStream.close(); + + // Make sure we can read it from the file system + Path filePath = new Path("/" + blobKey); + FileSystem fs = testAccount.getFileSystem(); + assertTrue(fs.exists(filePath)); + byte[] obtained = new byte[3]; + DataInputStream obtainedInputStream = fs.open(filePath); + obtainedInputStream.readFully(obtained); + obtainedInputStream.close(); + assertEquals(3, obtained[2]); + } + + @Test + public void testConnectUsingAnonymous() throws Exception { + + // Create test account with anonymous credentials + testAccount = AzureBlobStorageTestAccount.createAnonymous("testWasb.txt", + FILE_SIZE); + assumeNotNull(testAccount); + + // Read the file from the public folder using anonymous credentials. 
+ assertEquals(FILE_SIZE, readInputStream(new Path("/testWasb.txt"))); + } + + @Test + public void testConnectToEmulator() throws Exception { + testAccount = AzureBlobStorageTestAccount.createForEmulator(); + assumeNotNull(testAccount); + assertTrue(validateIOStreams(new Path("/testFile"))); + } + + /** + * Tests that we can connect to fully qualified accounts outside of + * blob.core.windows.net + */ + @Test + public void testConnectToFullyQualifiedAccountMock() throws Exception { + Configuration conf = new Configuration(); + AzureBlobStorageTestAccount.setMockAccountKey(conf, + "mockAccount.mock.authority.net"); + AzureNativeFileSystemStore store = new AzureNativeFileSystemStore(); + MockStorageInterface mockStorage = new MockStorageInterface(); + store.setAzureStorageInteractionLayer(mockStorage); + NativeAzureFileSystem fs = new NativeAzureFileSystem(store); + fs.initialize( + new URI("wasb://[email protected]"), conf); + fs.createNewFile(new Path("/x")); + assertTrue(mockStorage.getBackingStore().exists( + "http://mockAccount.mock.authority.net/mockContainer/x")); + fs.close(); + } + + public void testConnectToRoot() throws Exception { + + // Set up blob names. + final String blobPrefix = String.format("wasbtests-%s-%tQ-blob", + System.getProperty("user.name"), new Date()); + final String inblobName = blobPrefix + "_In" + ".txt"; + final String outblobName = blobPrefix + "_Out" + ".txt"; + + // Create test account with default root access. + testAccount = AzureBlobStorageTestAccount.createRoot(inblobName, FILE_SIZE); + assumeNotNull(testAccount); + + // Read the file from the default container. + assertEquals(FILE_SIZE, readInputStream(new Path(PATH_DELIMITER + + inblobName))); + + try { + // Capture file system. + FileSystem fs = testAccount.getFileSystem(); + + // Create output path and open an output stream to the root folder. 
+ Path outputPath = new Path(PATH_DELIMITER + outblobName); + OutputStream outputStream = fs.create(outputPath); + fail("Expected an AzureException when writing to root folder."); + outputStream.write(new byte[FILE_SIZE]); + outputStream.close(); + } catch (AzureException e) { + assertTrue(true); + } catch (Exception e) { + String errMsg = String.format( + "Expected AzureException but got %s instead.", e); + assertTrue(errMsg, false); + } + } + + // Positive tests to exercise throttling I/O path. Connections are made to an + // Azure account using account key. + // + public void testConnectWithThrottling() throws Exception { + + testAccount = AzureBlobStorageTestAccount.createThrottled(); + + // Validate input and output on the connection. + assertTrue(validateIOStreams(new Path("/wasb_scheme"))); + } + + /** + * Creates a file and writes a single byte with the given value in it. + */ + private static void writeSingleByte(FileSystem fs, Path testFile, int toWrite) + throws Exception { + OutputStream outputStream = fs.create(testFile); + outputStream.write(toWrite); + outputStream.close(); + } + + /** + * Reads the file given and makes sure that it's a single-byte file with the + * given value in it. 
+ */ + private static void assertSingleByteValue(FileSystem fs, Path testFile, + int expectedValue) throws Exception { + InputStream inputStream = fs.open(testFile); + int byteRead = inputStream.read(); + assertTrue("File unexpectedly empty: " + testFile, byteRead >= 0); + assertTrue("File has more than a single byte: " + testFile, + inputStream.read() < 0); + inputStream.close(); + assertEquals("Unxpected content in: " + testFile, expectedValue, byteRead); + } + + @Test + public void testMultipleContainers() throws Exception { + AzureBlobStorageTestAccount firstAccount = AzureBlobStorageTestAccount + .create("first"), secondAccount = AzureBlobStorageTestAccount + .create("second"); + assumeNotNull(firstAccount); + assumeNotNull(secondAccount); + try { + FileSystem firstFs = firstAccount.getFileSystem(), secondFs = secondAccount + .getFileSystem(); + Path testFile = new Path("/testWasb"); + assertTrue(validateIOStreams(firstFs, testFile)); + assertTrue(validateIOStreams(secondFs, testFile)); + // Make sure that we're really dealing with two file systems here. + writeSingleByte(firstFs, testFile, 5); + writeSingleByte(secondFs, testFile, 7); + assertSingleByteValue(firstFs, testFile, 5); + assertSingleByteValue(secondFs, testFile, 7); + } finally { + firstAccount.cleanup(); + secondAccount.cleanup(); + } + } + + @Test + public void testDefaultKeyProvider() throws Exception { + Configuration conf = new Configuration(); + String account = "testacct"; + String key = "testkey"; + + conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key); + + String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration( + account, conf); + assertEquals(key, result); + } + + @Test + public void testValidKeyProvider() throws Exception { + Configuration conf = new Configuration(); + String account = "testacct"; + String key = "testkey"; + + conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key); + conf.setClass("fs.azure.account.keyprovider." 
+ account, + SimpleKeyProvider.class, KeyProvider.class); + String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration( + account, conf); + assertEquals(key, result); + } + + @Test + public void testInvalidKeyProviderNonexistantClass() throws Exception { + Configuration conf = new Configuration(); + String account = "testacct"; + + conf.set("fs.azure.account.keyprovider." + account, + "org.apache.Nonexistant.Class"); + try { + AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf); + Assert.fail("Nonexistant key provider class should have thrown a " + + "KeyProviderException"); + } catch (KeyProviderException e) { + } + } + + @Test + public void testInvalidKeyProviderWrongClass() throws Exception { + Configuration conf = new Configuration(); + String account = "testacct"; + + conf.set("fs.azure.account.keyprovider." + account, "java.lang.String"); + try { + AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf); + Assert.fail("Key provider class that doesn't implement KeyProvider " + + "should have thrown a KeyProviderException"); + } catch (KeyProviderException e) { + } + } + + /** + * Tests the cases when the URI is specified with no authority, i.e. + * wasb:///path/to/file. + */ + @Test + public void testNoUriAuthority() throws Exception { + // For any combination of default FS being asv(s)/wasb(s)://c@a/ and + // the actual URI being asv(s)/wasb(s):///, it should work. 
+ + String[] wasbAliases = new String[] { "wasb", "wasbs" }; + for (String defaultScheme : wasbAliases){ + for (String wantedScheme : wasbAliases) { + testAccount = AzureBlobStorageTestAccount.createMock(); + Configuration conf = testAccount.getFileSystem().getConf(); + String authority = testAccount.getFileSystem().getUri().getAuthority(); + URI defaultUri = new URI(defaultScheme, authority, null, null, null); + conf.set("fs.default.name", defaultUri.toString()); + URI wantedUri = new URI(wantedScheme + ":///random/path"); + NativeAzureFileSystem obtained = (NativeAzureFileSystem) FileSystem + .get(wantedUri, conf); + assertNotNull(obtained); + assertEquals(new URI(wantedScheme, authority, null, null, null), + obtained.getUri()); + // Make sure makeQualified works as expected + Path qualified = obtained.makeQualified(new Path(wantedUri)); + assertEquals(new URI(wantedScheme, authority, wantedUri.getPath(), + null, null), qualified.toUri()); + // Cleanup for the next iteration to not cache anything in FS + testAccount.cleanup(); + FileSystem.closeAll(); + } + } + // If the default FS is not a WASB FS, then specifying a URI without + // authority for the Azure file system should throw. 
+ testAccount = AzureBlobStorageTestAccount.createMock(); + Configuration conf = testAccount.getFileSystem().getConf(); + conf.set("fs.default.name", "file:///"); + try { + FileSystem.get(new URI("wasb:///random/path"), conf); + fail("Should've thrown."); + } catch (IllegalArgumentException e) { + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml new file mode 100644 index 0000000..fb2aa20 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml @@ -0,0 +1,49 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> + +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<configuration xmlns:xi="http://www.w3.org/2001/XInclude"> + + <property> + <name>fs.wasb.impl</name> + <value>org.apache.hadoop.fs.azure.NativeAzureFileSystem</value> + </property> + + <property> + <name>fs.wasbs.impl</name> + <value>org.apache.hadoop.fs.azure.NativeAzureFileSystem</value> + </property> + + + <!-- For tests against live azure, provide the following account information --> + <!-- + <property> + <name>fs.azure.test.account.name</name> + <value>{ACCOUNTNAME}.blob.core.windows.net</value> + </property> + <property> + <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name> + <value>{ACCOUNTKEY}</value> + </property> + --> + + <!-- For tests against azure-emulator --> + <!-- + <property> + <name>fs.azure.test.emulator</name> + <value>true</value> + </property> + --> +</configuration> http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties b/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties new file mode 100644 index 0000000..81b935b --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# log4j configuration used during build and unit tests + +log4j.rootLogger=INFO,stdout +log4j.threshhold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/hadoop-tools-dist/pom.xml ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 2e4ff87..0a9a5c7 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -91,6 +91,12 @@ </dependency> <dependency> <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-azure</artifactId> + <scope>compile</scope> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-sls</artifactId> <scope>compile</scope> </dependency> http://git-wip-us.apache.org/repos/asf/hadoop/blob/82268d87/hadoop-tools/pom.xml ---------------------------------------------------------------------- diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index 8475542..79b8877 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -44,6 +44,7 @@ <module>hadoop-openstack</module> <module>hadoop-sls</module> <module>hadoop-aws</module> + <module>hadoop-azure</module> </modules> <build>
