Repository: knox
Updated Branches:
  refs/heads/master 31ac5c54f -> 4d45d0545


KNOX-307: Topology deployment failed after undeploy


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/4d45d054
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/4d45d054
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/4d45d054

Branch: refs/heads/master
Commit: 4d45d05454e0a0f7683b4182bed73d707d525792
Parents: 31ac5c5
Author: Kevin Minder <[email protected]>
Authored: Mon Mar 17 01:31:29 2014 -0400
Committer: Kevin Minder <[email protected]>
Committed: Mon Mar 17 01:31:29 2014 -0400

----------------------------------------------------------------------
 build.xml                                       |   3 -
 gateway-server/pom.xml                          |   4 -
 .../topology/file/FileTopologyProvider.java     | 151 ++++++-----
 .../topology/file/FileTopologyProviderTest.java | 270 ++++++-------------
 .../gateway/topology/TopologyMonitor.java       |   4 +-
 .../java/org/apache/hadoop/test/TestUtils.java  |  10 +
 pom.xml                                         |  16 --
 7 files changed, 177 insertions(+), 281 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/4d45d054/build.xml
----------------------------------------------------------------------
diff --git a/build.xml b/build.xml
index 8ad7de6..3650cdb 100644
--- a/build.xml
+++ b/build.xml
@@ -110,9 +110,6 @@
         <copy file="CHANGES" todir="target/${gateway-version}"/>
     </target>
 
-
-
-
     <target name="build-candidate">
         <exec executable="${curl.cmd}">
             <arg value="--silent"/>

http://git-wip-us.apache.org/repos/asf/knox/blob/4d45d054/gateway-server/pom.xml
----------------------------------------------------------------------
diff --git a/gateway-server/pom.xml b/gateway-server/pom.xml
index 3f34cdd..fcf3ae1 100644
--- a/gateway-server/pom.xml
+++ b/gateway-server/pom.xml
@@ -170,10 +170,6 @@
         </dependency>
 
         <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-vfs2</artifactId>
-        </dependency>
-        <dependency>
             <groupId>commons-io</groupId>
             <artifactId>commons-io</artifactId>
         </dependency>

http://git-wip-us.apache.org/repos/asf/knox/blob/4d45d054/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/file/FileTopologyProvider.java
----------------------------------------------------------------------
diff --git 
a/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/file/FileTopologyProvider.java
 
b/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/file/FileTopologyProvider.java
index 484c5b6..cd0567d 100644
--- 
a/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/file/FileTopologyProvider.java
+++ 
b/gateway-server/src/main/java/org/apache/hadoop/gateway/topology/file/FileTopologyProvider.java
@@ -19,15 +19,12 @@ package org.apache.hadoop.gateway.topology.file;
 
 import org.apache.commons.digester3.Digester;
 import org.apache.commons.digester3.binder.DigesterLoader;
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.FilenameUtils;
-import org.apache.commons.vfs2.FileChangeEvent;
-import org.apache.commons.vfs2.FileContent;
-import org.apache.commons.vfs2.FileListener;
-import org.apache.commons.vfs2.FileName;
-import org.apache.commons.vfs2.FileObject;
-import org.apache.commons.vfs2.FileSystemException;
-import org.apache.commons.vfs2.VFS;
-import org.apache.commons.vfs2.impl.DefaultFileMonitor;
+import org.apache.commons.io.monitor.FileAlterationListener;
+import org.apache.commons.io.monitor.FileAlterationListenerAdaptor;
+import org.apache.commons.io.monitor.FileAlterationMonitor;
+import org.apache.commons.io.monitor.FileAlterationObserver;
 import org.apache.hadoop.gateway.GatewayMessages;
 import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
 import org.apache.hadoop.gateway.topology.Topology;
@@ -41,6 +38,7 @@ import 
org.apache.hadoop.gateway.topology.xml.KnoxFormatXmlTopologyRules;
 import org.xml.sax.SAXException;
 
 import java.io.File;
+import java.io.FileFilter;
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -56,7 +54,9 @@ import static 
org.apache.commons.digester3.binder.DigesterLoader.newLoader;
 
 //import org.codehaus.plexus.util.FileUtils;
 
-public class FileTopologyProvider implements TopologyProvider, 
TopologyMonitor, FileListener {
+public class FileTopologyProvider
+    extends FileAlterationListenerAdaptor
+    implements TopologyProvider, TopologyMonitor, FileFilter, 
FileAlterationListener {
 
   private static GatewayMessages log = MessagesFactory.get( 
GatewayMessages.class );
   private static DigesterLoader digesterLoader = newLoader( new 
KnoxFormatXmlTopologyRules(), new AmbariFormatXmlTopologyRules() );
@@ -66,29 +66,31 @@ public class FileTopologyProvider implements 
TopologyProvider, TopologyMonitor,
       SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.add("conf");
   }
 
-  private DefaultFileMonitor monitor;
-  private FileObject directory;
+  private FileAlterationMonitor monitor;
+  private File directory;
   private Set<TopologyListener> listeners;
-  private volatile Map<FileName, Topology> topologies;
+  private volatile Map<File,Topology> topologies;
 
-  // For unit testing.
-  FileTopologyProvider( DefaultFileMonitor monitor, FileObject directory ) 
throws IOException, SAXException {
+  FileTopologyProvider( FileAlterationMonitor monitor, File directory ) {
     this.directory = directory;
-    this.monitor = ( monitor != null ) ? monitor : new DefaultFileMonitor( 
this );
-    this.monitor.setRecursive( false );
-    this.monitor.addFile( this.directory );
+    this.monitor = monitor;
+
+    FileAlterationObserver observer = new FileAlterationObserver( 
this.directory, this );
+    observer.addListener( this );
+    monitor.addObserver( observer );
+
     this.listeners = new HashSet<TopologyListener>();
-    this.topologies = new HashMap<FileName, Topology>(); //loadTopologies( 
this.directory );
+    this.topologies = new HashMap<File,Topology>(); //loadTopologies( 
this.directory );
   }
 
   public FileTopologyProvider( File directory ) throws IOException, 
SAXException {
-    this( null, VFS.getManager().toFileObject( directory ) );
+      this( new FileAlterationMonitor( 1000L ), directory );
   }
 
-  private static Topology loadTopology( FileObject file ) throws IOException, 
SAXException, URISyntaxException, InterruptedException {
+  private static Topology loadTopology( File file ) throws IOException, 
SAXException, URISyntaxException, InterruptedException {
     final long TIMEOUT = 250; //ms
     final long DELAY = 50; //ms
-    log.loadingTopologyFile( file.getName().getFriendlyURI() );
+    log.loadingTopologyFile( file.getAbsolutePath() );
     Topology topology;
     long start = System.currentTimeMillis();
     while( true ) {
@@ -97,14 +99,14 @@ public class FileTopologyProvider implements 
TopologyProvider, TopologyMonitor,
         break;
       } catch ( IOException e ) {
         if( System.currentTimeMillis() - start < TIMEOUT ) {
-          log.failedToLoadTopologyRetrying( file.getName().getFriendlyURI(), 
Long.toString( DELAY ), e );
+          log.failedToLoadTopologyRetrying( file.getAbsolutePath(), 
Long.toString( DELAY ), e );
           Thread.sleep( DELAY );
         } else {
           throw e;
         }
       } catch ( SAXException e ) {
         if( System.currentTimeMillis() - start < TIMEOUT ) {
-          log.failedToLoadTopologyRetrying( file.getName().getFriendlyURI(), 
Long.toString( DELAY ), e );
+          log.failedToLoadTopologyRetrying( file.getAbsolutePath(), 
Long.toString( DELAY ), e );
           Thread.sleep( DELAY );
         } else {
           throw e;
@@ -114,34 +116,31 @@ public class FileTopologyProvider implements 
TopologyProvider, TopologyMonitor,
     return topology;
   }
 
-  private static Topology loadTopologyAttempt( FileObject file ) throws 
IOException, SAXException, URISyntaxException {
+  private static Topology loadTopologyAttempt( File file ) throws IOException, 
SAXException, URISyntaxException {
     Topology topology;Digester digester = digesterLoader.newDigester();
-    FileContent content = file.getContent();
-    TopologyBuilder topologyBuilder = digester.parse( content.getInputStream() 
);
+    TopologyBuilder topologyBuilder = digester.parse( 
FileUtils.openInputStream( file ) );
     topology = topologyBuilder.build();
-    topology.setUri( file.getURL().toURI() );
-    topology.setName( FilenameUtils.removeExtension( 
file.getName().getBaseName() ) );
-    topology.setTimestamp( content.getLastModifiedTime() );
+    topology.setUri( file.toURI() );
+    topology.setName( FilenameUtils.removeExtension( file.getName() ) );
+    topology.setTimestamp( file.lastModified() );
     return topology;
   }
 
-  private Map<FileName, Topology> loadTopologies( FileObject directory ) 
throws FileSystemException {
-    Map<FileName, Topology> map = new HashMap<FileName, Topology>();
-    if( directory.exists() && directory.getType().hasChildren() ) {
-      for( FileObject file : directory.getChildren() ) {
-        if( file.exists() && !file.getType().hasChildren() && 
SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.contains( file.getName().getExtension() )) {
-          try {
-            map.put( file.getName(), loadTopology( file ) );
-          } catch( IOException e ) {
-            // Maybe it makes sense to throw exception
-            log.failedToLoadTopology( file.getName().getFriendlyURI(), e );
-          } catch( SAXException e ) {
-            // Maybe it makes sense to throw exception
-            log.failedToLoadTopology( file.getName().getFriendlyURI(), e );
-          } catch ( Exception e ) {
-            // Maybe it makes sense to throw exception
-            log.failedToLoadTopology( file.getName().getFriendlyURI(), e );
-          }
+  private Map<File, Topology> loadTopologies( File directory ) {
+    Map<File, Topology> map = new HashMap<File, Topology>();
+    if( directory.exists() && directory.canRead() ) {
+      for( File file : directory.listFiles( this ) ) {
+        try {
+          map.put( file, loadTopology( file ) );
+        } catch( IOException e ) {
+          // Maybe it makes sense to throw exception
+          log.failedToLoadTopology( file.getAbsolutePath(), e );
+        } catch( SAXException e ) {
+          // Maybe it makes sense to throw exception
+          log.failedToLoadTopology( file.getAbsolutePath(), e );
+        } catch ( Exception e ) {
+          // Maybe it makes sense to throw exception
+          log.failedToLoadTopology( file.getAbsolutePath(), e );
         }
       }
     }
@@ -151,38 +150,39 @@ public class FileTopologyProvider implements 
TopologyProvider, TopologyMonitor,
   public void reloadTopologies() {
     try {
       synchronized ( this ) {
-        Map<FileName, Topology> oldTopologies = topologies;
-        Map<FileName, Topology> newTopologies = loadTopologies( directory );
+        Map<File,Topology> oldTopologies = topologies;
+        Map<File,Topology> newTopologies = loadTopologies( directory );
         List<TopologyEvent> events = createChangeEvents( oldTopologies, 
newTopologies );
         topologies = newTopologies;
         notifyChangeListeners( events );
       }
-    } catch( FileSystemException e ) {
+    }
+    catch( Exception e ) {
       // Maybe it makes sense to throw exception
       log.failedToReloadTopologies( e );
     }
   }
 
   private static List<TopologyEvent> createChangeEvents(
-    Map<FileName, Topology> oldTopologies,
-    Map<FileName, Topology> newTopologies ) {
+    Map<File,Topology> oldTopologies,
+    Map<File,Topology> newTopologies ) {
     ArrayList<TopologyEvent> events = new ArrayList<TopologyEvent>();
     // Go through the old topologies and find anything that was deleted.
-    for( FileName fileName : oldTopologies.keySet() ) {
-      if( !newTopologies.containsKey( fileName ) ) {
-        events.add( new TopologyEvent( TopologyEvent.Type.DELETED, 
oldTopologies.get( fileName ) ) );
+    for( File file : oldTopologies.keySet() ) {
+      if( !newTopologies.containsKey( file ) ) {
+        events.add( new TopologyEvent( TopologyEvent.Type.DELETED, 
oldTopologies.get( file ) ) );
       }
     }
     // Go through the new topologies and figure out what was updated vs added.
-    for( FileName fileName : newTopologies.keySet() ) {
-      if( oldTopologies.containsKey( fileName ) ) {
-        Topology oldTopology = oldTopologies.get( fileName );
-        Topology newTopology = newTopologies.get( fileName );
+    for( File file : newTopologies.keySet() ) {
+      if( oldTopologies.containsKey( file ) ) {
+        Topology oldTopology = oldTopologies.get( file );
+        Topology newTopology = newTopologies.get( file );
         if( newTopology.getTimestamp() > oldTopology.getTimestamp() ) {
-          events.add( new TopologyEvent( TopologyEvent.Type.UPDATED, 
newTopologies.get( fileName ) ) );
+          events.add( new TopologyEvent( TopologyEvent.Type.UPDATED, 
newTopologies.get( file ) ) );
         }
       } else {
-        events.add( new TopologyEvent( TopologyEvent.Type.CREATED, 
newTopologies.get( fileName ) ) );
+        events.add( new TopologyEvent( TopologyEvent.Type.CREATED, 
newTopologies.get( file ) ) );
       }
     }
     return events ;
@@ -200,7 +200,7 @@ public class FileTopologyProvider implements 
TopologyProvider, TopologyMonitor,
 
   @Override
   public Collection<Topology> getTopologies() {
-    Map<FileName, Topology> map = topologies;
+    Map<File,Topology> map = topologies;
     return Collections.unmodifiableCollection( map.values() );
   }
 
@@ -210,35 +210,40 @@ public class FileTopologyProvider implements 
TopologyProvider, TopologyMonitor,
   }
 
   @Override
-  public void startMonitor() {
+  public void startMonitor() throws Exception {
     monitor.start();
   }
 
   @Override
-  public void stopMonitor() {
+  public void stopMonitor() throws Exception {
     monitor.stop();
   }
 
-  private void handleFileEvent( FileChangeEvent fileChangeEvent ) throws 
FileSystemException {
-    FileObject file = fileChangeEvent.getFile();
-    if( file != null && ( !file.getType().hasChildren() || file.equals( 
directory ) ) ) {
-      reloadTopologies();
+  @Override
+  public boolean accept( File file ) {
+    boolean accept = false;
+    if( !file.isDirectory() && file.canRead() ) {
+      String extension = FilenameUtils.getExtension( file.getName() );
+      if( SUPPORTED_TOPOLOGY_FILE_EXTENSIONS.contains(  extension  ) ) {
+        accept = true;
+      }
     }
+    return accept;
   }
 
   @Override
-  public void fileCreated( FileChangeEvent fileChangeEvent ) throws 
FileSystemException {
-    handleFileEvent( fileChangeEvent );
+  public void onFileCreate( File file ) {
+    onFileChange( file );
   }
 
   @Override
-  public void fileDeleted( FileChangeEvent fileChangeEvent ) throws 
FileSystemException {
-    handleFileEvent( fileChangeEvent );
+  public void onFileDelete(java.io.File file) {
+    onFileChange( file );
   }
 
   @Override
-  public void fileChanged( FileChangeEvent fileChangeEvent ) throws 
FileSystemException {
-    handleFileEvent( fileChangeEvent );
+  public void onFileChange( File file ) {
+    reloadTopologies();
   }
 
 }

http://git-wip-us.apache.org/repos/asf/knox/blob/4d45d054/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/file/FileTopologyProviderTest.java
----------------------------------------------------------------------
diff --git 
a/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/file/FileTopologyProviderTest.java
 
b/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/file/FileTopologyProviderTest.java
index e66a94e..5accbcd 100644
--- 
a/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/file/FileTopologyProviderTest.java
+++ 
b/gateway-server/src/test/java/org/apache/hadoop/gateway/topology/file/FileTopologyProviderTest.java
@@ -23,6 +23,7 @@ import static org.hamcrest.core.IsNull.notNullValue;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -35,19 +36,16 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.vfs2.FileChangeEvent;
-import org.apache.commons.vfs2.FileListener;
-import org.apache.commons.vfs2.FileObject;
-import org.apache.commons.vfs2.FileSystemException;
-import org.apache.commons.vfs2.FileSystemManager;
-import org.apache.commons.vfs2.VFS;
-import org.apache.commons.vfs2.impl.DefaultFileMonitor;
+import org.apache.commons.io.monitor.FileAlterationMonitor;
+import org.apache.commons.io.monitor.FileAlterationObserver;
 import org.apache.hadoop.gateway.topology.Provider;
 import org.apache.hadoop.gateway.topology.ProviderParam;
 import org.apache.hadoop.gateway.topology.Topology;
 import org.apache.hadoop.gateway.topology.TopologyEvent;
 import org.apache.hadoop.gateway.topology.TopologyListener;
+import org.apache.hadoop.test.TestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -62,88 +60,24 @@ public class FileTopologyProviderTest {
   public void tearDown() throws Exception {
   }
 
-//  @Test
-//  public void testFileMonitor() throws IOException {
-//    FileSystemManager manager = VFS.getManager();
-//    FileObject dir = manager.resolveFile( "/Users/kevin.minder/tmp" );
-//    DefaultFileMonitor monitor = new DefaultFileMonitor( new FileListener() {
-//      @Override
-//      public void fileCreated( FileChangeEvent event ) throws Exception {
-//        System.out.println( "File created " + 
event.getFile().getName().getFriendlyURI() );
-//      }
-//      @Override
-//      public void fileDeleted( FileChangeEvent event ) throws Exception {
-//        System.out.println( "File deleted " + 
event.getFile().getName().getFriendlyURI() );
-//      }
-//      @Override
-//      public void fileChanged( FileChangeEvent event ) throws Exception {
-//        System.out.println( "File modified " + 
event.getFile().getName().getFriendlyURI() );
-//      }
-//    } );
-//    monitor.setRecursive( false );
-//    monitor.addFile( dir );
-//    monitor.start();
-//    System.out.println( "Waiting" );
-//    System.in.read();
-//  }
-
-//  @Test
-//  public void testRamFileSystemMonitor() throws IOException, 
InterruptedException {
-//    FileSystemManager manager = VFS.getManager();
-//    FileObject dir = manager.resolveFile( "ram:///dir" );
-//    dir.createFolder();
-//    DefaultFileMonitor monitor = new DefaultFileMonitor( new FileListener() {
-//      @Override
-//      public void fileCreated( FileChangeEvent event ) throws Exception {
-//        System.out.println( "Created " + 
event.getFile().getName().getFriendlyURI() );
-//      }
-//      @Override
-//      public void fileDeleted( FileChangeEvent event ) throws Exception {
-//        System.out.println( "Deleted " + 
event.getFile().getName().getFriendlyURI() );
-//      }
-//      @Override
-//      public void fileChanged( FileChangeEvent event ) throws Exception {
-//        System.out.println( "Modified " + 
event.getFile().getName().getFriendlyURI() );
-//      }
-//    } );
-//    monitor.addFile( dir );
-//    monitor.start();
-//    FileObject file = createFileNN( dir, "one", 
"org/apache/hadoop/gateway/topology/file/topology-one.xml", 1L );
-//    file = createFileNN( dir, "two", 
"org/apache/hadoop/gateway/topology/file/topology-two.xml", 2L );
-//    Thread.sleep( 4000 );
-//    file = createFileNN( dir, "two", 
"org/apache/hadoop/gateway/topology/file/topology-one.xml", 3L );
-//    file = createFileNN( dir, "one", 
"org/apache/hadoop/gateway/topology/file/topology-two.xml", 2L );
-//
-//    System.out.println( "Waiting" );
-//    System.in.read();
-//  }
-
-  private FileObject createDir( String name ) throws FileSystemException {
-    FileSystemManager fsm = VFS.getManager();
-    FileObject dir = fsm.resolveFile( name );
-    dir.createFolder();
-    assertTrue( "Failed to create test dir " + dir.getName().getFriendlyURI(), 
dir.exists() );
-    return dir;
+  private File createDir() throws IOException {
+    return TestUtils.createTempDir( this.getClass().getSimpleName() + "-" );
   }
 
-  private FileObject createFile( FileObject parent, String name, String 
resource, long timestamp ) throws IOException {
-    FileObject file = parent.resolveFile( name );
+  private File createFile( File parent, String name, String resource, long 
timestamp ) throws IOException {
+    File file = new File( parent, name );
     if( !file.exists() ) {
-      file.createFile();
+      FileUtils.touch( file );
     }
     InputStream input = ClassLoader.getSystemResourceAsStream( resource );
-    OutputStream output = file.getContent().getOutputStream();
+    OutputStream output = FileUtils.openOutputStream( file );
     IOUtils.copy( input, output );
     output.flush();
     input.close();
     output.close();
-    file.getContent().setLastModifiedTime( timestamp );
-    assertTrue( "Failed to create test file " + 
file.getName().getFriendlyURI(), file.exists() );
-    assertTrue( "Failed to populate test file " + 
file.getName().getFriendlyURI(), file.getContent().getSize() > 0 );
-
-//    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
-//    IOUtils.copy( file.getContent().getInputStream(), buffer );
-//    System.out.println( new String( buffer.toString( "UTF-8" ) ) );
+    file.setLastModified( timestamp );
+    assertTrue( "Failed to create test file " + file.getAbsolutePath(), 
file.exists() );
+    assertTrue( "Failed to populate test file " + file.getAbsolutePath(), 
file.length() > 0 );
 
     return file;
   }
@@ -151,73 +85,80 @@ public class FileTopologyProviderTest {
   @Test
   public void testGetTopologies() throws Exception {
 
-    FileObject dir = createDir( "ram:///test/dir" );
-    createFile( dir, "one.xml", 
"org/apache/hadoop/gateway/topology/file/topology-one.xml", 1L );
-
-    TestTopologyListener topoListener = new TestTopologyListener();
-    FileListenerDelegator fileListener = new FileListenerDelegator();
-    NoOpFileMonitor monitor = new NoOpFileMonitor( fileListener );
-
-    FileTopologyProvider provider = new FileTopologyProvider( monitor, dir );
-    provider.addTopologyChangeListener( topoListener );
-    fileListener.delegate = provider;
-
-    // Unit test "hack" to force monitor to execute.
-    provider.reloadTopologies();
-
-    Collection<Topology> topologies = provider.getTopologies();
-    assertThat( topologies, notNullValue() );
-    assertThat( topologies.size(), is( 1 ) );
-    Topology topology = topologies.iterator().next();
-    assertThat( topology.getName(), is( "one" ) );
-    assertThat( topology.getTimestamp(), is( 1L ) );
-    assertThat( topoListener.events.size(), is( 1 ) );
-    topoListener.events.clear();
-
-    // Add a file to the directory.
-    FileObject two = createFile( dir, "two.xml", 
"org/apache/hadoop/gateway/topology/file/topology-two.xml", 1L );
-    fileListener.fileCreated( new FileChangeEvent( two ) );
-    topologies = provider.getTopologies();
-    assertThat( topologies.size(), is( 2 ) );
-    Set<String> names = new HashSet<String>( Arrays.asList( "one", "two" ) );
-    Iterator<Topology> iterator = topologies.iterator();
-    topology = iterator.next();
-    assertThat( names, hasItem( topology.getName() ) );
-    names.remove( topology.getName() );
-    topology = iterator.next();
-    assertThat( names, hasItem( topology.getName() ) );
-    names.remove( topology.getName() );
-    assertThat( names.size(), is( 0 ) );
-    assertThat( topoListener.events.size(), is( 1 ) );
-    List<TopologyEvent> events = topoListener.events.get( 0 );
-    assertThat( events.size(), is( 1 ) );
-    TopologyEvent event = events.get( 0 );
-    assertThat( event.getType(), is( TopologyEvent.Type.CREATED ) );
-    assertThat( event.getTopology(), notNullValue() );
-
-    // Update a file in the directory.
-    two = createFile( dir, "two.xml", 
"org/apache/hadoop/gateway/topology/file/topology-three.xml", 2L );
-    fileListener.fileChanged( new FileChangeEvent( two ) );
-    topologies = provider.getTopologies();
-    assertThat( topologies.size(), is( 2 ) );
-    names = new HashSet<String>( Arrays.asList( "one", "two" ) );
-    iterator = topologies.iterator();
-    topology = iterator.next();
-    assertThat( names, hasItem( topology.getName() ) );
-    names.remove( topology.getName() );
-    topology = iterator.next();
-    assertThat( names, hasItem( topology.getName() ) );
-    names.remove( topology.getName() );
-    assertThat( names.size(), is( 0 ) );
+    File dir = createDir();
+    long time = dir.lastModified();
+    try {
+      createFile( dir, "one.xml", 
"org/apache/hadoop/gateway/topology/file/topology-one.xml", time );
+
+      TestTopologyListener topoListener = new TestTopologyListener();
+      FileAlterationMonitor monitor = new FileAlterationMonitor( 
Long.MAX_VALUE );
+      FileTopologyProvider provider = new FileTopologyProvider( monitor, dir );
+      provider.addTopologyChangeListener( topoListener );
+
+      kickMonitor( monitor );
+
+      Collection<Topology> topologies = provider.getTopologies();
+      assertThat( topologies, notNullValue() );
+      assertThat( topologies.size(), is( 1 ) );
+      Topology topology = topologies.iterator().next();
+      assertThat( topology.getName(), is( "one" ) );
+      assertThat( topology.getTimestamp(), is( time ) );
+      assertThat( topoListener.events.size(), is( 1 ) );
+      topoListener.events.clear();
+
+      // Add a file to the directory.
+      File two = createFile( dir, "two.xml", 
"org/apache/hadoop/gateway/topology/file/topology-two.xml", 1L );
+      kickMonitor( monitor );
+      topologies = provider.getTopologies();
+      assertThat( topologies.size(), is( 2 ) );
+      Set<String> names = new HashSet<String>( Arrays.asList( "one", "two" ) );
+      Iterator<Topology> iterator = topologies.iterator();
+      topology = iterator.next();
+      assertThat( names, hasItem( topology.getName() ) );
+      names.remove( topology.getName() );
+      topology = iterator.next();
+      assertThat( names, hasItem( topology.getName() ) );
+      names.remove( topology.getName() );
+      assertThat( names.size(), is( 0 ) );
+      assertThat( topoListener.events.size(), is( 1 ) );
+      List<TopologyEvent> events = topoListener.events.get( 0 );
+      assertThat( events.size(), is( 1 ) );
+      TopologyEvent event = events.get( 0 );
+      assertThat( event.getType(), is( TopologyEvent.Type.CREATED ) );
+      assertThat( event.getTopology(), notNullValue() );
+
+      // Update a file in the directory.
+      two = createFile( dir, "two.xml", 
"org/apache/hadoop/gateway/topology/file/topology-three.xml", 2L );
+      kickMonitor( monitor );
+      topologies = provider.getTopologies();
+      assertThat( topologies.size(), is( 2 ) );
+      names = new HashSet<String>( Arrays.asList( "one", "two" ) );
+      iterator = topologies.iterator();
+      topology = iterator.next();
+      assertThat( names, hasItem( topology.getName() ) );
+      names.remove( topology.getName() );
+      topology = iterator.next();
+      assertThat( names, hasItem( topology.getName() ) );
+      names.remove( topology.getName() );
+      assertThat( names.size(), is( 0 ) );
+
+      // Remove a file from the directory.
+      two.delete();
+      kickMonitor( monitor );
+      topologies = provider.getTopologies();
+      assertThat( topologies.size(), is( 1 ) );
+      topology = topologies.iterator().next();
+      assertThat( topology.getName(), is( "one" ) );
+      assertThat( topology.getTimestamp(), is( time ) );
+    } finally {
+      FileUtils.deleteQuietly( dir );
+    }
+  }
 
-    // Remove a file from the directory.
-    two.delete();
-    fileListener.fileDeleted( new FileChangeEvent( two ) );
-    topologies = provider.getTopologies();
-    assertThat( topologies.size(), is( 1 ) );
-    topology = topologies.iterator().next();
-    assertThat( topology.getName(), is( "one" ) );
-    assertThat( topology.getTimestamp(), is( 1L ) );
+  private void kickMonitor( FileAlterationMonitor monitor ) {
+    for( FileAlterationObserver observer : monitor.getObservers() ) {
+      observer.checkAndNotify();
+    }
   }
 
   @Test
@@ -250,43 +191,6 @@ public class FileTopologyProviderTest {
 
   }
   
-  private class FileListenerDelegator implements FileListener {
-    private FileListener delegate;
-
-    @Override
-    public void fileCreated( FileChangeEvent event ) throws Exception {
-      delegate.fileCreated( event );
-    }
-
-    @Override
-    public void fileDeleted( FileChangeEvent event ) throws Exception {
-      delegate.fileDeleted( event );
-    }
-
-    @Override
-    public void fileChanged( FileChangeEvent event ) throws Exception {
-      delegate.fileChanged( event );
-    }
-  }
-
-  private class NoOpFileMonitor extends DefaultFileMonitor {
-
-    public NoOpFileMonitor( FileListener listener ) {
-      super( listener );
-    }
-
-    @Override
-    public void start() {
-      // NOOP
-    }
-
-    @Override
-    public void stop() {
-      // NOOP
-    }
-
-  }
-
   private class TestTopologyListener implements TopologyListener {
 
     public ArrayList<List<TopologyEvent>> events = new 
ArrayList<List<TopologyEvent>>();

http://git-wip-us.apache.org/repos/asf/knox/blob/4d45d054/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/TopologyMonitor.java
----------------------------------------------------------------------
diff --git 
a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/TopologyMonitor.java
 
b/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/TopologyMonitor.java
index 12f9693..d0af1b3 100644
--- 
a/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/TopologyMonitor.java
+++ 
b/gateway-spi/src/main/java/org/apache/hadoop/gateway/topology/TopologyMonitor.java
@@ -21,8 +21,8 @@ public interface TopologyMonitor {
 
   void addTopologyChangeListener( TopologyListener listener );
 
-  void startMonitor();
+  void startMonitor() throws Exception;
 
-  void stopMonitor();
+  void stopMonitor() throws Exception;
 
 }

http://git-wip-us.apache.org/repos/asf/knox/blob/4d45d054/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java
----------------------------------------------------------------------
diff --git 
a/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java 
b/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java
index add02a4..d919575 100644
--- a/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java
+++ b/gateway-test-utils/src/main/java/org/apache/hadoop/test/TestUtils.java
@@ -17,14 +17,17 @@
  */
 package org.apache.hadoop.test;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 
+import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.Reader;
 import java.net.URL;
+import java.util.UUID;
 
 public class TestUtils {
 
@@ -56,4 +59,11 @@ public class TestUtils {
     return IOUtils.toString( getResourceReader( clazz, name, charset ) );
   }
 
+  public static File createTempDir( String prefix ) throws IOException {
+    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
+    File tempDir = new File( targetDir, prefix + UUID.randomUUID() );
+    FileUtils.forceMkdir( tempDir );
+    return tempDir;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/knox/blob/4d45d054/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index b807f7a..a3375a4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -774,22 +774,6 @@
             </dependency>
 
             <dependency>
-                <groupId>org.apache.commons</groupId>
-                <artifactId>commons-vfs2</artifactId>
-                <version>2.0</version>
-                <exclusions>
-                    <exclusion>
-                        <groupId>org.apache.maven.scm</groupId>
-                        <artifactId>maven-scm-api</artifactId>
-                    </exclusion>
-                    <exclusion>
-                        <groupId>org.apache.maven.scm</groupId>
-                        <artifactId>maven-scm-provider-svnexe</artifactId>
-                    </exclusion>
-                </exclusions>
-            </dependency>
-            
-            <dependency>
                 <groupId>org.jboss.shrinkwrap</groupId>
                 <artifactId>shrinkwrap-api</artifactId>
                 <version>1.0.1</version>

Reply via email to the mailing list.