This is an automated email from the ASF dual-hosted git repository.

ckj pushed a commit to branch ozone-1.3
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/ozone-1.3 by this push:
     new ffdbf145f0 HDDS-7284. JVM crash for rocksdb for read/write after close (#3801)
ffdbf145f0 is described below

commit ffdbf145f01d5e601364de9612fb16c2be7c74e6
Author: Sumit Agrawal <[email protected]>
AuthorDate: Tue Oct 25 13:24:21 2022 +0530

    HDDS-7284. JVM crash for rocksdb for read/write after close (#3801)
---
 .../TestSchemaOneBackwardsCompatibility.java       |   2 +-
 .../hadoop/hdds/scm/metadata/SCMMetadataStore.java |   4 +-
 .../org/apache/hadoop/hdds/utils/db/RDBTable.java  |   6 +-
 .../apache/hadoop/hdds/utils/db/RocksDatabase.java | 205 ++++++++++++++++++---
 .../org/apache/hadoop/hdds/utils/db/Table.java     |   4 +-
 .../apache/hadoop/hdds/utils/db/TypedTable.java    |   2 +-
 .../scm/block/DeletedBlockLogStateManager.java     |   3 +-
 .../scm/block/DeletedBlockLogStateManagerImpl.java |   2 +-
 .../hdds/scm/metadata/SCMMetadataStoreImpl.java    |   3 +-
 .../ozone/om/TestObjectStoreWithLegacyFS.java      |   4 +-
 .../ozone/om/TestOzoneManagerHAKeyDeletion.java    |  10 +-
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |   2 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   3 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |   2 +-
 .../impl/ReconContainerMetadataManagerImpl.java    |   4 +-
 .../ozone/recon/tasks/TestTableCountTask.java      |   2 +-
 16 files changed, 208 insertions(+), 50 deletions(-)

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index 1323a98b1e..7aab0af64e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -160,7 +160,7 @@ public class TestSchemaOneBackwardsCompatibility {
       table.iterator();
       Assert.fail("Table iterator should have thrown " +
               "UnsupportedOperationException.");
-    } catch (UnsupportedOperationException ex) {
+    } catch (IOException | UnsupportedOperationException ex) {
       // Exception thrown as expected.
     }
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
index a1c9c6bdc1..46b19aa090 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
@@ -127,8 +127,10 @@ public interface SCMMetadataStore extends DBStoreHAManager {
    *
    * @param certType - CertType.
    * @return Iterator<X509Certificate>
+   * @throws IOException on failure.
    */
-  TableIterator getAllCerts(CertificateStore.CertType certType);
+  TableIterator getAllCerts(CertificateStore.CertType certType)
+      throws IOException;
 
   /**
    * A Table that maintains all the pipeline information.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index d2bb381ed8..09f58a9335 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -157,12 +157,14 @@ class RDBTable implements Table<byte[], byte[]> {
   }
 
   @Override
-  public TableIterator<byte[], ByteArrayKeyValue> iterator() {
+  public TableIterator<byte[], ByteArrayKeyValue> iterator()
+      throws IOException {
     return new RDBStoreIterator(db.newIterator(family, false), this);
   }
 
   @Override
-  public TableIterator<byte[], ByteArrayKeyValue> iterator(byte[] prefix) {
+  public TableIterator<byte[], ByteArrayKeyValue> iterator(byte[] prefix)
+      throws IOException {
     return new RDBStoreIterator(db.newIterator(family, false), this,
         prefix);
   }
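
With this change, Table iterators are created through a checked-exception call, so call sites can no longer obtain an iterator outside of exception handling. A minimal sketch of the adapted call pattern follows; the per-entry handle() callback is a hypothetical placeholder, and the try-with-resources form assumes TableIterator remains Closeable, as it is in this module:

    // Acquire the iterator inside the try block so an IOException from an
    // already-closed RocksDB surfaces as a handled failure, not a JVM crash.
    try (TableIterator<byte[], ? extends Table.KeyValue<byte[], byte[]>> itr =
             table.iterator()) {
      while (itr.hasNext()) {
        handle(itr.next());   // hypothetical per-entry callback
      }
    } catch (IOException e) {
      // The database may have been closed concurrently; fail gracefully.
    }
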
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 6f20dcee7b..5fb7d51f6d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -48,6 +48,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -137,12 +138,13 @@ public final class RocksDatabase {
             descriptors, handles);
       }
       // init a column family map.
+      AtomicLong counter = new AtomicLong(0);
       for (ColumnFamilyHandle h : handles) {
-        final ColumnFamily f = new ColumnFamily(h);
+        final ColumnFamily f = new ColumnFamily(h, counter);
         columnFamilies.put(f.getName(), f);
       }
       return new RocksDatabase(dbFile, db, dbOptions, writeOptions,
-          descriptors, Collections.unmodifiableMap(columnFamilies));
+          descriptors, Collections.unmodifiableMap(columnFamilies), counter);
     } catch (RocksDBException e) {
       close(columnFamilies, db, descriptors, writeOptions, dbOptions);
       throw toIOException(RocksDatabase.class, "open " + dbFile, e);
@@ -208,11 +210,15 @@ public final class RocksDatabase {
     }
 
     public void createCheckpoint(Path path) throws IOException {
+      assertClose();
       try {
+        counter.incrementAndGet();
         checkpoint.get().createCheckpoint(path.toString());
       } catch (RocksDBException e) {
-        closeOnError(e);
+        closeOnError(e, true);
         throw toIOException(this, "createCheckpoint " + path, e);
+      } finally {
+        counter.decrementAndGet();
       }
     }
 
@@ -233,11 +239,15 @@ public final class RocksDatabase {
    */
   public static final class ColumnFamily {
     private final byte[] nameBytes;
+    private AtomicLong counter;
     private final String name;
     private final ColumnFamilyHandle handle;
+    private AtomicBoolean isClosed = new AtomicBoolean(false);
 
-    public ColumnFamily(ColumnFamilyHandle handle) throws RocksDBException {
+    public ColumnFamily(ColumnFamilyHandle handle, AtomicLong counter)
+        throws RocksDBException {
       this.nameBytes = handle.getName();
+      this.counter = counter;
       this.name = bytes2String(nameBytes);
       this.handle = handle;
       LOG.debug("new ColumnFamily for {}", name);
@@ -261,19 +271,37 @@ public final class RocksDatabase {
 
     public void batchDelete(ManagedWriteBatch writeBatch, byte[] key)
         throws IOException {
+      assertClosed();
       try {
+        counter.incrementAndGet();
         writeBatch.delete(getHandle(), key);
       } catch (RocksDBException e) {
         throw toIOException(this, "batchDelete key " + bytes2String(key), e);
+      } finally {
+        counter.decrementAndGet();
       }
     }
 
    public void batchPut(ManagedWriteBatch writeBatch, byte[] key, byte[] value)
         throws IOException {
+      assertClosed();
       try {
+        counter.incrementAndGet();
         writeBatch.put(getHandle(), key, value);
       } catch (RocksDBException e) {
         throw toIOException(this, "batchPut key " + bytes2String(key), e);
+      } finally {
+        counter.decrementAndGet();
+      }
+    }
+    
+    public void markClosed() {
+      isClosed.set(true);
+    }
+
+    private void assertClosed() throws IOException {
+      if (isClosed.get()) {
+        throw new IOException("Rocks Database is closed");
       }
     }
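
The pattern added to batchDelete() and batchPut() is the core guard of this commit, and it repeats on every read and write path below: check the closed flag, register the operation in the shared counter, and always unregister in a finally block. Abstracted into a single wrapper, the shape is as follows; the runGuarded() helper and RocksOp interface are hypothetical illustrations, while assertClosed() and counter come from the diff:

    @FunctionalInterface
    interface RocksOp<T> {
      T run() throws RocksDBException;
    }

    private <T> T runGuarded(RocksOp<T> op) throws IOException {
      assertClosed();               // fail fast once markClosed() has run
      counter.incrementAndGet();    // register this in-flight native call
      try {
        return op.run();            // the actual RocksDB/WriteBatch call
      } catch (RocksDBException e) {
        throw new IOException(e);   // the diff uses toIOException(...) here
      } finally {
        counter.decrementAndGet();  // always unregister, even on error
      }
    }

Note that the closed check runs before the increment, so a close() that completes between the two statements can still let one operation through; the counter narrows the use-after-close window considerably but does not eliminate it.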
 
@@ -291,28 +319,58 @@ public final class RocksDatabase {
   private final Map<String, ColumnFamily> columnFamilies;
 
   private final AtomicBoolean isClosed = new AtomicBoolean();
+  
+  private final AtomicLong counter;
 
   private RocksDatabase(File dbFile, ManagedRocksDB db,
       ManagedDBOptions dbOptions, ManagedWriteOptions writeOptions,
       List<ColumnFamilyDescriptor> descriptors,
-      Map<String, ColumnFamily> columnFamilies) {
+      Map<String, ColumnFamily> columnFamilies, AtomicLong counter) {
     this.name = getClass().getSimpleName() + "[" + dbFile + "]";
     this.db = db;
     this.dbOptions = dbOptions;
     this.writeOptions = writeOptions;
     this.descriptors = descriptors;
     this.columnFamilies = columnFamilies;
+    this.counter = counter;
   }
 
   public void close() {
     if (isClosed.compareAndSet(false, true)) {
+      if (columnFamilies != null) {
+        columnFamilies.values().stream().forEach(f -> f.markClosed());
+      }
+      // wait till all in-flight accesses to rocksdb have completed, to
+      // avoid a crash during close
+      while (true) {
+        if (counter.get() == 0) {
+          break;
+        }
+        try {
+          Thread.sleep(1);
+        } catch (InterruptedException e) {
+          close(columnFamilies, db, descriptors, writeOptions, dbOptions);
+          Thread.currentThread().interrupt();
+          return;
+        }
+      }
+      
+      // counter is 0: no operation in flight, safe to close
       close(columnFamilies, db, descriptors, writeOptions, dbOptions);
     }
   }
 
-  private void closeOnError(RocksDBException e) {
+  private void closeOnError(RocksDBException e, boolean isCounted) {
     if (shouldClose(e)) {
-      close();
+      try {
+        if (isCounted) {
+          counter.decrementAndGet();
+        }
+        close();
+      } finally {
+        if (isCounted) {
+          counter.incrementAndGet();
+        }
+      }
     }
   }
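
Taken together, markClosed(), the shared counter, and the drain loop in close() form a small reference-counting protocol. A self-contained model of that protocol, using only JDK types, is sketched below; the CloseGuard class is illustrative and not part of the commit:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicLong;

    // Minimal model of the close protocol: a closed flag rejects new
    // operations, an in-flight counter tracks active ones, and close()
    // drains the counter before native resources are released.
    final class CloseGuard {
      private final AtomicBoolean closed = new AtomicBoolean();
      private final AtomicLong inFlight = new AtomicLong();

      void enter() throws IOException {
        if (closed.get()) {
          throw new IOException("closed");
        }
        inFlight.incrementAndGet();
      }

      void exit() {
        inFlight.decrementAndGet();
      }

      // Mirrors RocksDatabase.close(): mark closed, busy-wait until every
      // in-flight operation has exited, then free the native resources.
      void close(Runnable releaseResources) {
        if (closed.compareAndSet(false, true)) {
          while (inFlight.get() != 0) {
            try {
              Thread.sleep(1);
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              break;    // the commit also closes on interrupt
            }
          }
          releaseResources.run();
        }
      }
    }

The 1 ms polling loop is the simplest way to drain outstanding work; a ReadWriteLock, with operations taking the read lock and close() taking the write lock, would avoid polling at the cost of threading a lock through every call site.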
 
@@ -325,55 +383,81 @@ public final class RocksDatabase {
       return false;
     }
   }
+  
+  private void assertClose() throws IOException {
+    if (isClosed()) {
+      throw new IOException("Rocks Database is closed");
+    }
+  }
 
   public void ingestExternalFile(ColumnFamily family, List<String> files,
       ManagedIngestExternalFileOptions ingestOptions) throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       db.get().ingestExternalFile(family.getHandle(), files, ingestOptions);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       String msg = "Failed to ingest external files " +
           files.stream().collect(Collectors.joining(", ")) + " of " +
           family.getName();
       throw toIOException(this, msg, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public void put(ColumnFamily family, byte[] key, byte[] value)
       throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       db.get().put(family.getHandle(), writeOptions, key, value);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "put " + bytes2String(key), e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public void flush() throws IOException {
+    assertClose();
     try (ManagedFlushOptions options = new ManagedFlushOptions()) {
+      counter.incrementAndGet();
       options.setWaitForFlush(true);
       db.get().flush(options);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "flush", e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public void flushWal(boolean sync) throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       db.get().flushWal(sync);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "flushWal with sync=" + sync, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public void compactRange() throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       db.get().compactRange();
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "compactRange", e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
@@ -386,8 +470,15 @@ public final class RocksDatabase {
    *         otherwise, return true.
    * @see org.rocksdb.RocksDB#keyMayExist(ColumnFamilyHandle, byte[], Holder)
    */
-  public boolean keyMayExist(ColumnFamily family, byte[] key) {
-    return db.get().keyMayExist(family.getHandle(), key, null);
+  public boolean keyMayExist(ColumnFamily family, byte[] key)
+      throws IOException {
+    assertClose();
+    try {
+      counter.incrementAndGet();
+      return db.get().keyMayExist(family.getHandle(), key, null);
+    } finally {
+      counter.decrementAndGet();
+    }
   }
 
   /**
@@ -396,10 +487,16 @@ public final class RocksDatabase {
    * @see org.rocksdb.RocksDB#keyMayExist(ColumnFamilyHandle, byte[], Holder)
    */
   public Supplier<byte[]> keyMayExistHolder(ColumnFamily family,
-      byte[] key) {
-    final Holder<byte[]> out = new Holder<>();
-    return db.get().keyMayExist(family.getHandle(), key, out) ?
-        out::getValue : null;
+      byte[] key) throws IOException {
+    assertClose();
+    try {
+      counter.incrementAndGet();
+      final Holder<byte[]> out = new Holder<>();
+      return db.get().keyMayExist(family.getHandle(), key, out) ?
+          out::getValue : null;
+    } finally {
+      counter.decrementAndGet();
+    }
   }
 
   public ColumnFamily getColumnFamily(String key) {
@@ -411,12 +508,16 @@ public final class RocksDatabase {
   }
 
   public byte[] get(ColumnFamily family, byte[] key) throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       return db.get().get(family.getHandle(), key);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       final String message = "get " + bytes2String(key) + " from " + family;
       throw toIOException(this, message, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
@@ -429,78 +530,118 @@ public final class RocksDatabase {
   }
 
   private long getLongProperty(String key) throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       return db.get().getLongProperty(key);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "getLongProperty " + key, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   private long getLongProperty(ColumnFamily family, String key)
       throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       return db.get().getLongProperty(family.getHandle(), key);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       final String message = "getLongProperty " + key + " from " + family;
       throw toIOException(this, message, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public String getProperty(String key) throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       return db.get().getProperty(key);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "getProperty " + key, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public String getProperty(ColumnFamily family, String key)
       throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       return db.get().getProperty(family.getHandle(), key);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "getProperty " + key + " from " + family, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public ManagedTransactionLogIterator getUpdatesSince(long sequenceNumber)
       throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       return managed(db.get().getUpdatesSince(sequenceNumber));
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "getUpdatesSince " + sequenceNumber, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public long getLatestSequenceNumber() {
-    return db.get().getLatestSequenceNumber();
+    try {
+      counter.incrementAndGet();
+      return db.get().getLatestSequenceNumber();
+    } finally {
+      counter.decrementAndGet();
+    }
   }
 
-  public ManagedRocksIterator newIterator(ColumnFamily family) {
-    return managed(db.get().newIterator(family.getHandle()));
+  public ManagedRocksIterator newIterator(ColumnFamily family)
+      throws IOException {
+    assertClose();
+    try {
+      counter.incrementAndGet();
+      return managed(db.get().newIterator(family.getHandle()));
+    } finally {
+      counter.decrementAndGet();
+    }
   }
 
   public ManagedRocksIterator newIterator(ColumnFamily family,
-                                          boolean fillCache) {
+      boolean fillCache) throws IOException {
+    assertClose();
     try (ManagedReadOptions readOptions = new ManagedReadOptions()) {
+      counter.incrementAndGet();
       readOptions.setFillCache(fillCache);
       return managed(db.get().newIterator(family.getHandle(), readOptions));
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
   public void batchWrite(ManagedWriteBatch writeBatch,
                          ManagedWriteOptions options)
       throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       db.get().write(options, writeBatch);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       throw toIOException(this, "batchWrite", e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
 
@@ -509,12 +650,16 @@ public final class RocksDatabase {
   }
 
   public void delete(ColumnFamily family, byte[] key) throws IOException {
+    assertClose();
     try {
+      counter.incrementAndGet();
       db.get().delete(family.getHandle(), key);
     } catch (RocksDBException e) {
-      closeOnError(e);
+      closeOnError(e, true);
       final String message = "delete " + bytes2String(key) + " from " + family;
       throw toIOException(this, message, e);
+    } finally {
+      counter.decrementAndGet();
     }
   }
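
Every public method in this file now follows the same enter/exit discipline. As a usage illustration of the CloseGuard sketch above (again illustrative, not committed code), a reader racing a close() either finishes before resources are freed or is rejected with an IOException, and in neither case touches a freed native handle:

    public static void main(String[] args) throws InterruptedException {
      CloseGuard guard = new CloseGuard();
      Thread reader = new Thread(() -> {
        try {
          guard.enter();
          try {
            Thread.sleep(5);   // stand-in for a native RocksDB read
          } catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();
          } finally {
            guard.exit();      // lets the close() drain loop finish
          }
        } catch (IOException e) {
          System.out.println("read rejected: database already closed");
        }
      });
      reader.start();
      guard.close(() -> System.out.println("native handles freed"));
      reader.join();
    }
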
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index 3202431474..98fbde6417 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -151,8 +151,10 @@ public interface Table<KEY, VALUE> extends AutoCloseable {
    * Returns the iterator for this metadata store.
    *
    * @return MetaStoreIterator
+   * @throws IOException on failure.
    */
-  TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator();
+  TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator()
+      throws IOException;
 
   /**
    * Returns a prefixed iterator for this metadata store.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index c43855a065..cc2f1345e2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -275,7 +275,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
   }
 
   @Override
-  public TableIterator<KEY, TypedKeyValue> iterator() {
+  public TableIterator<KEY, TypedKeyValue> iterator() throws IOException {
     TableIterator<byte[], ? extends KeyValue<byte[], byte[]>> iterator =
         rawTable.iterator();
     return new TypedTableIterator(iterator, keyType, valueType);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManager.java
index 6d35d4c807..0c03152f9d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManager.java
@@ -49,7 +49,8 @@ public interface DeletedBlockLogStateManager {
       throws IOException, TimeoutException;
 
   TableIterator<Long,
-      KeyValue<Long, DeletedBlocksTransaction>> getReadOnlyIterator();
+      KeyValue<Long, DeletedBlocksTransaction>> getReadOnlyIterator()
+      throws IOException;
 
   void onFlush();
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
index c4e0953fa3..ceeb278613 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
@@ -72,7 +72,7 @@ public class DeletedBlockLogStateManagerImpl
   }
 
   public TableIterator<Long, TypedTable.KeyValue<Long,
-      DeletedBlocksTransaction>> getReadOnlyIterator() {
+      DeletedBlocksTransaction>> getReadOnlyIterator() throws IOException {
     return new TableIterator<Long, TypedTable.KeyValue<Long,
         DeletedBlocksTransaction>>() {
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
index e71f6d7258..3d57b0b145 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
@@ -267,7 +267,8 @@ public class SCMMetadataStoreImpl implements SCMMetadataStore {
   }
 
   @Override
-  public TableIterator getAllCerts(CertificateStore.CertType certType) {
+  public TableIterator getAllCerts(CertificateStore.CertType certType)
+      throws IOException {
     if (certType == CertificateStore.CertType.VALID_CERTS) {
       return validCertsTable.iterator();
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
index 152e502b70..fb10a346e9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
@@ -145,10 +145,10 @@ public class TestObjectStoreWithLegacyFS {
   private boolean assertKeyCount(
       Table<String, OmKeyInfo> keyTable,
       String dbKey, int expectedCnt, String keyName) {
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-        itr = keyTable.iterator();
     int countKeys = 0;
     try {
+      TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+          itr = keyTable.iterator();
       itr.seek(dbKey);
       while (itr.hasNext()) {
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAKeyDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAKeyDeletion.java
index 3e0bda9db1..ca24d5dce7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAKeyDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAKeyDeletion.java
@@ -70,8 +70,14 @@ public class TestOzoneManagerHAKeyDeletion extends TestOzoneManagerHA {
     // Check delete table is empty or not on all OMs.
     getCluster().getOzoneManagersList().forEach((om) -> {
       try {
-        GenericTestUtils.waitFor(() ->
-                !om.getMetadataManager().getDeletedTable().iterator().hasNext(),
+        GenericTestUtils.waitFor(() -> {
+          try {
+            return !om.getMetadataManager().getDeletedTable().iterator()
+                .hasNext();
+          } catch (Exception ex) {
+            return false;
+          }
+        },
             10000, 120000);
       } catch (Exception ex) {
         fail("TestOzoneManagerHAKeyDeletion failed");
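
Since the deleted-table iterator can now throw IOException, it can no longer be called directly from the waitFor lambda, which cannot throw checked exceptions; the test wraps the call and returns false on failure so the poller simply retries. The same idiom as a reusable helper, where the RetryingChecks class is a hypothetical illustration rather than committed code:

    import java.io.IOException;
    import java.util.function.Supplier;

    final class RetryingChecks {
      @FunctionalInterface
      interface CheckedCondition {
        boolean evaluate() throws IOException;
      }

      // Adapts a condition that throws IOException into the Supplier<Boolean>
      // a poller such as GenericTestUtils.waitFor expects, treating a failure
      // as "condition not yet met" so the poll retries instead of aborting.
      static Supplier<Boolean> retryingCheck(CheckedCondition condition) {
        return () -> {
          try {
            return condition.evaluate();
          } catch (IOException e) {
            return false;
          }
        };
      }
    }
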
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 67356a88f3..469170475d 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -433,7 +433,7 @@ public interface OMMetadataManager extends DBStoreHAManager {
       getBucketIterator();
 
   TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-      getKeyIterator();
+      getKeyIterator() throws IOException;
 
   /**
    * Given parent object id and path component name, return the corresponding
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 12be1cdad8..dd66c33a1e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -39,7 +39,6 @@ import java.util.Stack;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
@@ -1571,7 +1570,7 @@ public class KeyManagerImpl implements KeyManager {
       getIteratorForKeyInTableCache(
       boolean recursive, String startKey, String volumeName, String bucketName,
       TreeMap<String, OzoneFileStatus> cacheKeyMap, String keyArgs,
-      Table<String, OmKeyInfo> keyTable) {
+      Table<String, OmKeyInfo> keyTable) throws IOException {
    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator;
     try {
       Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 1db265ed58..1aae8447dc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -969,7 +969,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public TableIterator<String, ? extends KeyValue<String, OmKeyInfo>>
-      getKeyIterator() {
+      getKeyIterator() throws IOException {
     return keyTable.iterator();
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
index 8e9931798c..53aca7ade5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
@@ -506,12 +506,12 @@ public class ReconContainerMetadataManagerImpl
   }
 
   @Override
-  public TableIterator getContainerTableIterator() {
+  public TableIterator getContainerTableIterator() throws IOException {
     return containerKeyTable.iterator();
   }
 
   @Override
-  public TableIterator getKeyContainerTableIterator() {
+  public TableIterator getKeyContainerTableIterator() throws IOException {
     return keyContainerTable.iterator();
   }
 
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
index 8151b2385b..fb400f6417 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
@@ -83,7 +83,7 @@ public class TestTableCountTask extends AbstractReconSqlDBTest {
   }
 
   @Test
-  public void testReprocess() {
+  public void testReprocess() throws Exception {
     OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
     // Mock 5 rows in each table and test the count
     for (String tableName: tableCountTask.getTaskTables()) {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
