Repository: hbase
Updated Branches:
  refs/heads/branch-1 c66a6b02a -> 431b8a538
Cache more efficiently getAll() and get() in FSTableDescriptors

Signed-off-by: stack <st...@apache.org>

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/431b8a53
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/431b8a53
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/431b8a53

Branch: refs/heads/branch-1
Commit: 431b8a5383b894381583bbb9ceef5911911b705c
Parents: c66a6b0
Author: Esteban Gutierrez <este...@cloudera.com>
Authored: Tue Nov 4 02:56:42 2014 -0800
Committer: stack <st...@apache.org>
Committed: Tue Nov 4 15:10:08 2014 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/TableDescriptors.java   |  10 +
 .../org/apache/hadoop/hbase/master/HMaster.java |  16 +-
 .../master/handler/CreateTableHandler.java      |   5 +-
 .../master/handler/TruncateTableHandler.java    |   4 +-
 .../hbase/regionserver/HRegionServer.java       |  12 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   | 208 ++++++++++---------
 .../hadoop/hbase/master/TestCatalogJanitor.java |   7 +
 .../hbase/util/TestFSTableDescriptors.java      | 182 +++++++++++++---
 8 files changed, 304 insertions(+), 140 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/431b8a53/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 7197fd7..e4f1a84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -69,4 +69,14 @@ public interface TableDescriptors {
    */
   HTableDescriptor remove(final TableName tablename)
   throws IOException;
+
+  /**
+   * Enables the tabledescriptor cache
+   */
+  void setCacheOn() throws IOException;
+
+  /**
+   * Disables the tabledescriptor cache
+   */
+  void setCacheOff() throws IOException;
 }
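The two new methods make the descriptor cache switchable through the TableDescriptors contract itself. A minimal usage sketch, not part of the patch (the caller, table name and printout are illustrative; only setCacheOn()/setCacheOff()/get()/getAll() come from the interface):

import java.io.IOException;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;

public class DescriptorCacheToggleExample {
  // Warm the cache once, then serve subsequent lookups from memory.
  static void warmAndRead(TableDescriptors tds, TableName table) throws IOException {
    tds.setCacheOn();                       // enable caching; FSTableDescriptors clears old entries here
    tds.getAll();                           // one pass over the descriptors populates the cache
    HTableDescriptor htd = tds.get(table);  // answered from the cache when an entry exists
    if (htd != null) {
      System.out.println(table + " has " + htd.getColumnFamilies().length + " families");
    }
    tds.setCacheOff();                      // back to reading descriptors straight from the filesystem
  }
}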
http://git-wip-us.apache.org/repos/asf/hbase/blob/431b8a53/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5ee6a66..0b86ece 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -214,6 +214,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   MasterCoprocessorHost cpHost;
 
+  private final boolean preLoadTableDescriptors;
+
   // Time stamps for when a hmaster became active
   private long masterActiveTime;
 
@@ -285,8 +287,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));
 
+    // preload table descriptor at startup
+    this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);
+
     // Do we publish the status?
-
+
     boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
       HConstants.STATUS_PUBLISHED_DEFAULT);
     Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
@@ -505,6 +510,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
     this.fileSystemManager = new MasterFileSystem(this, this);
 
+    // enable table descriptors cache
+    this.tableDescriptors.setCacheOn();
+
+    // warm-up HTDs cache on master initialization
+    if (preLoadTableDescriptors) {
+      status.setStatus("Pre-loading table descriptors");
+      this.tableDescriptors.getAll();
+    }
+
     // publish cluster ID
     status.setStatus("Publishing Cluster ID in ZooKeeper");
     ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());

http://git-wip-us.apache.org/repos/asf/hbase/blob/431b8a53/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index 7ca0f6a..fab1586 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -266,7 +266,7 @@ public class CreateTableHandler extends EventHandler {
       ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
     }
 
-    // 6. Set table enabled flag up in zk.
+    // 7. Set table enabled flag up in zk.
     try {
       assignmentManager.getTableStateManager().setTableState(tableName,
         ZooKeeperProtos.Table.State.ENABLED);
@@ -274,6 +274,9 @@
       throw new IOException("Unable to ensure that " + tableName + " will be" +
         " enabled because of a ZooKeeper issue", e);
     }
+
+    // 8. Update the tabledescriptor cache.
+    ((HMaster) this.server).getTableDescriptors().get(tableName);
   }
 
   /**
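The master now reads one new boolean property, hbase.master.preload.tabledescriptors (default true), switches the cache on, and optionally warms it during initialization; table creation then primes the cache for the new table (step 8 above). A rough sketch of that startup path, not part of the patch; the helper class and method names are illustrative, the property name, default and calls are the ones the diff adds:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableDescriptors;

final class MasterStartupSketch {
  static void initDescriptorCache(Configuration conf, TableDescriptors tds) throws IOException {
    // Same property and default that the patch introduces in HMaster.
    boolean preload = conf.getBoolean("hbase.master.preload.tabledescriptors", true);
    tds.setCacheOn();      // start with an empty, enabled cache
    if (preload) {
      tds.getAll();        // one pass over the filesystem fills the cache up front
    }
  }
}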
http://git-wip-us.apache.org/repos/asf/hbase/blob/431b8a53/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
index de3c064..f4f9996 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
@@ -97,9 +97,9 @@ public class TruncateTableHandler extends DeleteTableHandler {
     CreateTableHandler.checkAndSetEnablingTable(assignmentManager, tableName);
     try {
       // 1. Create Table Descriptor
-      new FSTableDescriptors(server.getConfiguration())
-        .createTableDescriptorForTableDirectory(tempdir, this.hTableDescriptor, false);
       Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName);
+      new FSTableDescriptors(server.getConfiguration())
+        .createTableDescriptorForTableDirectory(tempTableDir, this.hTableDescriptor, false);
 
       Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName);
 
       HRegionInfo[] newRegions;

http://git-wip-us.apache.org/repos/asf/hbase/blob/431b8a53/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2f34587..1a83e76 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -514,7 +514,7 @@
     this.fs = new HFileSystem(this.conf, useHBaseChecksum);
     this.rootDir = FSUtils.getRootDir(this.conf);
     this.tableDescriptors = new FSTableDescriptors(
-      this.fs, this.rootDir, !canUpdateTableDescriptor());
+      this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
     service = new ExecutorService(getServerName().toShortString());
     spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
 
@@ -608,7 +608,7 @@
     return ConnectionUtils.createShortCircuitHConnection(
       HConnectionManager.getConnection(conf), serverName, rpcServices, rpcServices);
   }
-
+
   /**
    * Run test on configured codecs to make sure supporting libs are in place.
    * @param c
@@ -3011,7 +3011,7 @@
     }
     return result;
   }
-
+
   public CoprocessorServiceResponse execRegionServerService(final RpcController controller,
       final CoprocessorServiceRequest serviceRequest) throws ServiceException {
     try {
@@ -3058,7 +3058,7 @@
       throw new ServiceException(ie);
     }
   }
-
+
   /**
   * @return The cache config instance used by the regionserver.
   */
@@ -3072,7 +3072,7 @@
   protected ConfigurationManager getConfigurationManager() {
     return configurationManager;
   }
-
+
   /**
   * Reload the configuration from disk.
   */
@@ -3080,6 +3080,6 @@
     LOG.info("Reloading the configuration from disk.");
     // Reload the configuration from disk.
     conf.reloadConfiguration();
-    configurationManager.notifyAllObservers(conf);
+    configurationManager.notifyAllObservers(conf);
   }
 }
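The region server keeps the cache off: it calls the new four-argument constructor with usecache=false, so its get() calls still read from the filesystem, while the master constructs a caching, writable instance. A small sketch of the two construction modes, not part of the patch (variable names are illustrative; the constructor signature is the one added in FSTableDescriptors):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;

final class FstdConstructionSketch {
  static void build(Configuration conf) throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);

    // Region-server style: possibly read-only, never caches.
    FSTableDescriptors rsDescriptors =
        new FSTableDescriptors(fs, rootDir, /* fsreadonly= */ true, /* usecache= */ false);

    // Master style: writable, caching enabled (the master additionally calls setCacheOn() at startup).
    FSTableDescriptors masterDescriptors =
        new FSTableDescriptors(fs, rootDir, /* fsreadonly= */ false, /* usecache= */ true);

    System.out.println("built " + rsDescriptors + " and " + masterDescriptors);
  }
}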
http://git-wip-us.apache.org/repos/asf/hbase/blob/431b8a53/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 3718ab8..30ebc7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -74,6 +74,9 @@ public class FSTableDescriptors implements TableDescriptors {
   private final FileSystem fs;
   private final Path rootdir;
   private final boolean fsreadonly;
+  private volatile boolean usecache;
+  private volatile boolean fsvisited;
+
   @VisibleForTesting long cachehits = 0;
   @VisibleForTesting long invocations = 0;
 
@@ -85,29 +88,8 @@ public class FSTableDescriptors implements TableDescriptors {
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<TableName, TableDescriptorAndModtime> cache =
-    new ConcurrentHashMap<TableName, TableDescriptorAndModtime>();
-
-  /**
-   * Data structure to hold modification time and table descriptor.
-   */
-  private static class TableDescriptorAndModtime {
-    private final HTableDescriptor htd;
-    private final long modtime;
-
-    TableDescriptorAndModtime(final long modtime, final HTableDescriptor htd) {
-      this.htd = htd;
-      this.modtime = modtime;
-    }
-
-    long getModtime() {
-      return this.modtime;
-    }
-
-    HTableDescriptor getTableDescriptor() {
-      return this.htd;
-    }
-  }
+  private final Map<TableName, HTableDescriptor> cache =
+    new ConcurrentHashMap<TableName, HTableDescriptor>();
 
   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
@@ -118,8 +100,9 @@ public class FSTableDescriptors implements TableDescriptors {
     this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
   }
 
-  public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
-    this(fs, rootdir, false);
+  public FSTableDescriptors(final FileSystem fs, final Path rootdir)
+  throws IOException {
+    this(fs, rootdir, false, true);
   }
 
   /**
@@ -127,11 +110,27 @@
    * operations; i.e. on remove, we do not do delete in fs.
    */
   public FSTableDescriptors(final FileSystem fs,
-    final Path rootdir, final boolean fsreadonly) {
+    final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
     super();
     this.fs = fs;
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
+    this.usecache = usecache;
+  }
+
+  public void setCacheOn() throws IOException {
+    this.cache.clear();
+    this.usecache = true;
+  }
+
+  public void setCacheOff() throws IOException {
+    this.usecache = false;
+    this.cache.clear();
+  }
+
+  @VisibleForTesting
+  public boolean isUsecache() {
+    return this.usecache;
   }
 
  /**
@@ -154,20 +153,17 @@
       throw new IOException("No descriptor found for non table = " + tablename);
     }
 
-    // Look in cache of descriptors.
-    TableDescriptorAndModtime cachedtdm = this.cache.get(tablename);
-
-    if (cachedtdm != null) {
-      // Check mod time has not changed (this is trip to NN).
-      if (getTableInfoModtime(tablename) <= cachedtdm.getModtime()) {
+    if (usecache) {
+      // Look in cache of descriptors.
+      HTableDescriptor cachedtdm = this.cache.get(tablename);
+      if (cachedtdm != null) {
         cachehits++;
-        return cachedtdm.getTableDescriptor();
+        return cachedtdm;
       }
     }
-
-    TableDescriptorAndModtime tdmt = null;
+    HTableDescriptor tdmt = null;
     try {
-      tdmt = getTableDescriptorAndModtime(tablename);
+      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
     } catch (NullPointerException e) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
           + tablename, e);
@@ -175,11 +171,12 @@
       LOG.debug("Exception during readTableDecriptor. Current table name = "
          + tablename, ioe);
     }
-
-    if (tdmt != null) {
+    // last HTD written wins
+    if (usecache && tdmt != null) {
       this.cache.put(tablename, tdmt);
     }
-    return tdmt == null ? null : tdmt.getTableDescriptor();
+
+    return tdmt;
   }
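The behavioral change in get() is that a cache hit no longer costs a NameNode round trip to compare file modification times; whatever was cached last simply wins, and the cache now stores HTableDescriptor directly instead of the removed TableDescriptorAndModtime wrapper. The read-path pattern, reduced to its essentials (a generic sketch in plain Java, not the HBase API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

final class LastWriteWinsCache<K, V> {
  private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<>();
  private volatile boolean usecache = true;

  // Mirrors the shape of the new FSTableDescriptors.get(): hit -> return immediately,
  // miss -> load from the backing store and remember the result.
  V get(K key, Function<K, V> loader) {
    if (usecache) {
      V cached = cache.get(key);
      if (cached != null) {
        return cached;               // no freshness check against the backing store
      }
    }
    V loaded = loader.apply(key);    // e.g. read the tableinfo file from HDFS
    if (usecache && loaded != null) {
      cache.put(key, loaded);        // "last HTD written wins"
    }
    return loaded;
  }
}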
 
  /**
@@ -189,17 +186,33 @@ public Map<String, HTableDescriptor> getAll()
  throws IOException {
     Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
-    List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
-    for (Path d: tableDirs) {
-      HTableDescriptor htd = null;
-      try {
-        htd = get(FSUtils.getTableName(d));
-      } catch (FileNotFoundException fnfe) {
-        // inability of retrieving one HTD shouldn't stop getting the remaining
-        LOG.warn("Trouble retrieving htd", fnfe);
+
+    if (fsvisited && usecache) {
+      for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
+        htds.put(entry.getKey().toString(), entry.getValue());
+      }
+      // add hbase:meta to the response
+      htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(),
+        HTableDescriptor.META_TABLEDESC);
+    } else {
+      LOG.debug("Fetching table descriptors from the filesystem.");
+      boolean allvisited = true;
+      for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
+        HTableDescriptor htd = null;
+        try {
+          htd = get(FSUtils.getTableName(d));
+        } catch (FileNotFoundException fnfe) {
+          // inability of retrieving one HTD shouldn't stop getting the remaining
+          LOG.warn("Trouble retrieving htd", fnfe);
+        }
+        if (htd == null) {
+          allvisited = false;
+          continue;
+        } else {
+          htds.put(htd.getTableName().getNameAsString(), htd);
+        }
+        fsvisited = allvisited;
       }
-      if (htd == null) continue;
-      htds.put(htd.getTableName().getNameAsString(), htd);
     }
     return htds;
   }
 
  /**
@@ -244,8 +257,6 @@
        "Cannot add a table descriptor for a reserved subdirectory name: "
          + htd.getNameAsString());
     }
     updateTableDescriptor(htd);
-    long modtime = getTableInfoModtime(htd.getTableName());
-    this.cache.put(htd.getTableName(), new TableDescriptorAndModtime(modtime, htd));
   }
 
  /**
@@ -265,8 +276,12 @@
        throw new IOException("Failed delete of " + tabledir.toString());
      }
    }
-    TableDescriptorAndModtime tdm = this.cache.remove(tablename);
-    return tdm == null ? null : tdm.getTableDescriptor();
+    HTableDescriptor descriptor = this.cache.remove(tablename);
+    if (descriptor == null) {
+      return null;
+    } else {
+      return descriptor;
+    }
   }
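getAll() gains a fast path: once one full filesystem scan has seen every table (fsvisited), later calls with the cache enabled are served entirely from memory, with hbase:meta appended explicitly because it is never cached. A reduced sketch of that two-path shape, not part of the patch and not the HBase types; listTables/loadOne stand in for FSUtils.getTableDirs() and FSTableDescriptors.get():

import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.function.Supplier;

final class ScanOnceDirectory {
  private final Map<String, String> cache = new ConcurrentHashMap<>();
  private volatile boolean fsvisited = false;

  Map<String, String> getAll(Supplier<List<String>> listTables, Function<String, String> loadOne) {
    Map<String, String> out = new TreeMap<>();
    if (fsvisited) {
      out.putAll(cache);                        // every table was seen before: skip the filesystem
      out.put("hbase:meta", "META_DESCRIPTOR"); // meta is added explicitly, it never lands in the cache
      return out;
    }
    boolean allvisited = true;
    for (String table : listTables.get()) {
      String desc = loadOne.apply(table);       // may populate the cache as a side effect
      if (desc == null) {
        allvisited = false;                     // one unreadable table keeps the fast path disabled
        continue;
      }
      out.put(table, desc);
      cache.put(table, desc);
    }
    fsvisited = allvisited;                     // in the real code this also requires usecache
    return out;
  }
}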
 
  /**
@@ -437,7 +452,6 @@
   }
 
   /**
-   * @param tabledir
    * @param sequenceid
    * @return Name of tableinfo file.
    */
@@ -446,25 +460,12 @@
   }
 
   /**
-   * @param fs
-   * @param rootdir
-   * @param tableName
-   * @return Modification time for the table {@link #TABLEINFO_FILE_PREFIX} file
-   * or <code>0</code> if no tableinfo file found.
-   * @throws IOException
-   */
-  private long getTableInfoModtime(final TableName tableName) throws IOException {
-    FileStatus status = getTableInfoPath(tableName);
-    return status == null ? 0 : status.getModificationTime();
-  }
-
-  /**
    * Returns the latest table descriptor for the given table directly from the file system
    * if it exists, bypassing the local cache.
    * Returns null if it's not found.
    */
   public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
-    Path hbaseRootDir, TableName tableName) throws IOException {
+      Path hbaseRootDir, TableName tableName) throws IOException {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
   }
 
@@ -474,43 +475,34 @@
    * directly from the file system if it exists.
    * @throws TableInfoMissingException if there is no descriptor
    */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
-    throws IOException {
-    FileStatus status = getTableInfoPath(fs, tableDir, false);
-    if (status == null) {
-      throw new TableInfoMissingException("No table descriptor file under " + tableDir);
-    }
-    return readTableDescriptor(fs, status, false);
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
+      Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException {
+    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
+    return getTableDescriptorFromFs(fs, tableDir, rewritePb);
   }
 
-  /**
-   * @param tableName table name
-   * @return TableDescriptorAndModtime or null if no table descriptor was found
-   * @throws IOException
+  /**
+   * Returns the latest table descriptor for the table located at the given directory
+   * directly from the file system if it exists.
+   * @throws TableInfoMissingException if there is no descriptor
    */
-  private TableDescriptorAndModtime getTableDescriptorAndModtime(TableName tableName)
-    throws IOException {
-    // ignore both -ROOT- and hbase:meta tables
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      return null;
-    }
-    return getTableDescriptorAndModtime(getTableDir(tableName));
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+    throws IOException {
+    return getTableDescriptorFromFs(fs, tableDir, false);
   }
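For callers that must see exactly what is on disk, the static getTableDescriptorFromFs overloads bypass the instance cache entirely; the new rewritePb flag controls whether an old-format file is rewritten as protobuf while it is being read. A hedged usage sketch, not part of the patch (the class, configuration and table name are placeholders; the static overload is the one added above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;

public class ReadDescriptorFromFs {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    TableName table = TableName.valueOf("example_table");   // placeholder table name

    // Read-only peek at the descriptor file; never touches any in-memory cache and,
    // with rewritePb=false, never rewrites the file on disk.
    HTableDescriptor htd =
        FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table, false);
    System.out.println(htd);
  }
}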
 
  /**
-   * @param tableDir path to table directory
-   * @return TableDescriptorAndModtime or null if no table descriptor was found
-   * at the specified path
-   * @throws IOException
+   * Returns the latest table descriptor for the table located at the given directory
+   * directly from the file system if it exists.
+   * @throws TableInfoMissingException if there is no descriptor
    */
-  private TableDescriptorAndModtime getTableDescriptorAndModtime(Path tableDir)
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
+      boolean rewritePb)
   throws IOException {
-    FileStatus status = getTableInfoPath(tableDir);
+    FileStatus status = getTableInfoPath(fs, tableDir, false);
     if (status == null) {
-      return null;
+      throw new TableInfoMissingException("No table descriptor file under " + tableDir);
     }
-    HTableDescriptor htd = readTableDescriptor(fs, status, !fsreadonly);
-    return new TableDescriptorAndModtime(status.getModificationTime(), htd);
+    return readTableDescriptor(fs, status, rewritePb);
   }
 
   private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
@@ -527,17 +519,32 @@
     try {
       htd = HTableDescriptor.parseFrom(content);
     } catch (DeserializationException e) {
-      throw new IOException("content=" + Bytes.toShort(content), e);
+      // we have old HTableDescriptor here
+      try {
+        HTableDescriptor ohtd = HTableDescriptor.parseFrom(content);
+        LOG.warn("Found old table descriptor, converting to new format for table " +
+          ohtd.getTableName());
+        htd = new HTableDescriptor(ohtd);
+        if (rewritePb) rewriteTableDescriptor(fs, status, htd);
+      } catch (DeserializationException e1) {
+        throw new IOException("content=" + Bytes.toShort(content), e1);
+      }
     }
     if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
       // Convert the file over to be pb before leaving here.
-      Path tableInfoDir = status.getPath().getParent();
-      Path tableDir = tableInfoDir.getParent();
-      writeTableDescriptor(fs, htd, tableDir, status);
+      rewriteTableDescriptor(fs, status, htd);
     }
     return htd;
   }
 
+  private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status,
+    final HTableDescriptor td)
+  throws IOException {
+    Path tableInfoDir = status.getPath().getParent();
+    Path tableDir = tableInfoDir.getParent();
+    writeTableDescriptor(fs, td, tableDir, status);
+  }
+
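readTableDescriptor() now follows a read-repair style: try the current (protobuf) encoding first, fall back to the legacy encoding, and optionally rewrite the file in the new format so the fallback cost is paid only once. A generic sketch of that pattern, not part of the patch and using plain Java instead of the HBase serialization types; parseNew/parseOld/encodeNew stand in for the protobuf parse, the legacy parse and the protobuf writer:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

final class ReadRepair {
  static String readAndMaybeUpgrade(Path file, boolean rewrite) throws IOException {
    byte[] content = Files.readAllBytes(file);
    String record;
    try {
      record = parseNew(content);                 // common case: already in the new format
    } catch (IllegalArgumentException notNewFormat) {
      record = parseOld(content);                 // legacy file: convert in memory
      if (rewrite) {
        Files.write(file, encodeNew(record));     // upgrade on disk so the next read takes the fast path
      }
    }
    return record;
  }

  private static String parseNew(byte[] b) {
    String s = new String(b, StandardCharsets.UTF_8);
    if (!s.startsWith("v2:")) throw new IllegalArgumentException("not v2");
    return s.substring(3);
  }

  private static String parseOld(byte[] b) {
    return new String(b, StandardCharsets.UTF_8);
  }

  private static byte[] encodeNew(String record) {
    return ("v2:" + record).getBytes(StandardCharsets.UTF_8);
  }
}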
  /**
   * Update table descriptor on the file system
   * @throws IOException Thrown if failed update.
@@ -552,6 +559,9 @@
     Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
+    if (usecache) {
+      this.cache.put(htd.getTableName(), htd);
+    }
     return p;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/431b8a53/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 288d115..b623c70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -324,6 +324,13 @@
           // TODO Auto-generated method stub
         }
 
+        @Override
+        public void setCacheOn() throws IOException {
+        }
+
+        @Override
+        public void setCacheOff() throws IOException {
+        }
       };
     }
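updateTableDescriptor() becomes the write path that keeps the cache coherent: after the new tableinfo file is written, the in-memory entry is replaced, so the next get() sees the new schema without another filesystem read. (Stubs such as the one in TestCatalogJanitor only need no-op overrides because they never cache.) The write-through shape, as a generic sketch that is not part of the patch:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class WriteThroughCache {
  private final Map<String, String> cache = new ConcurrentHashMap<>();
  private volatile boolean usecache = true;

  // Mirrors updateTableDescriptor(): persist first, then refresh the cached copy.
  void update(String table, String descriptor, Runnable persistToFs) {
    persistToFs.run();                 // write the new tableinfo file (may throw in the real code)
    if (usecache) {
      cache.put(table, descriptor);    // readers immediately see the updated descriptor
    }
  }

  String cached(String table) {
    return cache.get(table);
  }
}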
http://git-wip-us.apache.org/repos/asf/hbase/blob/431b8a53/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index cbe8016..9e11413 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -28,6 +28,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Comparator;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -53,6 +54,7 @@ import org.junit.experimental.categories.Category;
 @Category(MediumTests.class)
 public class TestFSTableDescriptors {
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static final Log LOG = LogFactory.getLog(TestFSTableDescriptors.class);
 
   @Test (expected=IllegalArgumentException.class)
@@ -73,8 +75,8 @@
     FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
     assertTrue(fstd.createTableDescriptor(htd));
     assertFalse(fstd.createTableDescriptor(htd));
-    FileStatus [] statuses = fs.listStatus(testdir);
-    assertTrue("statuses.length="+statuses.length, statuses.length == 1);
+    FileStatus[] statuses = fs.listStatus(testdir);
+    assertTrue("statuses.length=" + statuses.length, statuses.length == 1);
     for (int i = 0; i < 10; i++) {
       fstd.updateTableDescriptor(htd);
     }
@@ -114,8 +116,7 @@
     for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
       sb.append("0");
     }
-    assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(),
-      p0.getName());
+    assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(), p0.getName());
     // Check a few more.
     Path p2 = assertWriteAndReadSequenceId(2);
     Path p10000 = assertWriteAndReadSequenceId(10000);
@@ -151,7 +152,9 @@
     assertNull(htds.remove(htd.getTableName()));
   }
 
-  @Test public void testReadingHTDFromFS() throws IOException {
+  @Test
+  public void testReadingHTDFromFS()
+  throws IOException {
     final String name = "testReadingHTDFromFS";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
@@ -163,20 +166,14 @@
     assertTrue(htd.equals(htd2));
   }
 
-  @Test public void testHTableDescriptors()
+  @Test
+  public void testHTableDescriptors()
   throws IOException, InterruptedException {
     final String name = "testHTableDescriptors";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any debris laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
-      @Override
-      public HTableDescriptor get(TableName tablename)
-      throws TableExistsException, FileNotFoundException, IOException {
-        LOG.info(tablename + ", cachehits=" + this.cachehits);
-        return super.get(tablename);
-      }
-    };
+    FSTableDescriptors htds = new FSTableDescriptorsTest(fs, rootdir);
     final int count = 10;
     // Write out table infos.
     for (int i = 0; i < count; i++) {
@@ -185,10 +182,10 @@
     }
 
     for (int i = 0; i < count; i++) {
-      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
+      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
     }
     for (int i = 0; i < count; i++) {
-      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
+      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
     }
     // Update the table infos
     for (int i = 0; i < count; i++) {
@@ -199,14 +196,117 @@
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
     for (int i = 0; i < count; i++) {
-      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
+      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
     }
     for (int i = 0; i < count; i++) {
-      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
+      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
     }
     assertEquals(count * 4, htds.invocations);
     assertTrue("expected=" + (count * 2) + ", actual=" + htds.cachehits,
-      htds.cachehits >= (count * 2));
+      htds.cachehits >= (count * 2));
+  }
+
+  @Test
+  public void testHTableDescriptorsNoCache()
+    throws IOException, InterruptedException {
+    final String name = "testHTableDescriptorsNoCache";
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    // Cleanup old tests if any debris laying around.
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
+    FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir,
+      false, false);
+    final int count = 10;
+    // Write out table infos.
+    for (int i = 0; i < count; i++) {
+      HTableDescriptor htd = new HTableDescriptor(name + i);
+      htds.createTableDescriptor(htd);
+    }
+
+    for (int i = 0; i < count; i++) {
+      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
+    }
+    for (int i = 0; i < count; i++) {
+      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
+    }
+    // Update the table infos
+    for (int i = 0; i < count; i++) {
+      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
+      htd.addFamily(new HColumnDescriptor("" + i));
+      htds.updateTableDescriptor(htd);
+    }
+    // Wait a while so mod time we write is for sure different.
+    Thread.sleep(100);
+    for (int i = 0; i < count; i++) {
+      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
+    }
+    for (int i = 0; i < count; i++) {
+      assertTrue(htds.get(TableName.valueOf(name + i)) != null);
+    }
+    assertEquals(count * 4, htds.invocations);
+    assertTrue("expected=0, actual=" + htds.cachehits, htds.cachehits == 0);
+  }
+
+  @Test
+  public void testGetAll()
+    throws IOException, InterruptedException {
+    final String name = "testGetAll";
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    // Cleanup old tests if any debris laying around.
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
+    FSTableDescriptors htds = new FSTableDescriptorsTest(fs, rootdir);
+    final int count = 4;
+    // Write out table infos.
+    for (int i = 0; i < count; i++) {
+      HTableDescriptor htd = new HTableDescriptor(name + i);
+      htds.createTableDescriptor(htd);
+    }
+    // add hbase:meta
+    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
+    htds.createTableDescriptor(htd);
+
+    assertTrue(htds.getAll().size() == count + 1);
+
+  }
+
+  @Test
+  public void testCacheConsistency()
+    throws IOException, InterruptedException {
+    final String name = "testCacheConsistency";
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    // Cleanup old tests if any debris laying around.
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
+    FSTableDescriptors chtds = new FSTableDescriptorsTest(fs, rootdir);
+    FSTableDescriptors nonchtds = new FSTableDescriptorsTest(fs,
+      rootdir, false, false);
+
+    final int count = 10;
+    // Write out table infos via non-cached FSTableDescriptors
+    for (int i = 0; i < count; i++) {
+      HTableDescriptor htd = new HTableDescriptor(name + i);
+      nonchtds.createTableDescriptor(htd);
+    }
+
+    // Calls to getAll() won't increase the cache counter, do per table.
+    for (int i = 0; i < count; i++) {
+      assertTrue(chtds.get(TableName.valueOf(name + i)) != null);
+    }
+
+    assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
+
+    // add a new entry for hbase:meta
+    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
+    nonchtds.createTableDescriptor(htd);
+
+    // hbase:meta will only increase the cachehit by 1
+    assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
+
+    for (Map.Entry entry : nonchtds.getAll().entrySet()) {
+      String t = (String) entry.getKey();
+      HTableDescriptor nchtd = (HTableDescriptor) entry.getValue();
+      assertTrue("expected " + htd.toString() +
+        " got: " +
+        chtds.get(TableName.valueOf(t)).toString(),
+        (nchtd.equals(chtds.get(TableName.valueOf(t)))));
+    }
+  }
 
   @Test
@@ -216,8 +316,7 @@
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
-    assertNull("There shouldn't be any HTD for this table",
-      htds.get(TableName.valueOf("NoSuchTable")));
+    assertNull("There shouldn't be any HTD for this table", htds.get(TableName.valueOf("NoSuchTable")));
   }
 
   @Test
@@ -235,18 +334,18 @@
   }
 
   @Test
   public void testTableInfoFileStatusComparator() {
-    FileStatus bare =
-      new FileStatus(0, false, 0, 0, -1,
+    FileStatus bare = new FileStatus(
+      0, false, 0, 0, -1,
         new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX));
-    FileStatus future =
-      new FileStatus(0, false, 0, 0, -1,
+    FileStatus future = new FileStatus(
+      0, false, 0, 0, -1,
        new Path("/tmp/tablinfo." + System.currentTimeMillis()));
-    FileStatus farFuture =
-      new FileStatus(0, false, 0, 0, -1,
+    FileStatus farFuture = new FileStatus(
+      0, false, 0, 0, -1,
        new Path("/tmp/tablinfo." + System.currentTimeMillis() + 1000));
-    FileStatus [] alist = {bare, future, farFuture};
-    FileStatus [] blist = {bare, farFuture, future};
-    FileStatus [] clist = {farFuture, bare, future};
+    FileStatus[] alist = {bare, future, farFuture};
+    FileStatus[] blist = {bare, farFuture, future};
+    FileStatus[] clist = {farFuture, bare, future};
     Comparator<FileStatus> c = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
     Arrays.sort(alist, c);
     Arrays.sort(blist, c);
@@ -255,7 +354,7 @@
     for (int i = 0; i < alist.length; i++) {
       assertTrue(alist[i].equals(blist[i]));
       assertTrue(blist[i].equals(clist[i]));
-      assertTrue(clist[i].equals(i == 0? farFuture: i == 1? future: bare));
+      assertTrue(clist[i].equals(i == 0 ? farFuture : i == 1 ? future : bare));
     }
   }
 
@@ -292,5 +391,26 @@
     assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
   }
 
+  private static class FSTableDescriptorsTest
+    extends FSTableDescriptors {
+
+    public FSTableDescriptorsTest(FileSystem fs, Path rootdir)
+      throws IOException {
+      this(fs, rootdir, false, true);
+    }
+
+    public FSTableDescriptorsTest(FileSystem fs, Path rootdir, boolean fsreadonly, boolean usecache)
+      throws IOException {
+      super(fs, rootdir, fsreadonly, usecache);
+    }
+
+    @Override
+    public HTableDescriptor get(TableName tablename)
+      throws TableExistsException, FileNotFoundException, IOException {
+      LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") +
+        " HTableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
+      return super.get(tablename);
+    }
+  }
 }
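The cachehits and invocations counters are package-visible (@VisibleForTesting), which is what the new tests lean on to prove the cache is actually being used. A condensed standalone sketch of the same check, not part of the patch (the class and test directory name are placeholders; it must live in org.apache.hadoop.hbase.util to see the counters, just like the test above):

package org.apache.hadoop.hbase.util;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class CacheHitCheck {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    FileSystem fs = FileSystem.get(util.getConfiguration());
    Path rootdir = new Path(util.getDataTestDir(), "cacheHitCheck");

    FSTableDescriptors cached = new FSTableDescriptors(fs, rootdir, false, true);
    cached.createTableDescriptor(new HTableDescriptor(TableName.valueOf("t1")));

    cached.get(TableName.valueOf("t1"));   // miss: read from the filesystem, then cached
    cached.get(TableName.valueOf("t1"));   // hit: served from memory
    System.out.println("invocations=" + cached.invocations + ", cachehits=" + cached.cachehits);
  }
}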