Author: rawson
Date: Sat Jun  6 01:26:21 2009
New Revision: 782178

URL: http://svn.apache.org/viewvc?rev=782178&view=rev
Log:
HBASE-1304 - New client server implementation of how gets and puts are handled. -- Thanks to jgray, holstad, stack, rawson
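As context for the patch, here is a minimal sketch of the new client API this change introduces, assuming a hypothetical table "mytable" with a single family "colfam". Put/Delete/Get/Scan and Result/ResultScanner replace the old BatchUpdate/Cell/RowResult/Scanner types; the calls mirror the usage visible in the HMerge diff further down:

    HBaseConfiguration conf = new HBaseConfiguration();
    HTable table = new HTable(conf, "mytable");

    // Write: a Put replaces BatchUpdate.
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("colfam"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
    table.put(put);

    // Read a single row: Get/Result replace the old Cell/RowResult pair.
    Get get = new Get(Bytes.toBytes("row1"));
    get.addColumn(Bytes.toBytes("colfam"), Bytes.toBytes("qual"));
    Result result = table.get(get);
    byte [] value = result.getValue(Bytes.toBytes("colfam"), Bytes.toBytes("qual"));

    // Scan: ResultScanner replaces the old Scanner interface.
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("colfam"));
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        // process r
      }
    } finally {
      scanner.close();
    }

    // Delete: a Delete object replaces HTable.deleteAll().
    Delete delete = new Delete(Bytes.toBytes("row1"));
    table.delete(delete);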
Added:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/Delete.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/Get.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/Put.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/Result.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/ResultScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/Scan.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/Filter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowInclusiveStopFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowPrefixFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowWhileMatchFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/TimeRange.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/MinorCompactingStoreScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/KeyValueTestUtil.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestClient.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGet.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHBaseAdmin.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIGetRowVersions.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPIHTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestOldAPITimestamp.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestPut.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestPageFilter.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowPrefixFilter.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableIndex.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/DisabledTestTableMapReduce.java   (contents, props changed)
      - copied, changed from r782160, hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestMemcache.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestMinorCompactingStoreScanner.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/thrift/DisabledTestThriftServer.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/DisabledTestMergeTool.java

Removed:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/tableindexed/IndexKeyGenerator.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/tableindexed/IndexNotFoundException.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/tableindexed/IndexSpecification.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/tableindexed/IndexedTable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/tableindexed/IndexedTableAdmin.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/tableindexed/ReverseByteArrayComparator.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/tableindexed/SimpleIndexKeyGenerator.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/tableindexed/package.html
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/CommitUnsuccessfulException.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/LocalTransactionLogger.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/TransactionLogger.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/TransactionScannerCallable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/TransactionState.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/TransactionalTable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/UnknownTransactionException.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/package.html
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexMaintenanceUtils.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegionServer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionState.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/tableindexed/TestIndexedTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/transactional/DisabledTestTransactions.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/transactional/StressTestTransactions.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestPageRowFilter.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestAtomicIncrement.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestBloomFilters.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteAll.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteFamily.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/thrift/TestThriftServer.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java

Modified:
    hadoop/hbase/trunk/bin/HBase.rb
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/RegionHistorian.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/WritableComparator.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTablePool.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/Scanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/ScannerCallable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/transactional/TransactionManager.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/package-info.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchOperation.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HeapSize.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/TransactionalRegionInterface.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/MemcacheFlusher.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/CleanOldTransactionsChore.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/RowController.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/RowModel.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableModel.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Merge.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DFSAbort.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestKeyValue.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestSerialization.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestZooKeeper.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TimestampTestBase.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestGetRowVersions.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestListTables.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/transactional/DisabledTestHLogRecovery.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/transactional/DisabledTestTransactionalHLogManager.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestRootPath.java
    hadoop/hbase/trunk/src/webapps/master/master.jsp
    hadoop/hbase/trunk/src/webapps/master/table.jsp

Modified: hadoop/hbase/trunk/bin/HBase.rb
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/bin/HBase.rb?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/bin/HBase.rb (original)
+++ hadoop/hbase/trunk/bin/HBase.rb Sat Jun  6 01:26:21 2009
@@ -311,7 +311,6 @@
       arg[IN_MEMORY]? JBoolean.valueOf(arg[IN_MEMORY]): HColumnDescriptor::DEFAULT_IN_MEMORY,
       arg[HColumnDescriptor::BLOCKCACHE]? JBoolean.valueOf(arg[HColumnDescriptor::BLOCKCACHE]): HColumnDescriptor::DEFAULT_BLOCKCACHE,
       arg[HColumnDescriptor::BLOCKSIZE]? JInteger.valueOf(arg[HColumnDescriptor::BLOCKSIZE]): HColumnDescriptor::DEFAULT_BLOCKSIZE,
-      arg[HColumnDescriptor::LENGTH]? JInteger.new(arg[HColumnDescriptor::LENGTH]): HColumnDescriptor::DEFAULT_LENGTH,
       arg[HColumnDescriptor::TTL]? JInteger.new(arg[HColumnDescriptor::TTL]): HColumnDescriptor::DEFAULT_TTL,
       arg[HColumnDescriptor::BLOOMFILTER]? JBoolean.valueOf(arg[HColumnDescriptor::BLOOMFILTER]): HColumnDescriptor::DEFAULT_BLOOMFILTER)
     end

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java Sat Jun  6 01:26:21 2009
@@ -26,6 +26,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -93,20 +94,6 @@
    */
   public static final int DEFAULT_VERSIONS = 3;
 
-  /**
-   * Default maximum cell length.
-   */
-  public static final int DEFAULT_LENGTH = Integer.MAX_VALUE;
-  /** Default maximum cell length as an Integer. */
-  public static final Integer DEFAULT_LENGTH_INTEGER =
-    Integer.valueOf(DEFAULT_LENGTH);
-
-  /*
-   * Cache here the HCD value.
-   * Question: its OK to cache since when we're reenable, we create a new HCD?
-   */
-  private volatile Integer maxValueLength = null;
-
   /*
    * Cache here the HCD value.
    * Question: its OK to cache since when we're reenable, we create a new HCD?
@@ -180,7 +167,7 @@
     this (familyName == null || familyName.length <= 0?
       HConstants.EMPTY_BYTE_ARRAY: familyName,
       DEFAULT_VERSIONS, DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
-      Integer.MAX_VALUE, DEFAULT_TTL, false);
+      DEFAULT_TTL, false);
   }
 
@@ -219,13 +206,45 @@
    */
   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
       final String compression, final boolean inMemory,
-      final boolean blockCacheEnabled, final int maxValueLength,
+      final boolean blockCacheEnabled,
       final int timeToLive, final boolean bloomFilter) {
     this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
-      DEFAULT_BLOCKSIZE, maxValueLength, timeToLive, bloomFilter);
+      DEFAULT_BLOCKSIZE, timeToLive, bloomFilter);
   }
 
   /**
+   * Backwards compatible Constructor. Maximum value length is no longer
+   * configurable.
+   *
+   * @param familyName Column family name. Must be 'printable' -- digit or
+   * letter -- and end in a <code>:<code>
+   * @param maxVersions Maximum number of versions to keep
+   * @param compression Compression type
+   * @param inMemory If true, column data should be kept in an HRegionServer's
+   * cache
+   * @param blockCacheEnabled If true, MapFile blocks should be cached
+   * @param blocksize
+   * @param maxValueLength Restrict values to <= this value (UNSUPPORTED)
+   * @param timeToLive Time-to-live of cell contents, in seconds
+   * (use HConstants.FOREVER for unlimited TTL)
+   * @param bloomFilter Enable the specified bloom filter for this column
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
+   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> and does not
+   * end in a <code>:</code>
+   * @throws IllegalArgumentException if the number of versions is <= 0
+   * @deprecated As of hbase 0.20.0, max value length no longer supported
+   */
+//  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
+//      final String compression, final boolean inMemory,
+//      final boolean blockCacheEnabled, final int blocksize,
+//      final int maxValueLength,
+//      final int timeToLive, final boolean bloomFilter) {
+//    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
+//      blocksize, timeToLive, bloomFilter);
+//  }
+
+  /**
    * Constructor
    * @param familyName Column family name. Must be 'printable' -- digit or
    * letter -- and end in a <code>:<code>
@@ -235,7 +254,6 @@
    * cache
    * @param blockCacheEnabled If true, MapFile blocks should be cached
    * @param blocksize
-   * @param maxValueLength Restrict values to <= this value
   * @param timeToLive Time-to-live of cell contents, in seconds
    * (use HConstants.FOREVER for unlimited TTL)
    * @param bloomFilter Enable the specified bloom filter for this column
@@ -248,10 +266,10 @@
   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
       final String compression, final boolean inMemory,
       final boolean blockCacheEnabled, final int blocksize,
-      final int maxValueLength,
       final int timeToLive, final boolean bloomFilter) {
-    isLegalFamilyName(familyName);
     this.name = stripColon(familyName);
+    isLegalFamilyName(this.name);
+
     if (maxVersions <= 0) {
       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
       // Until there is support, consider 0 or < 0 -- a configuration error.
@@ -260,7 +278,6 @@
     setMaxVersions(maxVersions);
     setInMemory(inMemory);
     setBlockCacheEnabled(blockCacheEnabled);
-    setMaxValueLength(maxValueLength);
     setTimeToLive(timeToLive);
     setCompressionType(Compression.Algorithm.
       valueOf(compression.toUpperCase()));
@@ -269,10 +286,14 @@
   }
 
   private static byte [] stripColon(final byte [] n) {
-    byte [] result = new byte [n.length - 1];
-    // Have the stored family name be absent the colon delimiter
-    System.arraycopy(n, 0, result, 0, n.length - 1);
-    return result;
+    byte col = n[n.length-1];
+    if (col == ':') {
+      // strip.
+      byte [] res = new byte[n.length-1];
+      System.arraycopy(n, 0, res, 0, n.length-1);
+      return res;
+    }
+    return n;
   }
 
   /**
@@ -287,18 +308,14 @@
     if (b == null) {
       return b;
     }
-    if (b[b.length - 1] != ':') {
-      throw new IllegalArgumentException("Family names must end in a colon: " +
-        Bytes.toString(b));
-    }
     if (b[0] == '.') {
       throw new IllegalArgumentException("Family names cannot start with a " +
         "period: " + Bytes.toString(b));
     }
     for (int i = 0; i < (b.length - 1); i++) {
-      if (Character.isISOControl(b[i])) {
+      if (Character.isISOControl(b[i]) || b[i] == ':') {
         throw new IllegalArgumentException("Illegal character <" + b[i] +
-          ">. Family names cannot contain control characters: " +
+          ">. Family names cannot contain control characters or colons: " +
           Bytes.toString(b));
       }
     }
@@ -317,7 +334,7 @@
    */
   @TOJSON(fieldName = "name", base64=true)
   public byte [] getNameWithColon() {
-    return HStoreKey.addDelimiter(this.name);
+    return Bytes.add(this.name, new byte[]{':'});
   }
 
   /**
@@ -463,27 +480,6 @@
   }
 
   /**
-   * @return Maximum value length.
-   */
-  @TOJSON
-  public synchronized int getMaxValueLength() {
-    if (this.maxValueLength == null) {
-      String value = getValue(LENGTH);
-      this.maxValueLength = (value != null)?
-        Integer.decode(value): DEFAULT_LENGTH_INTEGER;
-    }
-    return this.maxValueLength.intValue();
-  }
-
-  /**
-   * @param maxLength Maximum value length.
-   */
-  public void setMaxValueLength(int maxLength) {
-    setValue(LENGTH, Integer.toString(maxLength));
-    this.maxValueLength = null;
-  }
-
-  /**
    * @return Time-to-live of cell contents, in seconds.
    */
   @TOJSON
@@ -609,9 +605,10 @@
       Text t = new Text();
       t.readFields(in);
       this.name = t.getBytes();
-      if (HStoreKey.getFamilyDelimiterIndex(this.name) > 0) {
-        this.name = stripColon(this.name);
-      }
+//      if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
+//          > 0) {
+//        this.name = stripColon(this.name);
+//      }
     } else {
       this.name = Bytes.readByteArray(in);
     }
@@ -620,7 +617,6 @@
     int ordinal = in.readInt();
     setCompressionType(Compression.Algorithm.values()[ordinal]);
     setInMemory(in.readBoolean());
-    setMaxValueLength(in.readInt());
    setBloomfilter(in.readBoolean());
     if (isBloomfilter() && version < 5) {
       // If a bloomFilter is enabled and the column descriptor is less than
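With the maxValueLength parameter gone and the trailing ':' no longer required on family names, constructing a descriptor now looks roughly like the sketch below. The family name "colfam" is hypothetical; the argument order follows the new eight-argument constructor in the hunk above:

    HColumnDescriptor family = new HColumnDescriptor(
        Bytes.toBytes("colfam"),               // no trailing ':' needed any more
        HColumnDescriptor.DEFAULT_VERSIONS,    // max versions
        Compression.Algorithm.NONE.getName(),  // compression
        false,                                 // inMemory
        true,                                  // blockCacheEnabled
        HColumnDescriptor.DEFAULT_BLOCKSIZE,   // blocksize
        HConstants.FOREVER,                    // timeToLive (maxValueLength is gone)
        false);                                // bloomFilter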
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java Sat Jun  6 01:26:21 2009
@@ -136,6 +136,9 @@
    * when log splitting. More means faster but bigger mem consumption */
   static final int DEFAULT_NUMBER_CONCURRENT_LOG_READS = 10;
 
+  /** Maximum value length, enforced on KeyValue construction */
+  static final int MAXIMUM_VALUE_LENGTH = Integer.MAX_VALUE;
+
   // Always store the location of the root table's HRegion.
   // This HRegion is never split.
@@ -156,6 +159,11 @@
   // be the first to be reassigned if the server(s) they are being served by
   // should go down.
 
+
+  //
+  // New stuff. Making a slow transition.
+  //
+
   /** The root table's name.*/
   static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
@@ -165,48 +173,30 @@
   /** delimiter used between portions of a region name */
   public static final int META_ROW_DELIMITER = ',';
 
-  // Defines for the column names used in both ROOT and META HBase 'meta' tables.
+  /** The catalog family as a string*/
+  static final String CATALOG_FAMILY_STR = "info";
 
-  /** The ROOT and META column family (string) */
-  static final String COLUMN_FAMILY_STR = "info:";
+  /** The catalog family */
+  static final byte [] CATALOG_FAMILY = Bytes.toBytes(CATALOG_FAMILY_STR);
 
-  /** The META historian column family (string) */
-  static final String COLUMN_FAMILY_HISTORIAN_STR = "historian:";
-
-  /** The ROOT and META column family */
-  static final byte [] COLUMN_FAMILY = Bytes.toBytes(COLUMN_FAMILY_STR);
+  /** The catalog historian family */
+  static final byte [] CATALOG_HISTORIAN_FAMILY = Bytes.toBytes("historian");
 
-  /** The META historian column family */
-  static final byte [] COLUMN_FAMILY_HISTORIAN = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR);
-
-  /** Array of meta column names */
-  static final byte[][] COLUMN_FAMILY_ARRAY = new byte[][] {COLUMN_FAMILY};
+  /** The regioninfo column qualifier */
+  static final byte [] REGIONINFO_QUALIFIER = Bytes.toBytes("regioninfo");
+
+  /** The server column qualifier */
+  static final byte [] SERVER_QUALIFIER = Bytes.toBytes("server");
 
-  /** ROOT/META column family member - contains HRegionInfo */
-  static final byte [] COL_REGIONINFO =
-    Bytes.toBytes(COLUMN_FAMILY_STR + "regioninfo");
-
-  /** Array of column - contains HRegionInfo */
-  static final byte[][] COL_REGIONINFO_ARRAY = new byte[][] {COL_REGIONINFO};
-
-  /** ROOT/META column family member - contains HServerAddress.toString() */
-  static final byte[] COL_SERVER = Bytes.toBytes(COLUMN_FAMILY_STR + "server");
+  /** The startcode column qualifier */
+  static final byte [] STARTCODE_QUALIFIER = Bytes.toBytes("serverstartcode");
 
-  /** ROOT/META column family member - contains server start code (a long) */
-  static final byte [] COL_STARTCODE =
-    Bytes.toBytes(COLUMN_FAMILY_STR + "serverstartcode");
-
-  /** the lower half of a split region */
-  static final byte [] COL_SPLITA = Bytes.toBytes(COLUMN_FAMILY_STR + "splitA");
+  /** The lower-half split region column qualifier */
+  static final byte [] SPLITA_QUALIFIER = Bytes.toBytes("splitA");
 
-  /** the upper half of a split region */
-  static final byte [] COL_SPLITB = Bytes.toBytes(COLUMN_FAMILY_STR + "splitB");
+  /** The upper-half split region column qualifier */
+  static final byte [] SPLITB_QUALIFIER = Bytes.toBytes("splitB");
 
-  /** All the columns in the catalog -ROOT- and .META. tables.
-   */
-  static final byte[][] ALL_META_COLUMNS = {COL_REGIONINFO, COL_SERVER,
-    COL_STARTCODE, COL_SPLITA, COL_SPLITB};
-
   // Other constants
 
@@ -246,6 +236,11 @@
   static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
 
   /**
+   * LATEST_TIMESTAMP in bytes form
+   */
+  static final byte [] LATEST_TIMESTAMP_BYTES = Bytes.toBytes(LATEST_TIMESTAMP);
+
+  /**
    * Define for 'return-all-versions'.
    */
   static final int ALL_VERSIONS = Integer.MAX_VALUE;
@@ -253,8 +248,12 @@
   /**
    * Unlimited time-to-live.
    */
-  static final int FOREVER = -1;
+//  static final int FOREVER = -1;
+  static final int FOREVER = Integer.MAX_VALUE;
 
+  /**
+   * Seconds in a week
+   */
   public static final int WEEK_IN_SECONDS = 7 * 24 * 3600;
 
   //TODO: HBASE_CLIENT_RETRIES_NUMBER_KEY is only used by TestMigrate. Move it
@@ -277,15 +276,12 @@
   public static int RETRY_BACKOFF[] = { 1, 1, 1, 2, 2, 4, 4, 8, 16, 32 };
 
   /** modifyTable op for replacing the table descriptor */
-  public static final int MODIFY_TABLE_SET_HTD = 1;
-  /** modifyTable op for forcing a split */
-  public static final int MODIFY_TABLE_SPLIT = 2;
-  /** modifyTable op for forcing a compaction */
-  public static final int MODIFY_TABLE_COMPACT = 3;
-
-  // Messages client can send master.
-  public static final int MODIFY_CLOSE_REGION = MODIFY_TABLE_COMPACT + 1;
-
-  public static final int MODIFY_TABLE_FLUSH = MODIFY_CLOSE_REGION + 1;
-  public static final int MODIFY_TABLE_MAJOR_COMPACT = MODIFY_TABLE_FLUSH + 1;
+  public static enum Modify {
+    CLOSE_REGION,
+    TABLE_COMPACT,
+    TABLE_FLUSH,
+    TABLE_MAJOR_COMPACT,
+    TABLE_SET_HTD,
+    TABLE_SPLIT
+  }
 }
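A short sketch of what the HConstants rename means for callers: the fused "family:qualifier" byte arrays give way to separate family and qualifier constants, and the loose modifyTable int opcodes become an enum. The meta row key below is hypothetical:

    // Before: one fused column name, e.g. "info:regioninfo".
    //   byte [] col = HConstants.COL_REGIONINFO;

    // After: family and qualifier are addressed separately.
    Get get = new Get(Bytes.toBytes(".META.,,1"));
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);

    // Table-modification opcodes are now type-safe.
    HConstants.Modify op = HConstants.Modify.TABLE_SPLIT;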
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java Sat Jun  6 01:26:21 2009
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2009 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -29,13 +29,14 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -193,7 +194,7 @@
   private static class OnlineMerger extends Merger {
     private final byte [] tableName;
     private final HTable table;
-    private final Scanner metaScanner;
+    private final ResultScanner metaScanner;
     private HRegionInfo latestRegion;
 
     OnlineMerger(HBaseConfiguration conf, FileSystem fs,
@@ -202,22 +203,23 @@
       super(conf, fs, tableName);
       this.tableName = tableName;
       this.table = new HTable(conf, META_TABLE_NAME);
-      this.metaScanner = table.getScanner(COL_REGIONINFO_ARRAY, tableName);
+      this.metaScanner = table.getScanner(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
       this.latestRegion = null;
     }
 
     private HRegionInfo nextRegion() throws IOException {
       try {
-        RowResult results = getMetaRow();
+        Result results = getMetaRow();
         if (results == null) {
           return null;
         }
-        Cell regionInfo = results.get(COL_REGIONINFO);
-        if (regionInfo == null || regionInfo.getValue().length == 0) {
+        byte [] regionInfoValue = results.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+        if (regionInfoValue == null || regionInfoValue.length == 0) {
           throw new NoSuchElementException("meta region entry missing " +
-            Bytes.toString(COL_REGIONINFO));
+            Bytes.toString(CATALOG_FAMILY) + ":" +
+            Bytes.toString(REGIONINFO_QUALIFIER));
         }
-        HRegionInfo region = Writables.getHRegionInfo(regionInfo.getValue());
+        HRegionInfo region = Writables.getHRegionInfo(regionInfoValue);
         if (!Bytes.equals(region.getTableDesc().getName(), this.tableName)) {
           return null;
         }
@@ -244,13 +246,13 @@
      * @return A Map of the row content else null if we are off the end.
      * @throws IOException
      */
-    private RowResult getMetaRow() throws IOException {
-      RowResult currentRow = metaScanner.next();
+    private Result getMetaRow() throws IOException {
+      Result currentRow = metaScanner.next();
       boolean foundResult = false;
       while (currentRow != null) {
         LOG.info("Row: <" + Bytes.toString(currentRow.getRow()) + ">");
-        Cell regionInfo = currentRow.get(COL_REGIONINFO);
-        if (regionInfo == null || regionInfo.getValue().length == 0) {
+        byte [] regionInfoValue = currentRow.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
+        if (regionInfoValue == null || regionInfoValue.length == 0) {
           currentRow = metaScanner.next();
           continue;
         }
@@ -286,17 +288,18 @@
         if(Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
           latestRegion = null;
         }
-        table.deleteAll(regionsToDelete[r]);
+        Delete delete = new Delete(regionsToDelete[r]);
+        table.delete(delete);
         if(LOG.isDebugEnabled()) {
           LOG.debug("updated columns in row: " + Bytes.toString(regionsToDelete[r]));
         }
       }
       newRegion.getRegionInfo().setOffline(true);
-      BatchUpdate update = new BatchUpdate(newRegion.getRegionName());
-      update.put(COL_REGIONINFO,
+      Put put = new Put(newRegion.getRegionName());
+      put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER,
         Writables.getBytes(newRegion.getRegionInfo()));
-      table.commit(update);
+      table.put(put);
       if(LOG.isDebugEnabled()) {
         LOG.debug("updated columns in row: "
@@ -325,9 +328,10 @@
         HRegionInfo.ROOT_REGIONINFO, null);
       root.initialize(null, null);
 
+      Scan scan = new Scan();
+      scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
       InternalScanner rootScanner =
-        root.getScanner(COL_REGIONINFO_ARRAY, HConstants.EMPTY_START_ROW,
-          HConstants.LATEST_TIMESTAMP, null);
+        root.getScanner(scan);
 
       try {
         List<KeyValue> results = new ArrayList<KeyValue>();
@@ -366,23 +370,29 @@
     throws IOException {
       byte[][] regionsToDelete = {oldRegion1, oldRegion2};
       for(int r = 0; r < regionsToDelete.length; r++) {
-        BatchUpdate b = new BatchUpdate(regionsToDelete[r]);
-        b.delete(COL_REGIONINFO);
-        b.delete(COL_SERVER);
-        b.delete(COL_STARTCODE);
-        b.delete(COL_SPLITA);
-        b.delete(COL_SPLITB);
-        root.batchUpdate(b,null);
-
+        Delete delete = new Delete(regionsToDelete[r]);
+        delete.deleteColumns(HConstants.CATALOG_FAMILY,
+          HConstants.REGIONINFO_QUALIFIER);
+        delete.deleteColumns(HConstants.CATALOG_FAMILY,
+          HConstants.SERVER_QUALIFIER);
+        delete.deleteColumns(HConstants.CATALOG_FAMILY,
+          HConstants.STARTCODE_QUALIFIER);
+        delete.deleteColumns(HConstants.CATALOG_FAMILY,
+          HConstants.SPLITA_QUALIFIER);
+        delete.deleteColumns(HConstants.CATALOG_FAMILY,
+          HConstants.SPLITB_QUALIFIER);
+        root.delete(delete, null, true);
+
        if(LOG.isDebugEnabled()) {
          LOG.debug("updated columns in row: " + Bytes.toString(regionsToDelete[r]));
        }
      }
      HRegionInfo newInfo = newRegion.getRegionInfo();
      newInfo.setOffline(true);
-      BatchUpdate b = new BatchUpdate(newRegion.getRegionName());
-      b.put(COL_REGIONINFO, Writables.getBytes(newInfo));
-      root.batchUpdate(b,null);
+      Put put = new Put(newRegion.getRegionName());
+      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        Writables.getBytes(newInfo));
+      root.put(put);
       if(LOG.isDebugEnabled()) {
         LOG.debug("updated columns in row: "
           + Bytes.toString(newRegion.getRegionName()));
       }
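The HMerge changes above are a compact before/after of the client migration. Pulled out of the diff, the old and new idioms for reading a catalog row look roughly like this, assuming an already-open HTable named table (names otherwise as in the patch):

    // Old API:
    //   Scanner s = table.getScanner(COL_REGIONINFO_ARRAY, tableName);
    //   RowResult row = s.next();
    //   Cell cell = row.get(COL_REGIONINFO);
    //   byte [] value = cell.getValue();

    // New API: one Result per row, value fetched by family + qualifier.
    ResultScanner s = table.getScanner(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    Result row = s.next();
    byte [] value = row.getValue(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    s.close();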
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=782178&r1=782177&r2=782178&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Sat Jun  6 01:26:21 2009
@@ -27,10 +27,11 @@
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.tableindexed.IndexSpecification;
+//import org.apache.hadoop.hbase.client.tableindexed.IndexSpecification;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
@@ -45,7 +46,8 @@
  * HTableDescriptor contains the name of an HTable, and its
  * column families.
  */
-public class HTableDescriptor implements WritableComparable<HTableDescriptor>, ISerializable {
+public class HTableDescriptor implements WritableComparable<HTableDescriptor>,
+ISerializable {
 
   // Changes prior to version 3 were not recorded here.
   // Version 3 adds metadata as a map where keys and values are byte[].
@@ -100,12 +102,14 @@
   private volatile Boolean root = null;
 
   // Key is hash of the family name.
-  private final Map<byte [], HColumnDescriptor> families =
+  public final Map<byte [], HColumnDescriptor> families =
     new TreeMap<byte [], HColumnDescriptor>(KeyValue.FAMILY_COMPARATOR);
-
+//  private final Map<byte [], HColumnDescriptor> families =
+//    new TreeMap<byte [], HColumnDescriptor>(KeyValue.FAMILY_COMPARATOR);
+
   // Key is indexId
-  private final Map<String, IndexSpecification> indexes =
-    new HashMap<String, IndexSpecification>();
+//  private final Map<String, IndexSpecification> indexes =
+//    new HashMap<String, IndexSpecification>();
 
   /**
    * Private constructor used internally creating table descriptors for
@@ -125,24 +129,38 @@
    * Private constructor used internally creating table descriptors for
    * catalog tables: e.g. .META. and -ROOT-.
    */
+//  protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families,
+//      Collection<IndexSpecification> indexes,
+//      Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
+//    this.name = name.clone();
+//    this.nameAsString = Bytes.toString(this.name);
+//    setMetaFlags(name);
+//    for(HColumnDescriptor descriptor : families) {
+//      this.families.put(descriptor.getName(), descriptor);
+//    }
+//    for(IndexSpecification index : indexes) {
+//      this.indexes.put(index.getIndexId(), index);
+//    }
+//    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
+//        values.entrySet()) {
+//      this.values.put(entry.getKey(), entry.getValue());
+//    }
+//  }
   protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families,
-      Collection<IndexSpecification> indexes,
-      Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
+      Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
     this.name = name.clone();
     this.nameAsString = Bytes.toString(this.name);
     setMetaFlags(name);
     for(HColumnDescriptor descriptor : families) {
       this.families.put(descriptor.getName(), descriptor);
     }
-    for(IndexSpecification index : indexes) {
-      this.indexes.put(index.getIndexId(), index);
-    }
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
         values.entrySet()) {
       this.values.put(entry.getKey(), entry.getValue());
     }
   }
-
+
+
   /**
    * Constructs an empty object.
    * For deserializing an HTableDescriptor instance only.
@@ -198,7 +216,7 @@
         desc.values.entrySet()) {
       this.values.put(e.getKey(), e.getValue());
     }
-    this.indexes.putAll(desc.indexes);
+//    this.indexes.putAll(desc.indexes);
   }
 
   /*
@@ -437,21 +455,17 @@
       Bytes.toBytes(Integer.toString(memcacheFlushSize)));
   }
 
-  public Collection<IndexSpecification> getIndexes() {
-    return indexes.values();
-  }
-
-  public IndexSpecification getIndex(String indexId) {
-    return indexes.get(indexId);
-  }
-
-  public void addIndex(IndexSpecification index) {
-    indexes.put(index.getIndexId(), index);
-  }
-
-  public void removeIndex(String indexId) {
-    indexes.remove(indexId);
-  }
+//  public Collection<IndexSpecification> getIndexes() {
+//    return indexes.values();
+//  }
+//
+//  public IndexSpecification getIndex(String indexId) {
+//    return indexes.get(indexId);
+//  }
+//
+//  public void addIndex(IndexSpecification index) {
+//    indexes.put(index.getIndexId(), index);
+//  }
 
   /**
    * Adds a column family.
@@ -510,13 +524,13 @@
     s.append(FAMILIES);
     s.append(" => ");
     s.append(families.values());
-    if (!indexes.isEmpty()) {
-      // Don't emit if empty. Has to do w/ transactional hbase.
-      s.append(", ");
-      s.append("INDEXES");
-      s.append(" => ");
-      s.append(indexes.values());
-    }
+//    if (!indexes.isEmpty()) {
+//      // Don't emit if empty. Has to do w/ transactional hbase.
+//      s.append(", ");
+//      s.append("INDEXES");
+//      s.append(" => ");
+//      s.append(indexes.values());
+//    }
     s.append('}');
     return s.toString();
   }
@@ -581,16 +595,16 @@
       c.readFields(in);
       families.put(c.getName(), c);
     }
-    indexes.clear();
+//    indexes.clear();
     if (version < 4) {
       return;
     }
-    int numIndexes = in.readInt();
-    for (int i = 0; i < numIndexes; i++) {
-      IndexSpecification index = new IndexSpecification();
-      index.readFields(in);
-      addIndex(index);
-    }
+//    int numIndexes = in.readInt();
+//    for (int i = 0; i < numIndexes; i++) {
+//      IndexSpecification index = new IndexSpecification();
+//      index.readFields(in);
+//      addIndex(index);
+//    }
   }
 
   public void write(DataOutput out) throws IOException {
@@ -610,10 +624,10 @@
       HColumnDescriptor family = it.next();
       family.write(out);
     }
-    out.writeInt(indexes.size());
-    for(IndexSpecification index : indexes.values()) {
-      index.write(out);
-    }
+//    out.writeInt(indexes.size());
+//    for(IndexSpecification index : indexes.values()) {
+//      index.write(out);
+//    }
   }
 
   // Comparable
@@ -654,6 +668,13 @@
     return Collections.unmodifiableCollection(this.families.values());
   }
 
+  /**
+   * @return Immutable sorted set of the keys of the families.
+   */
+  public Set<byte[]> getFamiliesKeys() {
+    return Collections.unmodifiableSet(this.families.keySet());
+  }
+
   @TOJSON(fieldName = "columns")
   public HColumnDescriptor[] getColumnFamilies() {
     return getFamilies().toArray(new HColumnDescriptor[0]);
@@ -689,22 +710,22 @@
   /** Table descriptor for <core>-ROOT-</code> catalog table */
   public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
       HConstants.ROOT_TABLE_NAME,
-      new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY,
+      new HColumnDescriptor[] { new HColumnDescriptor(HConstants.CATALOG_FAMILY,
           10, // Ten is arbitrary number. Keep versions to help debuggging.
          Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
-          Integer.MAX_VALUE, HConstants.FOREVER, false) });
+          HConstants.FOREVER, false) });
 
   /** Table descriptor for <code>.META.</code> catalog table */
   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
       HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
-          new HColumnDescriptor(HConstants.COLUMN_FAMILY,
+          new HColumnDescriptor(HConstants.CATALOG_FAMILY,
              10, // Ten is arbitrary number. Keep versions to help debuggging.
              Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
-              Integer.MAX_VALUE, HConstants.FOREVER, false),
-          new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
+              HConstants.FOREVER, false),
+          new HColumnDescriptor(HConstants.CATALOG_HISTORIAN_FAMILY,
              HConstants.ALL_VERSIONS, Compression.Algorithm.NONE.getName(),
              false, false, 8 * 1024,
-              Integer.MAX_VALUE, HConstants.WEEK_IN_SECONDS, false)});
+              HConstants.WEEK_IN_SECONDS, false)});
 
   /* (non-Javadoc)
    * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML()
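For reference, the new getFamiliesKeys() accessor added above can be exercised like the following sketch; the table and family names are hypothetical:

    HTableDescriptor htd = new HTableDescriptor("mytable");
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("colfam")));
    // Iterate the family names in sorted (KeyValue.FAMILY_COMPARATOR) order.
    for (byte [] familyName : htd.getFamiliesKeys()) {
      System.out.println(Bytes.toString(familyName));
    }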