Repository: phoenix
Updated Branches:
  refs/heads/master ab67f3027 -> 203a57f48
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3473022/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestCoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
deleted file mode 100644
index b8fa72d..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.example;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.IndexCodec;
-import org.apache.phoenix.hbase.index.covered.IndexMetaData;
-import org.apache.phoenix.hbase.index.covered.IndexUpdate;
-import org.apache.phoenix.hbase.index.covered.LocalTableState;
-import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexCodec.ColumnEntry;
-import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import com.google.common.collect.Lists;
-
-public class TestCoveredColumnIndexCodec {
-  private static final byte[] PK = new byte[] { 'a' };
-  private static final String FAMILY_STRING = "family";
-  private static final byte[] FAMILY = Bytes.toBytes(FAMILY_STRING);
-  private static final byte[] QUAL = Bytes.toBytes("qual");
-  private static final CoveredColumn COLUMN_REF = new CoveredColumn(FAMILY_STRING, QUAL);
-  private static final byte[] EMPTY_INDEX_KEY = CoveredColumnIndexCodec.composeRowKey(PK, 0,
-      Arrays.asList(toColumnEntry(new byte[0])));
-  private static final byte[] BLANK_INDEX_KEY = CoveredColumnIndexCodec.composeRowKey(PK, 0,
-      Collections.<ColumnEntry> emptyList());
-
-  private static ColumnEntry toColumnEntry(byte[] bytes) {
-    return new ColumnEntry(bytes, COLUMN_REF);
-  }
-
-  /**
-   * Convert between an index and a bunch of values
-   * @throws Exception
-   */
-  @Test
-  public void toFromIndexKey() throws Exception {
-    // start with empty values
-    byte[] indexKey = BLANK_INDEX_KEY;
-    List<byte[]> stored = CoveredColumnIndexCodec.getValues(indexKey);
-    assertEquals("Found some stored values in an index row key that wasn't created with values!",
-      0, stored.size());
-
-    // a single, empty value
-    indexKey = EMPTY_INDEX_KEY;
-    stored = CoveredColumnIndexCodec.getValues(indexKey);
-    assertEquals("Found some stored values in an index row key that wasn't created with values!",
-      1, stored.size());
-    assertEquals("Found a non-zero length value: " + Bytes.toString(stored.get(0)), 0,
-      stored.get(0).length);
-
-    // try with a couple values, some different lengths
-    byte[] v1 = new byte[] { 'a' };
-    byte[] v2 = new byte[] { 'b' };
-    byte[] v3 = Bytes.toBytes("v3");
-    int len = v1.length + v2.length + v3.length;
-    indexKey =
-        CoveredColumnIndexCodec.composeRowKey(PK, len,
-          Arrays.asList(toColumnEntry(v1), toColumnEntry(v2), toColumnEntry(v3)));
-    stored = CoveredColumnIndexCodec.getValues(indexKey);
-    assertEquals("Didn't find expected number of values in index key!", 3, stored.size());
-    assertTrue("First index keys don't match!", Bytes.equals(v1, stored.get(0)));
-    assertTrue("Second index keys don't match!", Bytes.equals(v2, stored.get(1)));
-    assertTrue("Third index keys don't match!", Bytes.equals(v3, stored.get(2)));
-  }
-
-  /**
-   * Ensure that we correctly can determine when a row key is empty (no values).
-   */
-  @Test
-  public void testCheckRowKeyForAllNulls() {
-    byte[] pk = new byte[] { 'a', 'b', 'z' };
-    // check positive cases first
-    byte[] result = EMPTY_INDEX_KEY;
-    assertTrue("Didn't correctly read single element as being null in row key",
-      CoveredColumnIndexCodec.checkRowKeyForAllNulls(result));
-    result =
-        CoveredColumnIndexCodec.composeRowKey(pk, 0,
-          Lists.newArrayList(toColumnEntry(new byte[0]), toColumnEntry(new byte[0])));
-    assertTrue("Didn't correctly read two elements as being null in row key",
-      CoveredColumnIndexCodec.checkRowKeyForAllNulls(result));
-
-    // check cases where it isn't null
-    result =
-        CoveredColumnIndexCodec.composeRowKey(pk, 2,
-          Arrays.asList(toColumnEntry(new byte[] { 1, 2 })));
-    assertFalse("Found a null key, when it wasn't!",
-      CoveredColumnIndexCodec.checkRowKeyForAllNulls(result));
-    result =
-        CoveredColumnIndexCodec.composeRowKey(pk, 2,
-          Arrays.asList(toColumnEntry(new byte[] { 1, 2 }), toColumnEntry(new byte[0])));
-    assertFalse("Found a null key, when it wasn't!",
-      CoveredColumnIndexCodec.checkRowKeyForAllNulls(result));
-  }
-
-  private static class SimpleTableState implements LocalHBaseState {
-
-    private Result r;
-
-    public SimpleTableState(Result r) {
-      this.r = r;
-    }
-
-    @Override
-    public Result getCurrentRowState(Mutation m, Collection<? extends ColumnReference> toCover, boolean preMutationStateOnly)
-        throws IOException {
-      return r;
-    }
-
-  }
-
-  /**
-   * Test that we get back the correct index updates for a given column group
-   * @throws Exception on failure
-   */
-  @Test
-  public void testGeneratedIndexUpdates() throws Exception {
-    ColumnGroup group = new ColumnGroup("test-column-group");
-    group.add(COLUMN_REF);
-
-    final Result emptyState = Result.create(Collections.<Cell> emptyList());
-
-    // setup the state we expect for the codec
-    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
-    Configuration conf = new Configuration(false);
-    Mockito.when(env.getConfiguration()).thenReturn(conf);
-    LocalHBaseState table = new SimpleTableState(emptyState);
-
-    // make a new codec on those kvs
-    CoveredColumnIndexCodec codec =
-        CoveredColumnIndexCodec.getCodecForTesting(Arrays.asList(group));
-
-    // start with a basic put that has some keyvalues
-    Put p = new Put(PK);
-    // setup the kvs to add
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    byte[] v1 = Bytes.toBytes("v1");
-    KeyValue kv = new KeyValue(PK, FAMILY, QUAL, 1, v1);
-    kvs.add(kv);
-    p.add(kv);
-    byte[] v2 = Bytes.toBytes("v2");
-    kv = new KeyValue(PK, Bytes.toBytes("family2"), QUAL, 1, v2);
-    kvs.add(kv);
-    p.add(kv);
-
-    // check the codec for deletes it should send
-    LocalTableState state = new LocalTableState(env, table, p);
-    Iterable<IndexUpdate> updates = codec.getIndexDeletes(state, IndexMetaData.NULL_INDEX_META_DATA);
-    assertFalse("Found index updates without any existing kvs in table!", updates.iterator().next()
-        .isValid());
-
-    // get the updates with the pending update
-    state.setCurrentTimestamp(1);
-    state.addPendingUpdates(kvs);
-    updates = codec.getIndexUpserts(state, IndexMetaData.NULL_INDEX_META_DATA);
-    assertTrue("Didn't find index updates for pending primary table update!", updates.iterator()
-        .hasNext());
-    for (IndexUpdate update : updates) {
-      assertTrue("Update marked as invalid, but should be a pending index write!", update.isValid());
-      Put m = (Put) update.getUpdate();
-      // should just be the single update for the column reference
-      byte[] expected =
-          CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1)));
-      assertArrayEquals("Didn't get expected index value", expected, m.getRow());
-    }
-
-    // then apply a delete
-    Delete d = new Delete(PK, 2);
-    // need to set the timestamp here, as would actually happen on the server, unlike what happens
-    // with puts, where the get the constructor specified timestamp for unspecified methods.
-    d.deleteFamily(FAMILY, 2);
-    // setup the next batch of 'current state', basically just ripping out the current state from
-    // the last round
-    table = new SimpleTableState(new Result(kvs));
-    state = new LocalTableState(env, table, d);
-    state.setCurrentTimestamp(2);
-    // check the cleanup of the current table, after the puts (mocking a 'next' update)
-    updates = codec.getIndexDeletes(state, IndexMetaData.NULL_INDEX_META_DATA);
-    for (IndexUpdate update : updates) {
-      assertTrue("Didn't have any index cleanup, even though there is current state",
-        update.isValid());
-      Delete m = (Delete) update.getUpdate();
-      // should just be the single update for the column reference
-      byte[] expected =
-          CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1)));
-      assertArrayEquals("Didn't get expected index value", expected, m.getRow());
-    }
-    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
-
-    // now with the delete of the columns
-    d = new Delete(PK, 2);
-    d.deleteColumns(FAMILY, QUAL, 2);
-    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
-
-    // this delete needs to match timestamps exactly, by contract, to have any effect
-    d = new Delete(PK, 1);
-    d.deleteColumn(FAMILY, QUAL, 1);
-    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
-  }
-
-  private void ensureNoUpdatesWhenCoveredByDelete(RegionCoprocessorEnvironment env, IndexCodec codec, List<KeyValue> currentState,
-      Delete d) throws IOException {
-    LocalHBaseState table = new SimpleTableState(new Result(currentState));
-    LocalTableState state = new LocalTableState(env, table, d);
-    state.setCurrentTimestamp(d.getTimeStamp());
-    // now we shouldn't see anything when getting the index update
-    state.addPendingUpdates(d.getFamilyMap().get(FAMILY));
-    Iterable<IndexUpdate> updates = codec.getIndexUpserts(state, IndexMetaData.NULL_INDEX_META_DATA);
-    for (IndexUpdate update : updates) {
-      assertFalse("Had some index updates, though it should have been covered by the delete",
-        update.isValid());
-    }
-  }
-}
\ No newline at end of file
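For context, the deleted test above was the round-trip coverage for index row keys: composeRowKey packs the covered values together with the source row key, and getValues unpacks them again. A minimal sketch of that round-trip, assuming the pre-move org.apache.phoenix.hbase.index.covered.example package and the composeRowKey/getValues signatures visible in the diff; IndexKeyRoundTrip is a hypothetical illustration class, not part of this patch:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexCodec;
import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexCodec.ColumnEntry;

public class IndexKeyRoundTrip {
  public static void main(String[] args) {
    byte[] pk = Bytes.toBytes("row1");
    CoveredColumn column = new CoveredColumn("family", Bytes.toBytes("qual"));
    byte[] value = Bytes.toBytes("v1");

    // pack the covered value and the source row key into an index row key
    byte[] indexKey = CoveredColumnIndexCodec.composeRowKey(pk, value.length,
        Arrays.asList(new ColumnEntry(value, column)));

    // unpack: the stored values come back in the order they were written
    List<byte[]> stored = CoveredColumnIndexCodec.getValues(indexKey);
    if (stored.size() != 1 || !Bytes.equals(value, stored.get(0))) {
      throw new AssertionError("Round-trip failed");
    }
  }
}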
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3473022/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestCoveredIndexSpecifierBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestCoveredIndexSpecifierBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestCoveredIndexSpecifierBuilder.java
deleted file mode 100644
index dbed613..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/example/TestCoveredIndexSpecifierBuilder.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.example;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-import org.junit.Test;
-
-public class TestCoveredIndexSpecifierBuilder {
-  private static final String FAMILY = "FAMILY";
-  private static final String FAMILY2 = "FAMILY2";
-  private static final String INDEX_TABLE = "INDEX_TABLE";
-  private static final String INDEX_TABLE2 = "INDEX_TABLE2";
-
-
-  @Test
-  public void testSimpleSerialziationDeserialization() throws Exception {
-    byte[] indexed_qualifer = Bytes.toBytes("indexed_qual");
-
-    //setup the index
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE);
-    // match a single family:qualifier pair
-    CoveredColumn col1 = new CoveredColumn(FAMILY, indexed_qualifer);
-    fam1.add(col1);
-    // matches the family2:* columns
-    CoveredColumn col2 = new CoveredColumn(FAMILY2, null);
-    fam1.add(col2);
-    builder.addIndexGroup(fam1);
-    ColumnGroup fam2 = new ColumnGroup(INDEX_TABLE2);
-    // match a single family2:qualifier pair
-    CoveredColumn col3 = new CoveredColumn(FAMILY2, indexed_qualifer);
-    fam2.add(col3);
-    builder.addIndexGroup(fam2);
-
-    Configuration conf = new Configuration(false);
-    //convert the map that HTableDescriptor gets into the conf the coprocessor receives
-    Map<String, String> map = builder.convertToMap();
-    for(Entry<String, String> entry: map.entrySet()){
-      conf.set(entry.getKey(), entry.getValue());
-    }
-
-    List<ColumnGroup> columns = CoveredColumnIndexSpecifierBuilder.getColumns(conf);
-    assertEquals("Didn't deserialize the expected number of column groups", 2, columns.size());
-    ColumnGroup group = columns.get(0);
-    assertEquals("Didn't deserialize expected column in first group", col1, group.getColumnForTesting(0));
-    assertEquals("Didn't deserialize expected column in first group", col2, group.getColumnForTesting(1));
-    group = columns.get(1);
-    assertEquals("Didn't deserialize expected column in second group", col3, group.getColumnForTesting(0));
-  }
-}
\ No newline at end of file
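Similarly, this deleted test covered the specifier serialize/deserialize path: convertToMap produces the properties that would live on the HTableDescriptor, and once those are copied into a Configuration, getColumns reads the groups back. A brief sketch under the same pre-move-package assumption; SpecifierRoundTrip is a hypothetical class name:

import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;

public class SpecifierRoundTrip {
  public static void main(String[] args) {
    // describe one index group covering a single family:qualifier pair
    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
    ColumnGroup group = new ColumnGroup("MY_INDEX_TABLE");
    group.add(new CoveredColumn("fam", Bytes.toBytes("qual")));
    builder.addIndexGroup(group);

    // copy the descriptor properties into a Configuration, mirroring
    // what the coprocessor would receive at runtime
    Configuration conf = new Configuration(false);
    for (Map.Entry<String, String> entry : builder.convertToMap().entrySet()) {
      conf.set(entry.getKey(), entry.getValue());
    }

    // read the groups back out of the Configuration
    List<ColumnGroup> groups = CoveredColumnIndexSpecifierBuilder.getColumns(conf);
    if (groups.size() != 1) {
      throw new AssertionError("Expected one deserialized column group");
    }
  }
}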
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3473022/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
index 15ecd3f..ae3efc6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
@@ -17,11 +17,17 @@
  */
 package org.apache.phoenix.hbase.index.util;
 
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
+import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexCodec;
 import org.junit.Test;
 
 public class TestIndexManagementUtil {
@@ -63,4 +69,27 @@ public class TestIndexManagementUtil {
     conf.set(IndexManagementUtil.HLOG_READER_IMPL_KEY, IndexedHLogReader.class.getName());
     IndexManagementUtil.ensureMutableIndexingCorrectlyConfigured(conf);
   }
+
+  /**
+   * Create the specified index table with the necessary columns
+   * @param admin {@link HBaseAdmin} to use when creating the table
+   * @param indexTable name of the index table.
+   * @throws IOException
+   */
+  public static void createIndexTable(HBaseAdmin admin, String indexTable) throws IOException {
+    createIndexTable(admin, new HTableDescriptor(indexTable));
+  }
+
+  /**
+   * @param admin to create the table
+   * @param index descriptor to update before creating table
+   */
+  public static void createIndexTable(HBaseAdmin admin, HTableDescriptor index) throws IOException {
+    HColumnDescriptor col =
+        new HColumnDescriptor(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY);
+    // ensure that we can 'see past' delete markers when doing scans
+    col.setKeepDeletedCells(true);
+    index.addFamily(col);
+    admin.createTable(index);
+  }
 }
\ No newline at end of file
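The helper added above takes over table creation from the removed CoveredColumnIndexer. A usage sketch, assuming an HBase version where the HBaseAdmin(Configuration) constructor is still available; the table name and CreateIndexTableExample class are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil;

public class CreateIndexTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // creates the table with the single covered-index column family,
      // keeping deleted cells so index scans can 'see past' delete markers
      TestIndexManagementUtil.createIndexTable(admin, "MY_INDEX_TABLE");
    } finally {
      admin.close();
    }
  }
}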
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3473022/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index b381e9f..017470a 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -59,12 +59,12 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.phoenix.hbase.index.IndexTestingUtils;
 import org.apache.phoenix.hbase.index.Indexer;
 import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexer;
+import org.apache.phoenix.hbase.index.covered.ColumnGroup;
+import org.apache.phoenix.hbase.index.covered.CoveredColumn;
+import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
+import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil;
 import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;
 import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;
 import org.junit.Assert;
@@ -182,7 +182,7 @@ public class TestWALRecoveryCaching {
     // create the index table
     HTableDescriptor indexTableDesc = new HTableDescriptor(Bytes.toBytes(getIndexTableName()));
     indexTableDesc.addCoprocessor(IndexTableBlockingReplayObserver.class.getName());
-    CoveredColumnIndexer.createIndexTable(admin, indexTableDesc);
+    TestIndexManagementUtil.createIndexTable(admin, indexTableDesc);
 
     // figure out where our tables live
     ServerName shared =