PHOENIX-4099 Do not write table data again when replaying mutations for partial index rebuild (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fff7963a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fff7963a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fff7963a

Branch: refs/heads/4.x-HBase-1.1
Commit: fff7963a3e0e0db67ed6f89ad3189f967f1a152c
Parents: 2ed183c
Author: James Taylor <jamestay...@apache.org>
Authored: Thu Aug 17 21:57:23 2017 -0700
Committer: James Taylor <jamestay...@apache.org>
Committed: Thu Aug 17 22:13:06 2017 -0700

----------------------------------------------------------------------
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  11 +-
 .../FailForUnsupportedHBaseVersionsIT.java      |   6 +-
 .../index/covered/FailWithoutRetriesIT.java     | 140 +++
 .../example/EndToEndCoveredIndexingIT.java      | 902 -------------------
 .../EndtoEndIndexingWithCompressionIT.java      |  45 -
 .../covered/example/FailWithoutRetriesIT.java   | 138 ---
 .../index/covered/example/ColumnGroup.java      | 112 ---
 .../index/covered/example/CoveredColumn.java    | 107 ---
 .../example/CoveredColumnIndexCodec.java        | 376 --------
 .../CoveredColumnIndexSpecifierBuilder.java     | 184 ----
 .../covered/example/CoveredColumnIndexer.java   | 165 ----
 .../hbase/index/covered/ColumnGroup.java        | 112 +++
 .../hbase/index/covered/CoveredColumn.java      | 106 +++
 .../index/covered/CoveredColumnIndexCodec.java  | 372 ++++++++
 .../CoveredColumnIndexSpecifierBuilder.java     | 182 ++++
 .../hbase/index/covered/TestColumnTracker.java  |  61 ++
 .../covered/TestCoveredColumnIndexCodec.java    | 246 +++++
 .../TestCoveredIndexSpecifierBuilder.java       |  72 ++
 .../covered/example/TestColumnTracker.java      |  61 --
 .../example/TestCoveredColumnIndexCodec.java    | 250 -----
 .../TestCoveredIndexSpecifierBuilder.java       |  75 --
 .../index/util/TestIndexManagementUtil.java     |  29 +
 .../index/write/TestWALRecoveryCaching.java     |  10 +-
 23 files changed, 1333 insertions(+), 2429 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fff7963a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 22f459c..5c29f7c 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -55,10 +54,10 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.hbase.index.IndexTestingUtils;
 import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexer;
+import org.apache.phoenix.hbase.index.covered.ColumnGroup;
+import org.apache.phoenix.hbase.index.covered.CoveredColumn;
+import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
+import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil;
 import org.apache.phoenix.util.ConfigUtil;
 import org.junit.After;
 import org.junit.Before;
@@ -214,7 +213,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
       Mockito.any(Exception.class));
 
     // then create the index table so we are successful on WAL replay
-    CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);
+    TestIndexManagementUtil.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);
 
     // run the WAL split and setup the region
     runWALSplit(this.conf, walFactory);
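
The hunk above swaps the removed CoveredColumnIndexer.createIndexTable for a new helper on TestIndexManagementUtil (the diffstat shows 29 lines added to that class). The helper's body is outside this excerpt; a minimal sketch of what such a create-index-table helper typically does, assuming the HBase 1.1 admin API and a single index column family that retains deleted cells, is:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    // Sketch only: the family name and the keep-deleted-cells choice are assumptions,
    // not the committed body of TestIndexManagementUtil.createIndexTable.
    public static void createIndexTable(HBaseAdmin admin, String indexTable) throws IOException {
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(indexTable));
        HColumnDescriptor family = new HColumnDescriptor("INDEXED_COLUMNS");
        // retain deleted cells so timestamped scans of the index still see covered entries
        family.setKeepDeletedCells(true);
        desc.addFamily(family);
        admin.createTable(desc);
    }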

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fff7963a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index 9c4b57f..5916c43 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;
-import org.apache.phoenix.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
+import org.apache.phoenix.hbase.index.covered.ColumnGroup;
+import org.apache.phoenix.hbase.index.covered.CoveredColumn;
+import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fff7963a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/FailWithoutRetriesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/FailWithoutRetriesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/FailWithoutRetriesIT.java
new file mode 100644
index 0000000..ba8340c
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/FailWithoutRetriesIT.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.hbase.index.covered;
+
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.hbase.index.IndexTestingUtils;
+import org.apache.phoenix.hbase.index.Indexer;
+import org.apache.phoenix.hbase.index.TableName;
+import org.apache.phoenix.hbase.index.builder.BaseIndexCodec;
+import org.apache.phoenix.hbase.index.covered.ColumnGroup;
+import org.apache.phoenix.hbase.index.covered.CoveredColumn;
+import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
+import org.apache.phoenix.hbase.index.covered.IndexMetaData;
+import org.apache.phoenix.hbase.index.covered.IndexUpdate;
+import org.apache.phoenix.hbase.index.covered.TableState;
+import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * If {@link DoNotRetryIOException} is not subclassed correctly (with the {@link String} constructor),
+ * {@link MultiResponse#readFields(java.io.DataInput)} will not correctly deserialize the exception, and just return
+ * <tt>null</tt> to the client, which then just goes and retries.
+ */
+@Category(NeedsOwnMiniClusterTest.class)
+public class FailWithoutRetriesIT {
+
+    private static final Log LOG = LogFactory.getLog(FailWithoutRetriesIT.class);
+    @Rule
+    public TableName table = new TableName();
+
+    private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+    private String getIndexTableName() {
+        return Bytes.toString(table.getTableName()) + "_index";
+    }
+
+    public static class FailingTestCodec extends BaseIndexCodec {
+
+        @Override
+        public Iterable<IndexUpdate> getIndexDeletes(TableState state, IndexMetaData context) throws IOException {
+            throw new RuntimeException("Intentionally failing deletes for " + FailWithoutRetriesIT.class.getName());
+        }
+
+        @Override
+        public Iterable<IndexUpdate> getIndexUpserts(TableState state, IndexMetaData context) throws IOException {
+            throw new RuntimeException("Intentionally failing upserts for " + FailWithoutRetriesIT.class.getName());
+        }
+    }
+
+    @BeforeClass
+    public static void setupCluster() throws Exception {
+        // setup and verify the config
+        Configuration conf = UTIL.getConfiguration();
+        setUpConfigForMiniCluster(conf);
+        IndexTestingUtils.setupConfig(conf);
+        IndexManagementUtil.ensureMutableIndexingCorrectlyConfigured(conf);
+        // start the cluster
+        UTIL.startMiniCluster();
+    }
+
+    /**
+     * If this test times out, then we didn't fail quickly enough. {@link Indexer} maybe isn't rethrowing the exception
+     * correctly?
+     * <p>
+     * We use a custom codec to enforce the thrown exception.
+     * 
+     * @throws Exception
+     */
+    @Test(timeout = 300000)
+    public void testQuickFailure() throws Exception {
+        // incorrectly setup indexing for the primary table - target index table doesn't exist, which
+        // should quickly return to the client
+        byte[] family = Bytes.toBytes("family");
+        ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
+        // values are [col1]
+        fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
+        CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
+        // add the index family
+        builder.addIndexGroup(fam1);
+        // usually, we would create the index table here, but we don't for the sake of the test.
+
+        // setup the primary table
+        String primaryTable = Bytes.toString(table.getTableName());
+        @SuppressWarnings("deprecation")
+        HTableDescriptor pTable = new HTableDescriptor(primaryTable);
+        pTable.addFamily(new HColumnDescriptor(family));
+        // override the codec so we can use our test one
+        builder.build(pTable, FailingTestCodec.class);
+
+        // create the primary table
+        HBaseAdmin admin = UTIL.getHBaseAdmin();
+        admin.createTable(pTable);
+        Configuration conf = new Configuration(UTIL.getConfiguration());
+        // up the number of retries/wait time to make it obvious that we are failing with retries here
+        conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
+        conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
+        HTable primary = new HTable(conf, primaryTable);
+        primary.setAutoFlush(false, true);
+
+        // do a simple put that should be indexed
+        Put p = new Put(Bytes.toBytes("row"));
+        p.add(family, null, Bytes.toBytes("value"));
+        primary.put(p);
+        try {
+            primary.flushCommits();
+            fail("Shouldn't have gotten a successful write to the primary table");
+        } catch (RetriesExhaustedWithDetailsException e) {
+            LOG.info("Correctly got a failure of the put!");
+        }
+        primary.close();
+    }
+}
\ No newline at end of file
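
As the class javadoc above notes, DoNotRetryIOException only short-circuits client retries when the subclass keeps the String constructor that the RPC layer uses to rebuild the exception on the client side. A minimal correctly-formed subclass (the name is hypothetical, not part of this commit) looks like:

    import org.apache.hadoop.hbase.DoNotRetryIOException;

    // Hypothetical example: keeping the (String) constructor lets the client
    // rehydrate the remote exception and fail immediately instead of retrying.
    public class IndexWriteFailureException extends DoNotRetryIOException {
        public IndexWriteFailureException(String message) {
            super(message);
        }

        public IndexWriteFailureException(String message, Throwable cause) {
            super(message, cause);
        }
    }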

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fff7963a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/EndToEndCoveredIndexingIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/EndToEndCoveredIndexingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/EndToEndCoveredIndexingIT.java
deleted file mode 100644
index 70f29b1..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/EndToEndCoveredIndexingIT.java
+++ /dev/null
@@ -1,902 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.example;
-
-import static org.apache.phoenix.query.BaseTest.initAndRegisterTestDriver;
-import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test Covered Column indexing in an 'end-to-end' manner on a minicluster. This covers cases where
- * we manage custom timestamped updates that arrive in and out of order as well as just using the
- * generically timestamped updates.
- */
-@Category(NeedsOwnMiniClusterTest.class)
-public class EndToEndCoveredIndexingIT {
-    private static final Log LOG = LogFactory.getLog(EndToEndCoveredIndexingIT.class);
-    protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-    private static final String FAM_STRING = "FAMILY";
-    private static final byte[] FAM = Bytes.toBytes(FAM_STRING);
-    private static final String FAM2_STRING = "FAMILY2";
-    private static final byte[] FAM2 = Bytes.toBytes(FAM2_STRING);
-    private static final byte[] EMPTY_BYTES = new byte[0];
-    private static final byte[] indexed_qualifer = Bytes.toBytes("indexed_qual");
-    private static final byte[] regular_qualifer = Bytes.toBytes("reg_qual");
-    private static final byte[] row1 = Bytes.toBytes("row1");
-    private static final byte[] value1 = Bytes.toBytes("val1");
-    private static final byte[] value2 = Bytes.toBytes("val2");
-    private static final byte[] value3 = Bytes.toBytes("val3");
-    // match a single family:qualifier pair
-    private static final CoveredColumn col1 = new CoveredColumn(FAM_STRING, indexed_qualifer);
-    // matches the family2:* columns
-    private static final CoveredColumn col2 = new CoveredColumn(FAM2_STRING, null);
-    private static final CoveredColumn col3 = new CoveredColumn(FAM2_STRING, indexed_qualifer);
-
-    @Rule
-    public TableName TestTable = new TableName();
-
-    private ColumnGroup fam1;
-    private ColumnGroup fam2;
-
-    // setup a couple of index columns
-    private void setupColumns() {
-        fam1 = new ColumnGroup(getIndexTableName());
-        fam2 = new ColumnGroup(getIndexTableName() + "2");
-        // values are [col1][col2_1]...[col2_n]
-        fam1.add(col1);
-        fam1.add(col2);
-        // value is [col2]
-        fam2.add(col3);
-    }
-
-    private String getIndexTableName() {
-        return Bytes.toString(TestTable.getTableName()) + "_index";
-    }
-
-    protected static void setupConfig() throws Exception {
-        Configuration conf = UTIL.getConfiguration();
-        setUpConfigForMiniCluster(conf);
-        IndexTestingUtils.setupConfig(conf);
-        // disable version checking, so we can test against whatever version of HBase happens to be
-        // installed (right now, it's generally going to be SNAPSHOT versions).
-        conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    }
-
-    protected static void initDriver() throws Exception {
-        Configuration conf = UTIL.getConfiguration();
-        String zkQuorum = TestUtil.LOCALHOST + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + UTIL.getZkCluster().getClientPort();
-        String url = PhoenixRuntime.JDBC_PROTOCOL +
-                PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum +
-                PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR +
-                conf.get(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
-        initAndRegisterTestDriver(url, ReadOnlyProps.EMPTY_PROPS);
-    }
-
-    @BeforeClass
-    public static void setupCluster() throws Exception {
-        setupConfig();
-        UTIL.startMiniCluster();
-        initDriver();
-    }
-
-    @Before
-    public void setup() throws Exception {
-        setupColumns();
-    }
-
-    /**
-     * Test that a bunch of puts with a single timestamp across all the puts builds and inserts index
-     * entries as expected
-     * @throws Exception on failure
-     */
-    @Test
-    public void testSimpleTimestampedUpdates() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // do a put to the primary table
-        Put p = new Put(row1);
-        long ts = 10;
-        p.add(FAM, indexed_qualifer, ts, value1);
-        p.add(FAM, regular_qualifer, ts, value2);
-        primary.put(p);
-        primary.flushCommits();
-
-        // read the index for the expected values
-        HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());
-
-        // build the expected kvs
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-        List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-
-        // verify that the index matches
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1);
-    }
-
-    /**
-     * Test that the multiple timestamps in a single put build the correct index updates.
-     * @throws Exception on failure
-     */
-    @Test
-    public void testMultipleTimestampsInSinglePut() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // do a put to the primary table
-        Put p = new Put(row1);
-        long ts1 = 10;
-        long ts2 = 11;
-        p.add(FAM, indexed_qualifer, ts1, value1);
-        p.add(FAM, regular_qualifer, ts1, value2);
-        // our group indexes all columns in this family, so any qualifier here is ok
-        p.add(FAM2, regular_qualifer, ts2, value3);
-        primary.put(p);
-        primary.flushCommits();
-
-        // read the index for the expected values
-        HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());
-
-        // build the expected kvs
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-        // check the first entry at ts1
-        List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-        // check the second entry at ts2
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1);
-    }
-
-    /**
-     * Test that we make updates to multiple {@link ColumnGroup}s across a single put/delete
-     * @throws Exception on failure
-     */
-    @Test
-    public void testMultipleConcurrentGroupsUpdated() throws Exception {
-        HTable primary = createSetupTables(fam1, fam2);
-
-        // do a put to the primary table
-        Put p = new Put(row1);
-        long ts = 10;
-        p.add(FAM, indexed_qualifer, ts, value1);
-        p.add(FAM, regular_qualifer, ts, value2);
-        p.add(FAM2, indexed_qualifer, ts, value3);
-        primary.put(p);
-        primary.flushCommits();
-
-        // read the index for the expected values
-        HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-        HTable index2 = new HTable(UTIL.getConfiguration(), fam2.getTable());
-
-        // build the expected kvs
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-        List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-        // and check the second index as well
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col3));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index2, expected, ts, value3);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1, index2);
-    }
-
-    /**
-     * HBase has a 'fun' property wherein you can completely clobber an existing row if you make a
-     * {@link Put} at the exact same dimension (row, cf, cq, ts) as an existing row. The old row
-     * disappears and the new value (since the rest of the row is the same) completely subsumes it.
-     * This test ensures that we remove the old entry and put a new entry in its place.
-     * @throws Exception on failure
-     */
-    @Test
-    public void testOverwritingPutsCorrectlyGetIndexed() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // do a put to the primary table
-        Put p = new Put(row1);
-        long ts = 10;
-        p.add(FAM, indexed_qualifer, ts, value1);
-        p.add(FAM, regular_qualifer, ts, value2);
-        primary.put(p);
-        primary.flushCommits();
-
-        // read the index for the expected values
-        HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-        // build the expected kvs
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-        // check the first entry at ts
-        List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-        // now overwrite the put in the primary table with a new value
-        p = new Put(row1);
-        p.add(FAM, indexed_qualifer, ts, value3);
-        primary.put(p);
-        primary.flushCommits();
-
-        pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-        // check the first entry at ts
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value3);
-        // and verify that a scan at the first entry returns nothing (ignore the updated row)
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts,
-                value1, value2);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1);
-    }
-
-    @Test
-    public void testSimpleDeletes() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // do a simple Put
-        long ts = 10;
-        Put p = new Put(row1);
-        p.add(FAM, indexed_qualifer, ts, value1);
-        p.add(FAM, regular_qualifer, ts, value2);
-        primary.put(p);
-        primary.flushCommits();
-
-        Delete d = new Delete(row1);
-        primary.delete(d);
-
-        HTable index = new HTable(UTIL.getConfiguration(), fam1.getTable());
-        List<KeyValue> expected = Collections.<KeyValue> emptyList();
-        // scan over all time should cause the delete to be covered
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, 0, Long.MAX_VALUE, value1,
-                HConstants.EMPTY_END_ROW);
-
-        // scan at the older timestamp should still show the older value
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts, value1);
-
-        // cleanup
-        closeAndCleanupTables(index, primary);
-    }
-
-    /**
-     * If we don't have any updates to make to the index, we don't take a lock on the WAL. However, we
-     * need to make sure that we don't try to unlock the WAL at write time when we don't write
-     * anything, since that will cause a java.lang.IllegalMonitorStateException
-     * @throws Exception on failure
-     */
-    @Test
-    public void testDeletesWithoutPreviousState() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // do a delete on the primary table (no data, so no index updates...hopefully).
-        long ts = 10;
-        Delete d = new Delete(row1);
-        primary.delete(d);
-
-        HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-        List<KeyValue> expected = Collections.<KeyValue> emptyList();
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-        // a delete of a specific family/column should also not show any index updates
-        d = new Delete(row1);
-        d.deleteColumn(FAM, indexed_qualifer);
-        primary.delete(d);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-        // also just a family marker should have the same effect
-        d = new Delete(row1);
-        d.deleteFamily(FAM);
-        primary.delete(d);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-        // also just a family marker should have the same effect
-        d = new Delete(row1);
-        d.deleteColumns(FAM, indexed_qualifer);
-        primary.delete(d);
-        primary.flushCommits();
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1);
-    }
-
-    /**
-     * Similar to {@link #testMultipleTimestampsInSinglePut()}, this checks the same with deletes
-     * @throws Exception on failure
-     */
-    @Test
-    @Ignore("PHOENIX-4057 Do not issue index updates for out of order mutation")
-    public void testMultipleTimestampsInSingleDelete() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // do a put to the primary table
-        Put p = new Put(row1);
-        long ts1 = 10, ts2 = 11, ts3 = 12;
-        p.add(FAM, indexed_qualifer, ts1, value1);
-        // our group indexes all columns in this family, so any qualifier here is ok
-        p.add(FAM2, regular_qualifer, ts2, value3);
-        primary.put(p);
-        primary.flushCommits();
-
-        // check to make sure everything we expect is there
-        HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-        // ts1, we just have v1
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-        List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-        // at ts2, don't have the above anymore
-        pairs.clear();
-        expected = Collections.emptyList();
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts2 + 1, value1, value1);
-        // but we do have the new entry at ts2
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-        // now build up a delete with a couple different timestamps
-        Delete d = new Delete(row1);
-        // these deletes have to match the exact ts since we are doing an exact match (deleteColumn).
-        d.deleteColumn(FAM, indexed_qualifer, ts1);
-        // since this doesn't match exactly, we actually shouldn't see a change in table state
-        d.deleteColumn(FAM2, regular_qualifer, ts3);
-        primary.delete(d);
-
-        // at ts1, we should have the put covered exactly by the delete and into the entire future
-        expected = Collections.emptyList();
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, Long.MAX_VALUE, value1,
-                value1);
-
-        // at ts2, we should just see value3
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-        // the later delete is a point delete, so we shouldn't see any change at ts3
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts3, value1,
-                HConstants.EMPTY_END_ROW);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1);
-    }
-
-    /**
-     * Covering deletes (via {@link Delete#deleteColumns}) cover everything back in time from the
-     * given time. If it's modifying the latest state, we don't need to do anything but add deletes. If
-     * it's modifying back in time state, we need to just fix up the surrounding elements as anything
-     * else ahead of it will be fixed up by later updates.
-     * <p>
-     * Similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
-     * @throws Exception on failure
-     */
-    @Test
-    public void testDeleteColumnsInThePast() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // do a put to the primary table
-        Put p = new Put(row1);
-        long ts1 = 10, ts2 = 11, ts3 = 12;
-        p.add(FAM, indexed_qualifer, ts1, value1);
-        p.add(FAM2, regular_qualifer, ts2, value3);
-        primary.put(p);
-        primary.flushCommits();
-
-        // now build up a delete with a couple different timestamps
-        Delete d = new Delete(row1);
-        // these deletes don't need to match the exact ts because they cover everything earlier
-        d.deleteColumns(FAM, indexed_qualifer, ts2);
-        d.deleteColumns(FAM2, regular_qualifer, ts3);
-        primary.delete(d);
-
-        // read the index for the expected values
-        HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-        // build the expected kvs
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-        // check the first entry at ts1
-        List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-        // delete at ts2 changes what the put would insert
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-        // final delete clears out everything
-        expected = Collections.emptyList();
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1);
-    }
-
-    /**
-     * If the client is using custom timestamps, it is possible that the updates come out-of-order (i.e.
-     * update to ts 10 comes after the update to ts 12). In that case, we need to be sure that the
-     * index is correctly updated when the out of order put arrives.
-     * @throws Exception
-     */
-    @Test
-    @Ignore("PHOENIX-4057 Do not issue index updates for out of order mutation")
-    public void testOutOfOrderUpdates() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // do a put to the primary table
-        Put p = new Put(row1);
-        long ts = 12;
-        p.add(FAM, indexed_qualifer, ts, value1);
-        primary.put(p);
-        primary.flushCommits();
-
-        // read the index for the expected values
-        HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-        // build the expected kvs
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-        // check the first entry at ts
-        List<KeyValue> expectedTs1 = CoveredColumnIndexCodec
-                .getIndexKeyValueForTesting(row1, ts, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs1, ts, value1);
-
-        // now make a put back in time
-        long ts2 = ts - 2;
-        p = new Put(row1);
-        p.add(FAM, indexed_qualifer, ts2, value2);
-        primary.put(p);
-        primary.flushCommits();
-
-        pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value2, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-        // check to make sure the back in time entry exists
-        List<KeyValue> expectedTs2 = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2,
-                pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs2, ts2, value2);
-        // then it should be gone at the newer ts (because it deletes itself)
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts2,
-                ts + 1, value2, HConstants.EMPTY_END_ROW);
-
-        // but that the original index entry is still visible at ts, just fine
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs1, ts, value1);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1);
-    }
-
-    /**
-     * It's possible (i.e. from a fast, frequently writing client) that they put more than the
-     * 'visible' number of versions in a row before a client makes a put 'back in time' on that row. If
-     * we don't scan the current table properly, we won't see an index update for that 'back in time'
-     * update since the usual lookup will only see the regular number of versions. This ability to see
-     * back in time depends on running HBase 0.94.9
-     * @throws Exception on failure
-     */
-    @Test
-    @Ignore("PHOENIX-4057 Do not issue index updates for out of order mutation")
-    public void testExceedVersionsOutOfOrderPut() throws Exception {
-        // setup the index
-        HTable primary = createSetupTables(fam2);
-
-        // do a put to the primary table
-        Put p = new Put(row1);
-        long ts1 = 1, ts2 = 2, ts3 = 3, ts4 = 4, ts5 = 5;
-        byte[] value4 = Bytes.toBytes("val4");
-        byte[] value5 = Bytes.toBytes("val5");
-        p.add(FAM2, indexed_qualifer, ts1, value1);
-        primary.put(p);
-        primary.flushCommits();
-
-        p = new Put(row1);
-        p.add(FAM2, indexed_qualifer, ts3, value3);
-        primary.put(p);
-        primary.flushCommits();
-
-        p = new Put(row1);
-        p.add(FAM2, indexed_qualifer, ts4, value4);
-        primary.put(p);
-        primary.flushCommits();
-
-        p = new Put(row1);
-        p.add(FAM2, indexed_qualifer, ts5, value5);
-        primary.put(p);
-        primary.flushCommits();
-
-        // read the index for the expected values
-        HTable index = new HTable(UTIL.getConfiguration(), fam2.getTable());
-
-        // do a raw scan of everything in the table
-        if (LOG.isDebugEnabled()) {
-            // the whole table, all the keys
-            Scan s = new Scan();
-            s.setRaw(true);
-            ResultScanner scanner = index.getScanner(s);
-            for (Result r : scanner) {
-                LOG.debug("Found row:" + r);
-            }
-            scanner.close();
-        }
-
-        /*
-         * now we have definitely exceeded the number of versions visible to a usual client of the
-         * primary table, so we should try doing a put 'back in time' and make sure that has the correct
-         * index values and cleanup
-         */
-        p = new Put(row1);
-        p.add(FAM2, indexed_qualifer, ts2, value2);
-        primary.put(p);
-        primary.flushCommits();
-
-        // // read the index for the expected values
-        // HTable index = new HTable(UTIL.getConfiguration(), fam2.getTable());
-        //
-        // do a raw scan of everything in the table
-        if (LOG.isDebugEnabled()) {
-            // the whole table, all the keys
-            Scan s = new Scan();
-            s.setRaw(true);
-            ResultScanner scanner = index.getScanner(s);
-            for (Result r : scanner) {
-                LOG.debug("Found row:" + r);
-            }
-            scanner.close();
-        }
-
-        // build the expected kvs
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col3));
-
-        // check the value1 should be present at the earliest timestamp
-        List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts1, value1, value2);
-
-        // and value1 should be removed at ts2 (even though it came later)
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts1,
-                ts2 + 1, value1, value2); // timestamp + 1 since it's an exclusive end timestamp
-
-        // late added column should be there just fine at its timestamp
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value2, col3));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts2, value2);
-
-        // and check that the late entry also removes itself at the next timestamp up
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts3,
-                value2, value3);
-
-        // then we should have the rest of the inserts at their appropriate timestamps. Everything else
-        // should be exactly the same, except we shouldn't see ts0 anymore at ts2
-
-        // check the third entry
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col3));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts3, value3);
-
-        // check the fourth entry
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value4, col3));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts4, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts4, value4);
-
-        // check the first entry at ts4
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value2, col3));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts2, value2);
-        // verify that we remove the entry, even though it's too far 'back in time'
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts3,
-                value4);
-
-        // cleanup
-        closeAndCleanupTables(primary, index);
-    }
-
-    /**
-     * Similar to {@link #testExceedVersionsOutOfOrderPut()}, but mingles deletes and puts.
-     * @throws Exception on failure
-     */
-    @Test
-    @Ignore("PHOENIX-4057 Do not issue index updates for out of order mutation")
-    public void testExceedVersionsOutOfOrderUpdates() throws Exception {
-        HTable primary = createSetupTables(fam1);
-
-        // setup the data to store
-        long ts1 = 1, ts2 = 2, ts3 = 3, ts4 = 4, ts5 = 5, ts6 = 6;
-        byte[] value4 = Bytes.toBytes("val4"), value5 = Bytes.toBytes("val5"), value6 =
-                Bytes.toBytes("val6");
-        // values for the other column to index
-        byte[] v1_1 = ArrayUtils.addAll(value1, Bytes.toBytes("_otherCol")), v3_1 =
-                ArrayUtils.addAll(value3, Bytes.toBytes("_otherCol")), v5_1 =
-                ArrayUtils.addAll(value5, Bytes.toBytes("_otherCol")), v6_1 =
-                ArrayUtils.addAll(value6, Bytes.toBytes("_otherCol"));
-
-        // make some puts to the primary table
-        Put p = new Put(row1);
-        p.add(FAM, indexed_qualifer, ts1, value1);
-        p.add(FAM2, indexed_qualifer, ts1, v1_1);
-        primary.put(p);
-        primary.flushCommits();
-
-        p = new Put(row1);
-        p.add(FAM, indexed_qualifer, ts3, value3);
-        p.add(FAM2, indexed_qualifer, ts3, v3_1);
-        primary.put(p);
-        primary.flushCommits();
-
-        p = new Put(row1);
-        p.add(FAM, indexed_qualifer, ts5, value5);
-        p.add(FAM2, indexed_qualifer, ts5, v5_1);
-        primary.put(p);
-        primary.flushCommits();
-
-        p = new Put(row1);
-        p.add(FAM, indexed_qualifer, ts6, value6);
-        p.add(FAM2, indexed_qualifer, ts6, v6_1);
-        primary.put(p);
-        primary.flushCommits();
-
-        /*
-         * now we have definitely exceeded the number of versions visible to a usual client of the
-         * primary table, so we should try doing a put 'back in time' and make sure that has the correct
-         * index values and cleanup
-         */
-        p = new Put(row1);
-        p.add(FAM, indexed_qualifer, ts2, value2);
-        primary.put(p);
-        primary.flushCommits();
-
-        // read the index for the expected values
-        HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-        // do a raw scan of everything in the table
-        if (LOG.isDebugEnabled()) {
-            Scan s = new Scan();
-            s.setRaw(true);
-            ResultScanner scanner = index1.getScanner(s);
-            for (Result r : scanner) {
-                LOG.debug("Found row:" + r);
-            }
-            scanner.close();
-        }
-
-        // build the expected kvs
-        List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-        pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-
-        // check the value1 should be present at the earliest timestamp
-        List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1, value2);
-
-        // and value1 should be removed at ts2 (even though it came later)
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts1,
-                ts2 + 1, value1, value2); // timestamp + 1 since it's an exclusive end timestamp
-
-        // late added column should be there just fine at its timestamp
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value2, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value2);
-
-        // and check that the late entry also removes itself at the next timestamp up
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts3,
-                value2, value3);
-
-        // -----------------------------------------------
-        // Check Delete intermingled
-        // -----------------------------------------------
-
-        // verify that the old row is there
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(v3_1, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-        // scan from the start key forward (should only include [value3][v3_3])
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, expected.get(0).getKey(),
-                value4);
-
-        // then do a delete of just one of the indexed columns. This should insert a delete for all just
-        // the single value, then a put & a later corresponding in the past for the new value
-        Delete d = new Delete(row1);
-        d.deleteColumn(FAM2, indexed_qualifer, ts3);
-        primary.delete(d);
-
-        // we shouldn't find that entry, but we should find [value3][v1_1] since that is next entry back
-        // in time from the current
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-        // it should be re-written at 3
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value3, value4);
-
-        // but we shouldn't find it at ts5 since it should be covered again
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts5,
-                value3, value4);
-
-        // now remove all the older columns in FAM2 at 4
-        d = new Delete(row1);
-        d.deleteColumns(FAM2, indexed_qualifer, ts4);
-        primary.delete(d);
-
-        // we shouldn't find that entry, but we should find [value3][null] since deleteColumns removes
-        // all the entries for that column
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts4, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts4, value3, value4);
-
-        // same as above, but now do it at 3 (one earlier)
-        d = new Delete(row1);
-        d.deleteColumns(FAM2, indexed_qualifer, ts3);
-        primary.delete(d);
-
-        // we shouldn't find that entry, but we should find [value3][null] since deleteColumns removes
-        // all the entries for that column
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value3, value4);
-
-        // -----------------------------------------------
-        // then we should have the rest of the inserts at their appropriate timestamps. Everything else
-        // should be exactly the same, except we shouldn't see ts0 anymore at ts2
-        // -----------------------------------------------
-
-        // check the entry at 5
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value5, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(v5_1, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts5, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts5, value5);
-
-        // check the entry at 6
-        pairs.clear();
-        pairs.add(new Pair<byte[], CoveredColumn>(value6, col1));
-        pairs.add(new Pair<byte[], CoveredColumn>(v6_1, col2));
-        expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts6, pairs);
-        IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts6, value5);
-
-        // cleanup
-        closeAndCleanupTables(primary, index1);
-    }
-
-    /**
-     * Create the primary table (to which you should write), setup properly for indexing the given
-     * {@link ColumnGroup}s. Also creates the necessary index tables to match the passed groups.
-     * @param groups {@link ColumnGroup}s to index, creating one index table per column group.
-     * @return reference to the primary table
-     * @throws IOException if there is an issue communicating with HBase
-     */
-    private HTable createSetupTables(ColumnGroup... groups) throws IOException {
-        HBaseAdmin admin = UTIL.getHBaseAdmin();
-        // setup the index
-        CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-        for (ColumnGroup group : groups) {
-            builder.addIndexGroup(group);
-            // create the index tables
-            CoveredColumnIndexer.createIndexTable(admin, group.getTable());
-        }
-
-        // setup the primary table
-        String indexedTableName = Bytes.toString(TestTable.getTableName());
-        @SuppressWarnings("deprecation")
-        HTableDescriptor pTable = new HTableDescriptor(indexedTableName);
-        pTable.addFamily(new HColumnDescriptor(FAM));
-        pTable.addFamily(new HColumnDescriptor(FAM2));
-        builder.build(pTable);
-
-        // create the primary table
-        admin.createTable(pTable);
-        HTable primary = new HTable(UTIL.getConfiguration(), indexedTableName);
-        primary.setAutoFlush(false);
-        return primary;
-    }
-
-    private void closeAndCleanupTables(HTable... tables) throws IOException {
-        if (tables == null) {
-            return;
-        }
-
-        for (HTable table : tables) {
-            table.close();
-            UTIL.deleteTable(table.getTableName());
-        }
-    }
-}
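
The deleted testOverwritingPutsCorrectlyGetIndexed relied on the HBase behavior its javadoc describes: a Put at the identical (row, family, qualifier, timestamp) coordinates silently replaces the earlier cell. A standalone sketch of that behavior against any HBase 1.x table (the table name "demo" and family "f" are illustrative and assumed to already exist):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "demo");
    byte[] row = Bytes.toBytes("r"), fam = Bytes.toBytes("f"), qual = Bytes.toBytes("q");
    long ts = 10L;
    Put p1 = new Put(row);
    p1.add(fam, qual, ts, Bytes.toBytes("v1"));
    table.put(p1);
    Put p2 = new Put(row);
    p2.add(fam, qual, ts, Bytes.toBytes("v2")); // same row/family/qualifier/timestamp
    table.put(p2);
    // the second write wins: the read returns "v2" and the old "v1" cell is clobbered
    byte[] current = table.get(new Get(row)).getValue(fam, qual);
    table.close();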

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fff7963a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/EndtoEndIndexingWithCompressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/EndtoEndIndexingWithCompressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/EndtoEndIndexingWithCompressionIT.java
deleted file mode 100644
index b0aeea3..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/EndtoEndIndexingWithCompressionIT.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.example;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
-import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
-import org.junit.BeforeClass;
-
-/**
- * Test secondary indexing from an end-to-end perspective (client to server to index table).
- */
-
-
-public class EndtoEndIndexingWithCompressionIT extends EndToEndCoveredIndexingIT {
-
-    @BeforeClass
-    public static void setupCluster() throws Exception {
-        setupConfig();
-        //add our codec and enable WAL compression
-        Configuration conf = UTIL.getConfiguration();
-        conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY,
-                IndexedWALEditCodec.class.getName());
-        conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-        //start the mini-cluster
-        UTIL.startMiniCluster();
-        initDriver();
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fff7963a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/FailWithoutRetriesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/FailWithoutRetriesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/FailWithoutRetriesIT.java
deleted file mode 100644
index 6f60f7f..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/example/FailWithoutRetriesIT.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
- * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.example;
-
-import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.builder.BaseIndexCodec;
-import org.apache.phoenix.hbase.index.covered.IndexMetaData;
-import org.apache.phoenix.hbase.index.covered.IndexUpdate;
-import org.apache.phoenix.hbase.index.covered.TableState;
-import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * If {@link DoNotRetryIOException} is not subclassed correctly (with the {@link String} constructor),
- * {@link MultiResponse#readFields(java.io.DataInput)} will not correctly deserialize the exception, and just return
- * <tt>null</tt> to the client, which then just goes and retries.
- */
-@Category(NeedsOwnMiniClusterTest.class)
-public class FailWithoutRetriesIT {
-
-    private static final Log LOG = LogFactory.getLog(FailWithoutRetriesIT.class);
-    @Rule
-    public TableName table = new TableName();
-
-    private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-    private String getIndexTableName() {
-        return Bytes.toString(table.getTableName()) + "_index";
-    }
-
-    public static class FailingTestCodec extends BaseIndexCodec {
-
-        @Override
-        public Iterable<IndexUpdate> getIndexDeletes(TableState state, IndexMetaData context) throws IOException {
-            throw new RuntimeException("Intentionally failing deletes for " + FailWithoutRetriesIT.class.getName());
-        }
-
-        @Override
-        public Iterable<IndexUpdate> getIndexUpserts(TableState state, IndexMetaData context) throws IOException {
-            throw new RuntimeException("Intentionally failing upserts for " + FailWithoutRetriesIT.class.getName());
-        }
-    }
-
-    @BeforeClass
-    public static void setupCluster() throws Exception {
-        // setup and verify the config
-        Configuration conf = UTIL.getConfiguration();
-        setUpConfigForMiniCluster(conf);
-        IndexTestingUtils.setupConfig(conf);
-        IndexManagementUtil.ensureMutableIndexingCorrectlyConfigured(conf);
-        // start the cluster
-        UTIL.startMiniCluster();
-    }
-
-    /**
-     * If this test times out, then we didn't fail quickly enough. {@link Indexer} maybe isn't rethrowing the exception
-     * correctly?
-     * <p>
-     * We use a custom codec to enforce the thrown exception.
-     * 
-     * @throws Exception
-     */
-    @Test(timeout = 300000)
-    public void testQuickFailure() throws Exception {
-        // incorrectly setup indexing for the primary table - target index table doesn't exist, which
-        // should quickly return to the client
-        byte[] family = Bytes.toBytes("family");
-        ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
-        // values are [col1]
-        fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
-        CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-        // add the index family
-        builder.addIndexGroup(fam1);
-        // usually, we would create the index table here, but we don't for the sake of the test.
-
-        // setup the primary table
-        String primaryTable = Bytes.toString(table.getTableName());
-        @SuppressWarnings("deprecation")
-        HTableDescriptor pTable = new HTableDescriptor(primaryTable);
-        pTable.addFamily(new HColumnDescriptor(family));
-        // override the codec so we can use our test one
-        builder.build(pTable, FailingTestCodec.class);
-
-        // create the primary table
-        HBaseAdmin admin = UTIL.getHBaseAdmin();
-        admin.createTable(pTable);
-        Configuration conf = new Configuration(UTIL.getConfiguration());
-        // up the number of retries/wait time to make it obvious that we are failing with retries here
-        conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
-        conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
-        HTable primary = new HTable(conf, primaryTable);
-        primary.setAutoFlush(false, true);
-
-        // do a simple put that should be indexed
-        Put p = new Put(Bytes.toBytes("row"));
-        p.add(family, null, Bytes.toBytes("value"));
-        primary.put(p);
-        try {
-            primary.flushCommits();
-            fail("Shouldn't have gotten a successful write to the primary 
table");
-        } catch (RetriesExhaustedWithDetailsException e) {
-            LOG.info("Correclty got a failure of the put!");
-        }
-        primary.close();
-    }
-}
\ No newline at end of file

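The class javadoc above hinges on DoNotRetryIOException subclasses exposing a String constructor. A minimal sketch of a conforming subclass; the name IndexWriteFailureException is hypothetical:

    import org.apache.hadoop.hbase.DoNotRetryIOException;

    public class IndexWriteFailureException extends DoNotRetryIOException {
        // The String constructor is the load-bearing part: the HBase client
        // re-instantiates remote exceptions reflectively via (String), and
        // without it deserialization yields null and the client keeps retrying.
        public IndexWriteFailureException(String message) {
            super(message);
        }
    }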
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fff7963a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/ColumnGroup.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/ColumnGroup.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/ColumnGroup.java
deleted file mode 100644
index ba2b092..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/ColumnGroup.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.hbase.index.covered.example;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-/**
- * A collection of {@link CoveredColumn}s that should be included in a covered index.
- */
-public class ColumnGroup implements Iterable<CoveredColumn> {
-
-  private List<CoveredColumn> columns = new ArrayList<CoveredColumn>();
-  private String table;
-
-  public ColumnGroup(String tableName) {
-    this.table = tableName;
-  }
-
-  public void add(CoveredColumn column) {
-    this.columns.add(column);
-  }
-
-  public String getTable() {
-    return table;
-  }
-
-  /**
-   * Check to see if any {@link CoveredColumn} in <tt>this</tt> matches the given family
-   * @param family to check
-   * @return <tt>true</tt> if any column covers the family
-   */
-  public boolean matches(String family) {
-    for (CoveredColumn column : columns) {
-      if (column.matchesFamily(family)) {
-        return true;
-      }
-    }
-
-    return false;
-  }
-
-  /**
-   * Check to see if any column matches the family/qualifier pair
-   * @param family family to match against
-   * @param qualifier qualifier to match, can be <tt>null</tt>, in which case we match all
-   *          qualifiers
-   * @return <tt>true</tt> if any column matches, <tt>false</tt> otherwise
-   */
-  public boolean matches(byte[] family, byte[] qualifier) {
-    // families are always printable characters
-    String fam = Bytes.toString(family);
-    for (CoveredColumn column : columns) {
-      if (column.matchesFamily(fam)) {
-        // check the qualifier
-        if (column.matchesQualifier(qualifier)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  /**
-   * @return the number of columns in the group
-   */
-  public int size() {
-    return this.columns.size();
-  }
-
-  @Override
-  public Iterator<CoveredColumn> iterator() {
-    return columns.iterator();
-  }
-
-  /**
-   * @param index index of the column to get
-   * @return the specified column
-   */
-  public CoveredColumn getColumnForTesting(int index) {
-    return this.columns.get(index);
-  }
-
-  @Override
-  public String toString() {
-    return "ColumnGroup - table: " + table + ", columns: " + columns;
-  }
-
-  public List<CoveredColumn> getColumns() {
-    return this.columns;
-  }
-}
\ No newline at end of file

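A hedged usage sketch of the ColumnGroup API deleted above; the table and family names are illustrative, and the import path is the pre-removal package shown in the file:

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.hbase.index.covered.example.ColumnGroup;
    import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;

    public class ColumnGroupSketch {
        public static void main(String[] args) {
            ColumnGroup group = new ColumnGroup("demo_index");
            // a null qualifier covers every qualifier in the family
            group.add(new CoveredColumn("fam1", null));
            group.add(new CoveredColumn("fam2", Bytes.toBytes("qual")));

            System.out.println(group.matches("fam1"));                                    // true: family-level match
            System.out.println(group.matches(Bytes.toBytes("fam1"), Bytes.toBytes("x"))); // true: fam1 covers all qualifiers
            System.out.println(group.matches(Bytes.toBytes("fam2"), Bytes.toBytes("y"))); // false: only fam2:qual is covered
        }
    }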
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fff7963a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumn.java
deleted file mode 100644
index 78a1042..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumn.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.hbase.index.covered.example;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
-
-/**
- * A single Column (either a Column Family or a full Family:Qualifier pair) in a {@link ColumnGroup}.
- * If no column qualifier is specified (null), matches all known qualifiers of the family.
- */
-public class CoveredColumn extends ColumnReference {
-
-  public static final String SEPARATOR = ":";
-  String familyString;
-  private final int hashCode;
-
-  public CoveredColumn(byte[] family, byte[] qualifier){
-    this(Bytes.toString(family), qualifier);
-  }
-
-  public CoveredColumn(String family, byte[] qualifier) {
-    super(Bytes.toBytes(family), qualifier == null ? ColumnReference.ALL_QUALIFIERS : qualifier);
-    this.familyString = family;
-    this.hashCode = calcHashCode(family, qualifier);
-  }
-
-  public static CoveredColumn parse(String spec) {
-    int sep = spec.indexOf(SEPARATOR);
-    if (sep < 0) {
-      throw new IllegalArgumentException(spec + " is not a valid specifier!");
-    }
-    String family = spec.substring(0, sep);
-    String qual = spec.substring(sep + 1);
-    byte[] column = qual.length() == 0 ? null : Bytes.toBytes(qual);
-    return new CoveredColumn(family, column);
-  }
-
-  public String serialize() {
-    return CoveredColumn.serialize(familyString, getQualifier());
-  }
-
-  public static String serialize(String first, byte[] second) {
-    String nextValue = first + CoveredColumn.SEPARATOR;
-    if (second != null) {
-      nextValue += Bytes.toString(second);
-    }
-    return nextValue;
-  }
-
-  /**
-   * @param family2 to check
-   * @return <tt>true</tt> if the passed family matches the family this column covers
-   */
-  public boolean matchesFamily(String family2) {
-    return this.familyString.equals(family2);
-  }
-
-  @Override
-  public int hashCode() {
-    return hashCode;
-  }
-
-  private static int calcHashCode(String familyString, byte[] qualifier) {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + familyString.hashCode();
-    if (qualifier != null) {
-      result = prime * result + Bytes.hashCode(qualifier);
-    }
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (!super.equals(obj)) return false;
-    if (getClass() != obj.getClass()) return false;
-    CoveredColumn other = (CoveredColumn) obj;
-    if (hashCode != other.hashCode) return false;
-    if (!familyString.equals(other.familyString)) return false;
-    return Bytes.equals(getQualifier(), other.getQualifier());
-  }
-
-  @Override
-  public String toString() {
-    String qualString = getQualifier() == null ? "null" : Bytes.toString(getQualifier());
-    return "CoveredColumn:[" + familyString + ":" + qualString + "]";
-  }
-}
\ No newline at end of file

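Similarly, a hedged sketch of the "family:qualifier" specifier format implied by parse() and serialize() above; all values are illustrative:

    import org.apache.phoenix.hbase.index.covered.example.CoveredColumn;

    public class CoveredColumnSketch {
        public static void main(String[] args) {
            // an empty qualifier ("fam1:") parses to a column covering all qualifiers
            CoveredColumn all = CoveredColumn.parse("fam1:");
            // "family:qualifier" parses to an exact pair
            CoveredColumn one = CoveredColumn.parse("fam2:qual");

            System.out.println(all.matchesFamily("fam1"));              // true
            String spec = one.serialize();                              // "fam2:qual"
            System.out.println(one.equals(CoveredColumn.parse(spec)));  // true: round-trips
        }
    }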