PHOENIX-1642 Make Phoenix master branch point to HBase 1.0.0

Conflicts:
        phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a29e163f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a29e163f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a29e163f

Branch: refs/heads/4.x-HBase-1.x
Commit: a29e163fcdf3ec06c98de423bfd34861af227307
Parents: 03fce01
Author: Enis Soztutar <e...@apache.org>
Authored: Thu Mar 19 12:07:16 2015 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Thu Mar 19 13:37:21 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/src/build/client.xml           |   4 +-
 phoenix-core/pom.xml                            |   4 +-
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  34 ++--
 .../phoenix/end2end/index/LocalIndexIT.java     |  32 ++--
 .../end2end/index/MutableIndexFailureIT.java    |   6 +-
 .../index/balancer/IndexLoadBalancerIT.java     |   6 +-
 .../phoenix/trace/PhoenixTraceReaderIT.java     |   2 +-
 .../phoenix/trace/PhoenixTracingEndToEndIT.java |  20 +-
 .../regionserver/IndexHalfStoreFileReader.java  |  41 ++++-
 .../IndexHalfStoreFileReaderGenerator.java      |  14 +-
 .../regionserver/IndexSplitTransaction.java     |  28 +--
 .../hbase/regionserver/KeyValueSkipListSet.java | 183 +++++++++++++++++++
 .../hbase/regionserver/LocalIndexMerger.java    |   4 +-
 .../hbase/regionserver/LocalIndexSplitter.java  |  29 +--
 .../apache/phoenix/compile/TraceQueryPlan.java  |  14 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   4 +-
 .../apache/phoenix/execute/BaseQueryPlan.java   |   2 +-
 .../apache/phoenix/execute/MutationState.java   |   4 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |  10 +-
 .../hbase/index/balancer/IndexLoadBalancer.java |   5 +
 .../hbase/index/covered/data/IndexMemStore.java |  27 +--
 .../index/covered/data/LazyValueGetter.java     |   5 +-
 .../example/CoveredColumnIndexCodec.java        |   6 +-
 .../filter/ApplyAndFilterDeletesFilter.java     |   8 +-
 .../index/covered/update/ColumnReference.java   |  10 +-
 .../ipc/PhoenixIndexRpcSchedulerFactory.java    |  19 +-
 .../index/scanner/FilteredKeyValueScanner.java  |  17 +-
 .../phoenix/hbase/index/scanner/Scanner.java    |   5 +-
 .../hbase/index/scanner/ScannerBuilder.java     |  10 +-
 .../hbase/index/wal/IndexedKeyValue.java        |  17 --
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   4 +-
 .../apache/phoenix/trace/TraceMetricSource.java |  15 +-
 .../org/apache/phoenix/trace/TraceReader.java   |   2 +-
 .../apache/phoenix/trace/TracingIterator.java   |   2 +-
 .../org/apache/phoenix/trace/TracingUtils.java  |   2 +-
 .../org/apache/phoenix/trace/util/NullSpan.java |  10 +-
 .../org/apache/phoenix/trace/util/Tracing.java  |  51 ++++--
 .../java/org/apache/phoenix/util/IndexUtil.java |  12 +-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |   4 +-
 .../PhoenixIndexRpcSchedulerFactoryTest.java    |   6 +-
 .../index/covered/TestLocalTableState.java      |   8 +-
 .../index/covered/data/TestIndexMemStore.java   |   5 +-
 .../index/write/TestWALRecoveryCaching.java     |  14 +-
 .../recovery/TestPerRegionIndexWriteCache.java  |  15 +-
 .../phoenix/trace/TraceMetricsSourceTest.java   |   4 +-
 phoenix-flume/pom.xml                           |   4 +-
 pom.xml                                         |  14 +-
 47 files changed, 481 insertions(+), 261 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-assembly/src/build/client.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/client.xml b/phoenix-assembly/src/build/client.xml
index f674331..101ccd6 100644
--- a/phoenix-assembly/src/build/client.xml
+++ b/phoenix-assembly/src/build/client.xml
@@ -46,8 +46,8 @@
         <include>jline:jline</include>
         <include>sqlline:sqlline</include>
         <include>org.apache.hbase:hbase*</include>
-        <include>org.cloudera.htrace:htrace-core</include>
-        <include>io.netty:netty</include>
+        <include>org.apache.htrace:htrace-core</include>
+        <include>io.netty:netty-all</include>
         <include>commons-codec:commons-codec</include>
       </includes>
     </dependencySet>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index a325b27..d4dc2e2 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -318,12 +318,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.cloudera.htrace</groupId>
+      <groupId>org.apache.htrace</groupId>
       <artifactId>htrace-core</artifactId>
     </dependency>
     <dependency>
       <groupId>io.netty</groupId>
-      <artifactId>netty</artifactId>
+      <artifactId>netty-all</artifactId>
     </dependency>
     <dependency>
       <groupId>commons-codec</groupId>
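
The htrace artifact moved from Cloudera's coordinates to Apache ones between these HBase versions, so the Maven groupId above and every Java import in the codebase change together; the class names themselves are stable. A minimal sketch of the import-level impact, mirroring the import rewrites later in this commit:

    // Only the package prefix changes; class names stay the same.
    // import org.cloudera.htrace.Trace;     // old: org.cloudera.htrace:htrace-core
    import org.apache.htrace.Trace;          // new: org.apache.htrace:htrace-core
    import org.apache.htrace.TraceScope;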

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 8cf8a8a..3b8ff29 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -48,6 +48,9 @@ import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.hbase.index.IndexTestingUtils;
 import org.apache.phoenix.hbase.index.TableName;
@@ -65,7 +68,7 @@ import org.mockito.Mockito;
 
 /**
 * For pre-0.94.9 instances, this class tests correctly deserializing WALEdits w/o compression. Post
- * 0.94.9 we can support a custom {@link WALEditCodec}, which handles reading/writing the compressed
+ * 0.94.9 we can support a custom  {@link WALCellCodec} which handles reading/writing the compressed
  * edits.
  * <p>
  * Most of the underlying work (creating/splitting the WAL, etc) is from
@@ -93,13 +96,12 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
   @Before
   public void setUp() throws Exception {
     setupCluster();
+    Path hbaseRootDir = UTIL.getDataTestDir();
     this.conf = HBaseConfiguration.create(UTIL.getConfiguration());
     this.fs = UTIL.getDFSCluster().getFileSystem();
     this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
    this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
     this.logDir = new Path(this.hbaseRootDir, HConstants.HREGION_LOGDIR_NAME);
-    // reset the log reader to ensure we pull the one from this config
-    HLogFactory.resetLogReaderClass();
   }
 
   private void setupCluster() throws Exception {
@@ -133,11 +135,11 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
   protected void startCluster() throws Exception {
     UTIL.startMiniDFSCluster(3);
     UTIL.startMiniZKCluster();
-    UTIL.startMiniHBaseCluster(1, 1);
 
    Path hbaseRootDir = UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
     LOG.info("hbase.rootdir=" + hbaseRootDir);
     UTIL.getConfiguration().set(HConstants.HBASE_DIR, hbaseRootDir.toString());
+    UTIL.startMiniHBaseCluster(1, 1);
   }
 
   @After
@@ -183,8 +185,11 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     // create the region + its WAL
     HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
     region0.close();
-    region0.getLog().closeAndDelete();
-    HLog wal = createWAL(this.conf);
+    region0.getWAL().close();
+
+    WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234");
+
+    WAL wal = createWAL(this.conf, walFactory);
     RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
     // mock out some of the internals of the RSS, so we can run CPs
     Mockito.when(mockRS.getWAL(null)).thenReturn(wal);
@@ -206,15 +211,13 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     // we should then see the server go down
     Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
       Mockito.any(Exception.class));
-    region.close(true);
-    wal.close();
 
     // then create the index table so we are successful on WAL replay
    CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);
 
     // run the WAL split and setup the region
-    runWALSplit(this.conf);
-    HLog wal2 = createWAL(this.conf);
+    runWALSplit(this.conf, walFactory);
+    WAL wal2 = createWAL(this.conf, walFactory);
    HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);
 
     // initialize the region - this should replay the WALEdits from the WAL
@@ -257,8 +260,9 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
    * @return WAL with retries set down from 5 to 1 only.
    * @throws IOException
    */
-  private HLog createWAL(final Configuration c) throws IOException {
-    HLog wal = HLogFactory.createHLog(FileSystem.get(c), logDir, "localhost,1234", c);
+  private WAL createWAL(final Configuration c, WALFactory walFactory) throws IOException {
+    WAL wal = walFactory.getWAL(new byte[]{});
+
    // Set down maximum recovery so we dfsclient doesn't linger retrying something
    // long gone.
    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);
@@ -271,11 +275,11 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
    * @return The single split file made
    * @throws IOException
    */
-  private Path runWALSplit(final Configuration c) throws IOException {
+  private Path runWALSplit(final Configuration c, WALFactory walFactory) throws IOException {
     FileSystem fs = FileSystem.get(c);
     
-    List<Path> splits = HLogSplitter.split(this.hbaseRootDir, new Path(this.logDir, "localhost,1234"),
-        this.oldLogDir, fs, c);
+    List<Path> splits = WALSplitter.split(this.hbaseRootDir, new Path(this.logDir, "localhost,1234"),
+        this.oldLogDir, fs, c, walFactory);
     // Split should generate only 1 file since there's only 1 region
     assertEquals("splits=" + splits, 1, splits.size());
     // Make sure the file exists
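
The bulk of this test's churn is HBase 1.0's WAL refactor: HLog, HLogFactory, and HLogSplitter give way to the WAL interface, WALFactory, and WALSplitter. A minimal sketch of the new create/split flow under the same assumptions as the test above (factory id "localhost,1234"; hbaseRootDir, logDir, and oldLogDir as set up in setUp()):

    // Obtain a WAL through a factory rather than HLogFactory.createHLog(...).
    WALFactory walFactory = new WALFactory(conf, null, "localhost,1234"); // null = no listeners
    WAL wal = walFactory.getWAL(new byte[]{});  // WAL handle keyed by a region identifier

    // Log splitting now goes through WALSplitter and requires the factory:
    List<Path> splits = WALSplitter.split(hbaseRootDir,
        new Path(logDir, "localhost,1234"), oldLogDir, FileSystem.get(conf), conf, walFactory);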

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index a7b7655..5e01510 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -37,9 +37,8 @@ import java.util.concurrent.CountDownLatch;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
@@ -727,24 +726,27 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
             
             HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
             for (int i = 1; i < 5; i++) {
-                CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
                 admin.split(Bytes.toBytes(TestUtil.DEFAULT_DATA_TABLE_NAME), ByteUtil.concat(Bytes.toBytes(strings[3*i])));
                 List<HRegionInfo> regionsOfUserTable =
-                        MetaReader.getTableRegions(ct, TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+                        MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
+                                TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
 
                 while (regionsOfUserTable.size() != (4+i)) {
                     Thread.sleep(100);
-                    regionsOfUserTable = MetaReader.getTableRegions(ct, TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+                    regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                            admin.getConnection(), TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
                 }
                 assertEquals(4+i, regionsOfUserTable.size());
                TableName indexTable =
                        TableName.valueOf(MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME));
                List<HRegionInfo> regionsOfIndexTable =
-                        MetaReader.getTableRegions(ct, indexTable, false);
+                        MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                                admin.getConnection(), indexTable, false);
 
                 while (regionsOfIndexTable.size() != (4 + i)) {
                     Thread.sleep(100);
-                    regionsOfIndexTable = MetaReader.getTableRegions(ct, indexTable, false);
+                    regionsOfIndexTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                            admin.getConnection(), indexTable, false);
                 }
                 assertEquals(4 + i, regionsOfIndexTable.size());
                 String query = "SELECT t_id,k1,v1 FROM " + 
TestUtil.DEFAULT_DATA_TABLE_NAME;
@@ -847,32 +849,32 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
             assertTrue(rs.next());
 
            HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
-            CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
             List<HRegionInfo> regionsOfUserTable =
-                    MetaReader.getTableRegions(ct,
+                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
                        TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
            admin.mergeRegions(regionsOfUserTable.get(0).getEncodedNameAsBytes(),
                 regionsOfUserTable.get(1).getEncodedNameAsBytes(), false);
             regionsOfUserTable =
-                    MetaReader.getTableRegions(ct,
+                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
                        TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
 
             while (regionsOfUserTable.size() != 3) {
                 Thread.sleep(100);
-                regionsOfUserTable =
-                        MetaReader.getTableRegions(ct,
-                            TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+                regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                        admin.getConnection(), TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
             }
             assertEquals(3, regionsOfUserTable.size());
             TableName indexTable =
                    TableName.valueOf(MetaDataUtil
                            .getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME));
             List<HRegionInfo> regionsOfIndexTable =
-                    MetaReader.getTableRegions(ct, indexTable, false);
+                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                            admin.getConnection(), indexTable, false);
 
             while (regionsOfIndexTable.size() != 3) {
                 Thread.sleep(100);
-                regionsOfIndexTable = MetaReader.getTableRegions(ct, indexTable, false);
+                regionsOfIndexTable = MetaTableAccessor.getTableRegions(
+                        getUtility().getZooKeeperWatcher(), admin.getConnection(), indexTable, false);
             }
             assertEquals(3, regionsOfIndexTable.size());
             String query = "SELECT t_id,k1,v1 FROM " + 
TestUtil.DEFAULT_DATA_TABLE_NAME;
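
The recurring rewrite in this file is the catalog-access migration: HBase 1.0 drops CatalogTracker and MetaReader in favor of MetaTableAccessor, which reads hbase:meta through a ZooKeeperWatcher plus a client Connection and leaves no tracker object to construct per call. A before/after sketch with the test's own variables:

    // Before (HBase 0.98):
    // CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
    // List<HRegionInfo> regions = MetaReader.getTableRegions(ct, tableName, false);

    // After (HBase 1.0):
    List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(
            getUtility().getZooKeeperWatcher(), admin.getConnection(), tableName, false);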

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index d11c059..dfc7ffb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -304,7 +304,8 @@ public class MutableIndexFailureIT extends BaseTest {
         Collection<ServerName> rss = cluster.getClusterStatus().getServers();
         HBaseAdmin admin = this.util.getHBaseAdmin();
         List<HRegionInfo> regions = admin.getTableRegions(catalogTable);
-        ServerName catalogRS = cluster.getServerHoldingRegion(regions.get(0).getRegionName());
+        ServerName catalogRS = cluster.getServerHoldingRegion(regions.get(0).getTable(),
+                regions.get(0).getRegionName());
         ServerName metaRS = cluster.getServerHoldingMeta();
         ServerName rsToBeKilled = null;
         
@@ -324,7 +325,8 @@ public class MutableIndexFailureIT extends BaseTest {
         this.util.waitFor(30000, 200, new Waiter.Predicate<Exception>() {
             @Override
             public boolean evaluate() throws Exception {
-              ServerName sn = cluster.getServerHoldingRegion(indexRegion.getRegionName());
+              ServerName sn = cluster.getServerHoldingRegion(indexRegion.getTable(),
+                      indexRegion.getRegionName());
               return (sn != null && sn.equals(dstRS));
             }
           });
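
The only change here is that HBase 1.0's MiniHBaseCluster.getServerHoldingRegion takes the table name alongside the region name. Sketch (region standing in for regions.get(0) or indexRegion above):

    // Before: cluster.getServerHoldingRegion(region.getRegionName());
    ServerName sn = cluster.getServerHoldingRegion(region.getTable(), region.getRegionName());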

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java
index d534b6a..449dccf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java
@@ -29,10 +29,10 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -447,8 +447,8 @@ public class IndexLoadBalancerIT {
             throws IOException, InterruptedException {
 
         List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations =
-                MetaReader.getTableRegionsAndLocations(master.getCatalogTracker(), TableName
-                        .valueOf(tableName));
+                MetaTableAccessor.getTableRegionsAndLocations(master.getZooKeeper(), master.getConnection(),
+                        TableName.valueOf(tableName));
         List<Pair<byte[], ServerName>> startKeyAndLocationPairs =
                 new ArrayList<Pair<byte[], ServerName>>(tableRegionsAndLocations.size());
         Pair<byte[], ServerName> startKeyAndLocation = null;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
index 1308c13..2315074 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
-import org.cloudera.htrace.Span;
+import org.apache.htrace.Span;
 import org.junit.Test;
 
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
index 05d9e41..8febfff 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
@@ -35,18 +35,18 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.Span;
+import org.apache.htrace.SpanReceiver;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+import org.apache.htrace.impl.ProbabilitySampler;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.metrics.Metrics;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.SpanReceiver;
-import org.cloudera.htrace.Trace;
-import org.cloudera.htrace.TraceScope;
-import org.cloudera.htrace.impl.ProbabilitySampler;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -349,7 +349,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         });
         assertTrue("Didn't find the parallel scanner in the tracing", found);
     }
-    
+
     @Test
     public void testCustomAnnotationTracing() throws Exception {
        final String customAnnotationKey = "myannot";
@@ -375,7 +375,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         stmt.execute();
         conn.commit();
         conn.rollback();
-        
+
         // setup for next set of updates
         stmt.setString(1, "key2");
         stmt.setLong(2, 2);
@@ -456,10 +456,10 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
               return currentTrace.toString().contains(annotationKey + " - " + annotationValue);
             }
         });
-        
+
         assertTrue("Didn't find the custom annotation in the tracing", 
tracingComplete);
     }
-    
+
    private boolean checkStoredTraces(Connection conn, TraceChecker checker) throws Exception {
         TraceReader reader = new TraceReader(conn);
         int retries = 0;
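
Beyond the package swap, the htrace span API keeps the same shape in 3.x. A minimal sketch of opening and closing a traced scope with the relocated classes (the span name is illustrative):

    TraceScope scope = Trace.startSpan("phoenix-operation", Sampler.ALWAYS);
    try {
        // traced work runs here; spans started on this thread become children
    } finally {
        scope.close(); // ends the span and detaches it from the current thread
    }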

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 172486d..654daf0 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -25,11 +25,13 @@ import java.util.Map.Entry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -159,7 +161,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return getChangedKey(delegate.getKeyValue(), changeBottomKeys);
             }
             
-            private ByteBuffer getChangedKey(KeyValue kv, boolean changeBottomKeys) {
+            private ByteBuffer getChangedKey(Cell kv, boolean changeBottomKeys) {
                 // new KeyValue(row, family, qualifier, timestamp, type, value)
                byte[] newRowkey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(kv, changeBottomKeys);
                 KeyValue newKv =
@@ -171,7 +173,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return keyBuffer;
             }
 
-            private byte[] getNewRowkeyByRegionStartKeyReplacedWithSplitKey(KeyValue kv, boolean changeBottomKeys) {
+            private byte[] getNewRowkeyByRegionStartKeyReplacedWithSplitKey(Cell kv, boolean changeBottomKeys) {
                 int lenOfRemainingKey = kv.getRowLength() - offset;
                byte[] keyReplacedStartKey = new byte[lenOfRemainingKey + splitRow.length];
                System.arraycopy(changeBottomKeys ? new byte[splitRow.length] : splitRow, 0,
@@ -202,11 +204,11 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return Bytes.toStringBinary(getValue());
             }
 
-            public KeyValue getKeyValue() {
+            public Cell getKeyValue() {
                 if (atEnd) {
                     return null;
                 }
-                KeyValue kv = delegate.getKeyValue();
+                Cell kv = delegate.getKeyValue();
                 boolean changeBottomKeys =
                        regionInfo.getStartKey().length == 0 && splitRow.length != offset;
                 if (!top) {
@@ -221,7 +223,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                                kv.getQualifierOffset(), kv.getQualifierLength(),
                                kv.getTimestamp(), Type.codeToType(kv.getTypeByte()),
                                kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(),
-                                kv.getTags());
+                                kv.getTagsArray(), kv.getTagsOffset(), kv.getTagsLength());
                 return changedKv;
             }
 
@@ -251,6 +253,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
             }
 
            public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
+
                 if (top) {
                     byte[] fk = getFirstKey();
                    // This will be null when the file is empty in which we can not seekBefore to
@@ -262,8 +265,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                         return false;
                     }
                     KeyValue replacedKey = getKeyPresentInHFiles(key);
-                    return this.delegate.seekBefore(replacedKey.getBuffer(),
-                        replacedKey.getKeyOffset(), replacedKey.getKeyLength());
+                    return this.delegate.seekBefore(replacedKey);
                 } else {
                    // The equals sign isn't strictly necessary just here to be consistent with
                     // seekTo
@@ -274,6 +276,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return this.delegate.seekBefore(key, offset, length);
             }
 
+            @Override
+            public boolean seekBefore(Cell cell) throws IOException {
+                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+                return seekBefore(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+            }
+
             public boolean seekTo() throws IOException {
                 boolean b = delegate.seekTo();
                 if (!b) {
@@ -328,6 +336,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return delegate.seekTo(key, offset, length);
             }
 
+            @Override
+            public int seekTo(Cell cell) throws IOException {
+                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+                return seekTo(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+            }
+
             public int reseekTo(byte[] key) throws IOException {
                 return reseekTo(key, 0, key.length);
             }
@@ -355,6 +369,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return delegate.reseekTo(key, offset, length);
             }
 
+            @Override
+            public int reseekTo(Cell cell) throws IOException {
+                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+                return reseekTo(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+            }
+
             public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
                 return this.delegate.getReader();
             }
@@ -373,7 +393,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
         };
     }
 
-    private boolean isSatisfiedMidKeyCondition(KeyValue kv) {
+    private boolean isSatisfiedMidKeyCondition(Cell kv) {
         if (CellUtil.isDelete(kv) && kv.getValueLength() == 0) {
            // In case of a Delete type KV, let it be going to both the daughter regions.
            // No problems in doing so. In the correct daughter region where it belongs to, this delete
@@ -428,9 +448,10 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                && keyValue.getTimestamp() == HConstants.LATEST_TIMESTAMP
                && Bytes.compareTo(keyValue.getRowArray(), keyValue.getRowOffset(),
                     keyValue.getRowLength(), splitRow, 0, splitRow.length) == 0
-                && keyValue.isDeleteFamily()) {
+                && CellUtil.isDeleteFamily(keyValue)) {
             KeyValue createFirstDeleteFamilyOnRow =
-                    KeyValue.createFirstDeleteFamilyOnRow(regionStartKeyInHFile, keyValue.getFamily());
+                    KeyValueUtil.createFirstDeleteFamilyOnRow(regionStartKeyInHFile,
+                            keyValue.getFamily());
             return createFirstDeleteFamilyOnRow;
         }
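
Two Cell-interface changes drive the edits in this file: scanner entry points now accept Cell, bridged back to the existing byte[]-based logic via KeyValueUtil.ensureKeyValue, and copying accessors such as getTags() are replaced by array/offset/length triplets. A sketch of the zero-copy accessor pattern (kv is a Cell, as above):

    byte[] tags = kv.getTagsArray();          // shared backing array; do not mutate
    int off = kv.getTagsOffset();             // where this cell's tags begin
    int len = kv.getTagsLength();             // byte length of this cell's tags
    byte[] copy = Bytes.copy(tags, off, len); // explicit copy only when one is needed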
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 718f820..1284dcf 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -80,7 +80,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
         HRegionInfo childRegion = region.getRegionInfo();
         byte[] splitKey = null;
         if (reader == null && r != null) {
-            Scan scan = MetaReader.getScanForTableName(tableName);
+            Scan scan = MetaTableAccessor.getScanForTableName(tableName);
             SingleColumnValueFilter scvf = null;
             if (Reference.isTopFileRegion(r.getFileRegion())) {
                 scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
@@ -107,8 +107,8 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                 }
                 if (result == null || result.isEmpty()) {
                     Pair<HRegionInfo, HRegionInfo> mergeRegions =
-                            MetaReader.getRegionsFromMergeQualifier(ctx.getEnvironment()
-                                    .getRegionServerServices().getCatalogTracker(),
+                            MetaTableAccessor.getRegionsFromMergeQualifier(ctx.getEnvironment()
+                                    .getRegionServerServices().getConnection(),
                                region.getRegionName());
                    if (mergeRegions == null || mergeRegions.getFirst() == null) return reader;
                     byte[] splitRow =
@@ -121,10 +121,8 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                         childRegion = mergeRegions.getSecond();
                        regionStartKeyInHFile = mergeRegions.getSecond().getStartKey();
                     }
-                    splitKey =
-                            KeyValue.createFirstOnRow(
-                                region.getStartKey().length == 0 ? new byte[region.getEndKey().length] : region
-                                        .getStartKey()).getKey();
+                    splitKey = KeyValue.createFirstOnRow(region.getStartKey().length == 0 ?
+                            new byte[region.getEndKey().length] : region.getStartKey()).getKey();
                 } else {
                    HRegionInfo parentRegion = HRegionInfo.getHRegionInfo(result);
                     regionStartKeyInHFile =

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
index 048506d..920380b 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
@@ -42,11 +42,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -286,11 +286,11 @@ public class IndexSplitTransaction extends SplitTransaction {
     // and assign the parent region.
     if (!testing) {
       if (metaEntries == null || metaEntries.isEmpty()) {
-        MetaEditor.splitRegion(server.getCatalogTracker(),
-            parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(),
-            daughterRegions.getSecond().getRegionInfo(), server.getServerName());
+        MetaTableAccessor.splitRegion(server.getConnection(), parent.getRegionInfo(),
+                daughterRegions.getFirst().getRegionInfo(),
+                daughterRegions.getSecond().getRegionInfo(), server.getServerName());
       } else {
-        offlineParentInMetaAndputMetaEntries(server.getCatalogTracker(),
+        offlineParentInMetaAndputMetaEntries(server.getConnection(),
          parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(), daughterRegions
              .getSecond().getRegionInfo(), server.getServerName(), metaEntries);
       }
@@ -415,10 +415,10 @@ public class IndexSplitTransaction extends SplitTransaction {
       if (services != null) {
         try {
           // add 2nd daughter first (see HBASE-4335)
-          services.postOpenDeployTasks(b, server.getCatalogTracker());
+          services.postOpenDeployTasks(b);
           // Should add it to OnlineRegions
           services.addToOnlineRegions(b);
-          services.postOpenDeployTasks(a, server.getCatalogTracker());
+          services.postOpenDeployTasks(a);
           services.addToOnlineRegions(a);
         } catch (KeeperException ke) {
           throw new IOException(ke);
@@ -583,7 +583,7 @@ public class IndexSplitTransaction extends SplitTransaction {
     return regions;
   }
 
-  private void offlineParentInMetaAndputMetaEntries(CatalogTracker catalogTracker,
+  private void offlineParentInMetaAndputMetaEntries(Connection conn,
       HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
       ServerName serverName, List<Mutation> metaEntries) throws IOException {
     List<Mutation> mutations = metaEntries;
@@ -592,19 +592,19 @@ public class IndexSplitTransaction extends SplitTransaction {
     copyOfParent.setSplit(true);
 
     //Put for parent
-    Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
-    MetaEditor.addDaughtersToPut(putParent, splitA, splitB);
+    Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
+    MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
     mutations.add(putParent);
     
     //Puts for daughters
-    Put putA = MetaEditor.makePutFromRegionInfo(splitA);
-    Put putB = MetaEditor.makePutFromRegionInfo(splitB);
+    Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
+    Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
 
    addLocation(putA, serverName, 1); //these are new regions, openSeqNum = 1 is fine.
     addLocation(putB, serverName, 1);
     mutations.add(putA);
     mutations.add(putB);
-    MetaEditor.mutateMetaTable(catalogTracker, mutations);
+    MetaTableAccessor.mutateMetaTable(conn, mutations);
   }
 
   public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
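
The same Connection-for-CatalogTracker substitution runs through the meta-editing path: MetaEditor's static helpers now live on MetaTableAccessor and take the server's Connection. A condensed sketch of the split bookkeeping above (names as in this class; location puts omitted):

    // Offline the parent and register both daughters in hbase:meta in one batch.
    Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
    MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
    mutations.add(putParent);
    mutations.add(MetaTableAccessor.makePutFromRegionInfo(splitA));
    mutations.add(MetaTableAccessor.makePutFromRegionInfo(splitB));
    MetaTableAccessor.mutateMetaTable(conn, mutations); // conn = server.getConnection()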

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
new file mode 100644
index 0000000..211aa10
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
@@ -0,0 +1,183 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.KeyValue;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.NavigableSet;
+import java.util.SortedSet;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+/**
+ * A {@link java.util.Set} of {@link KeyValue}s implemented on top of a
+ * {@link java.util.concurrent.ConcurrentSkipListMap}.  Works like a
+ * {@link java.util.concurrent.ConcurrentSkipListSet} in all but one regard:
+ * An add will overwrite if already an entry for the added key.  In other words,
+ * where CSLS does "Adds the specified element to this set if it is not already
+ * present.", this implementation "Adds the specified element to this set EVEN
+ * if it is already present overwriting what was there previous".  The call to
+ * add returns true if no value in the backing map or false if there was an
+ * entry with same key (though value may be different).
+ * <p>Otherwise,
+ * has same attributes as ConcurrentSkipListSet: e.g. tolerant of concurrent
+ * get and set and won't throw ConcurrentModificationException when iterating.
+ */
+public class KeyValueSkipListSet implements NavigableSet<KeyValue> {
+  private final ConcurrentNavigableMap<KeyValue, KeyValue> delegatee;
+
+  KeyValueSkipListSet(final KeyValue.KVComparator c) {
+    this.delegatee = new ConcurrentSkipListMap<KeyValue, KeyValue>(c);
+  }
+
+  KeyValueSkipListSet(final ConcurrentNavigableMap<KeyValue, KeyValue> m) {
+    this.delegatee = m;
+  }
+
+  public KeyValue ceiling(KeyValue e) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public Iterator<KeyValue> descendingIterator() {
+    return this.delegatee.descendingMap().values().iterator();
+  }
+
+  public NavigableSet<KeyValue> descendingSet() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue floor(KeyValue e) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public SortedSet<KeyValue> headSet(final KeyValue toElement) {
+    return headSet(toElement, false);
+  }
+
+  public NavigableSet<KeyValue> headSet(final KeyValue toElement,
+      boolean inclusive) {
+    return new KeyValueSkipListSet(this.delegatee.headMap(toElement, inclusive));
+  }
+
+  public KeyValue higher(KeyValue e) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public Iterator<KeyValue> iterator() {
+    return this.delegatee.values().iterator();
+  }
+
+  public KeyValue lower(KeyValue e) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue pollFirst() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue pollLast() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public SortedSet<KeyValue> subSet(KeyValue fromElement, KeyValue toElement) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public NavigableSet<KeyValue> subSet(KeyValue fromElement,
+      boolean fromInclusive, KeyValue toElement, boolean toInclusive) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public SortedSet<KeyValue> tailSet(KeyValue fromElement) {
+    return tailSet(fromElement, true);
+  }
+
+  public NavigableSet<KeyValue> tailSet(KeyValue fromElement, boolean inclusive) {
+    return new KeyValueSkipListSet(this.delegatee.tailMap(fromElement, inclusive));
+  }
+
+  public Comparator<? super KeyValue> comparator() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue first() {
+    return this.delegatee.get(this.delegatee.firstKey());
+  }
+
+  public KeyValue last() {
+    return this.delegatee.get(this.delegatee.lastKey());
+  }
+
+  public boolean add(KeyValue e) {
+    return this.delegatee.put(e, e) == null;
+  }
+
+  public boolean addAll(Collection<? extends KeyValue> c) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public void clear() {
+    this.delegatee.clear();
+  }
+
+  public boolean contains(Object o) {
+    //noinspection SuspiciousMethodCalls
+    return this.delegatee.containsKey(o);
+  }
+
+  public boolean containsAll(Collection<?> c) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public boolean isEmpty() {
+    return this.delegatee.isEmpty();
+  }
+
+  public boolean remove(Object o) {
+    return this.delegatee.remove(o) != null;
+  }
+
+  public boolean removeAll(Collection<?> c) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public boolean retainAll(Collection<?> c) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue get(KeyValue kv) {
+    return this.delegatee.get(kv);
+  }
+
+  public int size() {
+    return this.delegatee.size();
+  }
+
+  public Object[] toArray() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public <T> T[] toArray(T[] a) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+}
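
As the class javadoc states, add() deviates from ConcurrentSkipListSet by overwriting on key collision while returning whether the key was previously absent. A small usage sketch (same-package access assumed, since the constructors are package-private; row, fam, and qual are hypothetical byte[] values):

    KeyValueSkipListSet set = new KeyValueSkipListSet(KeyValue.COMPARATOR);
    KeyValue v1 = new KeyValue(row, fam, qual, 1L, Bytes.toBytes("v1"));
    KeyValue v2 = new KeyValue(row, fam, qual, 1L, Bytes.toBytes("v2"));
    set.add(v1);                 // true: no prior entry under this key
    boolean fresh = set.add(v2); // false: key existed, but v2 has replaced v1
    KeyValue cur = set.get(v1);  // lookup is by key, so this now returns v2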

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
index 6f8dd79..f074df7 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
@@ -23,8 +23,8 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -55,7 +55,7 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
                        .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
            TableName indexTable =
                    TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
-            if (!MetaReader.tableExists(rs.getCatalogTracker(), indexTable)) return;
+            if (!MetaTableAccessor.tableExists(rs.getConnection(), indexTable)) return;
            HRegion indexRegionA = IndexUtil.getIndexRegion(regionA, ctx.getEnvironment());
             if (indexRegionA == null) {
                 LOG.warn("Index region corresponindg to data region " + regionA

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index 2ac61cb..9af8251 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -17,17 +17,12 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
@@ -39,16 +34,20 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.parse.AlterIndexStatement;
 import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.schema.MetaDataClient;
-import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.List;
+
 public class LocalIndexSplitter extends BaseRegionObserver {
 
     private static final Log LOG = LogFactory.getLog(LocalIndexSplitter.class);
@@ -73,7 +72,7 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                        .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
            TableName indexTable =
                    TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
-            if (!MetaReader.tableExists(rss.getCatalogTracker(), indexTable)) return;
+            if (!MetaTableAccessor.tableExists(rss.getConnection(), indexTable)) return;
 
             HRegion indexRegion = IndexUtil.getIndexRegion(environment);
             if (indexRegion == null) {
@@ -105,14 +104,16 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                 copyOfParent.setOffline(true);
                 copyOfParent.setSplit(true);
                 // Put for parent
-                Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
-                MetaEditor.addDaughtersToPut(putParent, daughterRegions.getFirst().getRegionInfo(),
-                    daughterRegions.getSecond().getRegionInfo());
+                Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
+                MetaTableAccessor.addDaughtersToPut(putParent,
+                        daughterRegions.getFirst().getRegionInfo(),
+                        daughterRegions.getSecond().getRegionInfo());
                 metaEntries.add(putParent);
                 // Puts for daughters
-                Put putA = MetaEditor.makePutFromRegionInfo(daughterRegions.getFirst().getRegionInfo());
-                Put putB =
-                    MetaEditor.makePutFromRegionInfo(daughterRegions.getSecond().getRegionInfo());
+                Put putA = MetaTableAccessor.makePutFromRegionInfo(
+                        daughterRegions.getFirst().getRegionInfo());
+                Put putB = MetaTableAccessor.makePutFromRegionInfo(
+                        daughterRegions.getSecond().getRegionInfo());
                 st.addLocation(putA, rss.getServerName(), 1);
                 st.addLocation(putB, rss.getServerName(), 1);
                 metaEntries.add(putA);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
index 815ac1e..3b601b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.TraceScope;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.expression.Determinism;
@@ -58,9 +60,6 @@ import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.SizedUtil;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.TraceScope;
-import org.cloudera.htrace.impl.ProbabilitySampler;
 
 public class TraceQueryPlan implements QueryPlan {
 
@@ -124,14 +123,9 @@ public class TraceQueryPlan implements QueryPlan {
                 if(!first) return null;
                 TraceScope traceScope = conn.getTraceScope();
                 if (traceStatement.isTraceOn()) {
-                    double samplingRate = traceStatement.getSamplingRate();
-                    if (samplingRate >= 1.0) {
-                        conn.setSampler(Sampler.ALWAYS);
-                    } else if (samplingRate < 1.0 && samplingRate > 0.0) {
-                        conn.setSampler(new ProbabilitySampler(samplingRate));
-                    } else {
+                    conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
+                    if (conn.getSampler() == Sampler.NEVER) {
                         closeTraceScope(conn);
-                        conn.setSampler(Sampler.NEVER);
                     }
                    if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
                        traceScope = Tracing.startNewSpan(conn, "Enabling trace");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 25ac408..c3988a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -53,8 +53,8 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.Trace;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
 
 import com.google.common.collect.ImmutableList;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 94233c8..4ca2dee 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -66,7 +66,7 @@ import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.TraceScope;
 
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index b98d705..467746b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -54,8 +54,8 @@ import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.ServerUtil;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.Span;
+import org.apache.htrace.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index a4fc96b..1c3d1e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -65,9 +65,9 @@ import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy
 import org.apache.phoenix.hbase.index.write.recovery.TrackingParallelWriterIndexCommitter
 import org.apache.phoenix.trace.TracingUtils;
 import org.apache.phoenix.trace.util.NullSpan;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.Trace;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
 
 import com.google.common.collect.Multimap;
 
@@ -475,7 +475,7 @@ public class Indexer extends BaseRegionObserver {
    *         present
    */
   private IndexedKeyValue getFirstIndexedKeyValue(WALEdit edit) {
-    for (KeyValue kv : edit.getKeyValues()) {
+    for (Cell kv : edit.getCells()) {
       if (kv instanceof IndexedKeyValue) {
         return (IndexedKeyValue) kv;
       }
@@ -490,7 +490,7 @@ public class Indexer extends BaseRegionObserver {
    */
   private Collection<Pair<Mutation, byte[]>> extractIndexUpdate(WALEdit edit) {
     Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
-    for (KeyValue kv : edit.getKeyValues()) {
+    for (Cell kv : edit.getCells()) {
       if (kv instanceof IndexedKeyValue) {
         IndexedKeyValue ikv = (IndexedKeyValue) kv;
         indexUpdates.add(new Pair<Mutation, byte[]>(ikv.getMutation(), ikv.getIndexTable()));
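
[Editorial sketch] WALEdit.getKeyValues() is gone in HBase 1.0: edits are exposed as Cells, and the IndexedKeyValue marker is recovered with an instanceof check. A self-contained sketch of that pattern (the helper name is illustrative, not Phoenix code):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

    final class WalEdits {
        // Collect every cell of a given marker type from a WAL edit, the
        // same instanceof-based filtering the patched Indexer performs.
        static <T extends Cell> List<T> cellsOfType(WALEdit edit, Class<T> type) {
            List<T> found = new ArrayList<T>();
            for (Cell cell : edit.getCells()) { // was edit.getKeyValues()
                if (type.isInstance(cell)) {
                    found.add(type.cast(cell));
                }
            }
            return found;
        }
    }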

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java
index 296ff95..146028e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java
@@ -124,6 +124,11 @@ public class IndexLoadBalancer implements LoadBalancer {
     }
 
     @Override
+    public void onConfigurationChange(Configuration conf) {
+        setConf(conf);
+    }
+
+    @Override
     public void setClusterStatus(ClusterStatus st) {
         this.clusterStatus = st;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index 89489ec..7ae54ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -24,8 +24,10 @@ import java.util.SortedSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.IndexKeyValueSkipListSet;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
@@ -213,7 +215,7 @@ public class IndexMemStore implements KeyValueStore {
      * @return false if the key is null or if there is no data
      */
     @Override
-    public synchronized boolean seek(KeyValue key) {
+    public synchronized boolean seek(Cell key) {
       if (key == null) {
         close();
         return false;
@@ -221,16 +223,16 @@ public class IndexMemStore implements KeyValueStore {
 
       // kvset and snapshot will never be null.
       // if tailSet can't find anything, SortedSet is empty (not null).
-      kvsetIt = kvsetAtCreation.tailSet(key).iterator();
+      kvsetIt = kvsetAtCreation.tailSet(KeyValueUtil.ensureKeyValue(key)).iterator();
       kvsetItRow = null;
 
-      return seekInSubLists(key);
+      return seekInSubLists();
     }
 
     /**
      * (Re)initialize the iterators after a seek or a reseek.
      */
-    private synchronized boolean seekInSubLists(KeyValue key) {
+    private synchronized boolean seekInSubLists() {
       nextRow = getNext(kvsetIt);
       return nextRow != null;
     }
@@ -241,7 +243,7 @@ public class IndexMemStore implements KeyValueStore {
      * @return true if there is at least one KV to read, false otherwise
      */
     @Override
-    public synchronized boolean reseek(KeyValue key) {
+    public synchronized boolean reseek(Cell key) {
       /*
       * See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation. This
       * code is executed concurrently with flush and puts, without locks. Two points must be known
@@ -252,8 +254,9 @@ public class IndexMemStore implements KeyValueStore {
        * we iterated to and restore the reseeked set to at least that point.
        */
 
-      kvsetIt = kvsetAtCreation.tailSet(getHighest(key, kvsetItRow)).iterator();
-      return seekInSubLists(key);
+      KeyValue kv = KeyValueUtil.ensureKeyValue(key);
+      kvsetIt = kvsetAtCreation.tailSet(getHighest(kv, kvsetItRow)).iterator();
+      return seekInSubLists();
     }
 
     /*
@@ -272,18 +275,18 @@ public class IndexMemStore implements KeyValueStore {
     }
 
     @Override
-    public synchronized KeyValue peek() {
+    public synchronized Cell peek() {
       // DebugPrint.println(" MS@" + hashCode() + " peek = " + getLowest());
       return nextRow;
     }
 
     @Override
-    public synchronized KeyValue next() {
+    public synchronized Cell next() {
       if (nextRow == null) {
         return null;
       }
 
-      final KeyValue ret = nextRow;
+      final Cell ret = nextRow;
 
       // Advance the iterators
       nextRow = getNext(kvsetIt);
@@ -314,7 +317,7 @@ public class IndexMemStore implements KeyValueStore {
     }
 
     @Override
-    public boolean backwardSeek(KeyValue arg0) throws IOException {
+    public boolean backwardSeek(Cell arg0) throws IOException {
         throw new UnsupportedOperationException();
     }
 
@@ -324,7 +327,7 @@ public class IndexMemStore implements KeyValueStore {
     }
 
     @Override
-    public boolean seekToPreviousRow(KeyValue arg0) throws IOException {
+    public boolean seekToPreviousRow(Cell arg0) throws IOException {
         throw new UnsupportedOperationException();
     }
   }
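
[Editorial sketch] The scanner methods now accept any Cell, but the backing skip list stays KeyValue-typed, so arguments are normalized with KeyValueUtil.ensureKeyValue (a copy only when the Cell is not already a KeyValue). A small sketch of the bridging idiom, with illustrative names; the set is assumed to be built with KeyValue.COMPARATOR:

    import java.util.Iterator;
    import java.util.concurrent.ConcurrentSkipListSet;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    final class CellSeek {
        // Position an iterator at the first entry >= the requested cell,
        // converting to KeyValue because the set is KeyValue-keyed.
        static Iterator<KeyValue> seek(ConcurrentSkipListSet<KeyValue> kvset, Cell key) {
            KeyValue kv = KeyValueUtil.ensureKeyValue(key); // no-op for a KeyValue
            return kvset.tailSet(kv).iterator();
        }
    }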

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
index 21eb5cf..554b394 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
@@ -22,6 +22,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -78,9 +79,9 @@ public class LazyValueGetter implements ValueGetter {
       return null;
     }
     // there is a next value - we only care about the current value, so we can just snag that
-    KeyValue next = scan.next();
+    Cell next = scan.next();
     if (ref.matches(next)) {
-      return new ImmutableBytesPtr(next.getBuffer(), next.getValueOffset(), next.getValueLength());
+      return new ImmutableBytesPtr(next.getValueArray(), next.getValueOffset(), next.getValueLength());
     }
     return null;
   }
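
[Editorial sketch] KeyValue.getBuffer() assumed one contiguous backing array; the Cell contract gives each component its own array/offset/length triple, which is why the value is now read from getValueArray(). Copying a value out of an arbitrary Cell, as a sketch:

    import java.util.Arrays;

    import org.apache.hadoop.hbase.Cell;

    final class CellValues {
        // Copy the value bytes of a cell; unlike the removed
        // getBuffer()-based slicing, this works for any Cell implementation.
        static byte[] copyValue(Cell cell) {
            int offset = cell.getValueOffset();
            return Arrays.copyOfRange(cell.getValueArray(), offset,
                offset + cell.getValueLength());
        }
    }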

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
index 6750be2..658ce91 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map.Entry;
 
 import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -169,7 +170,8 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
   /**
    * Get the next batch of primary table values for the given columns
    * @param refs columns to match against
-   * @param state
+   * @param kvs
+   * @param currentRow
   * @return the total length of all values found and the entries to add for the index
    */
   @SuppressWarnings("deprecation")
@@ -187,7 +189,7 @@ private Pair<Integer, List<ColumnEntry>> getNextEntries(List<CoveredColumn> refs
         continue;
       }
       // there is a next value - we only care about the current value, so we can just snag that
-      KeyValue next = kvs.next();
+      Cell next = kvs.next();
+      if (ref.matchesFamily(next.getFamily()) && ref.matchesQualifier(next.getQualifier())) {
         byte[] v = next.getValue();
         totalValueLength += v.length;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
index de21d56..03ff760 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
@@ -205,8 +205,8 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
     @SuppressWarnings("deprecation")
     @Override
     public KeyValue getHint(KeyValue kv) {
-      return KeyValue.createLastOnRow(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
-        kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getBuffer(),
+      return KeyValueUtil.createLastOnRow(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
+        kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(),
         kv.getQualifierOffset(), kv.getQualifierLength());
     }
   }
@@ -259,7 +259,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
       if (deleteColumn == null) {
         return false;
       }
-      if (CellUtil.matchingFamily(deleteColumn, next) && deleteColumn.matchingQualifier(next)) {
+      if (CellUtil.matchingFamily(deleteColumn, next) && CellUtil.matchingQualifier(deleteColumn, next)) {
         // falls within the timestamp range
         if (deleteColumn.getTimestamp() >= next.getTimestamp()) {
           return true;
@@ -280,7 +280,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
       // keyvalue has the exact timestamp or is an older (smaller) timestamp, and we can allow that
       // one.
       if (pointDelete != null && CellUtil.matchingFamily(pointDelete, next)
-          && pointDelete.matchingQualifier(next)) {
+          && CellUtil.matchingQualifier(pointDelete, next)) {
         if (pointDelete.getTimestamp() == next.getTimestamp()) {
           return true;
         }
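
[Editorial sketch] Instance comparisons such as keyValue.matchingQualifier(other) become static CellUtil helpers that accept any two Cells. The column-equality test the patched hunks perform, extracted as a sketch:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;

    final class Columns {
        // True when two cells name the same column (family + qualifier),
        // the check the delete-tracking code above relies on.
        static boolean sameColumn(Cell a, Cell b) {
            return CellUtil.matchingFamily(a, b)
                && CellUtil.matchingQualifier(a, b);
        }
    }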

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
index 4ea7295..ddb5850 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.hbase.index.covered.update;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -88,10 +89,9 @@ public class ColumnReference implements Comparable<ColumnReference> {
         return this.qualifierPtr;
     }
 
-  @SuppressWarnings("deprecation")
-  public boolean matches(KeyValue kv) {
-    if (matchesFamily(kv.getRowArray(), kv.getFamilyOffset(), kv.getFamilyLength())) {
-      return matchesQualifier(kv.getRowArray(), kv.getQualifierOffset(), kv.getQualifierLength());
+  public boolean matches(Cell kv) {
+    if (matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength())) {
+      return matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
     }
     return false;
   }
@@ -175,4 +175,4 @@ public class ColumnReference implements Comparable<ColumnReference> {
   public String toString() {
     return "ColumnReference - " + Bytes.toString(getFamily()) + ":" + 
Bytes.toString(getQualifier());
   }
-}
\ No newline at end of file
+}
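
[Editorial note] Worth calling out: the old matches(KeyValue) read family and qualifier bytes out of getRowArray(), which only happened to work because a flat KeyValue backs every component with the same array. Under the Cell contract each component may live in a different array, so the patched version must use the matching accessor. A sketch of the corrected comparison (helper and parameters are illustrative):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.util.Bytes;

    final class ColumnMatch {
        // Compare a cell's family and qualifier against expected bytes,
        // reading each component from its own backing array as Cell requires.
        static boolean matches(Cell cell, byte[] family, byte[] qualifier) {
            return Bytes.equals(cell.getFamilyArray(), cell.getFamilyOffset(),
                    cell.getFamilyLength(), family, 0, family.length)
                && Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(),
                    cell.getQualifierLength(), qualifier, 0, qualifier.length);
        }
    }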

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
index 8e0b86f..1789b0e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
@@ -20,8 +20,10 @@ package org.apache.phoenix.hbase.index.ipc;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.RpcScheduler;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
@@ -43,24 +45,16 @@ public class PhoenixIndexRpcSchedulerFactory implements RpcSchedulerFactory {
             "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
 
     @Override
-    public RpcScheduler create(Configuration conf, RegionServerServices services) {
+    public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
         // create the delegate scheduler
         RpcScheduler delegate;
         try {
             // happens in <=0.98.4 where the scheduler factory is not visible
-            delegate = new SimpleRpcSchedulerFactory().create(conf, services);
+            delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
         } catch (IllegalAccessError e) {
             LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
             throw e;
         }
-        try {
-            // make sure we are on a version that phoenix can support
-            Class.forName("org.apache.hadoop.hbase.ipc.RpcExecutor");
-        } catch (ClassNotFoundException e) {
-            LOG.error(VERSION_TOO_OLD_FOR_INDEX_RPC
-                    + " Instead, using falling back to Simple RPC scheduling.");
-            return delegate;
-        }
 
         int indexHandlerCount = conf.getInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_HANDLER_COUNT);
         int minPriority = getMinPriority(conf);
@@ -85,6 +79,11 @@ public class PhoenixIndexRpcSchedulerFactory implements RpcSchedulerFactory {
         return scheduler;
     }
 
+    @Override
+    public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
+        return create(configuration, priorityFunction, null);
+    }
+
     public static int getMinPriority(Configuration conf) {
         return conf.getInt(QueryServices.MIN_INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY);
     }
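
[Editorial sketch] In HBase 1.0 the RpcSchedulerFactory contract takes a PriorityFunction and an Abortable rather than RegionServerServices, with a two-argument overload; the patch makes the three-argument form primary, forwards the other, and drops the reflective RpcExecutor probe that guarded pre-0.98.4 versions. A minimal pass-through factory showing the shape of the contract (the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.ipc.PriorityFunction;
    import org.apache.hadoop.hbase.ipc.RpcScheduler;
    import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
    import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;

    public class PassThroughSchedulerFactory implements RpcSchedulerFactory {
        @Override
        public RpcScheduler create(Configuration conf, PriorityFunction priority,
                Abortable abortable) {
            // delegate to the stock scheduler; a real factory would wrap it
            return new SimpleRpcSchedulerFactory().create(conf, priority, abortable);
        }

        @Override
        public RpcScheduler create(Configuration conf, PriorityFunction priority) {
            return create(conf, priority, null); // mirrors the patched overload
        }
    }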

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index 1f16bef..bdf7126 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.hbase.index.scanner;
 import java.io.IOException;
 import java.util.SortedSet;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
@@ -49,7 +50,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     }
 
     @Override
-    public KeyValue peek() {
+    public Cell peek() {
         return delegate.peek();
     }
 
@@ -61,13 +62,13 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
      *         filters.
      */
     @Override
-    public KeyValue next() throws IOException {
+    public Cell next() throws IOException {
         seekToNextUnfilteredKeyValue();
         return delegate.next();
     }
 
     @Override
-    public boolean seek(KeyValue key) throws IOException {
+    public boolean seek(Cell key) throws IOException {
         if (filter.filterAllRemaining()) { return false; }
         // see if we can seek to the next key
         if (!delegate.seek(key)) { return false; }
@@ -78,7 +79,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     @SuppressWarnings("deprecation")
     private boolean seekToNextUnfilteredKeyValue() throws IOException {
         while (true) {
-            KeyValue peeked = delegate.peek();
+            Cell peeked = delegate.peek();
             // no more key values, so we are done
             if (peeked == null) { return false; }
 
@@ -103,13 +104,13 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     }
 
     @Override
-    public boolean reseek(KeyValue key) throws IOException {
+    public boolean reseek(Cell key) throws IOException {
         this.delegate.reseek(key);
         return this.seekToNextUnfilteredKeyValue();
     }
 
     @Override
-    public boolean requestSeek(KeyValue kv, boolean forward, boolean useBloom) throws IOException {
+    public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException {
         return this.reseek(kv);
     }
 
@@ -145,7 +146,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     }
 
     @Override
-    public boolean backwardSeek(KeyValue arg0) throws IOException {
+    public boolean backwardSeek(Cell arg0) throws IOException {
         return this.delegate.backwardSeek(arg0);
     }
 
@@ -155,7 +156,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     }
 
     @Override
-    public boolean seekToPreviousRow(KeyValue arg0) throws IOException {
+    public boolean seekToPreviousRow(Cell arg0) throws IOException {
         return this.delegate.seekToPreviousRow(arg0);
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
index 868e892..43ddc45 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.hbase.index.scanner;
 import java.io.Closeable;
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 
 /**
@@ -33,7 +34,7 @@ public interface Scanner extends Closeable {
   * @return the next keyvalue in the scanner or <tt>null</tt> if there is no next {@link KeyValue}
    * @throws IOException if there is an underlying error reading the data
    */
-  public KeyValue next() throws IOException;
+  public Cell next() throws IOException;
 
   /**
   * Seek to immediately before the given {@link KeyValue}. If that exact {@link KeyValue} is
@@ -51,5 +52,5 @@ public interface Scanner extends Closeable {
   * @return the next {@link KeyValue} or <tt>null</tt> if there are no more values in <tt>this</tt>
    * @throws IOException if there is an error reading the underlying data.
    */
-  public KeyValue peek() throws IOException;
+  public Cell peek() throws IOException;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
index 575779a..ff33ec2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
@@ -23,7 +23,9 @@ import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -110,7 +112,7 @@ public class ScannerBuilder {
     // create a scanner and wrap it as an iterator, meaning you can only go forward
     final FilteredKeyValueScanner kvScanner = new FilteredKeyValueScanner(filters, memstore);
     // seek the scanner to initialize it
-    KeyValue start = KeyValue.createFirstOnRow(update.getRow());
+    KeyValue start = KeyValueUtil.createFirstOnRow(update.getRow());
     try {
       if (!kvScanner.seek(start)) {
         return new EmptyScanner();
@@ -125,7 +127,7 @@ public class ScannerBuilder {
     return new Scanner() {
 
       @Override
-      public KeyValue next() {
+      public Cell next() {
         try {
           return kvScanner.next();
         } catch (IOException e) {
@@ -137,7 +139,7 @@ public class ScannerBuilder {
       public boolean seek(KeyValue next) throws IOException {
         // check to see if the next kv is after the current key, in which case we can use reseek,
         // which will be more efficient
-        KeyValue peek = kvScanner.peek();
+        Cell peek = kvScanner.peek();
         // there is another value and its before the requested one - we can do a reseek!
         if (peek != null) {
           int compare = KeyValue.COMPARATOR.compare(peek, next);
@@ -152,7 +154,7 @@ public class ScannerBuilder {
       }
 
       @Override
-      public KeyValue peek() throws IOException {
+      public Cell peek() throws IOException {
         return kvScanner.peek();
       }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index 0270de5..b04cf0a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -116,23 +116,6 @@ public class IndexedKeyValue extends KeyValue {
         return COLUMN_QUALIFIER.length;
     }
 
-    /**
-     * This is a KeyValue that shouldn't actually be replayed/replicated, so we always mark it as
-     * an {@link WALEdit#METAFAMILY} so it isn't replayed/replicated via the normal replay mechanism
-     */
-    @Override
-    public boolean matchingFamily(final byte[] family) {
-        return Bytes.equals(family, WALEdit.METAFAMILY);
-    }
-    
-    /**
-     * Not a real KeyValue
-     */
-    @Override
-    public boolean matchingRow(final byte [] row) {
-        return false;
-    }
-
     @Override
     public String toString() {
         return "IndexWrite:\n\ttable: " + indexTableName + "\n\tmutation:" + 
mutation;
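
[Editorial note] The deleted overrides depended on matchingFamily/matchingRow being part of KeyValue's overridable surface, which appears to no longer be the case in HBase 1.0, where such comparisons live in static CellUtil helpers. Code that still needs to recognize the metadata family can test it explicitly; a hedged sketch:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

    final class MetaCells {
        // Does this cell carry the WAL metadata family that marks entries
        // excluded from normal replay and replication?
        static boolean isMetaFamily(Cell cell) {
            return CellUtil.matchingFamily(cell, WALEdit.METAFAMILY);
        }
    }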

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 630c8f5..732dd8b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -92,8 +92,8 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.TraceScope;
 
 import com.google.common.base.Objects;
 import com.google.common.base.Strings;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
index 1b9e31a..e92dd6a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
@@ -22,11 +22,11 @@ import org.apache.hadoop.metrics2.*;
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.metrics.Metrics;
-import org.cloudera.htrace.HTraceConfiguration;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.SpanReceiver;
-import org.cloudera.htrace.TimelineAnnotation;
-import org.cloudera.htrace.impl.MilliSpan;
+import org.apache.htrace.HTraceConfiguration;
+import org.apache.htrace.Span;
+import org.apache.htrace.SpanReceiver;
+import org.apache.htrace.TimelineAnnotation;
+import org.apache.htrace.impl.MilliSpan;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -153,11 +153,6 @@ public class TraceMetricSource implements SpanReceiver, MetricsSource {
     // noop
   }
 
-  @Override
-  public void configure(HTraceConfiguration conf) {
-    // noop
-  }
-
   private static class Metric {
 
     List<Pair<MetricsInfo, Long>> counters = new ArrayList<Pair<MetricsInfo, Long>>();
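
[Editorial sketch] Apache HTrace dropped the configure(HTraceConfiguration) hook from SpanReceiver, hence the deleted override; receivers are instead expected to take their configuration at construction time. A no-op receiver under that contract, as a sketch (class name is illustrative):

    import java.io.IOException;

    import org.apache.htrace.HTraceConfiguration;
    import org.apache.htrace.Span;
    import org.apache.htrace.SpanReceiver;

    public class NoOpReceiver implements SpanReceiver {
        public NoOpReceiver(HTraceConfiguration conf) {
            // settings would be read here instead of in configure()
        }

        @Override
        public void receiveSpan(Span span) {
            // drop the span
        }

        @Override
        public void close() throws IOException {
            // nothing to release
        }
    }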

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index f3fc81d..ccb9064 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -34,7 +34,7 @@ import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.LogUtil;
-import org.cloudera.htrace.Span;
+import org.apache.htrace.Span;
 
 import com.google.common.base.Joiner;
 import com.google.common.primitives.Longs;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
index bee5a1c..4808f25 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
@@ -22,7 +22,7 @@ import java.sql.SQLException;
 import org.apache.phoenix.iterate.DelegateResultIterator;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.schema.tuple.Tuple;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.TraceScope;
 
 /**
  * A simple iterator that closes the trace scope when the iterator is closed.
