Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 1beac2746 -> 372006816


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index b748568..142b80e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -31,14 +31,15 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -159,9 +160,11 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
         if (regions != null) {
             return regions;
         }
-        return Collections.singletonList(new HRegionLocation(
-            new HRegionInfo(TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW),
-            SERVER_NAME, -1));
+        RegionInfo hri =
+                RegionInfoBuilder.newBuilder(TableName.valueOf(tableName))
+                        .setStartKey(HConstants.EMPTY_START_ROW)
+                        .setEndKey(HConstants.EMPTY_END_ROW).build();
+        return Collections.singletonList(new HRegionLocation(hri, SERVER_NAME, -1));
     }
 
     @Override
@@ -222,14 +225,14 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
         byte[] startKey = HConstants.EMPTY_START_ROW;
         List<HRegionLocation> regions = Lists.newArrayListWithExpectedSize(splits.length);
         for (byte[] split : splits) {
-            regions.add(new HRegionLocation(
-                    new HRegionInfo(TableName.valueOf(physicalName), startKey, split),
-                    SERVER_NAME, -1));
+            regions.add(new HRegionLocation(RegionInfoBuilder
+                    .newBuilder(TableName.valueOf(physicalName)).setStartKey(startKey)
+                    .setEndKey(split).build(), SERVER_NAME, -1));
             startKey = split;
         }
-        regions.add(new HRegionLocation(
-                new HRegionInfo(TableName.valueOf(physicalName), startKey, HConstants.EMPTY_END_ROW),
-                SERVER_NAME, -1));
+        regions.add(new HRegionLocation(RegionInfoBuilder
+                .newBuilder(TableName.valueOf(physicalName)).setStartKey(startKey)
+                .setEndKey(HConstants.EMPTY_END_ROW).build(), SERVER_NAME, -1));
         return regions;
     }
     
@@ -383,7 +386,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
+    public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
         return null;
     }
 
@@ -582,16 +585,16 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException {
        List<HRegionLocation> regions = tableSplits.get(Bytes.toString(tableName));
        if (regions != null) {
-               for(HRegionLocation region: regions) {
-                       if (Bytes.compareTo(region.getRegionInfo().getStartKey(), row) <= 0
-                                       && Bytes.compareTo(region.getRegionInfo().getEndKey(), row) > 0) {
-                           return region;
-                       }
-               }
+            for (HRegionLocation region : regions) {
+                if (Bytes.compareTo(region.getRegion().getStartKey(), row) <= 0
+                        && Bytes.compareTo(region.getRegion().getEndKey(), row) > 0) {
+                    return region;
+                }
+            }
        }
-       return new HRegionLocation(
-                       new HRegionInfo(TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW),
-                       SERVER_NAME, -1);
+        return new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.valueOf(tableName))
+                .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW)
+                .build(), SERVER_NAME, -1);
     }
 
     @Override
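
HBase 2.0 drops the public HRegionInfo constructors in favor of immutable RegionInfo instances assembled through RegionInfoBuilder, as in the hunks above. A minimal standalone sketch of the migration (the class name and table name "T" are illustrative, not part of this patch):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;

    public class RegionInfoMigrationSketch {
        public static RegionInfo wholeTableRegion(String tableName) {
            // 1.x: new HRegionInfo(TableName.valueOf(tableName),
            //          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW)
            // 2.0: start and end key go through separate fluent setters; a
            // copy-pasted setStartKey(...) where setEndKey(...) was intended
            // still compiles, so these chains deserve a close look in review.
            return RegionInfoBuilder.newBuilder(TableName.valueOf(tableName))
                    .setStartKey(HConstants.EMPTY_START_ROW)
                    .setEndKey(HConstants.EMPTY_END_ROW)
                    .build();
        }
    }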

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index bb24602..3c307e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -25,11 +25,11 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.MutationPlan;
@@ -158,7 +158,7 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
+    public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
         return getDelegate().getTableDescriptor(tableName);
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index d695f41..7c57122 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -24,9 +24,9 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.PColumnFamily;
@@ -186,9 +186,9 @@ public class GuidePostsCache {
         }
     }
     
-    public void invalidateAll(HTableDescriptor htableDesc) {
+    public void invalidateAll(TableDescriptor htableDesc) {
         byte[] tableName = htableDesc.getTableName().getName();
-        for (byte[] fam : htableDesc.getFamiliesKeys()) {
+        for (byte[] fam : htableDesc.getColumnFamilyNames()) {
             invalidate(new GuidePostsKey(tableName, fam));
         }
     }
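
For the stats cache, the only behavioral surface is the set of column family names: HTableDescriptor.getFamiliesKeys() becomes TableDescriptor.getColumnFamilyNames(), both yielding the family names as byte[]. A sketch of the same loop against a locally built descriptor (table and family names are illustrative, and the println stands in for Phoenix's invalidate call):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InvalidateAllSketch {
        public static void main(String[] args) {
            TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("T"))
                    .addColumnFamily(ColumnFamilyDescriptorBuilder.of("0"))
                    .build();
            byte[] tableName = desc.getTableName().getName();
            // 1.x: desc.getFamiliesKeys(); 2.0: desc.getColumnFamilyNames()
            for (byte[] fam : desc.getColumnFamilyNames()) {
                // stands in for invalidate(new GuidePostsKey(tableName, fam))
                System.out.println(Bytes.toString(tableName) + "/" + Bytes.toString(fam));
            }
        }
    }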

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 7607388..28e96a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -112,9 +112,9 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
 
 import java.math.BigDecimal;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -327,9 +327,9 @@ public interface QueryConstants {
             "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + ","
             + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + 
COLUMN_FAMILY + "))\n" +
             HConstants.VERSIONS + "=" + 
MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +
-            HColumnDescriptor.KEEP_DELETED_CELLS + "="  + 
MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
+            ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "="  + 
MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
             // Install split policy to prevent a tenant's metadata from being 
split across regions.
-            HTableDescriptor.SPLIT_POLICY + "='" + 
MetaDataSplitPolicy.class.getName() + "',\n" + 
+            TableDescriptorBuilder.SPLIT_POLICY + "='" + 
MetaDataSplitPolicy.class.getName() + "',\n" + 
             PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
 
     public static final String CREATE_STATS_TABLE_METADATA =
@@ -345,9 +345,9 @@ public interface QueryConstants {
             + PHYSICAL_NAME + ","
             + COLUMN_FAMILY + ","+ GUIDE_POST_KEY+"))\n" +
             HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_STAT_DATA_VERSIONS + ",\n" +
-            HColumnDescriptor.KEEP_DELETED_CELLS + "="  + MetaDataProtocol.DEFAULT_STATS_KEEP_DELETED_CELLS + ",\n" +
+            ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "="  + MetaDataProtocol.DEFAULT_STATS_KEEP_DELETED_CELLS + ",\n" +
             // Install split policy to prevent a physical table's stats from being split across regions.
-            HTableDescriptor.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" + 
+            TableDescriptorBuilder.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" + 
             PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
 
     public static final String CREATE_SEQUENCE_METADATA =
@@ -366,7 +366,7 @@ public interface QueryConstants {
             LIMIT_REACHED_FLAG + " BOOLEAN \n" +
             " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + "," + SEQUENCE_SCHEMA + "," + SEQUENCE_NAME + "))\n" +
             HConstants.VERSIONS + "=" + 
MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +
-            HColumnDescriptor.KEEP_DELETED_CELLS + "="  + 
MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
+            ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "="  + 
MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
             PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
     public static final String CREATE_SYSTEM_SCHEMA = "CREATE SCHEMA " + SYSTEM_CATALOG_SCHEMA;
     public static final String UPGRADE_TABLE_SNAPSHOT_PREFIX = "_UPGRADING_TABLE_";
@@ -391,9 +391,9 @@ public interface QueryConstants {
             MAX_VALUE + " VARCHAR, \n" +
             " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + ", " + FUNCTION_NAME + ", " + TYPE + ", " + ARG_POSITION + "))\n" +
             HConstants.VERSIONS + "=" + 
MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +
-            HColumnDescriptor.KEEP_DELETED_CELLS + "="  + 
MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n"+
+            ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "="  + 
MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n"+
             // Install split policy to prevent a tenant's metadata from being 
split across regions.
-            HTableDescriptor.SPLIT_POLICY + "='" + 
MetaDataSplitPolicy.class.getName() + "',\n" + 
+            TableDescriptorBuilder.SPLIT_POLICY + "='" + 
MetaDataSplitPolicy.class.getName() + "',\n" + 
             PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
     public static final byte[] OFFSET_FAMILY = "f_offset".getBytes();
     public static final byte[] OFFSET_COLUMN = "c_offset".getBytes();
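
KEEP_DELETED_CELLS and SPLIT_POLICY are plain String property keys, so relocating them from HColumnDescriptor/HTableDescriptor to the builder classes leaves the generated DDL text unchanged. A quick sketch, assuming a hypothetical split-policy class name:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DdlPropsSketch {
        public static void main(String[] args) {
            // Prints: KEEP_DELETED_CELLS=true,
            //         SPLIT_POLICY='com.example.MySplitPolicy'
            // (com.example.MySplitPolicy is a placeholder, not from this patch)
            System.out.println(ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + true + ",\n"
                    + TableDescriptorBuilder.SPLIT_POLICY + "='com.example.MySplitPolicy'");
        }
    }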

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d618183..ee9ddc0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.schema;
 
 import static com.google.common.collect.Sets.newLinkedHashSet;
 import static com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize;
-import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
 import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ANALYZE_TABLE;
 import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.RUN_UPDATE_STATS_ASYNC_ATTRIB;
 import static org.apache.phoenix.exception.SQLExceptionCode.INSUFFICIENT_MULTI_TENANT_COLUMNS;
@@ -127,14 +126,15 @@ import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -1065,11 +1065,11 @@ public class MetaDataClient {
     private void populatePropertyMaps(ListMultimap<String,Pair<String,Object>> props, Map<String, Object> tableProps,
             Map<String, Object> commonFamilyProps) {
         // Somewhat hacky way of determining if property is for HColumnDescriptor or HTableDescriptor
-        HColumnDescriptor defaultDescriptor = new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
+        ColumnFamilyDescriptor defaultDescriptor = ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
         if (!props.isEmpty()) {
             Collection<Pair<String,Object>> propsList = props.get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
             for (Pair<String,Object> prop : propsList) {
-                if (defaultDescriptor.getValue(prop.getFirst()) == null) {
+                if (defaultDescriptor.getValue(Bytes.toBytes(prop.getFirst())) == null) {
                     tableProps.put(prop.getFirst(), prop.getSecond());
                 } else {
                     commonFamilyProps.put(prop.getFirst(), prop.getSecond());
@@ -2003,7 +2003,7 @@ public class MetaDataClient {
             tableProps.put(PhoenixDatabaseMetaData.TRANSACTIONAL, transactional);
             if (transactional) {
                 // If TTL set, use Tephra TTL property name instead
-                Object ttl = commonFamilyProps.remove(HColumnDescriptor.TTL);
+                Object ttl = commonFamilyProps.remove(ColumnFamilyDescriptorBuilder.TTL);
                 if (ttl != null) {
                     commonFamilyProps.put(PhoenixTransactionContext.PROPERTY_TTL, ttl);
                 }
@@ -2034,9 +2034,9 @@ public class MetaDataClient {
                     Integer maxVersionsProp = (Integer) commonFamilyProps.get(HConstants.VERSIONS);
                     if (maxVersionsProp == null) {
                         if (parent != null) {
-                            HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(parent.getPhysicalName().getBytes());
+                            TableDescriptor desc = connection.getQueryServices().getTableDescriptor(parent.getPhysicalName().getBytes());
                             if (desc != null) {
-                                maxVersionsProp = desc.getFamily(SchemaUtil.getEmptyColumnFamily(parent)).getMaxVersions();
+                                maxVersionsProp = desc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(parent)).getMaxVersions();
                             }
                         }
                         if (maxVersionsProp == null) {
@@ -2225,7 +2225,7 @@ public class MetaDataClient {
                 byte[] tableNameBytes = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
                 boolean tableExists = true;
                 try {
-                    HTableDescriptor tableDescriptor = connection.getQueryServices().getTableDescriptor(tableNameBytes);
+                    TableDescriptor tableDescriptor = connection.getQueryServices().getTableDescriptor(tableNameBytes);
                     if (tableDescriptor == null) { // for connectionless
                         tableExists = false;
                     }
@@ -2420,7 +2420,7 @@ public class MetaDataClient {
                     for (Pair<String,Object> prop : props) {
                         // Don't allow specifying column families for TTL. TTL can only apply for the all the column families of the table
                         // i.e. it can't be column family specific.
-                        if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY) && prop.getFirst().equals(TTL)) {
+                        if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY) && prop.getFirst().equals(ColumnFamilyDescriptorBuilder.TTL)) {
                             throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL).build().buildException();
                         }
                         combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
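
populatePropertyMaps() keeps its old trick of probing a default column-family descriptor to decide whether a property is family-scoped or table-scoped; the only API difference is that ColumnFamilyDescriptor.getValue() now takes a byte[] key. A reduced sketch of the probe (outputs are expectations under that assumption, not guarantees):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PropScopeSketch {
        static boolean isFamilyLevel(String propName) {
            ColumnFamilyDescriptor probe = ColumnFamilyDescriptorBuilder.of("0");
            // Family-level properties carry a default value on a fresh descriptor;
            // unknown keys come back null and are routed to the table-level map.
            return probe.getValue(Bytes.toBytes(propName)) != null;
        }

        public static void main(String[] args) {
            System.out.println(isFamilyLevel("TTL"));          // expected true
            System.out.println(isFamilyLevel("SPLIT_POLICY")); // expected false
        }
    }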

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index 9798f79..047ccf6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -20,8 +20,10 @@ package org.apache.phoenix.transaction;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Append;
@@ -36,6 +38,7 @@ import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -180,13 +183,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
     }
 
     @Override
-    public Object[] batch(List<? extends Row> actions) throws IOException,
-            InterruptedException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
     public <R> void batchCallback(List<? extends Row> actions,
             Object[] results, Callback<R> callback) throws IOException,
             InterruptedException {
@@ -194,13 +190,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
     }
 
     @Override
-    public <R> Object[] batchCallback(List<? extends Row> actions,
-            Callback<R> callback) throws IOException, InterruptedException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
     public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
             byte[] value, Put put) throws IOException {
         // TODO Auto-generated method stub
@@ -331,4 +320,85 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
         
     }
 
+    @Override
+    public TableDescriptor getDescriptor() throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public boolean[] exists(List<Get> gets) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put)
+            throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value,
+            Delete delete) throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value,
+            RowMutations mutation) throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public long getRpcTimeout(TimeUnit unit) {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public long getReadRpcTimeout(TimeUnit unit) {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public int getReadRpcTimeout() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public void setReadRpcTimeout(int readRpcTimeout) {
+        // TODO Auto-generated method stub
+        
+    }
+
+    @Override
+    public long getWriteRpcTimeout(TimeUnit unit) {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public int getWriteRpcTimeout() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public void setWriteRpcTimeout(int writeRpcTimeout) {
+        // TODO Auto-generated method stub
+        
+    }
+
+    @Override
+    public long getOperationTimeout(TimeUnit unit) {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
 }
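
The deleted Object[]-returning batch overloads mirror their removal from HBase 2.0's Table interface; the surviving batch(List, Object[]) takes a caller-allocated results array. A usage sketch (any Table implementation will do):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;

    public class BatchSketch {
        static Object[] batchAll(Table table, List<? extends Row> actions)
                throws IOException, InterruptedException {
            // 1.x: Object[] results = table.batch(actions);
            // 2.0: the caller sizes the results array up front.
            Object[] results = new Object[actions.size()];
            table.batch(actions, results);
            return results;
        }
    }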

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index 0e46ae9..ed3f44e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -45,6 +46,7 @@ public interface PhoenixTransactionContext {
     public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "tephra.tx.rollback"; //"phoenix.tx.rollback"; 
 
     public static final String PROPERTY_TTL = "dataset.table.ttl";
+    public static final byte[] PROPERTY_TTL_BYTES = Bytes.toBytes(PROPERTY_TTL);
 
     public static final String READ_NON_TX_DATA = "data.tx.read.pre.existing";
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index ede2896..e248f33 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.transaction;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompareOperator;
@@ -42,10 +43,10 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.tephra.TxConstants;
-import org.apache.tephra.hbase.TransactionAwareHTable;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.tephra.TxConstants;
+import org.apache.tephra.hbase.TransactionAwareHTable;
 
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
@@ -354,4 +355,29 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
     public void setWriteRpcTimeout(int writeRpcTimeout) {
         return transactionAwareHTable.setWriteRpcTimeout(writeRpcTimeout);
     }
+
+    @Override
+    public boolean[] exists(List<Get> gets) throws IOException {
+        return transactionAwareHTable.exists(gets);
+    }
+
+    @Override
+    public long getRpcTimeout(TimeUnit unit) {
+        return transactionAwareHTable.getRpcTimeout(unit);
+    }
+
+    @Override
+    public long getReadRpcTimeout(TimeUnit unit) {
+        return transactionAwareHTable.getReadRpcTimeout(unit);
+    }
+
+    @Override
+    public long getWriteRpcTimeout(TimeUnit unit) {
+        return transactionAwareHTable.getWriteRpcTimeout(unit);
+    }
+
+    @Override
+    public long getOperationTimeout(TimeUnit unit) {
+        return transactionAwareHTable.getOperationTimeout(unit);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 90760bc..9193172 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -282,8 +282,8 @@ public class IndexUtil {
                 byte[] regionEndkey = null;
                 if(maintainer.isLocalIndex()) {
                     HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
-                    regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
-                    regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
+                    regionStartKey = tableRegionLocation.getRegion().getStartKey();
+                    regionEndkey = tableRegionLocation.getRegion().getEndKey();
                 }
                 Delete delete = maintainer.buildDeleteMutation(kvBuilder, null, ptr, Collections.<Cell>emptyList(), ts, regionStartKey, regionEndkey);
                 delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, dataMutation.getAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY));
@@ -349,8 +349,8 @@ public class IndexUtil {
                     byte[] regionEndkey = null;
                     if(maintainer.isLocalIndex()) {
                         HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
-                        regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
-                        regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
+                        regionStartKey = tableRegionLocation.getRegion().getStartKey();
+                        regionEndkey = tableRegionLocation.getRegion().getEndKey();
                     }
                     indexMutations.add(maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, regionStartKey, regionEndkey));
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 7914e3e..6a9ec85 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -28,25 +28,23 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -459,7 +457,7 @@ public class MetaDataUtil {
             throws SQLException {
         byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName);
         try {
-            HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalIndexName);
+            TableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalIndexName);
             return desc != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(IS_VIEW_INDEX_TABLE_PROP_BYTES)));
         } catch (TableNotFoundException e) {
             return false;
@@ -472,7 +470,7 @@ public class MetaDataUtil {
 
     public static boolean hasLocalIndexTable(PhoenixConnection connection, byte[] physicalTableName) throws SQLException {
         try {
-            HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalTableName);
+            TableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalTableName);
             if(desc == null ) return false;
             return hasLocalIndexColumnFamily(desc);
         } catch (TableNotFoundException e) {
@@ -480,8 +478,8 @@ public class MetaDataUtil {
         }
     }
 
-    public static boolean hasLocalIndexColumnFamily(HTableDescriptor desc) {
-        for (HColumnDescriptor cf : desc.getColumnFamilies()) {
+    public static boolean hasLocalIndexColumnFamily(TableDescriptor desc) {
+        for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
             if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
                 return true;
             }
@@ -489,9 +487,9 @@ public class MetaDataUtil {
         return false;
     }
 
-    public static List<byte[]> getNonLocalIndexColumnFamilies(HTableDescriptor desc) {
+    public static List<byte[]> getNonLocalIndexColumnFamilies(TableDescriptor desc) {
        List<byte[]> families = new ArrayList<byte[]>(desc.getColumnFamilies().length);
-        for (HColumnDescriptor cf : desc.getColumnFamilies()) {
+        for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
             if (!cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
                families.add(cf.getName());
             }
@@ -500,10 +498,10 @@ public class MetaDataUtil {
     }
 
     public static List<byte[]> getLocalIndexColumnFamilies(PhoenixConnection conn, byte[] physicalTableName) throws SQLException {
-        HTableDescriptor desc = conn.getQueryServices().getTableDescriptor(physicalTableName);
+        TableDescriptor desc = conn.getQueryServices().getTableDescriptor(physicalTableName);
         if(desc == null ) return Collections.emptyList();
         List<byte[]> families = new ArrayList<byte[]>(desc.getColumnFamilies().length / 2);
-        for (HColumnDescriptor cf : desc.getColumnFamilies()) {
+        for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
             if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
                 families.add(cf.getName());
             }
@@ -530,10 +528,10 @@ public class MetaDataUtil {
      * @throws
      */
     public static boolean tableRegionsOnline(Configuration conf, PTable table) {
-        Connection hcon = null;
+        ClusterConnection hcon = null;
 
         try {
-            hcon = ConnectionFactory.createConnection(conf);
+            hcon = (ClusterConnection)ConnectionFactory.createConnection(conf);
             List<HRegionLocation> locations = ((ClusterConnection)hcon).locateRegions(
                 org.apache.hadoop.hbase.TableName.valueOf(table.getPhysicalName().getBytes()));
 
@@ -542,17 +540,12 @@ public class MetaDataUtil {
                     ServerName sn = loc.getServerName();
                     if (sn == null) continue;
 
-                    AdminService.BlockingInterface admin = (BlockingInterface) ((ClusterConnection)hcon).getAdmin(sn);
-                    GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
-                        loc.getRegionInfo().getRegionName());
-
-                    admin.getRegionInfo(null, request);
-                } catch (ServiceException e) {
-                    IOException ie = ProtobufUtil.getRemoteException(e);
-                    logger.debug("Region " + loc.getRegionInfo().getEncodedName() + " isn't online due to:" + ie);
-                    return false;
+                    AdminService.BlockingInterface admin = ((ClusterConnection) hcon).getAdmin(sn);
+                    HBaseRpcController controller = hcon.getRpcControllerFactory().newController();
+                    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRegionInfo(controller,
+                        (AdminService.BlockingInterface) admin, loc.getRegion().getRegionName());
                 } catch (RemoteException e) {
-                    logger.debug("Cannot get region " + loc.getRegionInfo().getEncodedName() + " info due to error:" + e);
+                    logger.debug("Cannot get region " + loc.getRegion().getEncodedName() + " info due to error:" + e);
                     return false;
                 }
             }
@@ -651,7 +644,7 @@ public class MetaDataUtil {
     }
 
     public static boolean isHColumnProperty(String propName) {
-        return HColumnDescriptor.getDefaultValues().containsKey(propName);
+        return ColumnFamilyDescriptorBuilder.getDefaultValues().containsKey(propName);
     }
 
     public static boolean isHTableProperty(String propName) {
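
isHColumnProperty() continues to work because ColumnFamilyDescriptorBuilder exposes the same static map of family-level property defaults that HColumnDescriptor did. A short sketch:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;

    public class PropertyKindSketch {
        public static void main(String[] args) {
            // The default-value map is keyed by family-level property names
            // such as TTL or VERSIONS; expected to print true here.
            System.out.println(ColumnFamilyDescriptorBuilder.getDefaultValues()
                    .containsKey("TTL"));
        }
    }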

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
index d394a68..0b54819 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
@@ -17,7 +17,7 @@
  */
 package org.apache.phoenix.util;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -29,8 +29,8 @@ public class RepairUtil {
         byte[] endKey = environment.getRegion().getRegionInfo().getEndKey();
         byte[] indexKeyEmbedded = startKey.length == 0 ? new byte[endKey.length] : startKey;
         for (StoreFile file : store.getStorefiles()) {
-            if (file.getReader() != null && file.getReader().getFirstKey() != null) {
-                byte[] fileFirstRowKey = KeyValue.createKeyValueFromKey(file.getReader().getFirstKey()).getRow();
+            if (file.getFirstKey().isPresent() && file.getFirstKey().get() != null) {
+                byte[] fileFirstRowKey = CellUtil.cloneRow(file.getFirstKey().get());
                 if ((fileFirstRowKey != null && Bytes.compareTo(fileFirstRowKey, 0,
                         indexKeyEmbedded.length, indexKeyEmbedded, 0, indexKeyEmbedded.length) != 0)) {
                     return false; }
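
StoreFile.getFirstKey() now returns Optional<Cell> instead of a nullable serialized key, so the reader null-checks and the KeyValue.createKeyValueFromKey() round-trip collapse into Optional handling plus CellUtil.cloneRow(). A reduced sketch of just that conversion:

    import java.util.Optional;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;

    public class FirstRowKeySketch {
        // First row key of a store file, or null when the file is empty.
        static byte[] firstRowKey(Optional<Cell> firstKey) {
            return firstKey.map(CellUtil::cloneRow).orElse(null);
        }
    }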

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index a844226..94ac19f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -36,8 +36,8 @@ import java.util.NavigableSet;
 import java.util.TreeMap;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterList;
@@ -669,7 +669,7 @@ public class ScanUtil {
         }
     }
 
-    public static byte[] getActualStartRow(Scan localIndexScan, HRegionInfo regionInfo) {
+    public static byte[] getActualStartRow(Scan localIndexScan, RegionInfo regionInfo) {
         return localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX) == null ? localIndexScan
                 .getStartRow() : ScanRanges.prefixKey(localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX), 0 ,
             regionInfo.getStartKey().length == 0 ? new byte[regionInfo.getEndKey().length]

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index e913d39..bb9ba3b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -60,6 +60,7 @@ import java.sql.SQLException;
 import java.text.Format;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -75,12 +76,12 @@ import javax.annotation.Nullable;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -89,7 +90,8 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.LocalIndexSplitter;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -190,9 +192,9 @@ public class UpgradeUtil {
     
     private static void createSequenceSnapshot(Admin admin, PhoenixConnection conn) throws SQLException {
         byte[] tableName = getSequenceSnapshotName();
-        HColumnDescriptor columnDesc = new HColumnDescriptor(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES);
-        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
-        desc.addFamily(columnDesc);
+        TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
+                .addColumnFamily(ColumnFamilyDescriptorBuilder.of(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES))
+                .build();
         try {
             admin.createTable(desc);
             copyTable(conn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, tableName);
@@ -294,7 +296,7 @@ public class UpgradeUtil {
                 return;
             }
             logger.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets 
+ "-ways. This may take some time - please do not close window.");
-            HTableDescriptor desc = 
admin.getTableDescriptor(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES));
+            TableDescriptor desc = 
admin.getDescriptor(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES));
             createSequenceSnapshot(admin, conn);
             snapshotCreated = true;
             
admin.disableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME));
@@ -346,33 +348,34 @@ public class UpgradeUtil {
             boolean droppedLocalIndexes = false;
             while (rs.next()) {
                 if(!droppedLocalIndexes) {
-                    HTableDescriptor[] localIndexTables = admin.listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
+                    TableDescriptor[] localIndexTables = admin.listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
                     String localIndexSplitter = LocalIndexSplitter.class.getName();
-                    for (HTableDescriptor table : localIndexTables) {
-                        HTableDescriptor dataTableDesc = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(table.getNameAsString())));
-                        HColumnDescriptor[] columnFamilies = dataTableDesc.getColumnFamilies();
+                    for (TableDescriptor table : localIndexTables) {
+                        TableDescriptor dataTableDesc = admin.getDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(table.getTableName().getNameAsString())));
+                        TableDescriptorBuilder dataTableDescBuilder = TableDescriptorBuilder.newBuilder(dataTableDesc);
+                        ColumnFamilyDescriptor[] columnFamilies = dataTableDesc.getColumnFamilies();
                         boolean modifyTable = false;
-                        for(HColumnDescriptor cf : columnFamilies) {
+                        for(ColumnFamilyDescriptor cf : columnFamilies) {
                             String localIndexCf = QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX+cf.getNameAsString();
-                            if(dataTableDesc.getFamily(Bytes.toBytes(localIndexCf))==null){
-                                HColumnDescriptor colDef =
-                                        new HColumnDescriptor(localIndexCf);
-                                for(Entry<ImmutableBytesWritable, ImmutableBytesWritable> keyValue: cf.getValues().entrySet()){
-                                    colDef.setValue(keyValue.getKey().copyBytes(), keyValue.getValue().copyBytes());
+                            if(dataTableDesc.getColumnFamily(Bytes.toBytes(localIndexCf))==null){
+                                ColumnFamilyDescriptorBuilder colDefBuilder =
+                                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(localIndexCf));
+                                for(Entry<Bytes, Bytes> keyValue: cf.getValues().entrySet()){
+                                    colDefBuilder.setValue(keyValue.getKey().copyBytes(), keyValue.getValue().copyBytes());
                                 }
-                                dataTableDesc.addFamily(colDef);
+                                dataTableDescBuilder.addColumnFamily(colDefBuilder.build());
                                 modifyTable = true;
                             }
                         }
-                        List<String> coprocessors = dataTableDesc.getCoprocessors();
+                        Collection<String> coprocessors = dataTableDesc.getCoprocessors();
                         for(String coprocessor:  coprocessors) {
                             if(coprocessor.equals(localIndexSplitter)) {
-                                dataTableDesc.removeCoprocessor(localIndexSplitter);
+                                dataTableDescBuilder.removeCoprocessor(localIndexSplitter);
                                 modifyTable = true;
                             }
                         }
                         if(modifyTable) {
-                            admin.modifyTable(dataTableDesc.getTableName(), dataTableDesc);
+                            admin.modifyTable(dataTableDescBuilder.build());
                         }
                     }
                     admin.disableTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
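
Because TableDescriptor is immutable, the upgrade path now copies the live descriptor into a TableDescriptorBuilder, applies its edits, and passes the rebuilt descriptor to admin.modifyTable(). A minimal sketch of that copy-modify-apply shape (table and family names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class ModifyTableSketch {
        static void addFamily(Admin admin, TableName name) throws IOException {
            TableDescriptor current = admin.getDescriptor(name);
            // 1.x mutated the HTableDescriptor in place; 2.0 rebuilds it.
            TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
                    .addColumnFamily(ColumnFamilyDescriptorBuilder.of("newcf"))
                    .build();
            admin.modifyTable(updated);
        }
    }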

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index b3c7dca..cceed3f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -2429,8 +2429,8 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
             List<HRegionLocation> regionLocations=
                     conn.getQueryServices().getAllTableRegions(Bytes.toBytes("SALT_TEST2900"));
             for (HRegionLocation regionLocation : regionLocations) {
-                assertTrue(ranges.intersectRegion(regionLocation.getRegionInfo().getStartKey(),
-                    regionLocation.getRegionInfo().getEndKey(), false));
+                assertTrue(ranges.intersectRegion(regionLocation.getRegion().getStartKey(),
+                    regionLocation.getRegion().getEndKey(), false));
             }
         } finally {
             conn.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
index c7b2685..6494db2 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
@@ -27,7 +27,7 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.phoenix.hbase.index.Indexer;
 
 /**
@@ -127,15 +127,15 @@ public class CoveredColumnIndexSpecifierBuilder {
     }
   }
 
-  public void build(HTableDescriptor desc) throws IOException {
-    build(desc, CoveredColumnIndexCodec.class);
-  }
+    public TableDescriptor build(TableDescriptor desc) throws IOException {
+        return build(desc, CoveredColumnIndexCodec.class);
+    }
 
-  public void build(HTableDescriptor desc, Class<? extends IndexCodec> clazz) throws IOException {
+  public TableDescriptor build(TableDescriptor desc, Class<? extends IndexCodec> clazz) throws IOException {
     // add the codec for the index to the map of options
     Map<String, String> opts = this.convertToMap();
     opts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, clazz.getName());
-    Indexer.enableIndexing(desc, NonTxIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
+    return Indexer.enableIndexing(desc, NonTxIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
   }
 
   public static List<ColumnGroup> getColumns(Configuration conf) {
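
Since descriptors can no longer be decorated in place, build() now returns the rebuilt TableDescriptor and callers must keep the return value, or the coprocessor registration is silently dropped. A small sketch of the re-assignment discipline (table and family names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class ReassignDescriptorSketch {
        public static void main(String[] args) {
            TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("T")).build();
            // Builders yield new immutable descriptors; forgetting the
            // re-assignment (harmless with 1.x in-place mutation) loses the edit.
            desc = TableDescriptorBuilder.newBuilder(desc)
                    .addColumnFamily(ColumnFamilyDescriptorBuilder.of("fam"))
                    .build();
            System.out.println(desc.hasColumnFamily("fam".getBytes())); // true
        }
    }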

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
index d94cce0..81529fe 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
@@ -34,11 +34,11 @@ import java.util.Properties;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -64,6 +64,7 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Before;
@@ -102,7 +103,6 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
     private static final byte[] VALUE_2 = Bytes.toBytes(222);
     private static final byte[] VALUE_3 = Bytes.toBytes(333);
     private static final byte[] VALUE_4 = Bytes.toBytes(444);
-    private static final byte PUT_TYPE = KeyValue.Type.Put.getCode();
 
     private NonTxIndexBuilder indexBuilder;
     private PhoenixIndexMetaData mockIndexMetaData;
@@ -139,7 +139,7 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
                 });
 
         // the following is called by PhoenixIndexCodec#getIndexUpserts() , getIndexDeletes()
-        HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
+        RegionInfo mockRegionInfo = Mockito.mock(RegionInfo.class);
         Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
         Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
         Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
@@ -232,10 +232,10 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
 
         // the current row state has 3 versions, but if we rebuild as of t=2, scanner in LocalTable
         // should only return first
-        Cell currentCell1 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 1, PUT_TYPE, VALUE_1);
-        Cell currentCell2 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 2, PUT_TYPE, VALUE_2);
-        Cell currentCell3 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 3, PUT_TYPE, VALUE_3);
-        Cell currentCell4 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 4, PUT_TYPE, VALUE_4);
+        Cell currentCell1 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 1, VALUE_1);
+        Cell currentCell2 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 2, VALUE_2);
+        Cell currentCell3 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 3, VALUE_3);
+        Cell currentCell4 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 4, VALUE_4);
         setCurrentRowState(Arrays.asList(currentCell4, currentCell3, currentCell2, currentCell1));
 
         // rebuilder replays mutations starting from t=2
@@ -322,7 +322,7 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
     }
 
     private void setCurrentRowState(byte[] fam2, byte[] indexedQualifier, int i, byte[] value1) {
-        Cell cell = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 1, PUT_TYPE, VALUE_1);
+        Cell cell = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 1, VALUE_1);
         currentRowCells = Collections.singletonList(cell);
     }
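
CellUtil.createCell() is deprecated in HBase 2.0, and the Phoenix helper used here defaults the cell type, so the explicit PUT_TYPE code disappears. Assuming PhoenixKeyValueUtil.newKeyValue() wraps a plain KeyValue, an equivalent standalone construction is:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutCellSketch {
        public static void main(String[] args) {
            // row, family, qualifier, timestamp, type, value
            Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
                    Bytes.toBytes("q"), 1L, KeyValue.Type.Put, Bytes.toBytes(111));
            System.out.println(cell);
        }
    }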
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
index 317d07a..89386e0 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
@@ -20,10 +20,12 @@ package org.apache.phoenix.hbase.index.util;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
@@ -77,19 +79,17 @@ public class TestIndexManagementUtil {
    * @throws IOException
    */
   public static void createIndexTable(Admin admin, String indexTable) throws IOException {
-    createIndexTable(admin, new HTableDescriptor(indexTable));
+    createIndexTable(admin, TableDescriptorBuilder.newBuilder(TableName.valueOf(indexTable)));
   }
 
   /**
    * @param admin to create the table
    * @param index descriptor to update before creating table
    */
-  public static void createIndexTable(Admin admin, HTableDescriptor index) throws IOException {
-    HColumnDescriptor col =
-        new HColumnDescriptor(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY);
-    // ensure that we can 'see past' delete markers when doing scans
-    col.setKeepDeletedCells(true);
-    index.addFamily(col);
-    admin.createTable(index);
+  public static void createIndexTable(Admin admin, TableDescriptorBuilder indexBuilder) throws IOException {
+        indexBuilder.addColumnFamily(
+                ColumnFamilyDescriptorBuilder.newBuilder(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY)
+                        .setKeepDeletedCells(KeepDeletedCells.TRUE).build());
+    admin.createTable(indexBuilder.build());
   }
 }
\ No newline at end of file

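In the TestIndexManagementUtil change above, the mutable HTableDescriptor/HColumnDescriptor pair gives way to the 2.0 builders, and setKeepDeletedCells switches from a boolean to the KeepDeletedCells enum. A minimal sketch of the builder chain, assuming a placeholder table name and column family:

    // Hedged sketch of the HBase 2.0 descriptor builders ("my_index" and "fam" are placeholders).
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("my_index"));
    builder.addColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam"))
            .setKeepDeletedCells(KeepDeletedCells.TRUE) // was setKeepDeletedCells(true)
            .build());
    admin.createTable(builder.build()); // descriptors are immutable once built
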
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index b302210..57e3ba1 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -17,10 +17,8 @@
  */
 package org.apache.phoenix.hbase.index.write;
 
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -37,11 +35,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -79,7 +78,8 @@ public class TestIndexWriter {
     Region region = Mockito.mock(Region.class);
     Mockito.when(env.getRegion()).thenReturn(region);
     Mockito.when(env.getConfiguration()).thenReturn(conf);
-    Mockito.when(region.getTableDesc()).thenReturn(new HTableDescriptor());
+    Mockito.when(region.getTableDescriptor()).thenReturn(
+        TableDescriptorBuilder.newBuilder(TableName.valueOf("dummy")).build());
     assertNotNull(IndexWriter.getFailurePolicy(env));
   }
 
@@ -111,7 +111,7 @@ public class TestIndexWriter {
 
     Table table = Mockito.mock(Table.class);
     final boolean[] completed = new boolean[] { false };
-    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
+    Mockito.when(table.batch(Mockito.anyList(), Mockito.anyList())).thenAnswer(new Answer<Void>() {
 
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
@@ -120,7 +120,7 @@ public class TestIndexWriter {
         return null;
       }
     });
-    Mockito.when(table.getTableName()).thenReturn(testName.getTableName());
+    Mockito.when(table.getName()).thenReturn(TableName.valueOf(testName.getTableName()));
     // add the table to the set of tables, so its returned to the writer
     tables.put(new ImmutableBytesPtr(tableName), table);
 
@@ -158,8 +158,8 @@ public class TestIndexWriter {
     FakeTableFactory factory = new FakeTableFactory(tables);
 
     byte[] tableName = this.testName.getTableName();
-    HTableInterface table = Mockito.mock(HTableInterface.class);
-    Mockito.when(table.getTableName()).thenReturn(tableName);
+    Table table = Mockito.mock(Table.class);
+    Mockito.when(table.getName()).thenReturn(TableName.valueOf(tableName));
     final CountDownLatch writeStartedLatch = new CountDownLatch(1);
     // latch never gets counted down, so we wait forever
     final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);

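The TestIndexWriter changes above reflect the removal of HTableInterface: Table#getName() returns a TableName where getTableName() returned raw bytes, and batch now takes the results array as a second argument. If the two-argument batch is void in the target HBase version, it cannot be stubbed through Mockito.when(); doAnswer is the usual Mockito idiom. A hedged sketch, assumed to sit inside a test method that declares throws Exception (the flag array and table name are placeholders):

    Table table = Mockito.mock(Table.class);
    Mockito.when(table.getName()).thenReturn(TableName.valueOf("t1"));
    final boolean[] completed = new boolean[] { false };
    // Stub a void method: doAnswer(...).when(mock).call(...) instead of when(call).
    Mockito.doAnswer(invocation -> {
        completed[0] = true;
        return null;
    }).when(table).batch(Mockito.anyList(), Mockito.any());
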
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 0d6ac7f..1ace4c5 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -34,12 +34,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -173,18 +172,19 @@ public class TestWALRecoveryCaching {
     builder.addIndexGroup(columns);
 
     // create the primary table w/ indexing enabled
-    HTableDescriptor primaryTable = new HTableDescriptor(testTable.getTableName());
-    primaryTable.addFamily(new HColumnDescriptor(family));
-    primaryTable.addFamily(new HColumnDescriptor(nonIndexedFamily));
+    TableDescriptor primaryTable = TableDescriptorBuilder.newBuilder(TableName.valueOf(testTable.getTableName()))
+                .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
+                .addColumnFamily(ColumnFamilyDescriptorBuilder.of(nonIndexedFamily)).build();
     builder.addArbitraryConfigForTesting(Indexer.RecoveryFailurePolicyKeyForTesting,
       ReleaseLatchOnFailurePolicy.class.getName());
     builder.build(primaryTable);
     admin.createTable(primaryTable);
 
     // create the index table
-    HTableDescriptor indexTableDesc = new HTableDescriptor(Bytes.toBytes(getIndexTableName()));
-    indexTableDesc.addCoprocessor(IndexTableBlockingReplayObserver.class.getName());
-    TestIndexManagementUtil.createIndexTable(admin, indexTableDesc);
+    TableDescriptorBuilder indexTableBuilder = TableDescriptorBuilder
+                .newBuilder(TableName.valueOf(Bytes.toBytes(getIndexTableName())))
+                .addCoprocessor(IndexTableBlockingReplayObserver.class.getName());
+    TestIndexManagementUtil.createIndexTable(admin, indexTableBuilder);
 
     // figure out where our tables live
     ServerName shared =
@@ -218,7 +218,8 @@ public class TestWALRecoveryCaching {
         LOG.info("\t== Offline: " + server.getServerName());
         continue;
       }
-      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server.getRSRpcServices());
+      
+      List<HRegion> regions = server.getRegions();
       LOG.info("\t" + server.getServerName() + " regions: " + regions);
     }
 
@@ -268,14 +269,14 @@ public class TestWALRecoveryCaching {
    * @param table
    * @return
    */
-  private List<Region> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
+  private List<HRegion> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
       byte[] table) {
-    List<Region> online = Collections.emptyList();
+    List<HRegion> online = Collections.emptyList();
     for (RegionServerThread rst : cluster.getRegionServerThreads()) {
      // if its the server we are going to kill, get the regions we want to reassign
       if (rst.getRegionServer().getServerName().equals(server)) {
-        online = rst.getRegionServer().getOnlineRegions(org.apache.hadoop.hbase.TableName.valueOf(table));
-        break;
+          online = rst.getRegionServer().getRegions(org.apache.hadoop.hbase.TableName.valueOf(table));
+          break;
       }
     }
     return online;
@@ -306,7 +307,7 @@ public class TestWALRecoveryCaching {
       tryIndex = !tryIndex;
       for (ServerName server : servers) {
         // find the regionserver that matches the passed server
-        List<Region> online = getRegionsFromServerForTable(cluster, server, table);
+        List<HRegion> online = getRegionsFromServerForTable(cluster, server, table);
 
         LOG.info("Shutting down and reassigning regions from " + server);
         cluster.stopRegionServer(server);
@@ -314,7 +315,7 @@ public class TestWALRecoveryCaching {
 
         // force reassign the regions from the table
         for (Region region : online) {
-          cluster.getMaster().assignRegion(region.getRegionInfo());
+          cluster.getMaster().getAssignmentManager().assign(region.getRegionInfo());
         }
 
         LOG.info("Starting region server:" + server.getHostname());

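The recovery test above now works with concrete HRegion handles: ProtobufUtil.getOnlineRegions(rpcServices) becomes HRegionServer#getRegions, and HMaster#assignRegion goes through the AssignmentManager. A condensed sketch of the reassignment loop, assuming a MiniHBaseCluster and a placeholder table name:

    // Hedged sketch: reassigning one table's regions (cluster and "t1" are assumptions).
    TableName table = TableName.valueOf("t1");
    HRegionServer rs = cluster.getRegionServer(0);
    List<HRegion> online = rs.getRegions(table); // was getOnlineRegions(TableName)
    for (HRegion region : online) {
        // was cluster.getMaster().assignRegion(region.getRegionInfo())
        cluster.getMaster().getAssignmentManager().assign(region.getRegionInfo());
    }
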
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
index 7253165..2bfbcbf 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
@@ -30,12 +30,14 @@ import java.util.Set;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -76,15 +78,18 @@ public class TestPerRegionIndexWriteCache {
       TEST_UTIL.getConfiguration().set("hbase.rootdir", 
hbaseRootDir.toString());
 
       FileSystem newFS = FileSystem.newInstance(TEST_UTIL.getConfiguration());
-      HRegionInfo hri = new HRegionInfo(tableName, null, null, false);
+      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null).setSplit(false).build();
       Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
       Random rn = new Random();
       tableName = TableName.valueOf("TestPerRegion" + rn.nextInt());
      WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestPerRegionIndexWriteCache");
       wal = walFactory.getWAL(Bytes.toBytes("logs"), null);
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
-      htd.addFamily(a);
+        TableDescriptor htd =
+                TableDescriptorBuilder
+                        .newBuilder(tableName)
+                        .addColumnFamily(
+                            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a")).build())
+                        .build();
       
      r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
           @Override

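HRegionInfo's public constructors disappear in 2.0; start key, end key, and the split flag become setters on RegionInfoBuilder, as in the hunk above. A minimal sketch mirroring the old new HRegionInfo(tableName, null, null, false):

    // Hedged sketch: RegionInfo via the 2.0 builder ("t1" is a placeholder).
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf("t1"))
            .setStartKey(HConstants.EMPTY_START_ROW) // the 1.x constructor treated null as the empty row
            .setEndKey(HConstants.EMPTY_END_ROW)
            .setSplit(false)
            .build();
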
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 1133826..8a09420 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -112,10 +112,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -1526,9 +1526,9 @@ public abstract class BaseTest {
         if (driver == null) return;
         Admin admin = driver.getConnectionQueryServices(null, null).getAdmin();
         try {
-            HTableDescriptor[] tables = admin.listTables();
-            for (HTableDescriptor table : tables) {
-                String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getName());
+            TableDescriptor[] tables = admin.listTables();
+            for (TableDescriptor table : tables) {
+                String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getTableName().getName());
                 if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) {
                     disableAndDropTable(admin, table.getTableName());
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
index dd96d8c..177aff3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
@@ -28,7 +28,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -50,7 +50,7 @@ public class StatisticsScannerTest {
     private StatisticsScanner mockScanner;
     private StatisticsCollector tracker;
     private InternalScanner delegate;
-    private HRegionInfo regionInfo;
+    private RegionInfo regionInfo;
 
     private Configuration config;
 
@@ -67,11 +67,10 @@ public class StatisticsScannerTest {
         this.mockScanner = mock(StatisticsScanner.class);
         this.tracker = mock(StatisticsCollector.class);
         this.delegate = mock(InternalScanner.class);
-        this.regionInfo = mock(HRegionInfo.class);
+        this.regionInfo = mock(RegionInfo.class);
 
         // Wire up the mocks to the mock StatisticsScanner
         when(mockScanner.getStatisticsWriter()).thenReturn(statsWriter);
-        when(mockScanner.getRegionServerServices()).thenReturn(rsServices);
         when(mockScanner.createCallable()).thenReturn(callable);
         when(mockScanner.getStatsCollectionRunTracker(any(Configuration.class))).thenReturn(runTracker);
         when(mockScanner.getRegion()).thenReturn(region);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index fcc0261..d0bfc7f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -55,7 +55,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
@@ -64,6 +63,8 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -965,17 +966,20 @@ public class TestUtil {
     public static void addCoprocessor(Connection conn, String tableName, Class coprocessorClass) throws Exception {
         int priority = QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY + 100;
         ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
-        HTableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
+        TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
+        TableDescriptorBuilder descriptorBuilder = null;
               if (!descriptor.getCoprocessors().contains(coprocessorClass.getName())) {
-                       descriptor.addCoprocessor(coprocessorClass.getName(), null, priority, null);
+                   descriptorBuilder=TableDescriptorBuilder.newBuilder(descriptor);
+                   descriptorBuilder.addCoprocessor(coprocessorClass.getName(), null, priority, null);
                }else{
                        return;
                }
         final int retries = 10;
         int numTries = 10;
+        descriptor = descriptorBuilder.build();
         try (Admin admin = services.getAdmin()) {
-            admin.modifyTable(TableName.valueOf(tableName), descriptor);
-            while (!admin.getTableDescriptor(TableName.valueOf(tableName)).equals(descriptor)
+            admin.modifyTable(descriptor);
+            while (!admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor)
                     && numTries > 0) {
                 numTries--;
                 if (numTries == 0) {
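TableDescriptor is immutable in 2.0, which is why addCoprocessor above first copies the fetched descriptor into a TableDescriptorBuilder, mutates the builder, and hands the rebuilt descriptor (which carries its own TableName) to the one-argument Admin#modifyTable. A condensed sketch of the pattern, assumed to sit inside a method that declares throws IOException, with a hypothetical coprocessor class and table name:

    // Hedged sketch: adding a coprocessor under the immutable-descriptor API.
    TableDescriptor current = admin.getDescriptor(TableName.valueOf("t1")); // "t1" is a placeholder
    if (!current.getCoprocessors().contains("org.example.MyObserver")) {    // hypothetical class
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
                .addCoprocessor("org.example.MyObserver")
                .build();
        admin.modifyTable(updated); // was modifyTable(TableName, HTableDescriptor)
    }
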
@@ -990,17 +994,20 @@ public class TestUtil {
 
     public static void removeCoprocessor(Connection conn, String tableName, Class coprocessorClass) throws Exception {
         ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
-        HTableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
+        TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
+        TableDescriptorBuilder descriptorBuilder = null;
         if (descriptor.getCoprocessors().contains(coprocessorClass.getName())) {
-            descriptor.removeCoprocessor(coprocessorClass.getName());
+            descriptorBuilder=TableDescriptorBuilder.newBuilder(descriptor);
+            descriptorBuilder.removeCoprocessor(coprocessorClass.getName());
         }else{
             return;
         }
         final int retries = 10;
         int numTries = retries;
+        descriptor = descriptorBuilder.build();
         try (Admin admin = services.getAdmin()) {
-            admin.modifyTable(TableName.valueOf(tableName), descriptor);
-            while (!admin.getTableDescriptor(TableName.valueOf(tableName)).equals(descriptor)
+            admin.modifyTable(descriptor);
+            while (!admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor)
                     && numTries > 0) {
                 numTries--;
                 if (numTries == 0) {

Reply via email to