http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
index 658ef92..26b4f5c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
@@ -30,9 +30,9 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.ConnectionQueryServices;
@@ -69,7 +69,7 @@ public class SkipScanAfterManualSplitIT extends ParallelStatsDisabledIT {
         Connection conn = getConnection();
         conn.createStatement().execute("CREATE TABLE " + tableName + "("
                 + "a VARCHAR PRIMARY KEY, b VARCHAR) "
-                + HTableDescriptor.MAX_FILESIZE + "=" + MAX_FILESIZE + ","
+                + TableDescriptorBuilder.MAX_FILESIZE + "=" + MAX_FILESIZE + ","
                 + " SALT_BUCKETS = 4");
         PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
         int rowCount = 0;
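The hunk above swaps the deprecated HTableDescriptor.MAX_FILESIZE constant for its TableDescriptorBuilder equivalent; both are the string key "MAX_FILESIZE", so the generated Phoenix DDL is unchanged. For reference, a minimal sketch of the HBase 2.x builder idiom this patch series converges on (table and family names here are illustrative, not from the patch; assumes an open Admin named admin):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // HTableDescriptor/HColumnDescriptor are mutable and deprecated in HBase 2;
    // their replacements are immutable and assembled through builders.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("EXAMPLE"))
            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("0"))
            .setMaxFileSize(1024L * 1024L * 1024L)  // the property the MAX_FILESIZE key controls
            .build();
    admin.createTable(desc);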
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
index 92871aa..8f17281 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
@@ -27,15 +27,13 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -170,8 +168,8 @@ public class UseSchemaIT extends ParallelStatsDisabledIT {
         Connection conn = DriverManager.getConnection(getUrl(), props);
         Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
         admin.createNamespace(NamespaceDescriptor.create(schema).build());
-        admin.createTable(new HTableDescriptor(fullTablename)
-                .addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)));
+        admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(fullTablename)).
+                addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)).build());
         Put put = new Put(PVarchar.INSTANCE.toBytes(fullTablename));
         put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
                 QueryConstants.EMPTY_COLUMN_VALUE_BYTES);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 94f306f..4b64a09 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -36,10 +36,10 @@ import java.sql.SQLException;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -729,9 +729,9 @@ public class ViewIT extends BaseViewIT {
 
         // test for a view that is in non-default schema
         {
-            HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS, TBL));
-            desc.addFamily(new HColumnDescriptor(CF));
-            admin.createTable(desc);
+            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(NS, TBL));
+            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
+            admin.createTable(builder.build());
 
             String view = NS + "." + TBL;
             conn.createStatement().execute(
@@ -746,9 +746,9 @@ public class ViewIT extends BaseViewIT {
 
         // test for a view whose name contains a dot (e.g. "AAA.BBB") in default schema (for backward compatibility)
         {
-            HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS + "." + TBL));
-            desc.addFamily(new HColumnDescriptor(CF));
-            admin.createTable(desc);
+            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(NS + "." + TBL));
+            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
+            admin.createTable(builder.build());
 
             String view = "\"" + NS + "." + TBL + "\"";
             conn.createStatement().execute(
@@ -763,9 +763,9 @@ public class ViewIT extends BaseViewIT {
 
         // test for a view whose name contains a dot (e.g. "AAA.BBB") in non-default schema
         {
-            HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS, NS + "." + TBL));
-            desc.addFamily(new HColumnDescriptor(CF));
-            admin.createTable(desc);
+            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(NS, NS + "." + TBL));
+            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
+            admin.createTable(builder.build());
 
             String view = NS + ".\"" + NS + "."
                    + TBL + "\"";
             conn.createStatement().execute(

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
index 7b060e3..3fd6b3b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
@@ -42,14 +42,14 @@ import java.util.Random;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.FromCompiler;
@@ -967,7 +967,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
                     CellScanner cellScanner = result.cellScanner();
                     while (cellScanner.advance()) {
                         Cell current = cellScanner.current();
-                        assertEquals (KeyValue.Type.Put.getCode(), current.getTypeByte());
+                        assertTrue(CellUtil.isPut(current));
                     }
                 }
             };
@@ -1059,7 +1059,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
             String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM);
             String tName = rs.getString(PhoenixDatabaseMetaData.TABLE_NAME);
             org.apache.hadoop.hbase.TableName hbaseTableName = SchemaUtil.getPhysicalTableName(SchemaUtil.getTableName(schemaName, tName), p);
-            HTableDescriptor htd = admin.getTableDescriptor(hbaseTableName);
+            TableDescriptor htd = admin.getDescriptor(hbaseTableName);
             String val = htd.getValue("PRIORITY");
             assertNotNull("PRIORITY is not set for table:" + htd, val);
             assertTrue(Integer.parseInt(val)
@@ -1078,13 +1078,13 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
                 stmt.execute(ddl);
             }
 
-            HTableDescriptor dataTable = admin.getTableDescriptor(
+            TableDescriptor dataTable = admin.getDescriptor(
                     org.apache.hadoop.hbase.TableName.valueOf(fullTableName));
             String val = dataTable.getValue("PRIORITY");
             assertTrue(val == null || Integer.parseInt(val) < HConstants.HIGH_QOS);
 
             if (!localIndex && mutable) {
-                HTableDescriptor indexTable = admin.getTableDescriptor(
+                TableDescriptor indexTable = admin.getDescriptor(
                         org.apache.hadoop.hbase.TableName.valueOf(indexName));
                 val = indexTable.getValue("PRIORITY");
                 assertNotNull("PRIORITY is not set for table:" + indexTable, val);
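Two API notes on the BaseIndexIT changes above: KeyValue.Type became private API in HBase 2, and CellUtil.isPut(Cell) is the public replacement for comparing the type byte against KeyValue.Type.Put.getCode(); likewise Admin.getTableDescriptor(TableName) became Admin.getDescriptor(TableName), returning the read-only TableDescriptor interface. A hedged sketch of the cell-type check (result is an HBase Result, as in the test):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellScanner;
    import org.apache.hadoop.hbase.CellUtil;

    CellScanner cells = result.cellScanner();
    while (cells.advance()) {
        Cell current = cells.current();
        // HBase 2 replacement for: KeyValue.Type.Put.getCode() == current.getTypeByte()
        assertTrue(CellUtil.isPut(current));
    }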
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
index 550e9e2..71a9f00 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
@@ -23,9 +23,12 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -65,12 +68,11 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
         byte[] hbaseNativeBytes = SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, hbaseNativeViewName);
         try {
-            @SuppressWarnings("deprecation")
-            HTableDescriptor descriptor = new HTableDescriptor(hbaseNativeBytes);
-            HColumnDescriptor columnDescriptor = new HColumnDescriptor(FAMILY_NAME);
-            columnDescriptor.setKeepDeletedCells(true);
-            descriptor.addFamily(columnDescriptor);
-            admin.createTable(descriptor);
+            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(hbaseNativeBytes));
+            ColumnFamilyDescriptor columnDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_NAME)
+                    .setKeepDeletedCells(KeepDeletedCells.TRUE).build();
+            builder.addColumnFamily(columnDescriptor);
+            admin.createTable(builder.build());
         } finally {
             admin.close();
         }
@@ -82,7 +84,7 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
                 " \"1\".uint_col unsigned_int," +
                 " \"1\".ulong_col unsigned_long" +
                 " CONSTRAINT pk PRIMARY KEY (uint_key, ulong_key, string_key))\n" +
-                HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE + "'");
+                ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE + "'");
         conn.createStatement().execute("drop view " + hbaseNativeViewName);
         conn.close();
     }
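setKeepDeletedCells(boolean) has no direct equivalent on the new builder; HBase 2 models the option as the three-valued KeepDeletedCells enum (FALSE, TRUE, TTL), which is why the hunk above passes KeepDeletedCells.TRUE. A minimal sketch, with the family name a placeholder:

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("1"))
            .setKeepDeletedCells(KeepDeletedCells.TRUE)  // was: setKeepDeletedCells(true)
            .build();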
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 04f34c6..41616f2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -33,7 +33,6 @@ import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
@@ -41,18 +40,18 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
@@ -177,9 +176,9 @@ public class LocalIndexIT extends BaseLocalIndexIT {
         conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)");
         conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
         Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
-        HTableDescriptor htd = admin
-                .getTableDescriptor(TableName.valueOf(indexPhysicalTableName));
-        assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
+        TableDescriptor htd = admin
+                .getDescriptor(TableName.valueOf(indexPhysicalTableName));
+        assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(TableDescriptorBuilder.SPLIT_POLICY));
         try(org.apache.hadoop.hbase.client.Connection c = ConnectionFactory.createConnection(admin.getConfiguration())) {
             try (RegionLocator userTable= c.getRegionLocator(SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped))) {
                 try (RegionLocator indxTable = c.getRegionLocator(TableName.valueOf(indexPhysicalTableName))) {
@@ -443,8 +442,8 @@ public class LocalIndexIT extends BaseLocalIndexIT {
                     Scan s = new Scan();
                     s.setStartRow(startKeys[i]);
                     s.setStopRow(endKeys[i]);
-                    Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
-                    for(HColumnDescriptor cf: families) {
+                    ColumnFamilyDescriptor[] families = table.getDescriptor().getColumnFamilies();
+                    for(ColumnFamilyDescriptor cf: families) {
                         if(cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)){
                             s.addFamily(cf.getName());
                         }
@@ -607,7 +606,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
             ResultSet rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName);
             assertTrue(rs.next());
             assertEquals(2000, rs.getLong(1));
-            List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(tableName));
+            List<RegionInfo> tableRegions = admin.getRegions(TableName.valueOf(tableName));
             admin.disableTable(TableName.valueOf(tableName));
             copyLocalIndexHFiles(config, tableRegions.get(0), tableRegions.get(1), false);
             copyLocalIndexHFiles(config, tableRegions.get(3), tableRegions.get(0), false);
@@ -671,14 +670,14 @@ public class LocalIndexIT extends BaseLocalIndexIT {
         conn1.close();
     }
 
-    private void copyLocalIndexHFiles(Configuration conf, HRegionInfo fromRegion, HRegionInfo toRegion, boolean move)
+    private void copyLocalIndexHFiles(Configuration conf, RegionInfo fromRegion, RegionInfo toRegion, boolean move)
             throws IOException {
         Path root = FSUtils.getRootDir(conf);
-        Path seondRegion = new Path(HTableDescriptor.getTableDir(root, fromRegion.getTableName()) + Path.SEPARATOR
+        Path seondRegion = new Path(FSUtils.getTableDir(root, fromRegion.getTable()) + Path.SEPARATOR
                 + fromRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
         Path hfilePath = FSUtils.getCurrentFileSystem(conf).listFiles(seondRegion, true).next().getPath();
-        Path firstRegionPath = new Path(HTableDescriptor.getTableDir(root, toRegion.getTableName()) + Path.SEPARATOR
+        Path firstRegionPath = new Path(FSUtils.getTableDir(root, toRegion.getTable()) + Path.SEPARATOR
                + toRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
         FileSystem currentFileSystem = FSUtils.getCurrentFileSystem(conf);
         assertTrue(FileUtil.copy(currentFileSystem, hfilePath, currentFileSystem, firstRegionPath, move, conf));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 66fe338..a931084 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -37,13 +36,10 @@ import java.util.Properties;
 
 import jline.internal.Log;
 
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -51,7 +47,6 @@ import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
@@ -622,18 +617,6 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
         }
     }
 
-    private void createTableAndLoadData(Connection conn1, String tableName, String indexName, String[] strings, boolean isReverse) throws SQLException {
-        createBaseTable(conn1, tableName, null);
-        for (int i = 0; i < 26; i++) {
-            conn1.createStatement().execute(
-                "UPSERT INTO " + tableName + " values('"+strings[i]+"'," + i + ","
-                        + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
-        }
-        conn1.commit();
-        conn1.createStatement().execute(
-            "CREATE " + (localIndex ? "LOCAL" : "")+" INDEX " + indexName + " ON " + tableName + "(v1"+(isReverse?" DESC":"")+") include (k3)");
-    }
-
     @Test
     public void testIndexHalfStoreFileReader() throws Exception {
         Connection conn1 = getConnection();
@@ -658,7 +641,6 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
         TableName indexTable = TableName.valueOf(localIndex?tableName: indexName);
         admin.flush(indexTable);
         boolean merged = false;
-        Table table = connectionQueryServices.getTable(indexTable.getName());
         // merge regions until 1 left
         long numRegions = 0;
         while (true) {
@@ -666,16 +648,16 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
             assertTrue(rs.next());
             assertEquals(4, rs.getInt(1)); //TODO this returns 5 sometimes instead of 4, duplicate results?
             try {
-                List<HRegionInfo> indexRegions = admin.getTableRegions(indexTable);
+                List<RegionInfo> indexRegions = admin.getRegions(indexTable);
                 numRegions = indexRegions.size();
                 if (numRegions==1) {
                     break;
                 }
                 if(!merged) {
-                    List<HRegionInfo> regions =
-                            admin.getTableRegions(indexTable);
+                    List<RegionInfo> regions =
+                            admin.getRegions(indexTable);
                     Log.info("Merging: " + regions.size());
-                    admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
+                    admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
                         regions.get(1).getEncodedNameAsBytes(), false);
                     merged = true;
                     Threads.sleep(10000);
@@ -686,7 +668,7 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
             long waitStartTime = System.currentTimeMillis();
             // wait until merge happened
             while (System.currentTimeMillis() - waitStartTime < 10000) {
-                List<HRegionInfo> regions = admin.getTableRegions(indexTable);
+                List<RegionInfo> regions = admin.getRegions(indexTable);
                 Log.info("Waiting:" + regions.size());
                 if (regions.size() < numRegions) {
                     break;
@@ -698,72 +680,6 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
         }
     }
 
-
-    private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
-            throws SQLException, IOException, InterruptedException {
-        ResultSet rs;
-
-        String query = "SELECT t_id,k1,v1 FROM " + tableName;
-        rs = conn1.createStatement().executeQuery(query);
-        String[] tIdColumnValues = new String[26];
-        String[] v1ColumnValues = new String[26];
-        int[] k1ColumnValue = new int[26];
-        for (int j = 0; j < 5; j++) {
-            assertTrue(rs.next());
-            tIdColumnValues[j] = rs.getString("t_id");
-            k1ColumnValue[j] = rs.getInt("k1");
-            v1ColumnValues[j] = rs.getString("V1");
-        }
-
-        String[] splitKeys = new String[2];
-        splitKeys[0] = strings[4];
-        splitKeys[1] = strings[12];
-
-        int[] splitInts = new int[2];
-        splitInts[0] = 22;
-        splitInts[1] = 4;
-        List<HRegionInfo> regionsOfUserTable = null;
-        for(int i = 0; i <=1; i++) {
-            Threads.sleep(10000);
-            if(localIndex) {
-                admin.split(TableName.valueOf(tableName),
-                    ByteUtil.concat(Bytes.toBytes(splitKeys[i])));
-            } else {
-                admin.split(TableName.valueOf(indexName), ByteUtil.concat(Bytes.toBytes(splitInts[i])));
-            }
-            Thread.sleep(100);
-            regionsOfUserTable =
-                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
-                        admin.getConnection(), TableName.valueOf(localIndex?tableName:indexName),
-                        false);
-
-            while (regionsOfUserTable.size() != (i+2)) {
-                Thread.sleep(100);
-                regionsOfUserTable =
-                        MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
-                            admin.getConnection(),
-                            TableName.valueOf(localIndex?tableName:indexName), false);
-            }
-            assertEquals(i+2, regionsOfUserTable.size());
-        }
-        for (int j = 5; j < 26; j++) {
-            assertTrue(rs.next());
-            tIdColumnValues[j] = rs.getString("t_id");
-            k1ColumnValue[j] = rs.getInt("k1");
-            v1ColumnValues[j] = rs.getString("V1");
-        }
-        Arrays.sort(tIdColumnValues);
-        Arrays.sort(v1ColumnValues);
-        Arrays.sort(k1ColumnValue);
-        assertTrue(Arrays.equals(strings, tIdColumnValues));
-        assertTrue(Arrays.equals(strings, v1ColumnValues));
-        for(int i=0;i<26;i++) {
-            assertEquals(i, k1ColumnValue[i]);
-        }
-        assertFalse(rs.next());
-        return regionsOfUserTable;
-    }
-
     private void createBaseTable(Connection conn, String tableName, String splits) throws SQLException {
         String ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT NULL,\n" +
                 "k1 INTEGER NOT NULL,\n" +
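Admin.mergeRegions was dropped in favor of the asynchronous mergeRegionsAsync, which returns a Future rather than blocking until the merge completes; that is why the test above keeps its own sleep-and-poll loop. A hedged sketch of the call (indexTable as in the test, an open Admin named admin assumed):

    import java.util.List;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.RegionInfo;

    List<RegionInfo> regions = admin.getRegions(indexTable);
    Future<Void> merge = admin.mergeRegionsAsync(
            regions.get(0).getEncodedNameAsBytes(),
            regions.get(1).getEncodedNameAsBytes(),
            false);                       // forcible=false: refuse non-adjacent merges
    merge.get(30, TimeUnit.SECONDS);      // or poll admin.getRegions(...) as the test does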
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
index b8b96ac..1a380b8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
@@ -38,20 +38,23 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.BaseTest;
@@ -91,8 +94,8 @@ public class MutableIndexReplicationIT extends BaseTest {
     protected static Configuration conf1 = HBaseConfiguration.create();
     protected static Configuration conf2;
 
-    protected static ZooKeeperWatcher zkw1;
-    protected static ZooKeeperWatcher zkw2;
+    protected static ZKWatcher zkw1;
+    protected static ZKWatcher zkw2;
 
     protected static ReplicationAdmin admin;
 
@@ -122,7 +125,6 @@ public class MutableIndexReplicationIT extends BaseTest {
         conf1.setLong("hbase.master.logcleaner.ttl", 10);
         conf1.setInt("zookeeper.recovery.retry", 1);
         conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
-        conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
         conf1.setBoolean("dfs.support.append", true);
         conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
         conf1.setInt("replication.stats.thread.period.seconds", 5);
@@ -134,7 +136,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         // Have to reset conf1 in case zk cluster location different
         // than default
         conf1 = utility1.getConfiguration();
-        zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
+        zkw1 = new ZKWatcher(conf1, "cluster1", null, true);
         admin = new ReplicationAdmin(conf1);
         LOG.info("Setup first Zk");
 
@@ -142,16 +144,15 @@ public class MutableIndexReplicationIT extends BaseTest {
         conf2 = HBaseConfiguration.create(conf1);
         conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
         conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
-        conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
         conf2.setBoolean("dfs.support.append", true);
         conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
 
         utility2 = new HBaseTestingUtility(conf2);
         utility2.setZkCluster(miniZK);
-        zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
+        zkw2 = new ZKWatcher(conf2, "cluster2", null, true);
 
         //replicate from cluster 1 -> cluster 2, but not back again
-        admin.addPeer("1", utility2.getClusterKey());
+        admin.addPeer("1", new ReplicationPeerConfig().setClusterKey(utility2.getClusterKey()),null);
         LOG.info("Setup second Zk");
 
         utility1.startMiniCluster(2);
@@ -193,28 +194,27 @@ public class MutableIndexReplicationIT extends BaseTest {
         assertFalse(rs.next());
 
         // make sure the data tables are created on the remote cluster
-        Admin admin = utility1.getHBaseAdmin();
-        Admin admin2 = utility2.getHBaseAdmin();
+        Admin admin = utility1.getAdmin();
+        Admin admin2 = utility2.getAdmin();
         List<String> dataTables = new ArrayList<String>();
         dataTables.add(DATA_TABLE_FULL_NAME);
         dataTables.add(INDEX_TABLE_FULL_NAME);
         for (String tableName : dataTables) {
-            HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf(tableName));
+            TableDescriptor desc = admin.getDescriptor(TableName.valueOf(tableName));
 
             //create it as-is on the remote cluster
             admin2.createTable(desc);
 
             LOG.info("Enabling replication on source table: "+tableName);
-            HColumnDescriptor[] cols = desc.getColumnFamilies();
+            ColumnFamilyDescriptor[] cols = desc.getColumnFamilies();
             assertEquals(1, cols.length);
             // add the replication scope to the column
-            HColumnDescriptor col = desc.removeFamily(cols[0].getName());
-            col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
-            desc.addFamily(col);
+            ColumnFamilyDescriptor col = ColumnFamilyDescriptorBuilder.newBuilder(cols[0].getName()).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build();
+            desc=TableDescriptorBuilder.newBuilder(desc).addColumnFamily(col).build();
             //disable/modify/enable table so it has replication enabled
             admin.disableTable(desc.getTableName());
-            admin.modifyTable(TableName.valueOf(tableName), desc);
+            admin.modifyTable(desc);
             admin.enableTable(desc.getTableName());
             LOG.info("Replication enabled on source table: "+tableName);
         }
@@ -250,7 +250,7 @@ public class MutableIndexReplicationIT extends BaseTest {
         for (int i = 0; i < REPLICATION_RETRIES; i++) {
             if (i >= REPLICATION_RETRIES - 1) {
                 fail("Waited too much time for put replication on table " + remoteTable
-                        .getTableDescriptor().getNameAsString());
+                        .getDescriptor().getTableName());
             }
             if (ensureAnyRows(remoteTable)) {
                 break;
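Because ColumnFamilyDescriptor and TableDescriptor are immutable, enabling replication now means rebuilding the descriptors rather than mutating them, and Admin.modifyTable takes just the descriptor (the table name travels inside it). A hedged sketch of the rebuild; note the patch itself reuses addColumnFamily, whereas TableDescriptorBuilder.modifyColumnFamily is the variant that expresses replace-in-place:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    TableDescriptor desc = admin.getDescriptor(tableName);      // tableName: a TableName
    ColumnFamilyDescriptor col = ColumnFamilyDescriptorBuilder
            .newBuilder(desc.getColumnFamilies()[0])            // copy the existing family
            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
            .build();
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(desc)
            .modifyColumnFamily(col)
            .build();
    admin.modifyTable(updated);   // was: modifyTable(TableName, HTableDescriptor)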
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
index 902a83e..4d0e56f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
@@ -32,10 +32,10 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -103,7 +103,7 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
             "CREATE " + (localIndex ? "LOCAL" : "")+" INDEX " + indexName + " ON " + tableName + "(v1"+(isReverse?" DESC":"")+") include (k3)");
     }
 
-    private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
+    private List<RegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
             throws SQLException, IOException, InterruptedException {
         ResultSet rs;
 
@@ -126,7 +126,7 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
         int[] splitInts = new int[2];
         splitInts[0] = 22;
         splitInts[1] = 4;
-        List<HRegionInfo> regionsOfUserTable = null;
+        List<RegionInfo> regionsOfUserTable = null;
         for(int i = 0; i <=1; i++) {
             Threads.sleep(10000);
             if(localIndex) {
@@ -137,16 +137,14 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
             }
             Thread.sleep(100);
             regionsOfUserTable =
-                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
-                        admin.getConnection(), TableName.valueOf(localIndex?tableName:indexName),
-                        false);
+                    MetaTableAccessor.getTableRegions(admin.getConnection(),
+                        TableName.valueOf(localIndex ? tableName : indexName), false);
 
             while (regionsOfUserTable.size() != (i+2)) {
                 Thread.sleep(100);
                 regionsOfUserTable =
-                        MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
-                            admin.getConnection(),
-                            TableName.valueOf(localIndex?tableName:indexName), false);
+                        MetaTableAccessor.getTableRegions(admin.getConnection(),
+                            TableName.valueOf(localIndex ? tableName : indexName), false);
             }
             assertEquals(i+2, regionsOfUserTable.size());
         }
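MetaTableAccessor.getTableRegions lost its ZooKeeperWatcher parameter in HBase 2 and reads hbase:meta through the supplied Connection, as the hunks above show. A sketch of the new signature (table name illustrative):

    import java.util.List;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;

    List<RegionInfo> regions = MetaTableAccessor.getTableRegions(
            admin.getConnection(),            // hbase Connection; no ZK watcher needed
            TableName.valueOf("MY_TABLE"),
            false);                           // excludeOfflinedSplitParents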
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
index 59ed0d0..e0c8484 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
@@ -39,12 +39,12 @@ import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
@@ -205,14 +205,14 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
         // keep trying to split the region
         final HBaseTestingUtility utility = getUtility();
-        final Admin admin = utility.getHBaseAdmin();
+        final Admin admin = utility.getAdmin();
         final TableName dataTN = TableName.valueOf(dataTable);
         assertEquals(1, utility.getHBaseCluster().getRegions(dataTN).size());
         utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
             @Override
             public boolean evaluate() throws Exception {
                 try {
-                    List<HRegionInfo> regions = admin.getTableRegions(dataTN);
+                    List<RegionInfo> regions = admin.getRegions(dataTN);
                     if (regions.size() > 1) {
                         logger.info("Found region was split");
                         return true;
@@ -223,9 +223,9 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
                         return false;
                     } ;
-                    HRegionInfo hRegion = regions.get(0);
+                    RegionInfo hRegion = regions.get(0);
                     logger.info("Attempting to split region");
-                    admin.splitRegion(hRegion.getRegionName(), Bytes.toBytes(2));
+                    admin.splitRegionAsync(hRegion.getRegionName(), Bytes.toBytes(2));
                     return false;
                 } catch (NotServingRegionException nsre) {
                     // during split
@@ -260,18 +260,18 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
         final HBaseTestingUtility utility = getUtility();
         // try to close the region while UPSERT SELECTs are happening,
         final HRegionServer dataRs = utility.getHBaseCluster().getRegionServer(0);
-        final Admin admin = utility.getHBaseAdmin();
-        final HRegionInfo dataRegion =
-                admin.getTableRegions(TableName.valueOf(dataTable)).get(0);
+        final Admin admin = utility.getAdmin();
+        final RegionInfo dataRegion =
+                admin.getRegions(TableName.valueOf(dataTable)).get(0);
         logger.info("Closing data table region");
-        admin.closeRegion(dataRs.getServerName(), dataRegion);
+        admin.unassign(dataRegion.getEncodedNameAsBytes(), true);
         // make sure the region is offline
         utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
             @Override
             public boolean evaluate() throws Exception {
-                List<HRegionInfo> onlineRegions =
-                        admin.getOnlineRegions(dataRs.getServerName());
-                for (HRegionInfo onlineRegion : onlineRegions) {
+                List<RegionInfo> onlineRegions =
+                        admin.getRegions(dataRs.getServerName());
+                for (RegionInfo onlineRegion : onlineRegions) {
                     if (onlineRegion.equals(dataRegion)) {
                         logger.info("Data region still online");
                         return false;
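Admin.closeRegion(ServerName, HRegionInfo) and getOnlineRegions(ServerName) are gone; the test above now takes a region offline with unassign and checks a server's regions through the overloaded getRegions(ServerName). Hedged sketch (dataTN a TableName, serverName a ServerName, both illustrative):

    import java.util.List;
    import org.apache.hadoop.hbase.client.RegionInfo;

    RegionInfo dataRegion = admin.getRegions(dataTN).get(0);
    admin.unassign(dataRegion.getEncodedNameAsBytes(), true);   // force offline
    List<RegionInfo> online = admin.getRegions(serverName);     // was getOnlineRegions
    boolean stillOnline = online.contains(dataRegion);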
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index 7c6de68..beb4762 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -27,10 +27,12 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.VersionInfo;
@@ -122,12 +124,12 @@ public class FailForUnsupportedHBaseVersionsIT {
         try {
             // setup the primary table
-            @SuppressWarnings("deprecation")
-            HTableDescriptor desc = new HTableDescriptor(
-                    "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
+            TableDescriptorBuilder descBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(
+                    "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion"));
             byte[] family = Bytes.toBytes("f");
-            desc.addFamily(new HColumnDescriptor(family));
-
+
+            descBuilder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
+            TableDescriptor desc=descBuilder.build();
             // enable indexing to a non-existant index table
             String indexTableName = "INDEX_TABLE";
             ColumnGroup fam1 = new ColumnGroup(indexTableName);
@@ -140,7 +142,7 @@ public class FailForUnsupportedHBaseVersionsIT {
             HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);
 
             // create the primary table
-            Admin admin = util.getHBaseAdmin();
+            Admin admin = util.getAdmin();
             if (supported) {
                 admin.createTable(desc);
                 assertFalse("Hosting regeion server failed, even the HBase version (" + version

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
index b0c2cb4..35492cc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
@@ -37,9 +37,9 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -240,7 +240,7 @@ public class RoundRobinResultIteratorIT extends ParallelStatsDisabledIT {
         Connection conn = getConnection();
         conn.createStatement().execute("CREATE TABLE " + tableName + "("
                 + "a VARCHAR PRIMARY KEY, b VARCHAR) "
-                + HTableDescriptor.MAX_FILESIZE + "=" + maxFileSize + ","
+                + TableDescriptorBuilder.MAX_FILESIZE + "=" + maxFileSize + ","
                 + " SALT_BUCKETS = " + NUM_SALT_BUCKETS);
         PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
         int rowCount = 0;
@@ -309,7 +309,6 @@ public class RoundRobinResultIteratorIT extends ParallelStatsDisabledIT {
 
     @Test
     public void testBug2074() throws Exception {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = getConnection();
         try {
             conn.createStatement().execute("CREATE TABLE EVENTS"

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index bde8aeb..6167259 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -34,14 +34,14 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.ipc.CallRunner;
-import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -216,13 +216,13 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
         AssignmentManager am = master.getAssignmentManager();
 
         // verify there is only a single region for data table
-        List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(table1));
+        List<RegionInfo> tableRegions = admin.getRegions(TableName.valueOf(table1));
         assertEquals("Expected single region for " + table1, tableRegions.size(), 1);
-        HRegionInfo hri1 = tableRegions.get(0);
+        RegionInfo hri1 = tableRegions.get(0);
 
         // verify there is only a single region for index table
-        tableRegions = admin.getTableRegions(TableName.valueOf(table2));
-        HRegionInfo hri2 = tableRegions.get(0);
+        tableRegions = admin.getRegions(TableName.valueOf(table2));
+        RegionInfo hri2 = tableRegions.get(0);
         assertEquals("Expected single region for " + table2, tableRegions.size(), 1);
 
         ServerName serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
@@ -246,15 +246,15 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
             while (dstServer.getOnlineRegion(hri2.getRegionName()) == null
                     || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
                     || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
-                    || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
+                    || master.getAssignmentManager().getRegionStates().isRegionInTransition(hri2)) {
                 // wait for the move to be finished
                 Thread.sleep(1);
             }
         }
 
-        hri1 = admin.getTableRegions(TableName.valueOf(table1)).get(0);
+        hri1 = admin.getRegions(TableName.valueOf(table1)).get(0);
         serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
-        hri2 = admin.getTableRegions(TableName.valueOf(table2)).get(0);
+        hri2 = admin.getRegions(TableName.valueOf(table2)).get(0);
         serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
 
         // verify index and data tables are on different servers

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
index 76e3e8e..5aa97ab 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
@@ -38,14 +38,15 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.Random;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
@@ -429,7 +430,7 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         PreparedStatement stmt;
         conn.createStatement().execute("CREATE TABLE " + tableName + "(k CHAR(1) PRIMARY KEY, v INTEGER, w INTEGER) "
                 + (!tableDDLOptions.isEmpty() ? tableDDLOptions + "," : "")
-                + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
+                + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
         stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
         for (int i = 0; i < nRows; i++) {
             stmt.setString(1, Character.toString((char) ('a' + i)));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
index cf08d63..78c3bd2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
@@ -16,8 +16,8 @@
  * limitations under the License.
  */
 package org.apache.phoenix.tx;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.apache.phoenix.util.TestUtil.INDEX_DATA_SCHEMA;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -35,12 +35,13 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -280,9 +281,9 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
         conn.createStatement().execute("ALTER TABLE " + nonTxTableName + " SET TRANSACTIONAL=true");
 
         htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes( nonTxTableName));
-        assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+        assertTrue(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
         htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(index));
-        assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+        assertTrue(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
 
         conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (4, 'c')");
         ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ k FROM " + nonTxTableName + " WHERE v IS NULL");
@@ -357,10 +358,10 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
         assertFalse(rs.next());
 
         htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
-        assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+        assertFalse(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
         assertEquals(1,conn.unwrap(PhoenixConnection.class).getQueryServices().
                 getTableDescriptor(Bytes.toBytes("SYSTEM." + nonTxTableName)).
-                getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions());
+                getColumnFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions());
     }
 
     @Test
@@ -375,7 +376,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
         PTable table = pconn.getTable(new PTableKey(null, t1));
         Table htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
         assertTrue(table.isTransactional());
-        assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+        assertTrue(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
 
         try {
             ddl = "ALTER TABLE " + t1 + " SET transactional=false";
@@ -386,14 +387,14 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
         }
 
         Admin admin = pconn.getQueryServices().getAdmin();
-        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(t2));
-        desc.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
-        admin.createTable(desc);
+
+        admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(t2))
+                .addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)).build());
         ddl = "CREATE TABLE " + t2 + " (k varchar primary key) transactional=true";
         conn.createStatement().execute(ddl);
-        HTableDescriptor htableDescriptor = admin.getTableDescriptor(TableName.valueOf(t2));
-        String str = htableDescriptor.getValue(PhoenixTransactionContext.READ_NON_TX_DATA);
+        TableDescriptor tableDescriptor = admin.getDescriptor(TableName.valueOf(t2));
+        String str = tableDescriptor.getValue(PhoenixTransactionContext.READ_NON_TX_DATA);
         assertEquals(Boolean.TRUE.toString(), str);
 
         // Should be ok, as HBase metadata should match existing metadata.
@@ -409,7 +410,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
         table = pconn.getTable(new PTableKey(null, t1));
         htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
         assertTrue(table.isTransactional());
-        assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+        assertTrue(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
     }
 
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index 9286c2e..dcbc83e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -33,8 +33,9 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -166,53 +167,53 @@ public class TransactionIT extends ParallelStatsDisabledIT {
         conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "1 SET TRANSACTIONAL=true");
 
-        HTableDescriptor desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes(nonTxTableName + "1"));
-        for (HColumnDescriptor colDesc : desc.getFamilies()) {
+        TableDescriptor desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes(nonTxTableName + "1"));
+        for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
             assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
             assertEquals(1000, colDesc.getTimeToLive());
-            String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
-            assertEquals(1000, Integer.parseInt(propertyTTL));
+            byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+            assertEquals(1000, Bytes.toInt(propertyTTL));
         }
 
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("IDX1"));
-        for (HColumnDescriptor colDesc : desc.getFamilies()) {
+        for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
             assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
             assertEquals(1000, colDesc.getTimeToLive());
-            String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
-            assertEquals(1000, Integer.parseInt(propertyTTL));
+            byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+            assertEquals(1000, Bytes.toInt(propertyTTL));
         }
 
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("IDX2"));
-        for (HColumnDescriptor colDesc : desc.getFamilies()) {
+        for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
             assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
             assertEquals(1000, colDesc.getTimeToLive());
-            String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
-            assertEquals(1000, Integer.parseInt(propertyTTL));
+            byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+            assertEquals(1000, Bytes.toInt(propertyTTL));
         }
 
         conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "2(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
         conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "2 SET TRANSACTIONAL=true, VERSIONS=10");
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes( nonTxTableName + "2"));
-        for (HColumnDescriptor colDesc : desc.getFamilies()) {
+        for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
             assertEquals(10, colDesc.getMaxVersions());
-            assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
-            assertEquals(null, colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL));
+            assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_TTL, colDesc.getTimeToLive());
+            assertEquals(null, colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES));
         }
 
         conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "2 SET TTL=1000");
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes( nonTxTableName + "2"));
-        for (HColumnDescriptor colDesc : desc.getFamilies()) {
+        for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
             assertEquals(10, colDesc.getMaxVersions());
             assertEquals(1000, colDesc.getTimeToLive());
-            String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
-            assertEquals(1000, Integer.parseInt(propertyTTL));
+            byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+            assertEquals(1000, Bytes.toInt(propertyTTL));
         }
 
         conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "3(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
         conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "3 SET TRANSACTIONAL=true, b.VERSIONS=10, c.VERSIONS=20");
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes( nonTxTableName + "3"));
-        assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, desc.getFamily(Bytes.toBytes("A")).getMaxVersions());
-        assertEquals(10, desc.getFamily(Bytes.toBytes("B")).getMaxVersions());
-        assertEquals(20, desc.getFamily(Bytes.toBytes("C")).getMaxVersions());
+        assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, desc.getColumnFamily(Bytes.toBytes("A")).getMaxVersions());
+        assertEquals(10, desc.getColumnFamily(Bytes.toBytes("B")).getMaxVersions());
+        assertEquals(20, desc.getColumnFamily(Bytes.toBytes("C")).getMaxVersions());
 
         conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "4(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
         try {
@@ -231,11 +232,11 @@ public class TransactionIT extends ParallelStatsDisabledIT {
 
         conn.createStatement().execute("CREATE TABLE TX_TABLE1(k INTEGER PRIMARY KEY, v VARCHAR) TTL=1000, TRANSACTIONAL=true");
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("TX_TABLE1"));
-        for (HColumnDescriptor colDesc : desc.getFamilies()) {
+        for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
             assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
-            assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
-            String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL); - assertEquals(1000, Integer.parseInt(propertyTTL)); + assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_TTL, colDesc.getTimeToLive()); + byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES); + assertEquals(1000, Bytes.toInt(propertyTTL)); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java index 3cb36ee..3a70f66 100644 --- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java +++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; -import org.apache.hadoop.hbase.io.HalfStoreFileReader; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.CacheConfig; http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java index e9b5b37..9b88b03 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java @@ -248,15 +248,15 @@ public class ServerCacheClient { Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions); for (HRegionLocation entry : locations) { // Keep track of servers we've sent to and only send once - byte[] regionStartKey = entry.getRegionInfo().getStartKey(); - byte[] regionEndKey = entry.getRegionInfo().getEndKey(); + byte[] regionStartKey = entry.getRegion().getStartKey(); + byte[] regionEndKey = entry.getRegion().getEndKey(); if ( ! 
servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) { // Call RPC once per server servers.add(entry); if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));} - final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey()); + final byte[] key = getKeyInRegion(entry.getRegion().getStartKey()); final Table htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes()); closeables.add(htable); futures.add(executor.submit(new JobCallable<Boolean>() { @@ -355,7 +355,7 @@ public class ServerCacheClient { // Call once per server if (remainingOnServers.contains(entry)) { try { - byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey()); + byte[] key = getKeyInRegion(entry.getRegion().getStartKey()); iterateOverTable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() { @Override http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java index 9eaaf62..a0c0971 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java @@ -158,8 +158,8 @@ public class UpsertCompiler { statement.getConnection().getQueryServices() .getTableRegionLocation(table.getParentName().getBytes(), rowKey); byte[] regionPrefix = - region.getRegionInfo().getStartKey().length == 0 ? new byte[region - .getRegionInfo().getEndKey().length] : region.getRegionInfo() + region.getRegion().getStartKey().length == 0 ? 
new byte[region + .getRegion().getEndKey().length] : region.getRegion() .getStartKey(); if (regionPrefix.length != 0) { ptr.set(ScanRanges.prefixKey(ptr.get(), 0, ptr.getLength(), regionPrefix, http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index 7f0be01..602df4b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; @@ -62,6 +61,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -464,7 +464,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver if (flushSize <= 0) { flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, - HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE); + TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); } /** http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java index 8ef1f8d..3017231 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java @@ -25,7 +25,6 @@ import java.util.Collections; import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.compile.ExplainPlan; @@ -204,7 +203,7 @@ public class ClientAggregatePlan extends ClientProcessingPlan { } @Override - protected Tuple wrapKeyValueAsResult(KeyValue keyValue) { + protected Tuple wrapKeyValueAsResult(Cell keyValue) { return new MultiKeyValueTuple(Collections.<Cell> singletonList(keyValue)); } @@ -230,7 +229,7 @@ public class ClientAggregatePlan extends ClientProcessingPlan { } @Override - protected Tuple wrapKeyValueAsResult(KeyValue keyValue) + protected Tuple wrapKeyValueAsResult(Cell keyValue) throws SQLException { return new MultiKeyValueTuple(Collections.<Cell> singletonList(keyValue)); } 
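
A note on the TransactionIT hunks above: in the HBase 2.x client, ColumnFamilyDescriptor.getValue(byte[]) returns the raw byte[] stored in the descriptor, which is why Integer.parseInt over a String gives way to Bytes.toInt over bytes. A minimal sketch of that read pattern, assuming an HBase 2.x client on the classpath (the helper readIntProperty and its -1 "not set" sentinel are hypothetical):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public final class DescriptorValueSketch {
    // Reads an int-valued descriptor property stored under a byte[] key,
    // mirroring the PROPERTY_TTL_BYTES reads in the test hunks above.
    static int readIntProperty(TableDescriptor desc, byte[] key) {
        for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
            byte[] value = colDesc.getValue(key);
            if (value != null) {
                return Bytes.toInt(value); // was: Integer.parseInt(stringValue)
            }
        }
        return -1; // hypothetical "not set" sentinel
    }
}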
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 729b928..cb8accf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -40,8 +40,6 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Delete;
@@ -49,7 +47,10 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -329,7 +330,6 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
           // Causes the Increment to be ignored as we're committing the mutations
           // ourselves below.
          e.bypass();
-          e.complete();
           // ON DUPLICATE KEY IGNORE will return empty list if row already exists
           // as no action is required in that case.
           if (!mutations.isEmpty()) {
@@ -789,20 +789,20 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
 
   /**
    * Enable indexing on the given table
-   * @param desc {@link HTableDescriptor} for the table on which indexing should be enabled
+   * @param desc {@link TableDescriptor} for the table on which indexing should be enabled
    * @param builder class to use when building the index for this table
    * @param properties map of custom configuration options to make available to your
    *          {@link IndexBuilder} on the server-side
    * @param priority TODO
    * @throws IOException the Indexer coprocessor cannot be added
    */
-  public static void enableIndexing(HTableDescriptor desc, Class<? extends IndexBuilder> builder,
+  public static void enableIndexing(TableDescriptorBuilder descBuilder, Class<? extends IndexBuilder> builder,
       Map<String, String> properties, int priority) throws IOException {
     if (properties == null) {
       properties = new HashMap<String, String>();
     }
     properties.put(Indexer.INDEX_BUILDER_CONF_KEY, builder.getName());
-    desc.addCoprocessor(Indexer.class.getName(), null, priority, properties);
+    descBuilder.addCoprocessor(Indexer.class.getName(), null, priority, properties);
   }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
index 4adc7b9..cc1c773 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
@@ -16,8 +16,6 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index ceac999..d9abd75 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -113,7 +113,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                 if (env != null
                         && !allowLocalUpdates
                         && tableReference.getTableName().equals(
-                            env.getRegion().getTableDesc().getNameAsString())) {
+                            env.getRegion().getTableDescriptor().getTableName().getNameAsString())) {
                     continue;
                 }
                 /*
@@ -147,7 +147,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                         if (allowLocalUpdates
                                 && env != null
                                 && tableReference.getTableName().equals(
-                                    env.getRegion().getTableDesc().getNameAsString())) {
+                                    env.getRegion().getTableDescriptor().getTableName().getNameAsString())) {
                             try {
                                 throwFailureIfDone();
                                 IndexUtil.writeLocalUpdates(env.getRegion(), mutations, true);
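
A note on the enableIndexing() change above: HBase 2.x table descriptors are immutable once built, so the method now accepts a TableDescriptorBuilder and registers the Indexer coprocessor on the builder before build() materializes the descriptor. A minimal sketch of a caller under that API, assuming an HBase 2.x client (the table name, column family, and property key/value are hypothetical):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class IndexingTableSketch {
    // Sketch of the builder-based flow enableIndexing() now follows:
    // collect properties, register the coprocessor on the builder, then
    // materialize the immutable TableDescriptor with build().
    static void createTableWithCoprocessor(Admin admin) throws IOException {
        TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("MY_TABLE")); // hypothetical name
        builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("0")); // hypothetical family
        Map<String, String> properties = new HashMap<>();
        properties.put("index.builder", "com.example.MyIndexBuilder"); // hypothetical key/value
        builder.addCoprocessor("org.apache.phoenix.hbase.index.Indexer",
                null, Coprocessor.PRIORITY_USER, properties);
        admin.createTable(builder.build());
    }
}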
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index f427646..cf8279a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -124,7 +124,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                 if (env != null
                         && !allowLocalUpdates
                         && tableReference.getTableName().equals(
-                            env.getRegion().getTableDesc().getNameAsString())) {
+                            env.getRegion().getTableDescriptor().getTableName().getNameAsString())) {
                     continue;
                 }
                 tables.add(tableReference);
@@ -153,7 +153,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                         if (allowLocalUpdates
                                 && env != null
                                 && tableReference.getTableName().equals(
-                                    env.getRegion().getTableDesc().getNameAsString())) {
+                                    env.getRegion().getTableDescriptor().getTableName().getNameAsString())) {
                             try {
                                 throwFailureIfDone();
                                 IndexUtil.writeLocalUpdates(env.getRegion(), mutations, true);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
index ffb199a..ef1b40a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
@@ -75,8 +75,10 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
             Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(maintainer.getAllColumns(), metaData.getReplayWrite() != null, false, context);
             ValueGetter valueGetter = statePair.getFirst();
             IndexUpdate indexUpdate = statePair.getSecond();
-            indexUpdate.setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
-                    .getTableDesc().getName() : maintainer.getIndexTableName());
+            indexUpdate
+                    .setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
+                            .getTableDescriptor().getTableName().getName() : maintainer
+                            .getIndexTableName());
             Put put = maintainer.buildUpdateMutation(KV_BUILDER, valueGetter, ptr, state.getCurrentTimestamp(), env
                     .getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
             indexUpdate.setUpdate(put);
@@ -104,7 +106,8 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
                 if (valueGetter!=null) {
                     IndexUpdate indexUpdate = statePair.getSecond();
                     indexUpdate.setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
-                            .getTableDesc().getName() : maintainer.getIndexTableName());
+                            .getTableDescriptor().getTableName().getName() : maintainer
+                            .getIndexTableName());
                     Delete delete = maintainer.buildDeleteMutation(KV_BUILDER, valueGetter, ptr, state.getPendingUpdate(),
                             state.getCurrentTimestamp(), env.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
                     indexUpdate.setUpdate(delete);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 297902f..9a2981f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -32,11 +32,11 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -90,7 +90,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
         this.env = env;
         rebuildIndexOnFailure = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
                 QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
-        HTableDescriptor htd = env.getRegion().getTableDesc();
+        TableDescriptor htd = env.getRegion().getTableDescriptor();
         // If rebuild index is turned off globally, no need to check the table because the background thread
         // won't be running in this case
         if (rebuildIndexOnFailure) {
@@ -194,8 +194,8 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                     timestamp = minTimeStamp;
                     // If the data table has local index column families then get local indexes to disable.
-                    if (ref.getTableName().equals(env.getRegion().getTableDesc().getTableName().getNameAsString())
-                            && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDesc())) {
+                    if (ref.getTableName().equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString())
+                            && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDescriptor())) {
                         for (String tableName : getLocalIndexNames(ref, mutations)) {
                             indexTableNames.put(tableName, minTimeStamp);
                         }
@@ -283,7 +283,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
         }
         IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn);
-        HRegionInfo regionInfo = this.env.getRegion().getRegionInfo();
+        RegionInfo regionInfo = this.env.getRegion().getRegionInfo();
         int offset =
                 regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length
                         : regionInfo.getStartKey().length;
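
A note on the accessor migration that recurs through the committer and failure-policy hunks: HBase 2.x drops Region.getTableDesc() in favor of Region.getTableDescriptor(), and HRegionLocation.getRegion() supersedes getRegionInfo(). A minimal sketch of both lookups, assuming an HBase 2.x client (the helper names are hypothetical):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.Region;

public final class Hbase2AccessorSketch {
    // was: region.getTableDesc().getNameAsString()
    static String tableNameOf(Region region) {
        TableDescriptor htd = region.getTableDescriptor();
        return htd.getTableName().getNameAsString();
    }

    // was: location.getRegionInfo().getStartKey()
    static byte[] startKeyOf(HRegionLocation location) {
        RegionInfo info = location.getRegion();
        return info.getStartKey();
    }
}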