Repository: phoenix
Updated Branches:
  refs/heads/calcite 751d134be -> 8b68b1c4e
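For context on the metadata change in this patch, here is a minimal, self-contained sketch of the reflective average-row-size handler pattern that the new PhoenixRelMdSize class (added further down) follows. The class name ExampleRelMdSize and the Project overload are illustrative only; the Calcite calls (ReflectiveRelMetadataProvider.reflectiveSource, BuiltInMethod.AVERAGE_ROW_SIZE.method, RelMetadataQuery.getAverageRowSize) are the same ones used in the patch.

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMetadataQuery;
import org.apache.calcite.util.BuiltInMethod;

/**
 * Sketch of a handler for BuiltInMetadata.Size#averageRowSize: one overload
 * per RelNode subclass, dispatched reflectively on the rel's runtime type.
 */
public class ExampleRelMdSize {
    public static final RelMetadataProvider SOURCE =
        ReflectiveRelMetadataProvider.reflectiveSource(
            new ExampleRelMdSize(), BuiltInMethod.AVERAGE_ROW_SIZE.method);

    // A Project keeps only a subset of its input's fields, so scale the
    // input's average row size by the fraction of fields retained.
    public Double averageRowSize(Project rel, RelMetadataQuery mq) {
        Double inputSize = mq.getAverageRowSize(rel.getInput());
        if (inputSize == null) {
            return null;
        }
        return inputSize * rel.getProjects().size()
            / rel.getInput().getRowType().getFieldCount();
    }

    // Fallback for other rels: sum the inputs' average row sizes.
    public Double averageRowSize(RelNode rel, RelMetadataQuery mq) {
        double size = 0d;
        for (RelNode input : rel.getInputs()) {
            Double inputSize = mq.getAverageRowSize(input);
            size += inputSize == null ? 0d : inputSize;
        }
        return size;
    }
}

Registering SOURCE ahead of DefaultRelMetadataProvider in a ChainedRelMetadataProvider, as PhoenixRelMetadataProvider does in this patch, lets these overloads take precedence for Phoenix rels.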
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIndexIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIndexIT.java index f200a24..3f0f754 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIndexIT.java @@ -55,62 +55,71 @@ public class CalciteIndexIT extends BaseCalciteIT { initATableValues(getOrganizationId(), null, url); initSaltedTables(index); initMultiTenantTables(index); - final Connection connection = DriverManager.getConnection(url); + Connection connection = DriverManager.getConnection(url); connection.createStatement().execute("CREATE " + index + " IF NOT EXISTS IDX1 ON aTable (a_string) INCLUDE (b_string, x_integer)"); connection.createStatement().execute("CREATE " + index + " IF NOT EXISTS IDX2 ON aTable (b_string) INCLUDE (a_string, y_integer)"); connection.createStatement().execute("CREATE " + index + " IF NOT EXISTS IDX_FULL ON aTable (b_string) INCLUDE (a_string, a_integer, a_date, a_time, a_timestamp, x_decimal, x_long, x_integer, y_integer, a_byte, a_short, a_float, a_double, a_unsigned_float, a_unsigned_double)"); connection.createStatement().execute("UPDATE STATISTICS ATABLE"); + connection.createStatement().execute("UPDATE STATISTICS " + NOSALT_TABLE_NAME); connection.createStatement().execute("UPDATE STATISTICS " + SALTED_TABLE_NAME); - connection.createStatement().execute("UPDATE STATISTICS IDX_" + SALTED_TABLE_NAME); - connection.createStatement().execute("UPDATE STATISTICS IDX1"); - connection.createStatement().execute("UPDATE STATISTICS IDX2"); - connection.createStatement().execute("UPDATE STATISTICS IDX_FULL"); + connection.createStatement().execute("UPDATE STATISTICS " + MULTI_TENANT_TABLE); + connection.close(); + + Properties props = new Properties(); + props.setProperty("TenantId", "10"); + connection = DriverManager.getConnection(url, props); + connection.createStatement().execute("UPDATE STATISTICS " + MULTI_TENANT_VIEW1); + connection.close(); + + props.setProperty("TenantId", "20"); + connection = DriverManager.getConnection(url, props); + connection.createStatement().execute("UPDATE STATISTICS " + MULTI_TENANT_VIEW2); connection.close(); } @Test public void testIndex() throws Exception { - start(true).sql("select * from aTable where b_string = 'b'") + start(true, 1000f).sql("select * from aTable where b_string = 'b'") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(ORGANIZATION_ID=[$1], ENTITY_ID=[$2], A_STRING=[$3], B_STRING=[$0], A_INTEGER=[$4], A_DATE=[$5], A_TIME=[$6], A_TIMESTAMP=[$7], X_DECIMAL=[$8], X_LONG=[$9], X_INTEGER=[$10], Y_INTEGER=[$11], A_BYTE=[$12], A_SHORT=[$13], A_FLOAT=[$14], A_DOUBLE=[$15], A_UNSIGNED_FLOAT=[$16], A_UNSIGNED_DOUBLE=[$17])\n" + " PhoenixTableScan(table=[[phoenix, IDX_FULL]], filter=[=($0, 'b')])\n") .close(); - start(true).sql("select x_integer from aTable") + start(true, 1000f).sql("select x_integer from aTable") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(X_INTEGER=[$4])\n" + " PhoenixTableScan(table=[[phoenix, IDX1]])\n") .close(); - start(true).sql("select a_string from aTable order by a_string") + start(true, 1000f).sql("select a_string from aTable order by a_string") .explainIs("PhoenixToEnumerableConverter\n" + " 
PhoenixServerProject(A_STRING=[$0])\n" + " PhoenixTableScan(table=[[phoenix, IDX1]], scanOrder=[FORWARD])\n") .close(); - start(true).sql("select a_string from aTable order by organization_id") + start(true, 1000f).sql("select a_string from aTable order by organization_id") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(A_STRING=[$2], ORGANIZATION_ID=[$0])\n" + " PhoenixTableScan(table=[[phoenix, ATABLE]], scanOrder=[FORWARD])\n") .close(); - start(true).sql("select a_integer from aTable order by a_string") + start(true, 1000f).sql("select a_integer from aTable order by a_string") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerSort(sort0=[$1], dir0=[ASC])\n" + - " PhoenixServerProject(A_INTEGER=[$4], A_STRING=[$2])\n" + - " PhoenixTableScan(table=[[phoenix, ATABLE]])\n") + " PhoenixServerProject(A_INTEGER=[$4], A_STRING=[$3])\n" + + " PhoenixTableScan(table=[[phoenix, IDX_FULL]])\n") .close(); - start(true).sql("select a_string, b_string from aTable where a_string = 'a'") + start(true, 1000f).sql("select a_string, b_string from aTable where a_string = 'a'") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(A_STRING=[$0], B_STRING=[$3])\n" + " PhoenixTableScan(table=[[phoenix, IDX1]], filter=[=($0, 'a')])\n") .close(); - start(true).sql("select a_string, b_string from aTable where b_string = 'b'") + start(true, 1000f).sql("select a_string, b_string from aTable where b_string = 'b'") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(A_STRING=[$3], B_STRING=[$0])\n" + " PhoenixTableScan(table=[[phoenix, IDX2]], filter=[=($0, 'b')])\n") .close(); - start(true).sql("select a_string, b_string, x_integer, y_integer from aTable where b_string = 'b'") + start(true, 1000f).sql("select a_string, b_string, x_integer, y_integer from aTable where b_string = 'b'") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(A_STRING=[$3], B_STRING=[$0], X_INTEGER=[$10], Y_INTEGER=[$11])\n" + " PhoenixTableScan(table=[[phoenix, IDX_FULL]], filter=[=($0, 'b')])\n") .close(); - start(true).sql("select a_string, count(*) from aTable group by a_string") + start(true, 1000f).sql("select a_string, count(*) from aTable group by a_string") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerAggregate(group=[{0}], EXPR$1=[COUNT()], isOrdered=[true])\n" + " PhoenixTableScan(table=[[phoenix, IDX1]], scanOrder=[FORWARD])\n") @@ -118,13 +127,13 @@ public class CalciteIndexIT extends BaseCalciteIT { } @Test public void testSaltedIndex() throws Exception { - start(true).sql("select count(*) from " + NOSALT_TABLE_NAME + " where col0 > 3") + start(true, 1f).sql("select count(*) from " + NOSALT_TABLE_NAME + " where col0 > 3") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerAggregate(group=[{}], EXPR$0=[COUNT()])\n" + " PhoenixTableScan(table=[[phoenix, IDXSALTED_NOSALT_TEST_TABLE]], filter=[>(CAST($0):INTEGER, 3)])\n") - .resultIs(false, new Object[][]{{2L}}) + .resultIs(false, new Object[][]{{999L}}) .close(); - start(true).sql("select mypk0, mypk1, col0 from " + NOSALT_TABLE_NAME + " where col0 <= 4") + start(true, 1f).sql("select mypk0, mypk1, col0 from " + NOSALT_TABLE_NAME + " where col0 <= 4") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(MYPK0=[$1], MYPK1=[$2], COL0=[CAST($0):INTEGER])\n" + " PhoenixTableScan(table=[[phoenix, IDXSALTED_NOSALT_TEST_TABLE]], filter=[<=(CAST($0):INTEGER, 4)])\n") @@ -132,20 +141,20 @@ public class CalciteIndexIT extends BaseCalciteIT { {2, 3, 4}, {1, 2, 3}}) 
.close(); - start(true).sql("select * from " + SALTED_TABLE_NAME + " where mypk0 < 3") + start(true, 1f).sql("select * from " + SALTED_TABLE_NAME + " where mypk0 < 3") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixTableScan(table=[[phoenix, SALTED_TEST_TABLE]], filter=[<($0, 3)])\n") .resultIs(false, new Object[][] { {1, 2, 3, 4}, {2, 3, 4, 5}}) .close(); - start(true).sql("select count(*) from " + SALTED_TABLE_NAME + " where col0 > 3") + start(true, 1f).sql("select count(*) from " + SALTED_TABLE_NAME + " where col0 > 3") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerAggregate(group=[{}], EXPR$0=[COUNT()])\n" + " PhoenixTableScan(table=[[phoenix, IDX_SALTED_TEST_TABLE]], filter=[>(CAST($0):INTEGER, 3)])\n") - .resultIs(false, new Object[][]{{2L}}) + .resultIs(false, new Object[][]{{999L}}) .close(); - start(true).sql("select mypk0, mypk1, col0 from " + SALTED_TABLE_NAME + " where col0 <= 4") + start(true, 1f).sql("select mypk0, mypk1, col0 from " + SALTED_TABLE_NAME + " where col0 <= 4") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(MYPK0=[$1], MYPK1=[$2], COL0=[CAST($0):INTEGER])\n" + " PhoenixTableScan(table=[[phoenix, IDX_SALTED_TEST_TABLE]], filter=[<=(CAST($0):INTEGER, 4)])\n") @@ -153,13 +162,13 @@ public class CalciteIndexIT extends BaseCalciteIT { {2, 3, 4}, {1, 2, 3}}) .close(); - start(true).sql("select count(*) from " + SALTED_TABLE_NAME + " where col1 > 4") + start(true, 1f).sql("select count(*) from " + SALTED_TABLE_NAME + " where col1 > 4") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerAggregate(group=[{}], EXPR$0=[COUNT()])\n" + " PhoenixTableScan(table=[[phoenix, IDXSALTED_SALTED_TEST_TABLE]], filter=[>(CAST($0):INTEGER, 4)])\n") - .resultIs(false, new Object[][]{{2L}}) + .resultIs(false, new Object[][]{{999L}}) .close(); - start(true).sql("select * from " + SALTED_TABLE_NAME + " where col1 <= 5 order by col1") + start(true, 1f).sql("select * from " + SALTED_TABLE_NAME + " where col1 <= 5 order by col1") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(MYPK0=[$1], MYPK1=[$2], COL0=[$3], COL1=[CAST($0):INTEGER])\n" + " PhoenixTableScan(table=[[phoenix, IDXSALTED_SALTED_TEST_TABLE]], filter=[<=(CAST($0):INTEGER, 5)], scanOrder=[FORWARD])\n") @@ -167,36 +176,35 @@ public class CalciteIndexIT extends BaseCalciteIT { {1, 2, 3, 4}, {2, 3, 4, 5}}) .close(); - start(true).sql("select * from " + SALTED_TABLE_NAME + " s1, " + SALTED_TABLE_NAME + " s2 where s1.mypk0 = s2.mypk0 and s1.mypk1 = s2.mypk1 and s1.mypk0 > 1 and s2.col1 < 6") + start(true, 1f).sql("select * from " + SALTED_TABLE_NAME + " s1, " + SALTED_TABLE_NAME + " s2 where s1.mypk0 = s2.mypk0 and s1.mypk1 = s2.mypk1 and s1.mypk0 > 500 and s2.col1 < 505") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerJoin(condition=[AND(=($0, $4), =($1, $5))], joinType=[inner])\n" + - " PhoenixTableScan(table=[[phoenix, SALTED_TEST_TABLE]], filter=[>($0, 1)])\n" + + " PhoenixTableScan(table=[[phoenix, SALTED_TEST_TABLE]], filter=[>($0, 500)])\n" + " PhoenixServerProject(MYPK0=[$1], MYPK1=[$2], COL0=[$3], COL1=[CAST($0):INTEGER])\n" + - " PhoenixTableScan(table=[[phoenix, IDXSALTED_SALTED_TEST_TABLE]], filter=[<(CAST($0):INTEGER, 6)])\n") + " PhoenixTableScan(table=[[phoenix, IDXSALTED_SALTED_TEST_TABLE]], filter=[<(CAST($0):INTEGER, 505)])\n") .resultIs(false, new Object[][] { - {2, 3, 4, 5, 2, 3, 4, 5}}) + {501, 502, 503, 504, 501, 502, 503, 504}}) .close(); } @Test public void testMultiTenant() throws Exception { - Properties props = 
getConnectionProps(true); - start(props).sql("select * from " + MULTI_TENANT_TABLE) + Properties props = getConnectionProps(true, 1f); + start(props).sql("select * from " + MULTI_TENANT_TABLE + " where tenant_id = '10' and id <= '0004'") .explainIs("PhoenixToEnumerableConverter\n" + - " PhoenixTableScan(table=[[phoenix, MULTITENANT_TEST_TABLE]])\n") + " PhoenixTableScan(table=[[phoenix, MULTITENANT_TEST_TABLE]], filter=[AND(=(CAST($0):VARCHAR(2) CHARACTER SET \"ISO-8859-1\" COLLATE \"ISO-8859-1$en_US$primary\" NOT NULL, '10'), <=($1, '0004'))])\n") .resultIs(false, new Object[][] { - {"10", "2", 3, 4, 5}, - {"15", "3", 4, 5, 6}, - {"20", "4", 5, 6, 7}, - {"20", "5", 6, 7, 8}}) + {"10", "0002", 3, 4, 5}, + {"10", "0003", 4, 5, 6}, + {"10", "0004", 5, 6, 7}}) .close(); - start(props).sql("select * from " + MULTI_TENANT_TABLE + " where tenant_id = '20' and col1 > 1") + start(props).sql("select * from " + MULTI_TENANT_TABLE + " where tenant_id = '20' and col1 < 8") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(TENANT_ID=[$0], ID=[$2], COL0=[$3], COL1=[CAST($1):INTEGER], COL2=[$4])\n" + - " PhoenixTableScan(table=[[phoenix, IDX_MULTITENANT_TEST_TABLE]], filter=[AND(=(CAST($0):VARCHAR(2) CHARACTER SET \"ISO-8859-1\" COLLATE \"ISO-8859-1$en_US$primary\" NOT NULL, '20'), >(CAST($1):INTEGER, 1))])\n") + " PhoenixTableScan(table=[[phoenix, IDX_MULTITENANT_TEST_TABLE]], filter=[AND(=(CAST($0):VARCHAR(2) CHARACTER SET \"ISO-8859-1\" COLLATE \"ISO-8859-1$en_US$primary\" NOT NULL, '20'), <(CAST($1):INTEGER, 8))])\n") .resultIs(false, new Object[][] { - {"20", "4", 5, 6, 7}, - {"20", "5", 6, 7, 8}}) + {"20", "0004", 5, 6, 7}, + {"20", "0005", 6, 7, 8}}) .close(); try { @@ -208,19 +216,22 @@ public class CalciteIndexIT extends BaseCalciteIT { } props.setProperty("TenantId", "15"); - start(props).sql("select * from " + MULTI_TENANT_TABLE) + start(props).sql("select * from " + MULTI_TENANT_TABLE + " where id = '0284'") .explainIs("PhoenixToEnumerableConverter\n" + - " PhoenixTableScan(table=[[phoenix, MULTITENANT_TEST_TABLE]])\n") + " PhoenixTableScan(table=[[phoenix, MULTITENANT_TEST_TABLE]], filter=[=(CAST($0):VARCHAR(4) CHARACTER SET \"ISO-8859-1\" COLLATE \"ISO-8859-1$en_US$primary\" NOT NULL, '0284')])\n") .resultIs(false, new Object[][] { - {"3", 4, 5, 6}}) + {"0284", 285, 286, 287}}) .close(); - start(props).sql("select * from " + MULTI_TENANT_TABLE + " where col1 > 1") + start(props).sql("select * from " + MULTI_TENANT_TABLE + " where col1 > 1000") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(ID=[$1], COL0=[$2], COL1=[CAST($0):INTEGER], COL2=[$3])\n" + - " PhoenixTableScan(table=[[phoenix, IDX_MULTITENANT_TEST_TABLE]], filter=[>(CAST($0):INTEGER, 1)])\n") + " PhoenixTableScan(table=[[phoenix, IDX_MULTITENANT_TEST_TABLE]], filter=[>(CAST($0):INTEGER, 1000)])\n") .resultIs(false, new Object[][] { - {"3", 4, 5, 6}}) + {"0999", 1000, 1001, 1002}, + {"1000", 1001, 1002, 1003}, + {"1001", 1002, 1003, 1004}, + {"1002", 1003, 1004, 1005}}) .close(); try { @@ -232,51 +243,63 @@ public class CalciteIndexIT extends BaseCalciteIT { } props.setProperty("TenantId", "10"); - start(props).sql("select * from " + MULTI_TENANT_VIEW1) + start(props).sql("select * from " + MULTI_TENANT_VIEW1 + " where id = '0512'") .explainIs("PhoenixToEnumerableConverter\n" + - " PhoenixTableScan(table=[[phoenix, MULTITENANT_TEST_TABLE]])\n") + " PhoenixTableScan(table=[[phoenix, MULTITENANT_TEST_TABLE]], filter=[=(CAST($0):VARCHAR(4) CHARACTER SET \"ISO-8859-1\" COLLATE 
\"ISO-8859-1$en_US$primary\" NOT NULL, '0512')])\n") .resultIs(false, new Object[][] { - {"2", 3, 4, 5}}) + {"0512", 513, 514, 515}}) .close(); - start(props).sql("select * from " + MULTI_TENANT_TABLE + " where col1 > 1") + start(props).sql("select * from " + MULTI_TENANT_TABLE + " where col1 <= 6") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(ID=[$1], COL0=[$2], COL1=[CAST($0):INTEGER], COL2=[$3])\n" + - " PhoenixTableScan(table=[[phoenix, IDX_MULTITENANT_TEST_TABLE]], filter=[>(CAST($0):INTEGER, 1)])\n") + " PhoenixTableScan(table=[[phoenix, IDX_MULTITENANT_TEST_TABLE]], filter=[<=(CAST($0):INTEGER, 6)])\n") .resultIs(false, new Object[][] { - {"2", 3, 4, 5}}) + {"0002", 3, 4, 5}, + {"0003", 4, 5, 6}, + {"0004", 5, 6, 7}}) .close(); - start(props).sql("select id, col0 from " + MULTI_TENANT_VIEW1 + " where col0 > 1") + start(props).sql("select id, col0 from " + MULTI_TENANT_VIEW1 + " where col0 >= 1000") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(ID=[$1], COL0=[CAST($0):INTEGER])\n" + - " PhoenixTableScan(table=[[phoenix, S1, IDX_MULTITENANT_TEST_VIEW1]], filter=[>(CAST($0):INTEGER, 1)])\n") + " PhoenixTableScan(table=[[phoenix, S1, IDX_MULTITENANT_TEST_VIEW1]], filter=[>=(CAST($0):INTEGER, 1000)])\n") .resultIs(false, new Object[][] { - {"2", 3}}) + {"0999", 1000}, + {"1000", 1001}, + {"1001", 1002}}) .close(); props.setProperty("TenantId", "20"); - start(props).sql("select * from " + MULTI_TENANT_VIEW2) + start(props).sql("select * from " + MULTI_TENANT_VIEW2 + " where id = '0765'") .explainIs("PhoenixToEnumerableConverter\n" + - " PhoenixTableScan(table=[[phoenix, MULTITENANT_TEST_TABLE]], filter=[>($3, 7)])\n") + " PhoenixTableScan(table=[[phoenix, MULTITENANT_TEST_TABLE]], filter=[AND(>($3, 7), =(CAST($0):VARCHAR(4) CHARACTER SET \"ISO-8859-1\" COLLATE \"ISO-8859-1$en_US$primary\" NOT NULL, '0765'))])\n") .resultIs(false, new Object[][] { - {"5", 6, 7, 8}}) + {"0765", 766, 767, 768}}) .close(); - start(props).sql("select id, col0 from " + MULTI_TENANT_VIEW2 + " where col0 > 1") + start(props).sql("select id, col0 from " + MULTI_TENANT_VIEW2 + " where col0 between 272 and 275") .explainIs("PhoenixToEnumerableConverter\n" + " PhoenixServerProject(ID=[$1], COL0=[CAST($0):INTEGER])\n" + - " PhoenixTableScan(table=[[phoenix, S2, IDX_MULTITENANT_TEST_VIEW2]], filter=[>(CAST($0):INTEGER, 1)])\n") + " PhoenixTableScan(table=[[phoenix, S2, IDX_MULTITENANT_TEST_VIEW2]], filter=[AND(>=(CAST($0):INTEGER, 272), <=(CAST($0):INTEGER, 275))])\n") .resultIs(false, new Object[][] { - {"5", 6}}) + {"0271", 272}, + {"0272", 273}, + {"0273", 274}, + {"0274", 275}}) .close(); - start(props).sql("select id, col0 from " + MULTI_TENANT_VIEW2 + " order by col0") + start(props).sql("select id, col0 from " + MULTI_TENANT_VIEW2 + " order by col0 limit 5") .explainIs("PhoenixToEnumerableConverter\n" + - " PhoenixServerProject(ID=[$1], COL0=[CAST($0):INTEGER])\n" + - " PhoenixTableScan(table=[[phoenix, S2, IDX_MULTITENANT_TEST_VIEW2]], scanOrder=[FORWARD])\n") + " PhoenixLimit(fetch=[5])\n" + + " PhoenixServerProject(ID=[$1], COL0=[CAST($0):INTEGER])\n" + + " PhoenixTableScan(table=[[phoenix, S2, IDX_MULTITENANT_TEST_VIEW2]], scanOrder=[FORWARD])\n") .resultIs(true, new Object[][] { - {"5", 6}}) + {"0005", 6}, + {"0006", 7}, + {"0007", 8}, + {"0008", 9}, + {"0009", 10}}) .close(); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java 
---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java index 22d4e68..1539dcd 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixTable.java @@ -1,5 +1,7 @@ package org.apache.phoenix.calcite; +import java.io.IOException; +import java.sql.SQLException; import java.util.List; import org.apache.calcite.plan.RelOptTable; @@ -18,14 +20,22 @@ import org.apache.calcite.schema.TranslatableTable; import org.apache.calcite.schema.impl.AbstractTable; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.ImmutableBitSet; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.phoenix.calcite.rel.PhoenixRel; import org.apache.phoenix.calcite.rel.PhoenixTableScan; import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.PColumn; +import org.apache.phoenix.schema.PColumnFamily; import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.RowKeySchema; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.stats.GuidePostsInfo; +import org.apache.phoenix.schema.stats.StatisticsUtil; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.util.SchemaUtil; +import org.apache.phoenix.util.SizedUtil; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; @@ -40,6 +50,8 @@ public class PhoenixTable extends AbstractTable implements TranslatableTable { public final List<PColumn> mappedColumns; public final ImmutableBitSet pkBitSet; public final RelCollation collation; + public final long byteCount; + public final long rowCount; public final PhoenixConnection pc; public static List<PColumn> getMappedColumns(PTable pTable) { @@ -78,6 +90,52 @@ public class PhoenixTable extends AbstractTable implements TranslatableTable { } this.pkBitSet = ImmutableBitSet.of(pkPositions); this.collation = RelCollationTraitDef.INSTANCE.canonize(RelCollations.of(fieldCollations)); + byte[] emptyCf = SchemaUtil.getEmptyColumnFamily(pTable); + GuidePostsInfo info = pTable.getTableStats().getGuidePosts().get(emptyCf); + long rowCount; + long byteCount; + try { + if (info == null) { + // TODO The props might not be the same as server props. + int guidepostPerRegion = pc.getQueryServices().getProps().getInt( + QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, + QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_PER_REGION); + long guidepostWidth = pc.getQueryServices().getProps().getLong( + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES); + HTableDescriptor desc = null; + if (guidepostPerRegion > 0) { + desc = pc.getQueryServices().getAdmin().getTableDescriptor( + pTable.getPhysicalName().getBytes()); + } + byteCount = StatisticsUtil.getGuidePostDepth( + guidepostPerRegion, guidepostWidth, desc) / 2; + long keySize = pTable.getRowKeySchema().getEstimatedByteSize(); + long rowSize = 0; + for (PColumnFamily cf : pTable.getColumnFamilies()) { + for (PColumn column : cf.getColumns()) { + Integer maxLength = column.getMaxLength(); + int byteSize = column.getDataType().isFixedWidth() ? + maxLength == null ? 
+ column.getDataType().getByteSize() + : maxLength + : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE; + rowSize += SizedUtil.KEY_VALUE_SIZE + keySize + byteSize; + } + } + if (rowSize == 0) { + rowSize = keySize; + } + rowCount = byteCount / rowSize; + } else { + byteCount = info.getByteCount(); + rowCount = info.getRowCount(); + } + } catch (SQLException | IOException e) { + throw new RuntimeException(e); + } + this.byteCount = byteCount; + this.rowCount = rowCount; } public PTable getTable() { @@ -127,13 +185,9 @@ public class PhoenixTable extends AbstractTable implements TranslatableTable { return new Statistic() { @Override public Double getRowCount() { - byte[] emptyCf = SchemaUtil.getEmptyColumnFamily(pTable); - GuidePostsInfo info = pTable.getTableStats().getGuidePosts().get(emptyCf); - long rowCount = info == null ? 0 : info.getRowCount(); - - // Return an non-zero value to make the query plans stable. - // TODO remove "* 10.0" which is for test purpose. - return rowCount > 0 ? rowCount * 10.0 : 100.0; + float f = pc.getQueryServices().getProps().getFloat( + PhoenixRel.ROW_COUNT_FACTOR, 1f); + return (double) (rowCount * f); } @Override http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMdSize.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMdSize.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMdSize.java new file mode 100644 index 0000000..31ed29f --- /dev/null +++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMdSize.java @@ -0,0 +1,75 @@ +package org.apache.phoenix.calcite.metadata; + +import org.apache.calcite.plan.volcano.RelSubset; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider; +import org.apache.calcite.rel.metadata.RelMetadataProvider; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.util.BuiltInMethod; +import org.apache.phoenix.calcite.PhoenixTable; +import org.apache.phoenix.calcite.rel.PhoenixAbstractAggregate; +import org.apache.phoenix.calcite.rel.PhoenixAbstractProject; +import org.apache.phoenix.calcite.rel.PhoenixAbstractSemiJoin; +import org.apache.phoenix.calcite.rel.PhoenixTableScan; +import org.apache.phoenix.calcite.rel.PhoenixUnion; + +public class PhoenixRelMdSize { + /** Source for + * {@link org.apache.calcite.rel.metadata.BuiltInMetadata.Size}. 
*/ + public static final RelMetadataProvider SOURCE = + ReflectiveRelMetadataProvider.reflectiveSource(new PhoenixRelMdSize(), + BuiltInMethod.AVERAGE_ROW_SIZE.method); + + protected PhoenixRelMdSize() { } + + public Double averageRowSize(PhoenixUnion rel, RelMetadataQuery mq) { + double rowSize = 0; + for (RelNode input : rel.getInputs()) { + rowSize += mq.getAverageRowSize(input); + } + + return rowSize / rel.getInputs().size(); + } + + public Double averageRowSize(PhoenixAbstractAggregate rel, RelMetadataQuery mq) { + RelNode input = rel.getInput(); + double rowSize = mq.getAverageRowSize(input); + rowSize = rowSize * (rel.getGroupCount() + rel.getAggCallList().size()) / input.getRowType().getFieldCount(); + + return rowSize; + } + + public Double averageRowSize(PhoenixAbstractProject rel, RelMetadataQuery mq) { + RelNode input = rel.getInput(); + double rowSize = mq.getAverageRowSize(input); + rowSize = rowSize * rel.getProjects().size() / input.getRowType().getFieldCount(); + + return rowSize; + } + + public Double averageRowSize(PhoenixAbstractSemiJoin rel, RelMetadataQuery mq) { + return mq.getAverageRowSize(rel.getLeft()); + } + + public Double averageRowSize(PhoenixTableScan rel, RelMetadataQuery mq) { + PhoenixTable phoenixTable = rel.getTable().unwrap(PhoenixTable.class); + return 1.0 * phoenixTable.byteCount / phoenixTable.rowCount; + } + + public Double averageRowSize(RelSubset rel, RelMetadataQuery mq) { + RelNode best = rel.getBest(); + if (best != null) { + return mq.getAverageRowSize(best); + } + return Double.POSITIVE_INFINITY; + } + + public Double averageRowSize(RelNode rel, RelMetadataQuery mq) { + double rowSize = 0; + for (RelNode input : rel.getInputs()) { + rowSize += mq.getAverageRowSize(input); + } + + return rowSize; + } +} http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMetadataProvider.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMetadataProvider.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMetadataProvider.java index c9412c6..5572952 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMetadataProvider.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/metadata/PhoenixRelMetadataProvider.java @@ -10,6 +10,7 @@ public class PhoenixRelMetadataProvider extends ChainedRelMetadataProvider { super(ImmutableList.of( PhoenixRelMdRowCount.SOURCE, PhoenixRelMdCollation.SOURCE, + PhoenixRelMdSize.SOURCE, new DefaultRelMetadataProvider())); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixAbstractSort.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixAbstractSort.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixAbstractSort.java index 66ad9f0..b8c0136 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixAbstractSort.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixAbstractSort.java @@ -3,6 +3,8 @@ package org.apache.phoenix.calcite.rel; import java.util.List; import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptCost; +import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; 
import org.apache.calcite.rel.RelCollation; import org.apache.calcite.rel.RelFieldCollation; @@ -10,6 +12,8 @@ import org.apache.calcite.rel.RelFieldCollation.Direction; import org.apache.calcite.rel.RelFieldCollation.NullDirection; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Sort; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.util.Util; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.execute.TupleProjector; import org.apache.phoenix.expression.Expression; @@ -25,13 +29,21 @@ import com.google.common.collect.Lists; * <p>Like {@code Sort}, it also supports LIMIT and OFFSET. */ abstract public class PhoenixAbstractSort extends Sort implements PhoenixRel { - protected static final double CLIENT_MERGE_FACTOR = 0.5; protected PhoenixAbstractSort(RelOptCluster cluster, RelTraitSet traits, RelNode child, RelCollation collation) { super(cluster, traits, child, collation, null, null); assert !getCollation().getFieldCollations().isEmpty(); } - + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner, + RelMetadataQuery mq) { + double rowCount = mq.getRowCount(this); + double bytesPerRow = mq.getAverageRowSize(this); + return planner.getCostFactory().makeCost( + Util.nLogN(rowCount) * bytesPerRow, rowCount, 0); + } + protected static OrderBy getOrderBy(RelCollation collation, Implementor implementor, TupleProjector tupleProjector) { List<OrderByExpression> orderByExpressions = Lists.newArrayList(); for (RelFieldCollation fieldCollation : collation.getFieldCollations()) { http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixCompactClientSort.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixCompactClientSort.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixCompactClientSort.java index bb27c46..8d68a57 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixCompactClientSort.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixCompactClientSort.java @@ -43,7 +43,7 @@ public class PhoenixCompactClientSort extends PhoenixAbstractSort { return planner.getCostFactory().makeInfiniteCost(); return super.computeSelfCost(planner, mq) - .multiplyBy(CLIENT_MERGE_FACTOR) + .multiplyBy(SERVER_FACTOR) .multiplyBy(PHOENIX_FACTOR); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixRel.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixRel.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixRel.java index a15ceb0..501060e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixRel.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixRel.java @@ -30,20 +30,23 @@ public interface PhoenixRel extends RelNode { /** Metadata Provider for PhoenixRel */ RelMetadataProvider METADATA_PROVIDER = new PhoenixRelMetadataProvider(); + + /** For test purpose */ + String ROW_COUNT_FACTOR = "phoenix.calcite.metadata.rowcount.factor"; /** Relative cost of Phoenix versus Enumerable convention. * * <p>Multiply by the value (which is less than unity), and you will get a cheaper cost. * Phoenix is cheaper. 
*/ - double PHOENIX_FACTOR = 0.5; + double PHOENIX_FACTOR = 0.0001; /** Relative cost of server plan versus client plan. * * <p>Multiply by the value (which is less than unity), and you will get a cheaper cost. * Server is cheaper. */ - double SERVER_FACTOR = 0.2; + double SERVER_FACTOR = 0.1; QueryPlan implement(Implementor implementor); http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerJoin.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerJoin.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerJoin.java index e79f1da..2f09a9d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerJoin.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerJoin.java @@ -16,7 +16,6 @@ import org.apache.calcite.rel.core.CorrelationId; import org.apache.calcite.rel.core.JoinRelType; import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rex.RexNode; -import org.apache.calcite.util.Util; import org.apache.phoenix.calcite.CalciteUtils; import org.apache.phoenix.calcite.metadata.PhoenixRelMdCollation; import org.apache.phoenix.compile.JoinCompiler; @@ -95,7 +94,8 @@ public class PhoenixServerJoin extends PhoenixAbstractJoin { if (Double.isInfinite(rightRowCount)) { rowCount = rightRowCount; } else { - rowCount += Util.nLogN(rightRowCount); + double rightRowSize = mq.getAverageRowSize(getRight()); + rowCount += (rightRowCount + rightRowCount * rightRowSize); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerSemiJoin.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerSemiJoin.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerSemiJoin.java index 183a066..74c3e4d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerSemiJoin.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixServerSemiJoin.java @@ -15,7 +15,6 @@ import org.apache.calcite.rel.core.SemiJoin; import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rex.RexNode; import org.apache.calcite.util.ImmutableIntList; -import org.apache.calcite.util.Util; import org.apache.phoenix.calcite.metadata.PhoenixRelMdCollation; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.execute.HashJoinPlan; @@ -82,7 +81,8 @@ public class PhoenixServerSemiJoin extends PhoenixAbstractSemiJoin { if (Double.isInfinite(rightRowCount)) { rowCount = rightRowCount; } else { - rowCount += Util.nLogN(rightRowCount); + double rightRowSize = mq.getAverageRowSize(getRight()); + rowCount += (rightRowCount + rightRowCount * rightRowSize); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixTableScan.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixTableScan.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixTableScan.java index 23f6e43..d9fa23f 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixTableScan.java +++ 
b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixTableScan.java @@ -1,5 +1,6 @@ package org.apache.phoenix.calcite.rel; +import java.io.IOException; import java.sql.SQLException; import java.util.List; @@ -19,6 +20,7 @@ import org.apache.calcite.rex.RexNode; import org.apache.calcite.util.ImmutableBitSet; import org.apache.calcite.util.ImmutableIntList; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Scan; import org.apache.phoenix.calcite.CalciteUtils; import org.apache.phoenix.calcite.PhoenixTable; @@ -36,7 +38,9 @@ import org.apache.phoenix.execute.ScanPlan; import org.apache.phoenix.execute.TupleProjector; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; +import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.iterate.ParallelIteratorFactory; +import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.jdbc.PhoenixStatement; import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.query.QueryServices; @@ -45,7 +49,11 @@ import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.schema.stats.GuidePostsInfo; +import org.apache.phoenix.schema.stats.StatisticsUtil; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.util.SchemaUtil; + import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList.Builder; @@ -64,6 +72,8 @@ public class PhoenixTableScan extends TableScan implements PhoenixRel { public final ScanOrder scanOrder; public final ScanRanges scanRanges; + protected final GuidePostsInfo filteredGuideposts; + public static PhoenixTableScan create(RelOptCluster cluster, final RelOptTable table) { return create(cluster, table, null, getDefaultScanOrder(table.unwrap(PhoenixTable.class))); @@ -92,6 +102,8 @@ public class PhoenixTableScan extends TableScan implements PhoenixRel { this.scanOrder = scanOrder; ScanRanges scanRanges = null; + GuidePostsInfo info = null; + HTableInterface statsHTable = null; if (filter != null) { try { // TODO simplify this code @@ -124,11 +136,34 @@ public class PhoenixTableScan extends TableScan implements PhoenixRel { Expression filterExpr = CalciteUtils.toExpression(filter, tmpImplementor); filterExpr = WhereOptimizer.pushKeyExpressionsToScan(context, select, filterExpr); scanRanges = context.getScanRanges(); - } catch (SQLException e) { + if (!scanRanges.isPointLookup() + && !scanRanges.isDegenerate() + && !scanRanges.isEverything()) { + // TODO get the cf and timestamp right. 
+ Scan scan = context.getScan(); + byte[] cf = SchemaUtil.getEmptyColumnFamily(pTable); + statsHTable = phoenixTable.pc.getQueryServices() + .getTable(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES); + info = StatisticsUtil.readStatistics( + statsHTable, pTable.getPhysicalName().getBytes(), + new ImmutableBytesPtr(cf), + scan.getStartRow(), + scan.getStopRow(), + HConstants.LATEST_TIMESTAMP).getGuidePosts().get(cf); + } + } catch (SQLException | IOException e) { throw new RuntimeException(e); + } finally { + if (statsHTable != null) { + try { + statsHTable.close(); + } catch (IOException e) { + } + } } } this.scanRanges = scanRanges; + this.filteredGuideposts = info; } private static ScanOrder getDefaultScanOrder(PhoenixTable table) { @@ -170,21 +205,39 @@ public class PhoenixTableScan extends TableScan implements PhoenixRel { @Override public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - double rowCount = super.estimateRowCount(mq); - Double filteredRowCount = null; + double byteCount; + PhoenixTable phoenixTable = table.unwrap(PhoenixTable.class); if (scanRanges != null) { if (scanRanges.isPointLookup()) { - filteredRowCount = 1.0; - } else if (scanRanges.getBoundPkColumnCount() > 0) { - // TODO - int pkCount = scanRanges.getBoundPkColumnCount(); - filteredRowCount = rowCount * Math.pow(mq.getSelectivity(this, filter), pkCount); + byteCount = 1.0; + } else if (scanRanges.isDegenerate()) { + byteCount = 0.0; + } else if (scanRanges.isEverything()) { + byteCount = phoenixTable.byteCount; + } else { + if (filteredGuideposts != null) { + byteCount = filteredGuideposts.getByteCount(); + // TODO why zero byteCount? a bug? + if (byteCount == 0 && filteredGuideposts.getGuidePostsCount() > 0) { + PTable pTable = phoenixTable.getTable(); + byte[] emptyCf = SchemaUtil.getEmptyColumnFamily(pTable); + GuidePostsInfo info = pTable.getTableStats().getGuidePosts().get(emptyCf); + byteCount = phoenixTable.byteCount * filteredGuideposts.getGuidePostsCount() / info.getGuidePostsCount(); + } + } else { + PTable pTable = phoenixTable.getTable(); + byte[] emptyCf = SchemaUtil.getEmptyColumnFamily(pTable); + GuidePostsInfo info = pTable.getTableStats().getGuidePosts().get(emptyCf); + if (info != null) { + byteCount = phoenixTable.byteCount / info.getGuidePostsCount() / 2; + } else { + int pkCount = scanRanges.getBoundPkColumnCount(); + byteCount = phoenixTable.byteCount * Math.pow(mq.getSelectivity(this, filter), pkCount); + } + } } - } - if (filteredRowCount != null) { - rowCount = filteredRowCount; - } else if (table.unwrap(PhoenixTable.class).getTable().getParentName() != null){ - rowCount = addEpsilon(rowCount); + } else { + byteCount = phoenixTable.byteCount; } if (scanOrder != ScanOrder.NONE) { // We don't want to make a big difference here. The idea is to avoid @@ -197,14 +250,13 @@ public class PhoenixTableScan extends TableScan implements PhoenixRel { // above it can be an stream aggregate, although at runtime this will // eventually be an AggregatePlan, in which the "forceRowKeyOrder" // flag takes no effect. 
- rowCount = addEpsilon(rowCount); + byteCount = addEpsilon(byteCount); if (scanOrder == ScanOrder.REVERSE) { - rowCount = addEpsilon(rowCount); + byteCount = addEpsilon(byteCount); } } - int fieldCount = this.table.getRowType().getFieldCount(); return planner.getCostFactory() - .makeCost(rowCount * 2 * fieldCount / (fieldCount + 1), rowCount + 1, 0) + .makeCost(byteCount, byteCount + 1, 0) .multiplyBy(PHOENIX_FACTOR); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index 78f9700..ddacb91 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -848,17 +848,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso PName physicalTableName = physicalTables.isEmpty() ? PNameFactory.newName(SchemaUtil.getTableName( schemaName.getString(), tableName.getString())) : physicalTables.get(0); PTableStats stats = PTableStats.EMPTY_STATS; - if (tenantId == null) { - HTableInterface statsHTable = null; - try { - statsHTable = ServerUtil.getHTableForCoprocessorScan(env, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES); - stats = StatisticsUtil.readStatistics(statsHTable, physicalTableName.getBytes(), clientTimeStamp); - timeStamp = Math.max(timeStamp, stats.getTimestamp()); - } catch (org.apache.hadoop.hbase.TableNotFoundException e) { - logger.warn(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " not online yet?"); - } finally { - if (statsHTable != null) statsHTable.close(); - } + HTableInterface statsHTable = null; + try { + statsHTable = ServerUtil.getHTableForCoprocessorScan(env, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES); + stats = StatisticsUtil.readStatistics(statsHTable, physicalTableName.getBytes(), clientTimeStamp); + timeStamp = Math.max(timeStamp, stats.getTimestamp()); + } catch (org.apache.hadoop.hbase.TableNotFoundException e) { + logger.warn(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " not online yet?"); + } finally { + if (statsHTable != null) statsHTable.close(); } return PTableImpl.makePTable(tenantId, schemaName, tableName, tableType, indexState, timeStamp, tableSeqNum, pkName, saltBucketNum, columns, tableType == INDEX ? 
schemaName : null, http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java index 5b47104..4166b5b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java @@ -99,7 +99,7 @@ public class StatisticsUtil { return key; } - public static List<Result> readStatistics(HTableInterface statsHTable, byte[] tableNameBytes, ImmutableBytesPtr cf, + public static List<Result> readStatisticsForDelete(HTableInterface statsHTable, byte[] tableNameBytes, ImmutableBytesPtr cf, byte[] startKey, byte[] stopKey, long clientTimeStamp) throws IOException { List<Result> statsForRegion = new ArrayList<Result>(); Scan s = MetaDataUtil.newTableRowsScan(getAdjustedKey(startKey, tableNameBytes, cf, false), @@ -123,8 +123,22 @@ public class StatisticsUtil { public static PTableStats readStatistics(HTableInterface statsHTable, byte[] tableNameBytes, long clientTimeStamp) throws IOException { + return readStatistics(statsHTable, tableNameBytes, null, null, null, clientTimeStamp); + } + + public static PTableStats readStatistics(HTableInterface statsHTable, + byte[] tableNameBytes, ImmutableBytesPtr cf, byte[] startKey, byte[] stopKey, + long clientTimeStamp) + throws IOException { ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - Scan s = MetaDataUtil.newTableRowsScan(tableNameBytes, MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp); + Scan s; + if (cf == null) { + s = MetaDataUtil.newTableRowsScan(tableNameBytes, MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp); + } else { + s = MetaDataUtil.newTableRowsScan(getAdjustedKey(startKey, tableNameBytes, cf, false), + getAdjustedKey(stopKey, tableNameBytes, cf, true), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + clientTimeStamp); + } s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES); s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES); s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b68b1c4/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java index d03af7a..830b1d6 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java @@ -219,7 +219,7 @@ public class StatisticsWriter implements Closeable { throws IOException { long timeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP ? 
tracker.getMaxTimeStamp() : clientTimeStamp; - List<Result> statsForRegion = StatisticsUtil.readStatistics(statsWriterTable, tableName, fam, + List<Result> statsForRegion = StatisticsUtil.readStatisticsForDelete(statsWriterTable, tableName, fam, region.getRegionInfo().getStartKey(), region.getRegionInfo().getEndKey(), timeStamp); for (Result result : statsForRegion) { mutations.add(new Delete(result.getRow(), timeStamp - 1));
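As a supplement to the PhoenixTable change above, here is a standalone sketch of the per-row byte-size and row-count arithmetic it introduces when no guidepost statistics are available. All numeric constants below are assumed sample values for illustration, not Phoenix defaults; the structure mirrors the patch (guidepost depth halved as the byte estimate, one KeyValue per column, key-only fallback).

public class RowCountEstimateSketch {
    public static void main(String[] args) {
        long guidePostDepth = 100L * 1024 * 1024;  // assumed guidepost depth in bytes
        long byteCount = guidePostDepth / 2;       // patch halves the depth as the table's byte estimate

        int keyValueOverhead = 50;                 // stand-in for SizedUtil.KEY_VALUE_SIZE
        long keySize = 20;                         // assumed estimated row key size
        int[] columnByteSizes = {4, 4, 8, 12};     // fixed-width sizes or a variable-length estimate

        long rowSize = 0;
        for (int columnByteSize : columnByteSizes) {
            // one KeyValue per column: cell overhead + row key + column value
            rowSize += keyValueOverhead + keySize + columnByteSize;
        }
        if (rowSize == 0) {
            rowSize = keySize;                     // key-only tables fall back to the key size
        }

        long rowCount = byteCount / rowSize;       // rows implied by the byte estimate
        System.out.println("estimated rowSize=" + rowSize + " rowCount=" + rowCount);
    }
}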