PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete (Sergey Soldatov)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fc6cf43a Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fc6cf43a Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fc6cf43a Branch: refs/heads/system-catalog Commit: fc6cf43a476c2048c2ee4431311487b30517a208 Parents: 153b357 Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org> Authored: Thu Apr 19 17:19:20 2018 +0530 Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org> Committed: Thu Apr 19 17:19:20 2018 +0530 ---------------------------------------------------------------------- ...ReplayWithIndexWritesAndCompressedWALIT.java | 2 +- .../end2end/ColumnProjectionOptimizationIT.java | 14 ++++---- .../apache/phoenix/end2end/DynamicColumnIT.java | 12 +++---- .../apache/phoenix/end2end/DynamicFamilyIT.java | 26 +++++++------- .../phoenix/end2end/MappingTableDataTypeIT.java | 4 +-- .../phoenix/end2end/NativeHBaseTypesIT.java | 30 ++++++++-------- .../end2end/QueryDatabaseMetaDataIT.java | 4 +-- .../org/apache/phoenix/end2end/UpgradeIT.java | 4 +-- .../phoenix/tx/ParameterizedTransactionIT.java | 4 +-- .../coprocessor/MetaDataEndpointImpl.java | 36 ++++++++++---------- .../UngroupedAggregateRegionObserver.java | 2 +- .../apache/phoenix/index/IndexMaintainer.java | 12 +++---- .../query/ConnectionQueryServicesImpl.java | 2 +- .../org/apache/phoenix/schema/PTableImpl.java | 4 +-- .../phoenix/schema/stats/StatisticsWriter.java | 9 +++-- .../java/org/apache/phoenix/util/IndexUtil.java | 6 ++-- .../wal/ReadWriteKeyValuesWithCodecTest.java | 6 ++-- .../index/covered/CoveredColumnIndexCodec.java | 2 +- .../index/covered/LocalTableStateTest.java | 10 +++--- .../covered/TestCoveredColumnIndexCodec.java | 6 ++-- .../hbase/index/write/TestIndexWriter.java | 10 ++---- .../index/write/TestParalleIndexWriter.java | 2 +- .../write/TestParalleWriterIndexCommitter.java | 2 +- .../index/write/TestWALRecoveryCaching.java | 4 +-- 
.../recovery/TestPerRegionIndexWriteCache.java | 4 +-- .../java/org/apache/phoenix/util/TestUtil.java | 4 +-- 26 files changed, 106 insertions(+), 115 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index e2ddd4e..49933b2 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -210,7 +210,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT { //make an attempted write to the primary that should also be indexed byte[] rowkey = Bytes.toBytes("indexed_row_key"); Put p = new Put(rowkey); - p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value")); + p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value")); region.put(p); // we should then see the server go down http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java index e4ff66f..43dc302 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java +++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java @@ -242,19 +242,19 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT { try { htable = conn2.getQueryServices().getTable(htableName); Put put = new Put(PInteger.INSTANCE.toBytes(1)); - put.add(cfB, c1, PInteger.INSTANCE.toBytes(1)); - put.add(cfC, c2, PLong.INSTANCE.toBytes(2)); + put.addColumn(cfB, c1, PInteger.INSTANCE.toBytes(1)); + put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(2)); htable.put(put); put = new Put(PInteger.INSTANCE.toBytes(2)); - put.add(cfC, c2, PLong.INSTANCE.toBytes(10)); - put.add(cfC, c3, PVarchar.INSTANCE.toBytes("abcd")); + put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(10)); + put.addColumn(cfC, c3, PVarchar.INSTANCE.toBytes("abcd")); htable.put(put); put = new Put(PInteger.INSTANCE.toBytes(3)); - put.add(cfB, c1, PInteger.INSTANCE.toBytes(3)); - put.add(cfC, c2, PLong.INSTANCE.toBytes(10)); - put.add(cfC, c3, PVarchar.INSTANCE.toBytes("abcd")); + put.addColumn(cfB, c1, PInteger.INSTANCE.toBytes(3)); + put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(10)); + put.addColumn(cfC, c3, PVarchar.INSTANCE.toBytes("abcd")); htable.put(put); conn2.close(); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java index f55d01a..04402cd 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java @@ -88,12 +88,12 @@ public class DynamicColumnIT extends ParallelStatsDisabledIT { byte[] key = Bytes.toBytes("entry1"); Put put = new Put(key); - put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default")); - 
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first")); - put.add(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1")); - put.add(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2")); - put.add(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1")); - put.add(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2")); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default")); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first")); + put.addColumn(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1")); + put.addColumn(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2")); + put.addColumn(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1")); + put.addColumn(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2")); mutations.add(put); hTable.batch(mutations); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java index 866a8d2..acae6ee 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java @@ -115,25 +115,25 @@ public class DynamicFamilyIT extends ParallelStatsDisabledIT { Put put; List<Row> mutations = new ArrayList<Row>(); put = new Put(Bytes.toBytes("entry1")); - put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); - put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID2_BYTES), PInteger.INSTANCE.toBytes(ENTRY1_CLICK_COUNT)); - put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID1_LOGIN_TIME)); - put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), 
PTime.INSTANCE.toBytes(ENTRY1_USER_ID2_LOGIN_TIME)); + put.addColumn(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); + put.addColumn(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID2_BYTES), PInteger.INSTANCE.toBytes(ENTRY1_CLICK_COUNT)); + put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID1_LOGIN_TIME)); + put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID2_LOGIN_TIME)); mutations.add(put); put = new Put(Bytes.toBytes("entry2")); - put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); - put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID3_BYTES), PInteger.INSTANCE.toBytes(ENTRY2_CLICK_COUNT)); - put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID2_LOGIN_TIME)); - put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID3_LOGIN_TIME)); + put.addColumn(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); + put.addColumn(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID3_BYTES), PInteger.INSTANCE.toBytes(ENTRY2_CLICK_COUNT)); + put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID2_LOGIN_TIME)); + put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID3_LOGIN_TIME)); mutations.add(put); put = new Put(Bytes.toBytes("entry3")); - put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); - put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID1_BYTES), PInteger.INSTANCE.toBytes(ENTRY3_CLICK_COUNT)); - put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), 
PTime.INSTANCE.toBytes(ENTRY3_USER_ID1_LOGIN_TIME)); - put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID2_LOGIN_TIME)); - put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME)); + put.addColumn(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); + put.addColumn(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID1_BYTES), PInteger.INSTANCE.toBytes(ENTRY3_CLICK_COUNT)); + put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID1_LOGIN_TIME)); + put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID2_LOGIN_TIME)); + put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME)); mutations.add(put); hTable.batch(mutations); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java index 52e22bf..043907b 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java @@ -107,8 +107,8 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT { private void insertData(final byte[] tableName, HBaseAdmin admin, HTableInterface t) throws IOException, InterruptedException { Put p = new Put(Bytes.toBytes("row")); - p.add(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1")); - p.add(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), 
Bytes.toBytes("value2")); + p.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1")); + p.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), Bytes.toBytes("value2")); t.put(p); t.flushCommits(); admin.flush(tableName); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java index 127c25a..50563d4 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java @@ -96,8 +96,8 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT { bKey = key = ByteUtil.concat(Bytes.toBytes(20), Bytes.toBytes(200L), Bytes.toBytes("b")); put = new Put(key); - put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5000)); - put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50000L)); + put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5000)); + put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50000L)); mutations.add(put); // FIXME: the version of the Delete constructor without the lock args was introduced // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version @@ -106,30 +106,30 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT { Delete del = new Delete(key, ts); mutations.add(del); put = new Put(key); - put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(2000)); - put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(20000L)); + put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(2000)); + put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, 
Bytes.toBytes(20000L)); mutations.add(put); key = ByteUtil.concat(Bytes.toBytes(10), Bytes.toBytes(100L), Bytes.toBytes("a")); put = new Put(key); - put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5)); - put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50L)); + put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5)); + put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50L)); mutations.add(put); put = new Put(key); - put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10)); - put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L)); + put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10)); + put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L)); mutations.add(put); key = ByteUtil.concat(Bytes.toBytes(30), Bytes.toBytes(300L), Bytes.toBytes("c")); put = new Put(key); - put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(3000)); - put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(30000L)); + put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(3000)); + put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(30000L)); mutations.add(put); key = ByteUtil.concat(Bytes.toBytes(40), Bytes.toBytes(400L), Bytes.toBytes("d")); put = new Put(key); - put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(4000)); - put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(40000L)); + put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(4000)); + put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(40000L)); mutations.add(put); hTable.batch(mutations); @@ -286,9 +286,9 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT { // negative number for an unsigned type key = ByteUtil.concat(Bytes.toBytes(-10), Bytes.toBytes(100L), Bytes.toBytes("e")); put = new 
Put(key); - put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10)); - put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L)); - put.add(family, QueryConstants.EMPTY_COLUMN_BYTES, HConstants.LATEST_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY); + put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10)); + put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L)); + put.addColumn(family, QueryConstants.EMPTY_COLUMN_BYTES, HConstants.LATEST_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY); mutations.add(put); hTable.batch(mutations); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java index c7c8ebf..4f84304 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java @@ -899,8 +899,8 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT { pconn.getQueryServices() .getTable(SchemaUtil.getTableNameAsBytes(schemaName, tableName)); Put put = new Put(Bytes.toBytes("0")); - put.add(cfB, Bytes.toBytes("COL1"), PInteger.INSTANCE.toBytes(1)); - put.add(cfC, Bytes.toBytes("COL2"), PLong.INSTANCE.toBytes(2)); + put.addColumn(cfB, Bytes.toBytes("COL1"), PInteger.INSTANCE.toBytes(1)); + put.addColumn(cfC, Bytes.toBytes("COL2"), PLong.INSTANCE.toBytes(2)); htable.put(put); // Should be ok b/c we've marked the view with IMMUTABLE_ROWS=true http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java ---------------------------------------------------------------------- diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java index b71dd7c..48a49b2 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java @@ -624,7 +624,7 @@ public class UpgradeIT extends ParallelStatsDisabledIT { schemaName == null ? new byte[0] : Bytes.toBytes(schemaName), Bytes.toBytes(tableName)); Put viewColumnDefinitionPut = new Put(rowKey, HConstants.LATEST_TIMESTAMP); - viewColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + viewColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, HConstants.LATEST_TIMESTAMP, null); try (PhoenixConnection conn = @@ -738,7 +738,7 @@ public class UpgradeIT extends ParallelStatsDisabledIT { byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; byte[] qualifier = UPGRADE_MUTEX; Put put = new Put(row); - put.add(family, qualifier, UPGRADE_MUTEX_UNLOCKED); + put.addColumn(family, qualifier, UPGRADE_MUTEX_UNLOCKED); sysMutexTable.put(put); sysMutexTable.flushCommits(); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java index 5421801..ce01e2b 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java @@ -273,7 +273,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT { HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes( 
nonTxTableName)); List<Put>puts = Lists.newArrayList(new Put(PInteger.INSTANCE.toBytes(1)), new Put(PInteger.INSTANCE.toBytes(2)), new Put(PInteger.INSTANCE.toBytes(3))); for (Put put : puts) { - put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); } htable.put(puts); @@ -333,7 +333,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT { // Reset empty column value to an empty value like it is pre-transactions HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName)); Put put = new Put(PInteger.INSTANCE.toBytes(1)); - put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY); htable.put(put); HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index 34218d5..4c72c2d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -2639,7 +2639,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso int newPosition = ++lastOrdinalPos; byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()]; 
PInteger.INSTANCE.getCodec().encodeInt(newPosition, ptr, 0); - viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + viewColumnPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr); mutationsForAddingColumnsToViews.add(viewColumnPut); } else { @@ -2674,7 +2674,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso short newKeySeq = (short)(currentKeySeq + deltaNumPkColsSoFar); byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()]; PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0); - viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + viewColumnPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes); addMutationsForAddingPkColsToViewIndexes(mutationsForAddingColumnsToViews, clientTimeStamp, view, deltaNumPkColsSoFar, columnName, viewColumnPut); @@ -2713,7 +2713,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso } byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()]; PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0); - viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr); // invalidate the view so that it is removed from the cache invalidateList.add(new ImmutableBytesPtr(viewKey)); @@ -2751,21 +2751,21 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso int oldBaseColumnCount = view.getBaseColumnCount(); byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()]; PInteger.INSTANCE.getCodec().encodeInt(oldBaseColumnCount + baseTableColumnDelta, baseColumnCountPtr, 0); - viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + 
viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp, baseColumnCountPtr); } if (viewColumnDelta != 0) { byte[] columnCountPtr = new byte[PInteger.INSTANCE.getByteSize()]; PInteger.INSTANCE.getCodec().encodeInt(numCols + viewColumnDelta, columnCountPtr, 0); - viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, clientTimeStamp, columnCountPtr); } if (changeSequenceNumber) { byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()]; PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0); - viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr); mutationsForAddingColumnsToViews.add(viewHeaderRowPut); @@ -2781,7 +2781,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso Put positionUpdatePut = new Put(columnKey, clientTimeStamp); byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()]; PInteger.INSTANCE.getCodec().encodeInt(ordinalPosition, ptr, 0); - positionUpdatePut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + positionUpdatePut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr); mutationsForAddingColumnsToViews.add(positionUpdatePut); i++; @@ -2796,7 +2796,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso if (!basePhysicalTable.isTransactional() && switchAttribute(basePhysicalTable, basePhysicalTable.isTransactional(), tableMetadata, TRANSACTIONAL_BYTES)) { invalidateList.add(new ImmutableBytesPtr(viewKey)); Put put = new Put(viewKey); - put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + 
put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, TRANSACTIONAL_BYTES, clientTimeStamp, PBoolean.INSTANCE.toBytes(true)); mutationsForAddingColumnsToViews.add(put); } @@ -3034,14 +3034,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso long newSequenceNumber = index.getSequenceNumber() + 1; byte[] newSequenceNumberPtr = new byte[PLong.INSTANCE.getByteSize()]; PLong.INSTANCE.getCodec().encodeLong(newSequenceNumber, newSequenceNumberPtr, 0); - indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexHeaderRowMutation.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, newSequenceNumberPtr); // increase the column count int newColumnCount = index.getColumns().size() + deltaNumPkColsSoFar; byte[] newColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()]; PInteger.INSTANCE.getCodec().encodeInt(newColumnCount, newColumnCountPtr, 0); - indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexHeaderRowMutation.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, newColumnCountPtr); // add index row header key to the invalidate list to force clients to fetch the latest meta-data @@ -3068,7 +3068,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso int indexColumnDataType = IndexUtil.getIndexColumnDataType(true, PDataType.fromTypeId(viewPkColumnDataType)).getSqlType(); PInteger.INSTANCE.getCodec().encodeInt(indexColumnDataType, indexColumnDataTypeBytes, 0); - indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.DATA_TYPE_BYTES, indexColumnDataTypeBytes); @@ -3078,7 +3078,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES); if (decimalDigits != null && 
decimalDigits.size() > 0) { Cell decimalDigit = decimalDigits.get(0); - indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES, decimalDigit.getValueArray()); } @@ -3088,7 +3088,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES); if (columnSizes != null && columnSizes.size() > 0) { Cell columnSize = columnSizes.get(0); - indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES, columnSize.getValueArray()); } @@ -3097,7 +3097,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso PhoenixDatabaseMetaData.SORT_ORDER_BYTES); if (sortOrders != null && sortOrders.size() > 0) { Cell sortOrder = sortOrders.get(0); - indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.SORT_ORDER_BYTES, sortOrder.getValueArray()); } @@ -3106,7 +3106,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES); if (dataTableNames != null && dataTableNames.size() > 0) { Cell dataTableName = dataTableNames.get(0); - indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES, dataTableName.getValueArray()); } @@ -3114,12 +3114,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso byte[] ordinalPositionBytes = new byte[PInteger.INSTANCE.getByteSize()]; int ordinalPositionOfNewCol = oldNumberOfColsInIndex + deltaNumPkColsSoFar; 
PInteger.INSTANCE.getCodec().encodeInt(ordinalPositionOfNewCol, ordinalPositionBytes, 0); - indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, ordinalPositionBytes); // New PK columns have to be nullable after the first DDL byte[] isNullableBytes = PInteger.INSTANCE.toBytes(ResultSetMetaData.columnNullable); - indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.NULLABLE_BYTES, isNullableBytes); // Set the key sequence for the pk column to be added @@ -3127,7 +3127,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso short newKeySeq = (short)(currentKeySeq + deltaNumPkColsSoFar); byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()]; PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0); - indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes); mutationsForAddingColumnsToViews.add(indexColumnDefinitionPut); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index 31b512a..d202193 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -754,7 +754,7 @@ public 
class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver if (!timeStamps.contains(kvts)) { Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength()); - put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, + put.addColumn(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY); mutations.add(put); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java index 2f41dc3..bc2523d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java @@ -1154,15 +1154,15 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> { ColumnReference indexColumn = coveredColumnsMap.get(ref); // If table delete was single version, then index delete should be as well if (deleteType == DeleteType.SINGLE_VERSION) { - delete.deleteFamilyVersion(indexColumn.getFamily(), ts); + delete.addFamilyVersion(indexColumn.getFamily(), ts); } else { - delete.deleteFamily(indexColumn.getFamily(), ts); + delete.addFamily(indexColumn.getFamily(), ts); } } if (deleteType == DeleteType.SINGLE_VERSION) { - delete.deleteFamilyVersion(emptyCF, ts); + delete.addFamilyVersion(emptyCF, ts); } else { - delete.deleteFamily(emptyCF, ts); + delete.addFamily(emptyCF, ts); } delete.setDurability(!indexWALDisabled ? 
Durability.USE_DEFAULT : Durability.SKIP_WAL); return delete; @@ -1181,9 +1181,9 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> { ColumnReference indexColumn = coveredColumnsMap.get(ref); // If point delete for data table, then use point delete for index as well if (kv.getTypeByte() == KeyValue.Type.Delete.getCode()) { - delete.deleteColumn(indexColumn.getFamily(), indexColumn.getQualifier(), ts); + delete.addColumn(indexColumn.getFamily(), indexColumn.getQualifier(), ts); } else { - delete.deleteColumns(indexColumn.getFamily(), indexColumn.getQualifier(), ts); + delete.addColumns(indexColumn.getFamily(), indexColumn.getQualifier(), ts); } } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index f5e83f2..8866ced 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -2651,7 +2651,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE); Put put = new Put(mutexRowKey); - put.add(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED); + put.addColumn(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED); sysMutexTable.put(put); } } catch (IOException e) { http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java index 1a11427..082a58b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java @@ -1040,10 +1040,10 @@ public class PTableImpl implements PTable { newMutations(); Delete delete = new Delete(key); if (families.isEmpty()) { - delete.deleteFamily(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), ts); + delete.addFamily(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), ts); } else { for (PColumnFamily colFamily : families) { - delete.deleteFamily(colFamily.getName().getBytes(), ts); + delete.addFamily(colFamily.getName().getBytes(), ts); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java index 8956862..ae077b9 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java @@ -61,7 +61,6 @@ import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.PrefixByteDecoder; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; -import org.apache.phoenix.util.TimeKeeper; import com.google.protobuf.ServiceException; @@ -189,13 +188,13 @@ public class StatisticsWriter implements Closeable { private void addGuidepost(ImmutableBytesPtr cfKey, List<Mutation> mutations, ImmutableBytesWritable ptr, long byteCount, long rowCount, long timeStamp) { byte[] prefix = 
StatisticsUtil.getRowKey(tableName, cfKey, ptr); Put put = new Put(prefix); - put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES, + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES, timeStamp, PLong.INSTANCE.toBytes(byteCount)); - put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES, timeStamp, PLong.INSTANCE.toBytes(rowCount)); // Add our empty column value so queries behave correctly - put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp, + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp, ByteUtil.EMPTY_BYTE_ARRAY); mutations.add(put); } @@ -241,7 +240,7 @@ public class StatisticsWriter implements Closeable { long currentTime = EnvironmentEdgeManager.currentTimeMillis(); byte[] prefix = tableName; Put put = new Put(prefix); - put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, timeStamp, PDate.INSTANCE.toBytes(new Date(currentTime))); return put; } http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java index 33b7383..7e280f4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java @@ -735,11 +735,11 @@ public class IndexUtil { HTableInterface metaTable, PIndexState newState) throws Throwable { // Mimic the Put 
that gets generated by the client on an update of the index state Put put = new Put(indexTableKey); - put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, newState.getSerializedBytes()); - put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(minTimeStamp)); - put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0)); final List<Mutation> tableMetadata = Collections.<Mutation> singletonList(put); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java index 469dd21..8bb491d 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java @@ -39,12 +39,10 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.util.LRUDictionary; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest; import org.apache.phoenix.hbase.index.IndexTestingUtils; import 
org.apache.phoenix.hbase.index.wal.IndexedKeyValue; import org.junit.BeforeClass; import org.junit.Test; -import org.junit.experimental.categories.Category; /** * Simple test to read/write simple files via our custom {@link WALCellCodec} to ensure properly @@ -93,14 +91,14 @@ public class ReadWriteKeyValuesWithCodecTest { // Build up a couple of edits List<WALEdit> edits = new ArrayList<WALEdit>(); Put p = new Put(ROW); - p.add(FAMILY, null, Bytes.toBytes("v1")); + p.addColumn(FAMILY, null, Bytes.toBytes("v1")); WALEdit withPut = new WALEdit(); addMutation(withPut, p, FAMILY); edits.add(withPut); Delete d = new Delete(ROW); - d.deleteColumn(FAMILY, null); + d.addColumn(FAMILY, null); WALEdit withDelete = new WALEdit(); addMutation(withDelete, d, FAMILY); edits.add(withDelete); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java index a668c21..83d05f3 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java @@ -101,7 +101,7 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec { // add each of the corresponding families to the put int count = 0; for (ColumnEntry column : columns) { - indexInsert.add(INDEX_ROW_COLUMN_FAMILY, + indexInsert.addColumn(INDEX_ROW_COLUMN_FAMILY, ArrayUtils.addAll(Bytes.toBytes(count++), toIndexQualifier(column.ref)), null); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java 
---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java index 82f3c3c..c7e1769 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java @@ -73,7 +73,7 @@ public class LocalTableStateTest { @Test public void testCorrectOrderingWithLazyLoadingColumns() throws Exception { Put m = new Put(row); - m.add(fam, qual, ts, val); + m.addColumn(fam, qual, ts, val); // setup mocks Configuration conf = new Configuration(false); RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class); @@ -132,7 +132,7 @@ public class LocalTableStateTest { }; Put m = new Put(row); - m.add(fam, qual, ts, val); + m.addColumn(fam, qual, ts, val); // setup mocks Configuration conf = new Configuration(false); RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class); @@ -169,7 +169,7 @@ public class LocalTableStateTest { }; Put m = new Put(row); - m.add(fam, qual, ts, val); + m.addColumn(fam, qual, ts, val); // setup mocks Configuration conf = new Configuration(false); RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class); @@ -201,7 +201,7 @@ public class LocalTableStateTest { @SuppressWarnings("unchecked") public void testCorrectRollback() throws Exception { Put m = new Put(row); - m.add(fam, qual, ts, val); + m.addColumn(fam, qual, ts, val); // setup mocks RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class); @@ -269,7 +269,7 @@ public class LocalTableStateTest { }); LocalHBaseState state = new LocalTable(env); Put pendingUpdate = new Put(row); - pendingUpdate.add(fam, qual, ts, val); + pendingUpdate.addColumn(fam, qual, ts, val); 
LocalTableState table = new LocalTableState(state, pendingUpdate); // do the lookup for the given column http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java index 5cc6ada..d63dd6b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java @@ -200,7 +200,7 @@ public class TestCoveredColumnIndexCodec { Delete d = new Delete(PK, 2); // need to set the timestamp here, as would actually happen on the server, unlike what happens // with puts, where the get the constructor specified timestamp for unspecified methods. 
- d.deleteFamily(FAMILY, 2); + d.addFamily(FAMILY, 2); // setup the next batch of 'current state', basically just ripping out the current state from // the last round table = new SimpleTableState(new Result(kvs)); @@ -221,12 +221,12 @@ public class TestCoveredColumnIndexCodec { // now with the delete of the columns d = new Delete(PK, 2); - d.deleteColumns(FAMILY, QUAL, 2); + d.addColumns(FAMILY, QUAL, 2); ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d); // this delete needs to match timestamps exactly, by contract, to have any effect d = new Delete(PK, 1); - d.deleteColumn(FAMILY, QUAL, 1); + d.addColumn(FAMILY, QUAL, 1); ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java index 918c411..a25f7cf 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -51,8 +50,6 @@ import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.phoenix.hbase.index.StubAbortable; import org.apache.phoenix.hbase.index.TableName; import org.apache.phoenix.hbase.index.exception.IndexWriteException; -import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException; -import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import 
org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.junit.Rule; import org.junit.Test; @@ -60,9 +57,6 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.collect.LinkedListMultimap; -import com.google.common.collect.Multimap; - public class TestIndexWriter { private static final Log LOG = LogFactory.getLog(TestIndexWriter.class); @Rule @@ -115,7 +109,7 @@ public class TestIndexWriter { byte[] tableName = this.testName.getTableName(); Put m = new Put(row); - m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); + m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); Collection<Pair<Mutation, byte[]>> indexUpdates = Arrays.asList(new Pair<Mutation, byte[]>(m, tableName)); @@ -197,7 +191,7 @@ public class TestIndexWriter { // update a single table Put m = new Put(row); - m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); + m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); final List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(); indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName)); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java index bfe1d0d..cd29e10 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java @@ -101,7 +101,7 @@ public class TestParalleIndexWriter { Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc); ImmutableBytesPtr tableName = new 
ImmutableBytesPtr(this.test.getTableName()); Put m = new Put(row); - m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); + m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation> create(); indexUpdates.put(new HTableInterfaceReference(tableName), m); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java index 6f0881b..32ae108 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java @@ -102,7 +102,7 @@ public class TestParalleWriterIndexCommitter { ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName()); Put m = new Put(row); - m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); + m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation> create(); indexUpdates.put(new HTableInterfaceReference(tableName), m); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java index 
d3af9cd..b1e87e5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java @@ -192,7 +192,7 @@ public class TestWALRecoveryCaching { // load some data into the table Put p = new Put(Bytes.toBytes("row")); - p.add(family, qual, Bytes.toBytes("value")); + p.addColumn(family, qual, Bytes.toBytes("value")); HTable primary = new HTable(conf, testTable.getTableName()); primary.put(p); primary.flushCommits(); @@ -235,7 +235,7 @@ public class TestWALRecoveryCaching { // make a second put that (1), isn't indexed, so we can be sure of the index state and (2) // ensures that our table is back up Put p2 = new Put(p.getRow()); - p2.add(nonIndexedFamily, Bytes.toBytes("Not indexed"), Bytes.toBytes("non-indexed value")); + p2.addColumn(nonIndexedFamily, Bytes.toBytes("Not indexed"), Bytes.toBytes("non-indexed value")); primary.put(p2); primary.flushCommits(); http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java index 819c7f6..7253165 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java @@ -61,8 +61,8 @@ public class TestPerRegionIndexWriteCache { Put p = new Put(row); Put p2 = new Put(Bytes.toBytes("other row")); { - p.add(family, qual, val); - p2.add(family, qual, val); + p.addColumn(family, qual, val); + p2.addColumn(family, qual, val); } HRegion r1; 
// FIXME: Uses private type http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java index 277e257..341fbec 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java @@ -786,10 +786,10 @@ public class TestUtil { byte[] markerRowKey = Bytes.toBytes("TO_DELETE"); Put put = new Put(markerRowKey); - put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); htable.put(put); Delete delete = new Delete(markerRowKey); - delete.deleteColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); htable.delete(delete); htable.close(); if (table.isTransactional()) {