jpisaac commented on code in PR #2130:
URL: https://github.com/apache/phoenix/pull/2130#discussion_r2099066288
##########
phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java:
##########
@@ -1494,76 +1498,79 @@ public static void moveHBaseLevelTTLToSYSCAT(PhoenixConnection oldMetaConnection
             Scan scan = new Scan();
             scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES);
             // Push down the filter to hbase to avoid transfer
-            SingleColumnValueFilter tableFilter = new SingleColumnValueFilter(
+            QualifierFilter tableTypeQualifierFilter = new QualifierFilter(
+                    CompareOperator.EQUAL, new BinaryComparator(TABLE_TYPE_BYTES));
+
+            SingleColumnValueFilter tableTypeFilter = new SingleColumnValueFilter(
                     DEFAULT_COLUMN_FAMILY_BYTES,
                     TABLE_TYPE_BYTES, CompareOperator.EQUAL,
                     PTableType.TABLE.getSerializedValue().getBytes(StandardCharsets.UTF_8));
-
-            tableFilter.setFilterIfMissing(true);
+            tableTypeFilter.setFilterIfMissing(true);
             // Limit number of records
             PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE);
-            scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, tableFilter));
+            scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, tableTypeQualifierFilter, tableTypeFilter));
             if (pageMore) {
                 scan.withStartRow(lastRowKey, false);
             }
             // Collect the row keys to process them in batch
             try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) {
                 int count = 0;
                 List<byte[]> rowKeys = new ArrayList<>();
-                List<Put> puts = new ArrayList<>();
+                List<Mutation> mutations = new ArrayList<>();
                 for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
                     count++;
                     lastRowKey = rr.getRow();
                     byte[] tmpKey = new byte[lastRowKey.length];
                     System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length);
                     rowKeys.add(tmpKey);
-                    String tableName = SchemaUtil.getTableName(rr.getValue(
-                            DEFAULT_COLUMN_FAMILY_BYTES, TABLE_SCHEM_BYTES),
-                            rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TABLE_NAME_BYTES));
-                    if (tableName == null || Arrays.equals(rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES,
-                            TABLE_SCHEM_BYTES), SYSTEM_SCHEMA_NAME_BYTES)) {
+                    byte[][] rowKeyMetaData = new byte[3][];
+                    getVarChars(tmpKey, 3, rowKeyMetaData);
+                    byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
+                    byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
+
+                    String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+                    if (SchemaUtil.isSystemTable(SchemaUtil.getTableNameAsBytes(schemaName, tableName))) {
                         //We do not support system table ttl through phoenix ttl, and it will be moved to a
                         //constant value in future commit.
                         continue;
                     }
                     TableDescriptor tableDesc = admin.getDescriptor(SchemaUtil.getPhysicalTableName(
-                            tableName, readOnlyProps));
+                            fullTableName, readOnlyProps));
                     int ttl = tableDesc.getColumnFamily(DEFAULT_COLUMN_FAMILY_BYTES).getTimeToLive();
-                    if (ttl != ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
-                        //As we have ttl defined fot this table create a Put to set TTL.
-                        long rowTS = rr.rawCells()[0].getTimestamp();
+                    // As we have ttl defined for this table create a Put to set TTL with
+                    // backward compatibility in mind.
+                    long rowTS = EnvironmentEdgeManager.currentTimeMillis();
+
+                    if (ttl == HConstants.FOREVER || ttl == TTL_NOT_DEFINED) {
+                        // Set the TTL column to null
+                        Delete deleteColumn = new Delete(tmpKey);
+                        KeyValue kv = GenericKeyValueBuilder.INSTANCE.buildDeleteColumns(
+                                new ImmutableBytesWritable(tmpKey),
+                                new ImmutableBytesWritable(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES),
+                                new ImmutableBytesWritable(TTL_BYTES), rowTS);
+                        deleteColumn.add(kv);
+                        mutations.add(deleteColumn);
Review Comment:
Review Comment:
Oh, you are saying that it is null to begin with, so there is no point in adding a null again? Let me verify, and I will remove it if that is the case.
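
For reference, here is a purely illustrative sketch (not part of the PR) of what skipping the redundant tombstone could look like. It reuses the loop's locals (`rr`, `tmpKey`, `ttl`, `rowTS`, `mutations`) and assumes the TTL cell is actually visible in the scanned `Result`; with the new `QualifierFilter` on `TABLE_TYPE_BYTES` it would not be, so the filter would need to also admit `TTL_BYTES`, or the row would need a separate `Get`:

```java
// Sketch only: skip the Delete when SYSTEM.CATALOG has no TTL cell for this row,
// since deleting a column that was never written is a no-op.
if (ttl == HConstants.FOREVER || ttl == TTL_NOT_DEFINED) {
    // Assumes the TTL cell can be seen in 'rr' (filter widened or row re-read).
    if (rr.containsColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, TTL_BYTES)) {
        Delete deleteColumn = new Delete(tmpKey);
        KeyValue kv = GenericKeyValueBuilder.INSTANCE.buildDeleteColumns(
                new ImmutableBytesWritable(tmpKey),
                new ImmutableBytesWritable(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES),
                new ImmutableBytesWritable(TTL_BYTES), rowTS);
        deleteColumn.add(kv);
        mutations.add(deleteColumn);
    }
    // Otherwise the TTL column is already null in SYSTEM.CATALOG and there is
    // nothing to clear.
}
```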