[
https://issues.apache.org/jira/browse/PHOENIX-6888?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17704386#comment-17704386
]
ASF GitHub Bot commented on PHOENIX-6888:
-----------------------------------------
virajjasani commented on code in PR #1569:
URL: https://github.com/apache/phoenix/pull/1569#discussion_r1146979436
##########
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java:
##########
@@ -0,0 +1,776 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static
org.apache.phoenix.query.QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX;
+
+/**
+ * The store scanner that implements Phoenix TTL and Max Lookback. Phoenix
overrides the
+ * implementation data retention policies in HBase which is built at the cell
and implements
+ * its row level data retention within this store scanner.
+ */
+public class CompactionScanner implements InternalScanner {
+ private static final Logger LOGGER =
LoggerFactory.getLogger(CompactionScanner.class);
+ public static final String SEPARATOR = ":";
+ private final InternalScanner storeScanner;
+ private final Region region;
+ private final Store store;
+ private final Configuration config;
+ private final RegionCoprocessorEnvironment env;
+ private long maxLookbackWindowStart;
+ private long ttlWindowStart;
+ private long ttl;
+ private final long maxLookbackInMillis;
+ private int minVersion;
+ private int maxVersion;
+ private final boolean emptyCFStore;
+ private KeepDeletedCells keepDeletedCells;
+ private long compactionTime;
+ private static Map<String, Long> maxLookbackMap = new
ConcurrentHashMap<>();
+
+ public CompactionScanner(RegionCoprocessorEnvironment env,
+ Store store,
+ InternalScanner storeScanner,
+ long maxLookbackInMillis,
+ byte[] emptyCF) {
+ this.storeScanner = storeScanner;
+ this.region = env.getRegion();
+ this.store = store;
+ this.env = env;
+ this.config = env.getConfiguration();
+ compactionTime = EnvironmentEdgeManager.currentTimeMillis();
+ this.maxLookbackInMillis = maxLookbackInMillis;
+ String columnFamilyName = store.getColumnFamilyName();
+ String tableName = region.getRegionInfo().getTable().getNameAsString();
+ Long overriddenMaxLookback =
+ maxLookbackMap.remove(tableName + SEPARATOR +
columnFamilyName);
+ this.maxLookbackWindowStart = compactionTime - (overriddenMaxLookback
== null ?
+ maxLookbackInMillis : Math.max(maxLookbackInMillis,
overriddenMaxLookback));
+ ColumnFamilyDescriptor cfd = store.getColumnFamilyDescriptor();
+ ttl = cfd.getTimeToLive();
+ this.ttlWindowStart = ttl == HConstants.FOREVER ? 1 : compactionTime -
ttl * 1000;
+ ttl *= 1000;
+ this.maxLookbackWindowStart = Math.max(ttlWindowStart,
maxLookbackWindowStart);
+ this.minVersion = cfd.getMinVersions();
+ this.maxVersion = cfd.getMaxVersions();
+ this.keepDeletedCells = cfd.getKeepDeletedCells();
+ emptyCFStore = region.getTableDescriptor().getColumnFamilies().length
== 1 ||
+ columnFamilyName.equals(Bytes.toString(emptyCF)) ||
+ columnFamilyName.startsWith(LOCAL_INDEX_COLUMN_FAMILY_PREFIX);
+ }
+
+ /**
+ * Any coprocessors within a JVM can extend the max lookback window for a
column family
+ * by calling this static method.
+ */
+ public static void overrideMaxLookback(String tableName, String
columnFamilyName,
+ long maxLookbackInMillis) {
+ if (tableName == null || columnFamilyName == null) {
+ return;
+ }
+ Long old = maxLookbackMap.putIfAbsent(tableName + SEPARATOR +
columnFamilyName,
+ maxLookbackInMillis);
+ if (old == null || old < maxLookbackInMillis) {
+ maxLookbackMap.put(columnFamilyName, maxLookbackInMillis);
+ }
Review Comment:
I didn't understand where we are retrieving only CF (without table name)
from this map?
##########
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java:
##########
@@ -588,21 +590,70 @@ public InternalScanner
preCompact(ObserverContext<RegionCoprocessorEnvironment>
@Override
public InternalScanner run() throws Exception {
InternalScanner internalScanner = scanner;
+ if (request.isMajor()) {
+ boolean isDisabled = false;
+ final String fullTableName =
tableName.getNameAsString();
+ PTable table = null;
+ try (PhoenixConnection conn =
QueryUtil.getConnectionOnServer(
+
compactionConfig).unwrap(PhoenixConnection.class)) {
+ table = PhoenixRuntime.getTableNoCache(conn,
fullTableName);
+ } catch (Exception e) {
+ if (e instanceof TableNotFoundException) {
+ LOGGER.debug("Ignoring HBase table that is not
a Phoenix table: "
+ + fullTableName);
+ // non-Phoenix HBase tables won't be found, do
nothing
+ } else {
+ LOGGER.error(
+ "Unable to modify compaction scanner
to retain deleted "
+ + "cells for a table with
disabled Index; "
+ + fullTableName, e);
+ }
+ }
+ if (table != null &&
+
!PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME.equals(fullTableName) &&
+ !ScanUtil.hasCoprocessor(c.getEnvironment(),
+ GlobalIndexChecker.class.getName())) {
+ List<PTable>
+ indexes =
+ PTableType.INDEX.equals(table.getType()) ?
+ Lists.newArrayList(table) :
+ table.getIndexes();
Review Comment:
Shall we comment just above this if condition that this condition is only
for old indexing design?
> Fixing TTL and Max Lookback Issues for Phoenix Tables
> -----------------------------------------------------
>
> Key: PHOENIX-6888
> URL: https://issues.apache.org/jira/browse/PHOENIX-6888
> Project: Phoenix
> Issue Type: Bug
> Affects Versions: 5.1.3
> Reporter: Kadir Ozdemir
> Assignee: Kadir Ozdemir
> Priority: Major
>
> In HBase, the unit of data is a cell and data retention rules are executed at
> the cell level. These rules are defined at the column family level. Phoenix
> leverages the data retention features of HBase and exposes them to its users
> to provide its TTL feature at the table level. However, these rules (since
> they are defined at the cell level instead of the row level) result in
> partial row retention that in turn creates data integrity issues at the
> Phoenix level.
> Similarly, Phoenix’s max lookback feature leverages HBase deleted data
> retention capabilities to preserve deleted cells within a configurable max
> lookback. This requires two data retention windows, max lookback and TTL. One
> end of these windows is the current time and the end is a moment in the past
> (i.e., current time minus the window size). Typically, the max lookback
> window is shorter than the TTL window. In the max lookback window, we would
> like to preserve the complete history of mutations regardless of how many
> cell versions these mutations generated. In the remaining TTL window outside
> the max lookback, we would like to apply the data retention rules defined
> above. However, HBase provides only one data retention window. Thus, the max
> lookback window had to be extended to become the TTL window and the max lookback
> feature results in unwantedly retaining deleted data for the maximum of max
> lookback and TTL periods.
> This Jira is to fix both of these issues.
--
This message was sent by Atlassian Jira
(v8.20.10#820010)