ctubbsii commented on code in PR #3137:
URL: https://github.com/apache/accumulo/pull/3137#discussion_r1056729173


##########
server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java:
##########
@@ -152,8 +155,13 @@ public synchronized void upgradeZookeeper(ServerContext 
context,
         abortIfFateTransactions(context);
 
         for (int v = currentVersion; v < AccumuloDataVersion.get(); v++) {
-          log.info("Upgrading Zookeeper from data version {}", v);
-          upgraders.get(v).upgradeZookeeper(context);
+          log.info("Upgrading Zookeeper from data version {} target version 
{}", v,
+              AccumuloDataVersion.get());
+          var upgrader = upgraders.get(v);
+          log.info("UPGRADER: version:{} upgrader: {}", v, upgrader);

Review Comment:
   Is this expecting the upgrader to have a meaningful toString? If we want it 
to be logged here in a consistent way, we should add a dedicated method for getting 
the description for logging. Or, we can create an upgrader base class that logs 
itself when run.



##########
server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java:
##########
@@ -36,6 +36,11 @@
  */
 public class AccumuloDataVersion {
 
+  /**
+   * version (11) reflects removal of replication starting with 3.0
+   */
+  public static final int REMOVE_REPLICATION = 11;

Review Comment:
   I suspect that this will be just one of several things that this upgrade 
will perform. It might be better to rename this to something more broadly 
applicable, such as `REMOVE_DEPRECATIONS_FOR_VERSION_3`. Can always enumerate 
the details in the javadoc, if desired.



##########
server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader10to11.java:
##########
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.manager.upgrade;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.accumulo.core.Constants.ZNAMESPACES;
+import static org.apache.accumulo.core.Constants.ZTABLES;
+import static org.apache.accumulo.core.Constants.ZTABLE_STATE;
+import static 
org.apache.accumulo.core.metadata.schema.MetadataSchema.RESERVED_PREFIX;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.BatchDeleter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.InstanceId;
+import org.apache.accumulo.core.data.NamespaceId;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
+import org.apache.accumulo.core.manager.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.volume.Volume;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.conf.store.NamespacePropKey;
+import org.apache.accumulo.server.conf.store.PropStore;
+import org.apache.accumulo.server.conf.store.PropStoreKey;
+import org.apache.accumulo.server.conf.store.SystemPropKey;
+import org.apache.accumulo.server.conf.store.TablePropKey;
+import org.apache.hadoop.fs.Path;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class Upgrader10to11 implements Upgrader {
+
+  private static final Logger log = 
LoggerFactory.getLogger(Upgrader10to11.class);
+
+  // Included for upgrade code usage any other usage post 3.0 should not be 
used.
+  private static final TableId REPLICATION_ID = TableId.of("+rep");
+
+  public Upgrader10to11() {
+    super();
+  }
+
+  @Override
+  public void upgradeZookeeper(final ServerContext context) {
+    log.info("upgrade of ZooKeeper entries");
+
+    var zrw = context.getZooReaderWriter();
+    var iid = context.getInstanceID();
+
+    // if the replication base path (../tables/+rep) assume removed or never 
existed.
+    if (!checkReplicationTableInZk(iid, zrw)) {
+      log.debug("replication table root node does not exist in ZooKeeper - 
nothing to do");
+      return;
+    }
+
+    // if the replication table is online - stop. There could be data in 
transit.
+    if (!checkReplicationOffline(iid, zrw)) {
+      throw new IllegalStateException(
+          "Replication table is not offline. Cannot continue with upgrade that 
will remove replication with replication active");
+    }
+
+    deleteReplicationConfigs(zrw, iid, context.getPropStore());
+
+    deleteReplicationTableZkEntries(zrw, iid);
+
+  }
+
+  @Override
+  public void upgradeRoot(final ServerContext context) {
+    log.info("upgrade root - skipping, nothing to do");
+  }
+
+  @Override
+  public void upgradeMetadata(final ServerContext context) {
+    log.info("upgrade metadata entries");

Review Comment:
   This is logging at the info level, which is very visible. It should have a 
slightly more verbose description if it's going to appear in user logs or on 
the console, like upgrading metadata entries to do what? or as part of what? As 
is, these feel like debugging log statements, with minimal information, just 
used in the course of development and left in unintentionally. I think they 
should be either meaningful for the user or removed, or reduced to the trace or 
debug level.



##########
server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java:
##########
@@ -179,15 +187,21 @@ public synchronized Future<Void> 
upgradeMetadata(ServerContext context,
           .submit(() -> {
             try {
               for (int v = currentVersion; v < AccumuloDataVersion.get(); v++) 
{
-                log.info("Upgrading Root from data version {}", v);
-                upgraders.get(v).upgradeRoot(context);
+                log.info("Upgrading Root from data version {} target version 
{}", v,
+                    AccumuloDataVersion.get());
+                if (upgraders.get(v) != null) {
+                  upgraders.get(v).upgradeRoot(context);
+                }
               }
 
               setStatus(UpgradeStatus.UPGRADED_ROOT, eventCoordinator);
 
               for (int v = currentVersion; v < AccumuloDataVersion.get(); v++) 
{
-                log.info("Upgrading Metadata from data version {}", v);
-                upgraders.get(v).upgradeMetadata(context);
+                log.info("Upgrading Metadata from data version {} target 
version {}", v,
+                    AccumuloDataVersion.get());
+                if (upgraders.get(v) != null) {
+                  upgraders.get(v).upgradeMetadata(context);
+                }

Review Comment:
   Why the null checks here? Under what circumstances would we find an upgrader 
to be null? If that happens, we don't want to completely ignore it, but we 
should fail hard. That's not expected.



##########
server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader10to11.java:
##########
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.manager.upgrade;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.accumulo.core.Constants.ZNAMESPACES;
+import static org.apache.accumulo.core.Constants.ZTABLES;
+import static org.apache.accumulo.core.Constants.ZTABLE_STATE;
+import static 
org.apache.accumulo.core.metadata.schema.MetadataSchema.RESERVED_PREFIX;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.BatchDeleter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.InstanceId;
+import org.apache.accumulo.core.data.NamespaceId;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
+import org.apache.accumulo.core.manager.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.volume.Volume;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.conf.store.NamespacePropKey;
+import org.apache.accumulo.server.conf.store.PropStore;
+import org.apache.accumulo.server.conf.store.PropStoreKey;
+import org.apache.accumulo.server.conf.store.SystemPropKey;
+import org.apache.accumulo.server.conf.store.TablePropKey;
+import org.apache.hadoop.fs.Path;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class Upgrader10to11 implements Upgrader {
+
+  private static final Logger log = 
LoggerFactory.getLogger(Upgrader10to11.class);
+
+  // Included for upgrade code usage any other usage post 3.0 should not be 
used.
+  private static final TableId REPLICATION_ID = TableId.of("+rep");
+
+  public Upgrader10to11() {
+    super();
+  }
+
+  @Override
+  public void upgradeZookeeper(final ServerContext context) {
+    log.info("upgrade of ZooKeeper entries");
+
+    var zrw = context.getZooReaderWriter();
+    var iid = context.getInstanceID();
+
+    // if the replication base path (../tables/+rep) assume removed or never 
existed.
+    if (!checkReplicationTableInZk(iid, zrw)) {
+      log.debug("replication table root node does not exist in ZooKeeper - 
nothing to do");
+      return;
+    }
+
+    // if the replication table is online - stop. There could be data in 
transit.
+    if (!checkReplicationOffline(iid, zrw)) {
+      throw new IllegalStateException(
+          "Replication table is not offline. Cannot continue with upgrade that 
will remove replication with replication active");
+    }
+
+    deleteReplicationConfigs(zrw, iid, context.getPropStore());
+
+    deleteReplicationTableZkEntries(zrw, iid);
+
+  }
+
+  @Override
+  public void upgradeRoot(final ServerContext context) {
+    log.info("upgrade root - skipping, nothing to do");
+  }
+
+  @Override
+  public void upgradeMetadata(final ServerContext context) {
+    log.info("upgrade metadata entries");
+    deleteReplMetadataEntries(context);
+    deleteReplHdfsFiles(context);
+  }
+
+  /**
+   * remove +rep entries from metadata.
+   */
+  private void deleteReplMetadataEntries(final ServerContext context) {
+    try (BatchDeleter deleter =
+        context.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 
10)) {
+
+      Range repTableRange =
+          new Range(REPLICATION_ID.canonical() + ";", true, 
REPLICATION_ID.canonical() + "<", true);
+      // copied from MetadataSchema 2.1 (removed in 3.0)
+      Range repWalRange =
+          new Range(RESERVED_PREFIX + "repl", true, RESERVED_PREFIX + "repm", 
false);
+
+      deleter.setRanges(List.of(repTableRange, repWalRange));
+      deleter.delete();
+    } catch (TableNotFoundException | MutationsRejectedException ex) {
+      throw new IllegalStateException("failed to remove replication info from 
metadata table", ex);
+    }
+  }
+
+  @VisibleForTesting
+  void deleteReplHdfsFiles(final ServerContext context) {
+    try {
+      for (Volume volume : context.getVolumeManager().getVolumes()) {
+        String dirUri = volume.getBasePath() + Constants.HDFS_TABLES_DIR + 
Path.SEPARATOR
+            + REPLICATION_ID.canonical();

Review Comment:
   This is making assumptions that there are no other files in these 
directories other than files used by the replication table and not used by any 
other table. That doesn't seem like a safe assumption. For example, if the 
replication table had been cloned, some of its files might now be referenced by 
the clone.
   
   Instead of deleting these directories, we should instead read the file 
entries in the replication table's tablets and create corresponding `~del` 
entries for the `accumulo-gc` to delete later when it determines they no longer 
need to be preserved. This also allows the actual deletion to be deferred by 
the user by avoiding running the `accumulo-gc` during upgrade (which might be 
one way they could upgrade more cautiously) or by utilizing the HDFS trash that 
the `accumulo-gc` is configured to use.



##########
server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java:
##########
@@ -152,8 +155,13 @@ public synchronized void upgradeZookeeper(ServerContext 
context,
         abortIfFateTransactions(context);
 
         for (int v = currentVersion; v < AccumuloDataVersion.get(); v++) {
-          log.info("Upgrading Zookeeper from data version {}", v);
-          upgraders.get(v).upgradeZookeeper(context);
+          log.info("Upgrading Zookeeper from data version {} target version 
{}", v,
+              AccumuloDataVersion.get());

Review Comment:
   I think this log message change could be misleading. These are incremental 
upgrades, but the target version is just the final version. For example, the 
message may imply that it is upgrading from 5 to 9, but is really doing 5 to 6, 
then 6 to 7, etc. until it gets to 8 to 9.



##########
server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader10to11.java:
##########
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.manager.upgrade;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.accumulo.core.Constants.ZNAMESPACES;
+import static org.apache.accumulo.core.Constants.ZTABLES;
+import static org.apache.accumulo.core.Constants.ZTABLE_STATE;
+import static 
org.apache.accumulo.core.metadata.schema.MetadataSchema.RESERVED_PREFIX;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.BatchDeleter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.InstanceId;
+import org.apache.accumulo.core.data.NamespaceId;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
+import org.apache.accumulo.core.manager.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.volume.Volume;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.conf.store.NamespacePropKey;
+import org.apache.accumulo.server.conf.store.PropStore;
+import org.apache.accumulo.server.conf.store.PropStoreKey;
+import org.apache.accumulo.server.conf.store.SystemPropKey;
+import org.apache.accumulo.server.conf.store.TablePropKey;
+import org.apache.hadoop.fs.Path;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class Upgrader10to11 implements Upgrader {
+
+  private static final Logger log = 
LoggerFactory.getLogger(Upgrader10to11.class);
+
+  // Included for upgrade code usage any other usage post 3.0 should not be 
used.
+  private static final TableId REPLICATION_ID = TableId.of("+rep");
+
+  public Upgrader10to11() {
+    super();
+  }
+
+  @Override
+  public void upgradeZookeeper(final ServerContext context) {
+    log.info("upgrade of ZooKeeper entries");
+
+    var zrw = context.getZooReaderWriter();
+    var iid = context.getInstanceID();
+
+    // if the replication base path (../tables/+rep) assume removed or never 
existed.
+    if (!checkReplicationTableInZk(iid, zrw)) {
+      log.debug("replication table root node does not exist in ZooKeeper - 
nothing to do");
+      return;
+    }
+
+    // if the replication table is online - stop. There could be data in 
transit.
+    if (!checkReplicationOffline(iid, zrw)) {
+      throw new IllegalStateException(
+          "Replication table is not offline. Cannot continue with upgrade that 
will remove replication with replication active");
+    }
+
+    deleteReplicationConfigs(zrw, iid, context.getPropStore());
+
+    deleteReplicationTableZkEntries(zrw, iid);
+
+  }
+
+  @Override
+  public void upgradeRoot(final ServerContext context) {
+    log.info("upgrade root - skipping, nothing to do");
+  }
+
+  @Override
+  public void upgradeMetadata(final ServerContext context) {
+    log.info("upgrade metadata entries");
+    deleteReplMetadataEntries(context);
+    deleteReplHdfsFiles(context);
+  }
+
+  /**
+   * remove +rep entries from metadata.
+   */
+  private void deleteReplMetadataEntries(final ServerContext context) {
+    try (BatchDeleter deleter =
+        context.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 
10)) {
+
+      Range repTableRange =
+          new Range(REPLICATION_ID.canonical() + ";", true, 
REPLICATION_ID.canonical() + "<", true);
+      // copied from MetadataSchema 2.1 (removed in 3.0)
+      Range repWalRange =
+          new Range(RESERVED_PREFIX + "repl", true, RESERVED_PREFIX + "repm", 
false);
+
+      deleter.setRanges(List.of(repTableRange, repWalRange));
+      deleter.delete();
+    } catch (TableNotFoundException | MutationsRejectedException ex) {
+      throw new IllegalStateException("failed to remove replication info from 
metadata table", ex);
+    }
+  }
+
+  @VisibleForTesting
+  void deleteReplHdfsFiles(final ServerContext context) {
+    try {
+      for (Volume volume : context.getVolumeManager().getVolumes()) {
+        String dirUri = volume.getBasePath() + Constants.HDFS_TABLES_DIR + 
Path.SEPARATOR
+            + REPLICATION_ID.canonical();
+        Path replPath = new Path(dirUri);
+        if (volume.getFileSystem().exists(replPath)) {
+          try {
+            log.debug("Removing replication dir and files in hdfs {}", 
replPath);
+            volume.getFileSystem().delete(replPath, true);
+          } catch (IOException ex) {
+            log.error("Unable to remove replication dir and files from " + 
replPath + ": " + ex);
+          }
+        }
+      }
+    } catch (IOException ex) {
+      log.error("Unable to remove replication dir and files: " + ex);
+    }
+  }
+
+  private boolean checkReplicationTableInZk(final InstanceId iid, final 
ZooReaderWriter zrw) {
+    try {
+      String path = buildRepTablePath(iid);
+      return zrw.exists(path);
+    } catch (KeeperException ex) {
+      throw new IllegalStateException("ZooKeeper error - cannot determine 
replication table status",
+          ex);
+    } catch (InterruptedException ex) {
+      Thread.currentThread().interrupt();
+      throw new IllegalStateException("interrupted reading replication state 
from ZooKeeper", ex);
+    }
+  }
+
+  /**
+   * To protect against removing replication information if replication is 
being used and possible
+   * active, check the replication table state in Zookeeper to see if it is 
ONLINE (active) or
+   * OFFLINE (inactive). If the state node does not exist, then the status is 
considered as OFFLINE.
+   *
+   * @return true if the replication table state is OFFLINE, false otherwise
+   */
+  private boolean checkReplicationOffline(final InstanceId iid, final 
ZooReaderWriter zrw) {
+    try {
+      String path = buildRepTablePath(iid) + ZTABLE_STATE;
+      byte[] bytes = zrw.getData(path);
+      if (bytes != null && bytes.length > 0) {
+        String status = new String(bytes, UTF_8);
+        return TableState.OFFLINE.name().equals(status);
+      }
+      return false;
+    } catch (KeeperException ex) {
+      throw new IllegalStateException("ZooKeeper error - cannot determine 
replication table status",
+          ex);
+    } catch (InterruptedException ex) {
+      Thread.currentThread().interrupt();
+      throw new IllegalStateException("interrupted reading replication state 
from ZooKeeper", ex);
+    }
+  }
+
+  /**
+   * Utility method to build the ZooKeeper replication table path. The path 
resolves to
+   * {@code /accumulo/INSTANCE_ID/tables/+rep}
+   */
+  static String buildRepTablePath(final InstanceId iid) {
+    return ZooUtil.getRoot(iid) + ZTABLES + "/" + REPLICATION_ID.canonical();
+  }
+
+  private void deleteReplicationTableZkEntries(ZooReaderWriter zrw, InstanceId 
iid) {
+    String repTablePath = buildRepTablePath(iid);
+    try {
+      zrw.recursiveDelete(repTablePath, ZooUtil.NodeMissingPolicy.SKIP);
+    } catch (KeeperException ex) {
+      throw new IllegalStateException(
+          "ZooKeeper error - failed recursive deletion on " + repTablePath, 
ex);
+    } catch (InterruptedException ex) {
+      Thread.currentThread().interrupt();
+      throw new IllegalStateException("interrupted deleting " + repTablePath + 
" from ZooKeeper",
+          ex);
+    }
+  }
+
+  private void deleteReplicationConfigs(ZooReaderWriter zrw, InstanceId iid, 
PropStore propStore) {
+    List<PropStoreKey<?>> ids = getPropKeysFromZkIds(zrw, iid);

Review Comment:
   Is this just removing them from the cache? It seems that these will be 
deleted from ZK later when we delete everything from ZK for this table. So, I'm 
not sure if this is strictly necessary to do separately. And, deleting the 
configs from ZK may cause the per-table configuration to enter a strange state, 
where its configuration now is the default per-table configuration or that set 
in the site configuration file or in the accumulo namespace config in ZK... 
which may not be what we want, because it could cause all sorts of things to 
change for the table that's supposed to be offline.



##########
server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader10to11.java:
##########
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.manager.upgrade;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.accumulo.core.Constants.ZNAMESPACES;
+import static org.apache.accumulo.core.Constants.ZTABLES;
+import static org.apache.accumulo.core.Constants.ZTABLE_STATE;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.client.BatchDeleter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.InstanceId;
+import org.apache.accumulo.core.data.NamespaceId;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
+import org.apache.accumulo.core.manager.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.conf.store.NamespacePropKey;
+import org.apache.accumulo.server.conf.store.PropStore;
+import org.apache.accumulo.server.conf.store.PropStoreKey;
+import org.apache.accumulo.server.conf.store.SystemPropKey;
+import org.apache.accumulo.server.conf.store.TablePropKey;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Upgrader10to11 implements Upgrader {
+
+  private static final Logger log = 
LoggerFactory.getLogger(Upgrader10to11.class);
+
+  // Included for upgrade code usage any other usage post 3.0 should not be 
used.
+  private static final TableId REPLICATION_ID = TableId.of("+rep");
+
+  public Upgrader10to11() {
+    super();
+  }
+
+  @Override
+  public void upgradeZookeeper(final ServerContext context) {
+    log.info("upgrade of ZooKeeper entries");
+
+    var zrw = context.getZooReaderWriter();
+    var iid = context.getInstanceID();
+
+    // if the replication base path (../tables/+rep) assume removed or never 
existed.
+    if (!checkReplicationTableInZk(iid, zrw)) {
+      log.debug("replication table root node does not exist in ZooKeeper - 
nothing to do");
+      return;
+    }
+
+    // if the replication table is online - stop. There could be data in 
transit.
+    if (!checkReplicationOffline(iid, zrw)) {
+      throw new IllegalStateException(
+          "Replication table is not offline. Cannot continue with upgrade that 
will remove replication with replication active");
+    }
+
+    deleteReplicationConfigs(zrw, iid, context.getPropStore());
+
+    deleteReplicationTableZkEntries(zrw, iid);
+
+  }
+
+  @Override
+  public void upgradeRoot(final ServerContext context) {
+    log.info("upgrade root - skipping, nothing to do");
+  }
+
+  @Override
+  public void upgradeMetadata(final ServerContext context) {
+    log.info("upgrade metadata entries");
+    deleteReplMetadataEntries(context);
+  }
+
+  /**
+   * remove +rep entries from metadata.
+   */
+  private void deleteReplMetadataEntries(final ServerContext context) {
+    try (BatchDeleter deleter =
+        context.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 
10)) {
+      deleter.setRanges(Collections.singletonList(
+          new Range(REPLICATION_ID.canonical() + ";", 
REPLICATION_ID.canonical() + "<")));
+      deleter.delete();

Review Comment:
   The repTableRange would be deleting the metadata entries that define the 
replication table. The repWalRange would be deleting the additionally tracked 
WALs that are preserved because they are in use, and intended to prevent the 
accumulo-gc from deleting the still-referenced WALs. I think that's all we're 
expecting.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to