EdColeman commented on code in PR #3137:
URL: https://github.com/apache/accumulo/pull/3137#discussion_r1057798068


##########
server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader10to11.java:
##########
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.manager.upgrade;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.accumulo.core.Constants.ZNAMESPACES;
+import static org.apache.accumulo.core.Constants.ZTABLES;
+import static org.apache.accumulo.core.Constants.ZTABLE_STATE;
+import static 
org.apache.accumulo.core.metadata.schema.MetadataSchema.RESERVED_PREFIX;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.BatchDeleter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.InstanceId;
+import org.apache.accumulo.core.data.NamespaceId;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
+import org.apache.accumulo.core.manager.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.volume.Volume;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.conf.store.NamespacePropKey;
+import org.apache.accumulo.server.conf.store.PropStore;
+import org.apache.accumulo.server.conf.store.PropStoreKey;
+import org.apache.accumulo.server.conf.store.SystemPropKey;
+import org.apache.accumulo.server.conf.store.TablePropKey;
+import org.apache.hadoop.fs.Path;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class Upgrader10to11 implements Upgrader {
+
+  private static final Logger log = 
LoggerFactory.getLogger(Upgrader10to11.class);
+
+  // Included for upgrade code usage; any other usage post 3.0 should be avoided.
+  private static final TableId REPLICATION_ID = TableId.of("+rep");
+
+  public Upgrader10to11() {
+    super();
+  }
+
+  @Override
+  public void upgradeZookeeper(final ServerContext context) {
+    log.info("upgrade of ZooKeeper entries");
+
+    var zrw = context.getZooReaderWriter();
+    var iid = context.getInstanceID();
+
+    // if the replication base path (../tables/+rep) does not exist, assume it was removed or never existed.
+    if (!checkReplicationTableInZk(iid, zrw)) {
+      log.debug("replication table root node does not exist in ZooKeeper - 
nothing to do");
+      return;
+    }
+
+    // if the replication table is online - stop. There could be data in 
transit.
+    if (!checkReplicationOffline(iid, zrw)) {
+      throw new IllegalStateException(
+          "Replication table is not offline. Cannot continue with upgrade that 
will remove replication with replication active");
+    }
+
+    deleteReplicationConfigs(zrw, iid, context.getPropStore());
+
+    deleteReplicationTableZkEntries(zrw, iid);
+
+  }
+
+  @Override
+  public void upgradeRoot(final ServerContext context) {
+    log.info("upgrade root - skipping, nothing to do");
+  }
+
+  @Override
+  public void upgradeMetadata(final ServerContext context) {
+    log.info("upgrade metadata entries");
+    deleteReplMetadataEntries(context);
+    deleteReplHdfsFiles(context);
+  }
+
+  /**
+   * remove +rep entries from metadata.
+   */
+  private void deleteReplMetadataEntries(final ServerContext context) {
+    try (BatchDeleter deleter =
+        context.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 
10)) {
+
+      Range repTableRange =
+          new Range(REPLICATION_ID.canonical() + ";", true, 
REPLICATION_ID.canonical() + "<", true);
+      // copied from MetadataSchema 2.1 (removed in 3.0)
+      Range repWalRange =
+          new Range(RESERVED_PREFIX + "repl", true, RESERVED_PREFIX + "repm", 
false);
+
+      deleter.setRanges(List.of(repTableRange, repWalRange));
+      deleter.delete();
+    } catch (TableNotFoundException | MutationsRejectedException ex) {
+      throw new IllegalStateException("failed to remove replication info from 
metadata table", ex);
+    }
+  }
+
+  @VisibleForTesting
+  void deleteReplHdfsFiles(final ServerContext context) {
+    try {
+      for (Volume volume : context.getVolumeManager().getVolumes()) {
+        String dirUri = volume.getBasePath() + Constants.HDFS_TABLES_DIR + 
Path.SEPARATOR
+            + REPLICATION_ID.canonical();

Review Comment:
   Will need to look at this - during the upgrade, and depending on the phase, 
there is limited information available: what is in ZooKeeper, what is in the root 
table, and what is in the metadata table - other tables have not been brought online. 
   
   One way to handle this could be through instructions.
   
   The replication table must be offline for the upgrade to proceed.  So, if 
someone is using replication, the instructions could include a warning that any 
tables that may have been cloned from, or shared data with, the replication table 
should be compacted prior to upgrading so they do not share any files.
   
   Alternatively, the files could be left in place, leaving it up to the user 
to delete the HDFS directory if they want.  
   
   Removing the zookeeper and metadata entries during the upgrade process seems 
to be required because of the replication table's "special" status as a system 
table, but it also closes off some other options.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to