stefan-egli commented on code in PR #995:
URL: https://github.com/apache/jackrabbit-oak/pull/995#discussion_r1238718942


##########
oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/WriteAfterRecoveryTest.java:
##########
@@ -0,0 +1,661 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.document;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.commons.PathUtils;
+import org.apache.jackrabbit.oak.plugins.document.FailingDocumentStore.FailedUpdateOpListener;
+import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.stats.Clock;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import static org.apache.jackrabbit.oak.plugins.document.ClusterNodeInfo.DEFAULT_LEASE_DURATION_MILLIS;
+import static org.apache.jackrabbit.oak.plugins.document.Collection.NODES;
+import static org.apache.jackrabbit.oak.plugins.document.NodeDocument.isDeletedEntry;
+import static org.apache.jackrabbit.oak.plugins.document.TestUtils.childBuilder;
+import static org.apache.jackrabbit.oak.plugins.document.TestUtils.disposeQuietly;
+import static org.apache.jackrabbit.oak.plugins.document.UpdateOp.Operation.Type.SET_MAP_ENTRY;
+import static org.apache.jackrabbit.oak.plugins.document.util.Utils.getIdFromPath;
+import static org.apache.jackrabbit.oak.plugins.memory.StringPropertyState.stringProperty;
+import static org.apache.jackrabbit.oak.spi.state.NodeStateUtils.getNode;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Tests writes made with a clusterId after its lease timed out and
+ * recovery was performed.
+ */
+public class WriteAfterRecoveryTest {
+
+    @Rule
+    public DocumentMKBuilderProvider builderProvider = new DocumentMKBuilderProvider();
+
+    private final FailingDocumentStore store = new FailingDocumentStore(new MemoryDocumentStore());
+
+    private final Clock clock = new Clock.Virtual();
+
+    private DocumentNodeStore ns1;
+
+    @Before
+    public void setUp() throws Exception {
+        clock.waitUntil(System.currentTimeMillis());
+        Revision.setClock(clock);
+        ClusterNodeInfo.setClock(clock);
+        ns1 = createNodeStore(1);
+    }
+
+    @After
+    public void cleanUp() {
+        ClusterNodeInfo.resetClockToDefault();
+        Revision.resetClockToDefault();
+    }
+
+    @Test
+    public void oneNodeAdded() throws Exception {
+        merge(ns1, createChild("/a"), true);
+
+        List<UpdateOp> failed = new ArrayList<>();
+        store.addListener(filter(failed, ADD_NODE_OPS));
+        store.fail().after(0).eternally();
+        try {
+            merge(ns1, createChild("/a/b"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            // expected
+        }
+        disposeQuietly(ns1);
+        store.fail().never();
+
+        // wait until lease expires
+        clockWait(DEFAULT_LEASE_DURATION_MILLIS);
+
+        DocumentNodeStore ns2 = createNodeStore(2);
+        assertTrue(ns2.getLastRevRecoveryAgent().isRecoveryNeeded());
+        assertEquals(0, ns2.getLastRevRecoveryAgent().recover(1));
+
+        // 'late write'
+        store.createOrUpdate(NODES, failed);
+        Set<Revision> lateWriteRevisions = getRevisions(failed);
+
+        // revive clusterId 1
+        ns1 = createNodeStore(1);
+        merge(ns1, createChild("/d"), true);
+
+        ns2.runBackgroundOperations();
+        assertTrue(getNode(ns2.getRoot(), "/d").exists());
+        assertFalse(getNode(ns2.getRoot(), "/a/b").exists());
+        try {
+            merge(ns2, createChild("/a/b"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            assertThat(e.getMessage(), containsString("older than base"));
+        }
+
+        Set<Revision> inconsistent = checkConsistency(ns2, "/a/b");
+        assertEquals(lateWriteRevisions, inconsistent);
+    }
+
+    @Test
+    public void multipleNodesAdded() throws Exception {
+        merge(ns1, createChild("/a"), true);
+
+        List<UpdateOp> failed = new ArrayList<>();
+        store.addListener(filter(failed, ADD_NODE_OPS));
+        store.fail().after(0).eternally();
+        try {
+            merge(ns1, createChild("/a/b/c"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            // expected
+        }
+        disposeQuietly(ns1);
+        store.fail().never();
+
+        // wait until lease expires
+        clockWait(DEFAULT_LEASE_DURATION_MILLIS);
+
+        DocumentNodeStore ns2 = createNodeStore(2);
+        assertTrue(ns2.getLastRevRecoveryAgent().isRecoveryNeeded());
+        assertEquals(0, ns2.getLastRevRecoveryAgent().recover(1));
+
+        // 'late write'
+        store.createOrUpdate(NODES, failed);
+        Set<Revision> lateWriteRevisions = getRevisions(failed);
+
+        // revive clusterId 1
+        ns1 = createNodeStore(1);
+        merge(ns1, createChild("/d"), true);
+
+        ns2.runBackgroundOperations();
+        assertTrue(getNode(ns2.getRoot(), "/d").exists());
+        assertFalse(getNode(ns2.getRoot(), "/a/b/c").exists());
+        try {
+            merge(ns2, createChild("/a/b/c"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            assertThat(e.getMessage(), containsString("older than base"));
+        }
+
+        Set<Revision> inconsistent = checkConsistency(ns2, "/a/b", "/a/b/c");
+        assertEquals(lateWriteRevisions, inconsistent);
+    }
+
+    @Test
+    public void orphanedNodesAdded() throws Exception {
+        merge(ns1, createChild("/a"), true);
+
+        List<UpdateOp> failed = new ArrayList<>();
+        store.addListener(filter(failed, ADD_NODE_OPS));
+        // let merge create some nodes
+        store.fail().after(2).eternally();
+        try {
+            merge(ns1, createChild("/a/b/c/d/e/f"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            // expected
+        }
+        disposeQuietly(ns1);
+        store.fail().never();
+
+        // wait until lease expires
+        clockWait(DEFAULT_LEASE_DURATION_MILLIS);
+
+        DocumentNodeStore ns2 = createNodeStore(2);
+        assertTrue(ns2.getLastRevRecoveryAgent().isRecoveryNeeded());
+        assertEquals(1, ns2.getLastRevRecoveryAgent().recover(1));
+
+        // 'late write'
+        store.createOrUpdate(NODES, failed);
+        Set<Revision> lateWriteRevisions = getRevisions(failed);
+
+        // revive clusterId 1
+        ns1 = createNodeStore(1);
+        merge(ns1, createChild("/d"), true);
+
+        ns2.runBackgroundOperations();
+        assertTrue(getNode(ns2.getRoot(), "/d").exists());
+        assertFalse(getNode(ns2.getRoot(), "/a/b").exists());
+        // can create nodes that recovery removed ...
+        merge(ns2, createChild("/a/b/c"), false);
+        // ... but not those written after recovery
+        try {
+            merge(ns2, createChild("/a/b/c/d/e/f"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            assertThat(e.getMessage(), containsString("older than base"));
+        }
+
+        Set<Revision> inconsistent = checkConsistency(ns2, "/a/b", "/a/b/c", 
"/a/b/c/d", "/a/b/c/d/e", "/a/b/c/d/e/f");
+        assertEquals(lateWriteRevisions, inconsistent);
+    }
+
+    @Test
+    public void oneNodeRemoved() throws Exception {
+        merge(ns1, createChild("/a/b"), true);
+
+        List<UpdateOp> failed = new ArrayList<>();
+        store.addListener(filter(failed, REMOVE_NODE_OPS));
+        store.fail().after(0).eternally();
+        try {
+            merge(ns1, removeChild("/a/b"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            // expected
+        }
+        disposeQuietly(ns1);
+        store.fail().never();
+
+        // wait until lease expires
+        clockWait(DEFAULT_LEASE_DURATION_MILLIS);
+
+        DocumentNodeStore ns2 = createNodeStore(2);
+        assertTrue(ns2.getLastRevRecoveryAgent().isRecoveryNeeded());
+        assertEquals(0, ns2.getLastRevRecoveryAgent().recover(1));
+
+        // 'late write'
+        store.createOrUpdate(NODES, failed);
+        Set<Revision> lateWriteRevisions = getRevisions(failed);
+
+        // revive clusterId 1
+        ns1 = createNodeStore(1);
+        merge(ns1, createChild("/d"), true);
+
+        ns2.runBackgroundOperations();
+        assertTrue(getNode(ns2.getRoot(), "/d").exists());
+        assertTrue(getNode(ns2.getRoot(), "/a/b").exists());
+        try {
+            merge(ns2, removeChild("/a/b"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            assertThat(e.getMessage(), containsString("already deleted"));
+        }
+
+        Set<Revision> inconsistent = checkConsistency(ns2, "/a/b");
+        assertEquals(lateWriteRevisions, inconsistent);
+    }
+
+    @Test
+    public void multipleNodesRemoved() throws Exception {
+        merge(ns1, createChild("/a/b/c"), true);
+
+        List<UpdateOp> failed = new ArrayList<>();
+        store.addListener(filter(failed, REMOVE_NODE_OPS));
+        store.fail().after(0).eternally();
+        try {
+            merge(ns1, removeChild("/a/b"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            // expected
+        }
+        disposeQuietly(ns1);
+        store.fail().never();
+
+        // wait until lease expires
+        clockWait(DEFAULT_LEASE_DURATION_MILLIS);
+
+        DocumentNodeStore ns2 = createNodeStore(2);
+        assertTrue(ns2.getLastRevRecoveryAgent().isRecoveryNeeded());
+        assertEquals(0, ns2.getLastRevRecoveryAgent().recover(1));
+
+        // 'late write'
+        store.createOrUpdate(NODES, failed);
+        Set<Revision> lateWriteRevisions = getRevisions(failed);
+
+        // revive clusterId 1
+        ns1 = createNodeStore(1);
+        merge(ns1, createChild("/d"), true);
+
+        ns2.runBackgroundOperations();
+        assertTrue(getNode(ns2.getRoot(), "/d").exists());
+        assertTrue(getNode(ns2.getRoot(), "/a/b/c").exists());
+        try {
+            merge(ns2, removeChild("/a/b"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            assertThat(e.getMessage(), containsString("already deleted"));
+        }
+
+        Set<Revision> inconsistent = checkConsistency(ns2, "/a/b", "/a/b/c");
+        assertEquals(lateWriteRevisions, inconsistent);
+    }
+
+    @Test
+    public void orphanedNodesRemoved() throws Exception {
+        merge(ns1, createChild("/a/b/c/d/e/f"), true);
+
+        List<UpdateOp> failed = new ArrayList<>();
+        store.addListener(filter(failed, REMOVE_NODE_OPS));
+        // let merge remove some nodes and then fail
+        store.fail().after(2).eternally();
+        try {
+            merge(ns1, removeChild("/a/b"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            // expected
+        }
+        disposeQuietly(ns1);
+        store.fail().never();
+
+        // wait until lease expires
+        clockWait(DEFAULT_LEASE_DURATION_MILLIS);
+
+        DocumentNodeStore ns2 = createNodeStore(2);
+        assertTrue(ns2.getLastRevRecoveryAgent().isRecoveryNeeded());
+        assertEquals(1, ns2.getLastRevRecoveryAgent().recover(1));
+
+        // 'late write'
+        store.createOrUpdate(NODES, failed);
+        Set<Revision> lateWriteRevisions = getRevisions(failed);
+
+        // revive clusterId 1
+        ns1 = createNodeStore(1);
+        merge(ns1, createChild("/d"), true);
+
+        ns2.runBackgroundOperations();
+        assertTrue(getNode(ns2.getRoot(), "/d").exists());
+        assertTrue(getNode(ns2.getRoot(), "/a/b/c/d/e/f").exists());
+        // add a node
+        try {
+            merge(ns2, createChild("/a/b/c/d/e/f/g"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            assertThat(e.getMessage(), containsString("already deleted"));
+        }
+        // can set a property on a node where recovery reverted the delete ...
+        merge(ns2, setProperty("/a/b", "p", "v"), false);
+        // ... but cannot set a property on a node deleted after recovery
+        try {
+            merge(ns2, setProperty("/a/b/c/d/e/f", "p", "v"), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            assertThat(e.getMessage(), containsString("already deleted"));
+        }
+
+        Set<Revision> inconsistent = checkConsistency(ns2, "/a/b", "/a/b/c", 
"/a/b/c/d", "/a/b/c/d/e", "/a/b/c/d/e/f", "/a/b/c/d/e/f/g");
+        assertEquals(lateWriteRevisions, inconsistent);
+    }
+
+    @Test
+    public void propertyChanged() throws Exception {
+        merge(ns1, compose(
+                createChild("/a/b"),
+                setProperty("/a/b", "p", "u")
+        ), true);
+
+        List<UpdateOp> failed = new ArrayList<>();
+        store.addListener(filter(failed, setPropertyOps("p")));
+        store.fail().after(0).eternally();
+        try {
+            merge(ns1, compose(
+                    setProperty("/a/b", "p", "v"),
+                    createChild("/a/b/c")
+            ), false);
+            fail("merge must fail");
+        } catch (CommitFailedException e) {
+            // expected
+        }
+        disposeQuietly(ns1);
+        store.fail().never();
+
+        // wait until lease expires
+        clockWait(DEFAULT_LEASE_DURATION_MILLIS);
+
+        DocumentNodeStore ns2 = createNodeStore(2);
+        assertTrue(ns2.getLastRevRecoveryAgent().isRecoveryNeeded());
+        assertEquals(0, ns2.getLastRevRecoveryAgent().recover(1));
+
+        // 'late write'
+        store.createOrUpdate(NODES, failed);
+        Set<Revision> lateWriteRevisions = getRevisions(failed);
+
+        // revive clusterId 1
+        ns1 = createNodeStore(1);
+        merge(ns1, createChild("/d"), true);
+        assertEquals(stringProperty("p", "u"), getNode(ns1.getRoot(), 
"/a/b").getProperty("p"));
+        NodeDocument b = store.find(NODES, getIdFromPath("/a/b"));
+        assertNotNull(b);
+        assertNotNull(b.getNodeAtRevision(ns1, ns1.getHeadRevision(), null).getProperty("p"));
+
+        ns2.runBackgroundOperations();
+        assertTrue(getNode(ns2.getRoot(), "/d").exists());
+        assertEquals(stringProperty("p", "u"), getNode(ns2.getRoot(), 
"/a/b").getProperty("p"));
+
+        // current implementation does not fail with conflict when property
+        // is overwritten, but consistency check can still detect it
+        Set<Revision> inconsistent = checkConsistency(ns2, "/a/b", "/a/b/c");
+        assertEquals(lateWriteRevisions, inconsistent);
+    }

Review Comment:
   And now I remember another test idea: when making a change to a *different* property on the same node, the original property state also changes - that might be a useful test to add.
   This could be extended further with a test where a node was removed via a late write, at which point it would purely be an inconsistency. However, a later modification of a property on that removed node would actually remove the node again immediately. A rough sketch of the first idea follows below.


