This is an automated email from the ASF dual-hosted git repository.

stefanegli pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/jackrabbit-oak.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 1c1825b4bb OAK-11184 : introduce prevNoProp cache to avoid expensive 
prev docs scan (#1779)
1c1825b4bb is described below

commit 1c1825b4bb6d438c538f9f476516f2a0c7728e64
Author: stefan-egli <stefane...@apache.org>
AuthorDate: Tue Nov 5 21:20:17 2024 +0100

    OAK-11184 : introduce prevNoProp cache to avoid expensive prev docs scan 
(#1779)
---
 .../oak/run/cli/DocumentFixtureProvider.java       |   3 +-
 .../oak/run/cli/DocumentNodeStoreOptions.java      |   8 +
 .../oak/index/IndexDocumentBuilderCustomizer.java  |   3 +-
 .../oak/plugins/document/Configuration.java        |   6 +
 .../oak/plugins/document/DocumentNodeStore.java    |  21 ++
 .../plugins/document/DocumentNodeStoreBuilder.java |  56 ++++-
 .../plugins/document/DocumentNodeStoreService.java |  16 +-
 .../oak/plugins/document/NodeDocument.java         | 179 +++++++++++++-
 .../oak/plugins/document/DisableCacheTest.java     |   4 +-
 .../oak/plugins/document/DocumentMK.java           |   1 +
 .../plugins/document/DocumentMKBuilderTest.java    |  34 ++-
 .../oak/plugins/document/DocumentNodeStoreIT.java  | 260 +++++++++++++++++++++
 .../oak/plugins/document/MemoryDiffCacheTest.java  |   2 +-
 .../oak/plugins/document/NodeDocumentTest.java     |  74 +++++-
 .../mongo/MongoDocumentNodeStoreBuilderTest.java   |   6 +
 .../oak/plugins/document/util/UtilsTest.java       |  42 ++++
 16 files changed, 687 insertions(+), 28 deletions(-)

diff --git 
a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DocumentFixtureProvider.java
 
b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DocumentFixtureProvider.java
index 8535682ddf..90beaeae3b 100644
--- 
a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DocumentFixtureProvider.java
+++ 
b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DocumentFixtureProvider.java
@@ -95,7 +95,8 @@ class DocumentFixtureProvider {
                     docStoreOpts.getNodeCachePercentage(),
                     docStoreOpts.getPrevDocCachePercentage(),
                     docStoreOpts.getChildrenCachePercentage(),
-                    docStoreOpts.getDiffCachePercentage()
+                    docStoreOpts.getDiffCachePercentage(),
+                    docStoreOpts.getPrevNoPropCachePercentage()
             );
         }
 
diff --git 
a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DocumentNodeStoreOptions.java
 
b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DocumentNodeStoreOptions.java
index 754b4dfced..1bb0a5aeea 100644
--- 
a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DocumentNodeStoreOptions.java
+++ 
b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DocumentNodeStoreOptions.java
@@ -34,6 +34,7 @@ public class DocumentNodeStoreOptions implements OptionsBean {
     private final OptionSpec<Integer> prevDocCachePercentage;
     private final OptionSpec<Integer> childrenCachePercentage;
     private final OptionSpec<Integer> diffCachePercentage;
+    private final OptionSpec<Integer> prevNoPropCachePercentage;
     private OptionSet options;
 
     public DocumentNodeStoreOptions(OptionParser parser){
@@ -56,6 +57,9 @@ public class DocumentNodeStoreOptions implements OptionsBean {
         diffCachePercentage = parser.
                 accepts("diffCachePercentage", "Percentage of cache to be 
allocated towards Diff cache")
                 .withRequiredArg().ofType(Integer.class).defaultsTo(30);
+        prevNoPropCachePercentage = parser.
+                accepts("prevNoPropCachePercentage", "Percentage of cache to 
be allocated towards PrevNoProp cache")
+                .withRequiredArg().ofType(Integer.class).defaultsTo(1);
     }
 
     @Override
@@ -112,6 +116,10 @@ public class DocumentNodeStoreOptions implements 
OptionsBean {
         return diffCachePercentage.value(options);
     }
 
+    public int getPrevNoPropCachePercentage() {
+        return prevNoPropCachePercentage.value(options);
+    }
+
     public boolean isCacheDistributionDefined(){
         return options.has(nodeCachePercentage) ||
                 options.has(prevDocCachePercentage) ||
diff --git 
a/oak-run/src/main/java/org/apache/jackrabbit/oak/index/IndexDocumentBuilderCustomizer.java
 
b/oak-run/src/main/java/org/apache/jackrabbit/oak/index/IndexDocumentBuilderCustomizer.java
index aa3dfa9067..7258acc4d8 100644
--- 
a/oak-run/src/main/java/org/apache/jackrabbit/oak/index/IndexDocumentBuilderCustomizer.java
+++ 
b/oak-run/src/main/java/org/apache/jackrabbit/oak/index/IndexDocumentBuilderCustomizer.java
@@ -71,7 +71,8 @@ class IndexDocumentBuilderCustomizer implements 
DocumentBuilderCustomizer {
                     35,
                     10,
                     15,
-                    2
+                    2,
+                    1
             );
         }
 
diff --git 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/Configuration.java
 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/Configuration.java
index 90ff3dfac6..946c253f0a 100644
--- 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/Configuration.java
+++ 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/Configuration.java
@@ -106,6 +106,12 @@ import static 
org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreServic
             description = "Percentage of cache to be allocated towards Diff 
cache")
     int diffCachePercentage() default DEFAULT_DIFF_CACHE_PERCENTAGE;
 
+    @AttributeDefinition(
+            name = "PrevNoProp Cache",
+            description = "Percentage of cache to be allocated towards 
PrevNoProp cache."
+                    + " This cache is used to keep non existence of properties 
in previous documents and can be small.")
+    int prevNoPropCachePercentage() default DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE;
+
     @AttributeDefinition(
             name = "LIRS Cache Segment Count",
             description = "The number of segments in the LIRS cache " +
diff --git 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 8b987ca42a..c064738c78 100644
--- 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -102,6 +102,7 @@ import 
org.apache.jackrabbit.oak.plugins.document.prefetch.CacheWarming;
 import 
org.apache.jackrabbit.oak.plugins.document.util.LeaseCheckDocumentStoreWrapper;
 import 
org.apache.jackrabbit.oak.plugins.document.util.LoggingDocumentStoreWrapper;
 import 
org.apache.jackrabbit.oak.plugins.document.util.ReadOnlyDocumentStoreWrapperFactory;
+import org.apache.jackrabbit.oak.plugins.document.util.StringValue;
 import 
org.apache.jackrabbit.oak.plugins.document.util.ThrottlingDocumentStoreWrapper;
 import 
org.apache.jackrabbit.oak.plugins.document.util.TimingDocumentStoreWrapper;
 import org.apache.jackrabbit.oak.plugins.document.util.Utils;
@@ -439,6 +440,12 @@ public final class DocumentNodeStore
      */
     private final DiffCache diffCache;
 
+    /**
+     * Tiny cache for non existence of any revisions in previous documents
+     * for particular properties.
+     */
+    private final Cache<StringValue, StringValue> prevNoPropCache;
+
     /**
      * The commit value resolver for this node store.
      */
@@ -683,6 +690,9 @@ public final class DocumentNodeStore
 
         diffCache = builder.getDiffCache(this.clusterId);
 
+        // builder checks for feature toggle directly and returns null if 
disabled
+        prevNoPropCache = builder.buildPrevNoPropCache();
+
         // check if root node exists
         NodeDocument rootDoc = store.find(NODES, Utils.getIdFromPath(ROOT));
         if (rootDoc == null) {
@@ -860,6 +870,13 @@ public final class DocumentNodeStore
         LOG.info("Initialized DocumentNodeStore with clusterNodeId: {}, 
updateLimit: {} ({})",
                 clusterId, updateLimit,
                 getClusterNodeInfoDisplayString());
+        if (prevNoPropCache == null) {
+            LOG.info("prevNoProp cache is disabled");
+        } else {
+            // unfortunate that the guava cache doesn't unveil its max size
+            // hence falling back to using the builder's original value for 
now.
+            LOG.info("prevNoProp cache is enabled with size: {}", 
builder.getPrevNoPropCacheSize());
+        }
 
         if (!builder.isBundlingDisabled()) {
             bundlingConfigHandler.initialize(this, executor);
@@ -1306,6 +1323,10 @@ public final class DocumentNodeStore
         return nodeCachePredicate;
     }
 
+    public Cache<StringValue, StringValue> getPrevNoPropCache() {
+        return prevNoPropCache;
+    }
+
     /**
      * Returns the journal entry that will be stored in the journal with the
      * next background updated.
diff --git 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilder.java
 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilder.java
index a3ccc027fd..b0cb99decf 100644
--- 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilder.java
+++ 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilder.java
@@ -85,7 +85,8 @@ public class DocumentNodeStoreBuilder<T extends 
DocumentNodeStoreBuilder<T>> {
     private static final Logger LOG = 
LoggerFactory.getLogger(DocumentNodeStoreBuilder.class);
 
     public static final long DEFAULT_MEMORY_CACHE_SIZE = 256 * 1024 * 1024;
-    public static final int DEFAULT_NODE_CACHE_PERCENTAGE = 35;
+    public static final int DEFAULT_NODE_CACHE_PERCENTAGE = 34;
+    public static final int DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE = 1;
     public static final int DEFAULT_PREV_DOC_CACHE_PERCENTAGE = 4;
     public static final int DEFAULT_CHILDREN_CACHE_PERCENTAGE = 15;
     public static final int DEFAULT_DIFF_CACHE_PERCENTAGE = 30;
@@ -134,10 +135,12 @@ public class DocumentNodeStoreBuilder<T extends 
DocumentNodeStoreBuilder<T>> {
     private Feature cancelInvalidationFeature;
     private Feature docStoreFullGCFeature;
     private Feature docStoreEmbeddedVerificationFeature;
+    private Feature prevNoPropCacheFeature;
     private Weigher<CacheValue, CacheValue> weigher = new EmpiricalWeigher();
     private long memoryCacheSize = DEFAULT_MEMORY_CACHE_SIZE;
     private int nodeCachePercentage = DEFAULT_NODE_CACHE_PERCENTAGE;
     private int prevDocCachePercentage = DEFAULT_PREV_DOC_CACHE_PERCENTAGE;
+    private int prevNoPropCachePercentage = 
DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE;
     private int childrenCachePercentage = DEFAULT_CHILDREN_CACHE_PERCENTAGE;
     private int diffCachePercentage = DEFAULT_DIFF_CACHE_PERCENTAGE;
     private int cacheSegmentCount = DEFAULT_CACHE_SEGMENT_COUNT;
@@ -451,6 +454,16 @@ public class DocumentNodeStoreBuilder<T extends 
DocumentNodeStoreBuilder<T>> {
         return docStoreEmbeddedVerificationFeature;
     }
 
+    public T setPrevNoPropCacheFeature(@Nullable Feature 
prevNoPropCacheFeature) {
+        this.prevNoPropCacheFeature = prevNoPropCacheFeature;
+        return thisBuilder();
+    }
+
+    @Nullable
+    public Feature getPrevNoPropCacheFeature() {
+        return prevNoPropCacheFeature;
+    }
+
     public T setLeaseFailureHandler(LeaseFailureHandler leaseFailureHandler) {
         this.leaseFailureHandler = leaseFailureHandler;
         return thisBuilder();
@@ -586,17 +599,20 @@ public class DocumentNodeStoreBuilder<T extends 
DocumentNodeStoreBuilder<T>> {
     public T memoryCacheDistribution(int nodeCachePercentage,
                                      int prevDocCachePercentage,
                                      int childrenCachePercentage,
-                                     int diffCachePercentage) {
+                                     int diffCachePercentage,
+                                     int prevNoPropCachePercentage) {
         checkArgument(nodeCachePercentage >= 0);
         checkArgument(prevDocCachePercentage >= 0);
         checkArgument(childrenCachePercentage>= 0);
         checkArgument(diffCachePercentage >= 0);
+        checkArgument(prevNoPropCachePercentage >= 0);
         checkArgument(nodeCachePercentage + prevDocCachePercentage + 
childrenCachePercentage +
-                diffCachePercentage < 100);
+                diffCachePercentage + prevNoPropCachePercentage < 100);
         this.nodeCachePercentage = nodeCachePercentage;
         this.prevDocCachePercentage = prevDocCachePercentage;
         this.childrenCachePercentage = childrenCachePercentage;
         this.diffCachePercentage = diffCachePercentage;
+        this.prevNoPropCachePercentage = prevNoPropCachePercentage;
         return thisBuilder();
     }
 
@@ -608,13 +624,21 @@ public class DocumentNodeStoreBuilder<T extends 
DocumentNodeStoreBuilder<T>> {
         return memoryCacheSize * prevDocCachePercentage / 100;
     }
 
+    public long getPrevNoPropCacheSize() {
+        // feature toggle overrules the prevNoPropCachePercentage config
+        if (!isPrevNoPropCacheEnabled()) {
+            return 0;
+        }
+        return memoryCacheSize * prevNoPropCachePercentage / 100;
+    }
+
     public long getChildrenCacheSize() {
         return memoryCacheSize * childrenCachePercentage / 100;
     }
 
     public long getDocumentCacheSize() {
         return memoryCacheSize - getNodeCacheSize() - 
getPrevDocumentCacheSize() - getChildrenCacheSize()
-                - getDiffCacheSize();
+                - getDiffCacheSize() - getPrevNoPropCacheSize();
     }
 
     public long getDiffCacheSize() {
@@ -898,6 +922,30 @@ public class DocumentNodeStoreBuilder<T extends 
DocumentNodeStoreBuilder<T>> {
         return new NodeDocumentCache(nodeDocumentsCache, 
nodeDocumentsCacheStats, prevDocumentsCache, prevDocumentsCacheStats, locks);
     }
 
+    /**
+     * Checks the feature toggle for prevNoProp cache and returns true if 
that's enabled
+     * @return true if the prevNoProp feature toggle is enabled, false 
otherwise
+     */
+    private boolean isPrevNoPropCacheEnabled() {
+        final Feature prevNoPropFeature = getPrevNoPropCacheFeature();
+        return prevNoPropFeature != null && prevNoPropFeature.isEnabled();
+    }
+
+    /**
+     * Builds the prevNoProp cache, if the corresponding feature toggle is 
enabled.
+     * Returns null otherwise
+     * @return null if the prevNoProp feature is disabled or the cache size is 0, 
a newly built cache otherwise
+     */
+    @Nullable
+    public Cache<StringValue, StringValue> buildPrevNoPropCache() {
+        // if feature toggle is off or the config is explicitly set to 0, then 
no cache
+        if (!isPrevNoPropCacheEnabled() || getPrevNoPropCacheSize() == 0) {
+            return null;
+        }
+        // no persistent cache for now as this is only a tiny cache
+        return buildCache("PREV_NOPROP", getPrevNoPropCacheSize(), new 
CopyOnWriteArraySet<>());
+    }
+
     /**
      * @deprecated Use {@link #setNodeCachePathPredicate(Predicate)} instead.
      */
diff --git 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
index 5c681ecbb9..e77388d948 100644
--- 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
+++ 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
@@ -201,6 +201,12 @@ public class DocumentNodeStoreService {
      */
     private static final String FT_NAME_EMBEDDED_VERIFICATION = 
"FT_EMBEDDED_VERIFICATION_OAK-10633";
 
+    /**
+     * Feature toggle name to enable the prev-no-prop cache.
+     * prev-no-prop refers to previous document not containing a particular 
property key.
+     */
+    private static final String FT_NAME_PREV_NO_PROP_CACHE = 
"FT_PREV_NO_PROP_OAK-11184";
+
     // property name constants - values can come from framework properties or 
OSGi config
     public static final String CUSTOM_BLOB_STORE = "customBlobStore";
     public static final String PROP_REV_RECOVERY_INTERVAL = 
"lastRevRecoveryJobIntervalInSecs";
@@ -240,6 +246,7 @@ public class DocumentNodeStoreService {
     private Feature cancelInvalidationFeature;
     private Feature docStoreFullGCFeature;
     private Feature docStoreEmbeddedVerificationFeature;
+    private Feature prevNoPropCacheFeature;
     private ComponentContext context;
     private Whiteboard whiteboard;
     private long deactivationTimestamp = 0;
@@ -278,6 +285,7 @@ public class DocumentNodeStoreService {
         cancelInvalidationFeature = 
Feature.newFeature(FT_NAME_CANCEL_INVALIDATION, whiteboard);
         docStoreFullGCFeature = Feature.newFeature(FT_NAME_FULL_GC, 
whiteboard);
         docStoreEmbeddedVerificationFeature = 
Feature.newFeature(FT_NAME_EMBEDDED_VERIFICATION, whiteboard);
+        prevNoPropCacheFeature = 
Feature.newFeature(FT_NAME_PREV_NO_PROP_CACHE, whiteboard);
 
         registerNodeStoreIfPossible();
     }
@@ -487,7 +495,8 @@ public class DocumentNodeStoreService {
                         config.nodeCachePercentage(),
                         config.prevDocCachePercentage(),
                         config.childrenCachePercentage(),
-                        config.diffCachePercentage()).
+                        config.diffCachePercentage(),
+                        config.prevNoPropCachePercentage()).
                 setCacheSegmentCount(config.cacheSegmentCount()).
                 setCacheStackMoveDistance(config.cacheStackMoveDistance()).
                 setBundlingDisabled(config.bundlingDisabled()).
@@ -499,6 +508,7 @@ public class DocumentNodeStoreService {
                 setCancelInvalidationFeature(cancelInvalidationFeature).
                 setDocStoreFullGCFeature(docStoreFullGCFeature).
                 
setDocStoreEmbeddedVerificationFeature(docStoreEmbeddedVerificationFeature).
+                setPrevNoPropCacheFeature(prevNoPropCacheFeature).
                 setThrottlingEnabled(config.throttlingEnabled()).
                 setFullGCEnabled(config.fullGCEnabled()).
                 setFullGCIncludePaths(config.fullGCIncludePaths()).
@@ -670,6 +680,10 @@ public class DocumentNodeStoreService {
             docStoreEmbeddedVerificationFeature.close();
         }
 
+        if (prevNoPropCacheFeature != null) {
+            prevNoPropCacheFeature.close();
+        }
+
         unregisterNodeStore();
     }
 
diff --git 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index d4fc3b7bd8..7897d13bdd 100644
--- 
a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ 
b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -48,10 +48,12 @@ import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
 import java.util.function.Predicate;
 
+import org.apache.jackrabbit.guava.common.cache.Cache;
 import org.apache.jackrabbit.guava.common.collect.AbstractIterator;
 import org.apache.jackrabbit.guava.common.collect.ImmutableList;
 import org.apache.jackrabbit.guava.common.collect.Iterables;
@@ -66,6 +68,7 @@ import org.apache.jackrabbit.oak.commons.json.JsopTokenizer;
 import org.apache.jackrabbit.oak.commons.json.JsopWriter;
 import org.apache.jackrabbit.oak.commons.log.LogSilencer;
 import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore;
+import org.apache.jackrabbit.oak.plugins.document.util.StringValue;
 import org.apache.jackrabbit.oak.plugins.document.util.Utils;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
@@ -87,6 +90,7 @@ public final class NodeDocument extends Document {
     }
 
     static final Logger LOG = LoggerFactory.getLogger(NodeDocument.class);
    static final Logger PREV_NO_PROP_LOG = 
LoggerFactory.getLogger(NodeDocument.class.getName() + ".prevNoProp");
 
     private static final LogSilencer LOG_SILENCER = new LogSilencer();
 
@@ -1020,12 +1024,30 @@ public final class NodeDocument extends Document {
             Value value = getLatestValue(nodeStore, local.entrySet(),
                     readRevision, validRevisions, lastRevs);
 
+            if (value == null
+                    // only filter if prevNoProp feature toggle is enabled:
+                    && nodeStore.getPrevNoPropCache() != null
+                    && !anyRevisionCommitted(local.keySet(), nodeStore, 
validRevisions)) {
+                // OAK-11184 : if the locally resolved value is null AND
+                // there are no committed revisions in the local map at all,
+                // then don't scan previous documents as that should not
+                // find anything. The split algorithm always ensures
+                // that at least one committed revision remains in the
+                // local map. From that we can derive that if there's
+                // no committed revision in the local map, there isn't
+                // any in previous documents either.
+                // This should only occur when a property is being newly
+                // added or was deleted, then fullGC-ed and now re-added.
+                PREV_NO_PROP_LOG.debug("getNodeAtRevision : skipping as no 
committed revision locally for path={}, key={}", path, key);
+                continue;
+            }
+
             // check if there may be more recent values in a previous document
             value = requiresCompleteMapCheck(value, local, nodeStore) ? null : 
value;
 
             if (value == null && !getPreviousRanges().isEmpty()) {
                 // check revision history
-                value = getLatestValue(nodeStore, getVisibleChanges(key, 
readRevision),
+                value = getLatestValue(nodeStore, getVisibleChanges(key, 
readRevision, nodeStore.getPrevNoPropCache()),
                         readRevision, validRevisions, lastRevs);
             }
             String propertyName = Utils.unescapePropertyName(key);
@@ -1087,6 +1109,37 @@ public final class NodeDocument extends Document {
         return new DocumentNodeState(nodeStore, path, readRevision, props, 
hasChildren(), lastRevision);
     }
 
+    /**
+     * Checks if any of the provided revisions are committed - given the
+     * RevisionContext. Uses validRevisions as cached earlier
+     * confirmed valid revisions (but chooses not to add to that map, to limit
+     * side-effects of this new-ish method).
+     *
+     * @param revisions      the revisions to check if any of them are 
committed
+     * @param context        the RevisionContext to use for commit value 
resolving
+     * @param validRevisions map of revision to commit value considered valid
+     *                       against the given readRevision.
+     * @return true if the provided (local) map of revisions (of a property) 
has
+     * any revisions that are committed (irrespective of visible or not).
+     */
+    private boolean anyRevisionCommitted(Set<Revision> revisions, @NotNull 
RevisionContext context,
+            Map<Revision, String> validRevisions) {
+        for (Revision propRev : revisions) {
+            String commitValue = validRevisions.get(propRev);
+            if (commitValue == null) {
+                commitValue = context.getCommitValue(propRev, this);
+            }
+            if (commitValue == null) {
+                // then it's not committed
+                continue;
+            }
+            if (Utils.isCommitted(commitValue)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
     /**
      * Get the earliest (oldest) revision where the node was alive at or before
      * the provided revision, if the node was alive at the given revision.
@@ -1447,10 +1500,13 @@ public final class NodeDocument extends Document {
         };
     }
 
+    private String getPreviousDocId(Revision rev, Range range) {
+        return Utils.getPreviousIdFor(getMainPath(), rev, range.height);
+    }
+
     @Nullable
     private NodeDocument getPreviousDoc(Revision rev, Range range){
-        int h = range.height;
-        String prevId = Utils.getPreviousIdFor(getMainPath(), rev, h);
+        String prevId = getPreviousDocId(rev, range);
         NodeDocument prev = getPreviousDocument(prevId);
         if (prev != null) {
             return prev;
@@ -1550,21 +1606,55 @@ public final class NodeDocument extends Document {
      *
      * @param property the name of the property.
      * @param readRevision the read revision vector.
+     * @param prevNoPropCache optional cache for remembering non existence
+     * of any property revisions in previous documents (by their id)
      * @return property changes visible from the given read revision vector.
      */
     @NotNull
     Iterable<Map.Entry<Revision, String>> getVisibleChanges(@NotNull final 
String property,
-                                                            @NotNull final 
RevisionVector readRevision) {
+                                                            @NotNull final 
RevisionVector readRevision,
+                                                            @Nullable final 
Cache<StringValue, StringValue> prevNoPropCache) {
+        return getVisibleChanges(property, readRevision, prevNoPropCache, 
null);
+    }
+
+    /**
+     * Variation of getVisibleChanges that allows to provide a non-null 
propRevFound.
+     * The latter is used to detect whether previous documents had any 
property revisions at all.
+     * This method is invoked in two different ways:
+     * <ul>
+     * <li>prevNoPropCache != null : this is used in the top most invocation 
only and
+     * when passed causes top level previous documents to be handled via the 
cache.
+     * To do that, for these cases the
+     * changesFor method will do iterable-yoga to sneak into the iterator() 
code while
+     * having taken note of whether any previous document had any revision at 
all for the
given property (this latter aspect is checked in getVisibleChanges in a 
child iteration).</li>
+     * <li>prevNoPropCache == null : this is used in invocations on all 
previous documents.
+     * In this case the method checks if there are any revisions for the given 
property.
+     * If there are, then the provided propRevFound AtomicBoolean is set to 
true.
+     * That information is then used in the top most call in this 
getVisibleChanges-iteration
+     * to decide whether we can cache the fact that no property (whatsoever) 
was found in the
+     * given previous document (and all its children) or not. That decision is 
based on the
+     * AtomicBoolean being true or false.</li>
+     * </ul>
+     */
+    @NotNull
+    Iterable<Map.Entry<Revision, String>> getVisibleChanges(@NotNull final 
String property,
+                                                            @NotNull final 
RevisionVector readRevision,
+                                                            @Nullable final 
Cache<StringValue, StringValue> prevNoPropCache,
+                                                            @Nullable final 
AtomicBoolean propRevFound) {
         Predicate<Map.Entry<Revision, String>> p = input -> 
!readRevision.isRevisionNewer(input.getKey());
         List<Iterable<Map.Entry<Revision, String>>> changes = new 
ArrayList<>();
         Map<Revision, String> localChanges = getLocalMap(property);
         if (!localChanges.isEmpty()) {
+            if (propRevFound != null) {
+                propRevFound.set(true);
+            }
             changes.add(filter(localChanges.entrySet(), p::test));
         }
 
         for (Revision r : readRevision) {
             // collect changes per clusterId
-            collectVisiblePreviousChanges(property, r, changes);
+            collectVisiblePreviousChanges(property, r, changes, 
prevNoPropCache, propRevFound);
         }
 
         if (changes.size() == 1) {
@@ -1586,7 +1676,9 @@ public final class NodeDocument extends Document {
      */
     private void collectVisiblePreviousChanges(@NotNull final String property,
                                                @NotNull final Revision 
readRevision,
-                                               @NotNull final 
List<Iterable<Entry<Revision, String>>> changes) {
+                                               @NotNull final 
List<Iterable<Entry<Revision, String>>> changes,
+                                               @Nullable final 
Cache<StringValue, StringValue> prevNoPropCache,
+                                               @Nullable final AtomicBoolean 
propRevFound) {
         List<Iterable<Map.Entry<Revision, String>>> revs = new ArrayList<>();
 
         RevisionVector readRV = new RevisionVector(readRevision);
@@ -1612,7 +1704,7 @@ public final class NodeDocument extends Document {
                     previous = r;
                 }
             }
-            revs.add(changesFor(batch, readRV, property));
+            revs.add(changesFor(batch, readRV, property, prevNoPropCache, 
propRevFound));
             batch.clear();
         }
 
@@ -1637,18 +1729,85 @@ public final class NodeDocument extends Document {
      */
     private Iterable<Map.Entry<Revision, String>> changesFor(final List<Range> 
ranges,
                                                              final 
RevisionVector readRev,
-                                                             final String 
property) {
+                                                             final String 
property,
+                                                             @Nullable final 
Cache<StringValue, StringValue> prevNoPropCache,
+                                                             @Nullable final 
AtomicBoolean parentPropRevFound) {
         if (ranges.isEmpty()) {
             return Collections.emptyList();
         }
 
-        final Function<Range, Iterable<Map.Entry<Revision, String>>> 
rangeToChanges = input -> {
+        final Function<Range, Iterable<Map.Entry<Revision, String>>> 
rangeToChanges;
+        if (prevNoPropCache != null && parentPropRevFound == null) {
+            // then we are in the main doc. at this point we thus need to
+            // check the cache, if miss then scan prev docs and cache the 
result
+            //TODO: consider refactoring of the 
getVisibleChanges/collectVisiblePreviousChanges/changesFor
+            // logic. The way these methods create a sequence of Iterables and 
lambdas make
+            // for a rather complex logic that is difficult to fiddle with.
+            // It might thus be worthwhile to look into some loop logic 
rather than iteration here.
+            // Except that refactoring is likely a bigger task, hence 
postponed for now.
+            rangeToChanges = input -> {
+                    final String prevDocId = getPreviousDocId(input.high, 
input);
+                    final StringValue cacheKey = new StringValue(property + 
"@" + prevDocId);
+                    if (prevNoPropCache.getIfPresent(cacheKey) != null) {
+                        // cache hit, awesome!
+                        // (we're not interested in the actual cache value 
btw, as finding 
+                        // a cache value actually indicates "the property does 
not exist 
+                        // in any previous document whatsoever" - no need for 
value check)
+                        PREV_NO_PROP_LOG.trace("changesFor : empty changes 
cache hit for cacheKey={}", cacheKey);
+                        return Collections.emptyList();
+                    }
+                    // cache miss - let's do the heavy lifting then
+                    NodeDocument doc = getPreviousDoc(input.high, input);
+                    if (doc == null) {
+                        // this could be a candidate for caching probably.
+                        // but might also indicate some race-condition.
+                        // so let's not cache for now.
+                        return Collections.emptyList();
+                    }
+                    // initiate counting
+                    final AtomicBoolean childrenPropRevFound = new 
AtomicBoolean(false);
+                    // create that Iterable - but wrap it so that we know how 
many
+                    // property revisions were actually found in the scan.
+                    // (we're mostly interested if that's zero or non-zero 
+                    // though)
+                    final Iterable<Entry<Revision, String>> vc = 
doc.getVisibleChanges(property, readRev, null, childrenPropRevFound);
+                    // wrap that Iterable to intercept the call to hasNext().
+                    // at that point if the flag is set it means
+                    // that any previous documents scanned does indeed have
+                    // the property we're interested in. It might not be 
visible
+                    // or committed, but at least it does have it.
+                    // In which case we'd skip that. But if it does not exist
+                    // at all, then we cache that fact.
+                    return new Iterable<Entry<Revision, String>>() {
+                        @Override
+                        public Iterator<Entry<Revision, String>> iterator() {
+                            // grab the iterator - this typically triggers 
previous doc scan
+                            final Iterator<Entry<Revision, String>> wrappee = 
vc.iterator();
+                            // but let's invoke hasNext here explicitly still. 
to ensure
+                            // we do indeed scan - without a scan hasNext 
can't work
+                            wrappee.hasNext();
+                            if (!childrenPropRevFound.get()) {
+                                // then let's cache that
+                                PREV_NO_PROP_LOG.debug("changesFor : caching 
empty changes for cacheKey={}", cacheKey);
+                                prevNoPropCache.put(cacheKey, 
StringValue.EMPTY);
+                            }
+                            return wrappee;
+                        }
+                    };
+                };
+        } else {
+            // without a cache either the caller is not interested at caching,
+            // or we are within a previous doc reading n+1 level previous docs.
+            // in both cases nothing much to do other than passing along the
+            // flag (parentPropRevFound).
+            // also no caching other than in the main doc
+            rangeToChanges = input -> {
                 NodeDocument doc = getPreviousDoc(input.high, input);
                 if (doc == null) {
                     return Collections.emptyList();
                 }
-                return doc.getVisibleChanges(property, readRev);
+                return doc.getVisibleChanges(property, readRev, null, 
parentPropRevFound);
             };
+        }
 
         Iterable<Map.Entry<Revision, String>> changes;
         if (ranges.size() == 1) {
diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DisableCacheTest.java
 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DisableCacheTest.java
index a28cc74edd..6abbbb2731 100644
--- 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DisableCacheTest.java
+++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DisableCacheTest.java
@@ -19,6 +19,7 @@ package org.apache.jackrabbit.oak.plugins.document;
 import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_CHILDREN_CACHE_PERCENTAGE;
 import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_DIFF_CACHE_PERCENTAGE;
 import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_PREV_DOC_CACHE_PERCENTAGE;
+import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE;
 import static org.junit.Assert.assertEquals;
 
 import java.io.File;
@@ -45,7 +46,8 @@ public class DisableCacheTest {
         builder.memoryCacheDistribution(0,
                 DEFAULT_PREV_DOC_CACHE_PERCENTAGE,
                 DEFAULT_CHILDREN_CACHE_PERCENTAGE,
-                DEFAULT_DIFF_CACHE_PERCENTAGE);
+                DEFAULT_DIFF_CACHE_PERCENTAGE,
+                DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE);
         DocumentNodeStore nodeStore = builder.getNodeStore();
         Cache<PathRev, DocumentNodeState> cache = nodeStore.getNodeCache();
         assertEquals(0, cache.size());
diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
index 4c81c505c0..452630975c 100644
--- 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
+++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
@@ -465,6 +465,7 @@ public class DocumentMK {
         public static final long DEFAULT_MEMORY_CACHE_SIZE = 
DocumentNodeStoreBuilder.DEFAULT_MEMORY_CACHE_SIZE;
         public static final int DEFAULT_NODE_CACHE_PERCENTAGE = 
DocumentNodeStoreBuilder.DEFAULT_NODE_CACHE_PERCENTAGE;
         public static final int DEFAULT_PREV_DOC_CACHE_PERCENTAGE = 
DocumentNodeStoreBuilder.DEFAULT_PREV_DOC_CACHE_PERCENTAGE;
+        public static final int DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE = 
DocumentNodeStoreBuilder.DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE;
         public static final int DEFAULT_CHILDREN_CACHE_PERCENTAGE = 
DocumentNodeStoreBuilder.DEFAULT_CHILDREN_CACHE_PERCENTAGE;
         public static final int DEFAULT_DIFF_CACHE_PERCENTAGE = 
DocumentNodeStoreBuilder.DEFAULT_DIFF_CACHE_PERCENTAGE;
         public static final int DEFAULT_CACHE_SEGMENT_COUNT = 
DocumentNodeStoreBuilder.DEFAULT_CACHE_SEGMENT_COUNT;
diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentMKBuilderTest.java
 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentMKBuilderTest.java
index 7cf6d9a9fd..781ebe7a7a 100644
--- 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentMKBuilderTest.java
+++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentMKBuilderTest.java
@@ -21,27 +21,55 @@ import com.mongodb.MongoClient;
 
 import org.apache.jackrabbit.oak.cache.CacheStats;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_CHILDREN_CACHE_PERCENTAGE;
 import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_DIFF_CACHE_PERCENTAGE;
 import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_NODE_CACHE_PERCENTAGE;
 import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_PREV_DOC_CACHE_PERCENTAGE;
+import static 
org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE;
+import static 
org.apache.jackrabbit.oak.plugins.document.util.UtilsTest.createFeature;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
+import java.util.Arrays;
+
+@RunWith(Parameterized.class)
 public class DocumentMKBuilderTest extends AbstractMongoConnectionTest {
 
     private static final long CACHE_SIZE = 8 * 1024 * 1024;
     private static final long PREV_DOC_CACHE_SIZE = 
cacheSize(DEFAULT_PREV_DOC_CACHE_PERCENTAGE);
-    private static final long DOC_CACHE_SIZE = CACHE_SIZE -
+    private static final long DOC_CACHE_SIZE_DEFAULT = CACHE_SIZE -
             cacheSize(DEFAULT_CHILDREN_CACHE_PERCENTAGE) -
             cacheSize(DEFAULT_DIFF_CACHE_PERCENTAGE) -
             cacheSize(DEFAULT_NODE_CACHE_PERCENTAGE) -
             cacheSize(DEFAULT_PREV_DOC_CACHE_PERCENTAGE);
+    private static final long DOC_CACHE_SIZE_PREV_NO_PROP_ENABLED = 
DOC_CACHE_SIZE_DEFAULT -
+            cacheSize(DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE);
+
+    @Parameterized.Parameters(name="{index}: prevNoPropEnabled : {0}")
+    public static java.util.Collection<Boolean> params() {
+        return Arrays.asList(true,  false);
+    }
+
+    boolean prevNoPropEnabled;
+
+    long expectedDocCacheSize;
+
+    public DocumentMKBuilderTest(boolean prevNoPropEnabled) {
+        this.prevNoPropEnabled = prevNoPropEnabled;
+        if (prevNoPropEnabled) {
+            expectedDocCacheSize = DOC_CACHE_SIZE_PREV_NO_PROP_ENABLED;
+        } else {
+            expectedDocCacheSize = DOC_CACHE_SIZE_DEFAULT;
+        }
+    }
 
     @Override
     protected DocumentMK.Builder newBuilder(MongoClient client, String dbName) 
throws Exception {
-        return super.newBuilder(client, dbName).memoryCacheSize(CACHE_SIZE);
+        return super.newBuilder(client, dbName).memoryCacheSize(CACHE_SIZE)
+                .setPrevNoPropCacheFeature(createFeature(prevNoPropEnabled));
     }
 
     @Test
@@ -53,7 +81,7 @@ public class DocumentMKBuilderTest extends 
AbstractMongoConnectionTest {
         CacheStats prevDocCacheStats = Iterables.get(cacheStats, 1);
         assertEquals("Document-Documents", docCacheStats.getName());
         assertEquals("Document-PrevDocuments", prevDocCacheStats.getName());
-        assertEquals(DOC_CACHE_SIZE, docCacheStats.getMaxTotalWeight());
+        assertEquals(expectedDocCacheSize, docCacheStats.getMaxTotalWeight());
         assertEquals(PREV_DOC_CACHE_SIZE, 
prevDocCacheStats.getMaxTotalWeight());
     }
 
diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreIT.java
 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreIT.java
index b846c03506..21df810fb5 100644
--- 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreIT.java
+++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreIT.java
@@ -17,17 +17,22 @@
 package org.apache.jackrabbit.oak.plugins.document;
 
 import java.io.InputStream;
+import java.util.Date;
+import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.jackrabbit.guava.common.util.concurrent.Monitor;
 
+import org.apache.jackrabbit.oak.api.CommitFailedException;
 import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser;
 import org.apache.jackrabbit.oak.json.JsopDiff;
+import 
org.apache.jackrabbit.oak.plugins.document.PausableDocumentStore.PauseCallback;
 import 
org.apache.jackrabbit.oak.plugins.document.util.TimingDocumentStoreWrapper;
 import org.apache.jackrabbit.oak.plugins.document.util.Utils;
 import org.apache.jackrabbit.oak.plugins.memory.AbstractBlob;
@@ -42,12 +47,16 @@ import org.junit.Test;
 
 import static java.util.concurrent.Executors.newSingleThreadExecutor;
 import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.jackrabbit.oak.plugins.document.Collection.NODES;
 import static 
org.apache.jackrabbit.oak.plugins.document.NodeDocument.MODIFIED_IN_SECS_RESOLUTION;
 import static org.apache.jackrabbit.oak.plugins.document.TestUtils.merge;
 import static 
org.apache.jackrabbit.oak.plugins.document.util.Utils.getIdFromPath;
+import static 
org.apache.jackrabbit.oak.plugins.document.util.UtilsTest.createFeature;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeFalse;
 
 /**
  * Tests DocumentNodeStore on various DocumentStore back-ends.
@@ -188,6 +197,257 @@ public class DocumentNodeStoreIT extends 
AbstractDocumentStoreTest {
         }
     }
 
+    /**
+     * OAK-11184 : a cluster node A is merging a change on root. That involves 
two
+     * updates : the first one writing the changes. The second one updating the
+     * commit root (with a revisions=c). If a cluster node B reads the root 
while
+     * the commit root was not yet updated, it has to read previous documents 
as
+     * part of resolving the value of a property in getNodeAtRevision. Only to 
find
+     * that the new property does not exist in any previous document and is 
thus
+     * non-existent. Cluster node B will have to repeat going through
+     * previous documents whenever reading root - until B is
+     * able to do a backgroundRead. The backgroundRead however could be 
blocked by a
+     * number of merge operations - as those merge operations acquire the
+     * backgroundOperationLock - and backgroundRead wants that lock 
exclusively.
+     *
+     * The test reproduces only part one of the above (expensiveness of reading
+     * not-yet-visible property of a document with many previous documents).
+     */
+    @Test
+    public void unmergedCommitOnRoot_withPrevNoPropCache() throws Exception {
+        doUnmergedCommitOnRoot(true);
+    }
+
+    /**
+     * This variant tests without the cache and is thus expected to fail with
+     * an AssertionError
+     */
+    @Test(expected = AssertionError.class)
+    public void unmergedCommitOnRoot_withoutPrevNoPropCache() throws Exception 
{
+        assumeFalse(dsf.getName().startsWith("RDB"));
+        doUnmergedCommitOnRoot(false);
+    }
+
+    private void doUnmergedCommitOnRoot(boolean prevNoPropCacheEnabled) throws 
Exception {
+        Clock clock = new Clock.Virtual();
+        clock.waitUntil(System.currentTimeMillis());
+        Revision.setClock(clock);
+        ClusterNodeInfo.setClock(clock);
+
+        FailingDocumentStore fs1 = new FailingDocumentStore(ds);
+        PausableDocumentStore store1 = new PausableDocumentStore(fs1);
+        DocumentNodeStore ns1 = 
builderProvider.newBuilder().setClusterId(1).setAsyncDelay(0).clock(clock)
+                
.setPrevNoPropCacheFeature(createFeature(prevNoPropCacheEnabled))
+                .setDocumentStore(store1).build();
+
+        NodeBuilder b1 = ns1.getRoot().builder();
+        b1.setProperty("prop", -1);
+        ns1.merge(b1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+        // create MANY previous docs
+        for (int j = 0; j < 100; j++) {
+            for (int i = 0; i < NodeDocument.NUM_REVS_THRESHOLD; i++) {
+                b1 = ns1.getRoot().builder();
+                b1.setProperty("prop", i);
+                ns1.merge(b1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+            }
+            ns1.runBackgroundOperations();
+        }
+
+        // create /a as some initial content
+        NodeBuilder builder = ns1.getRoot().builder();
+        builder.child("a1").child("b1");
+        builder.child("a2").child("b2");
+        merge(ns1, builder);
+
+        // update a root property (but not via a branch commit)
+        builder = ns1.getRoot().builder();
+        builder.setProperty("rootprop", "v");
+        builder.child("a1").setProperty("nonrootprop", "v");
+        final NodeBuilder finalBuilder = builder;
+        final Semaphore breakpointReachedSemaphore = new Semaphore(0);
+        final Semaphore continueSemaphore = new Semaphore(0);
+        final Thread mergeThread = new Thread(() -> {
+            try {
+                merge(ns1, finalBuilder);
+            } catch (CommitFailedException e) {
+                throw new RuntimeException(e);
+            }
+        });
+        PauseCallback pauseCallback = new PauseCallback() {
+            @Override
+            public PauseCallback handlePause(List<UpdateOp> remainingOps) {
+                breakpointReachedSemaphore.release(1);
+                try {
+                    if (!continueSemaphore.tryAcquire(5, TimeUnit.SECONDS)) {
+                        System.err.println("timeout");
+                        throw new RuntimeException("timeout");
+                    }
+                } catch (InterruptedException e) {
+                    throw new RuntimeException(e);
+                }
+                return null;
+            }
+        };
+        
store1.pauseWith(pauseCallback).on(NODES).on("0:/").after(2).eternally();
+        mergeThread.start();
+        boolean breakpointReached = breakpointReachedSemaphore.tryAcquire(5, 
TimeUnit.SECONDS);
+        assertTrue(breakpointReached);
+
+        DocumentNodeStore ns2 = null;
+        try {
+            // start B
+            // for MongoDocumentStore (and perhaps others), we can't use the 
same
+            // instance by the second DocumentNodeStore, as that would mean 
they
+            // share the cache. And it is also the cache that is part of the 
testing.
+            // Hence what we need is a real second MongoDocumentStore that 
connects
+            // to the same mongo instance (but has its own cache and thus cause
+            // interesting things).
+            // So let's do that here for Mongo (for now - TODO: generalize 
this concept)
+            DocumentStore ds2;
+            if (dsf.hasSinglePersistence()) {
+                ds2 = dsf.createDocumentStore(getBuilder().setClusterId(2 /* 
likely unnecessary */));
+            } else {
+                // for Memory fixture we need to fall back to single 
DocumentStore
+                // (in which case the caching aspect of DocumentStore gets a 
bit lost)
+                ds2 = ds;
+            }
+            CountingDocumentStore cds2 = new CountingDocumentStore(ds2);
+            ns2 = 
builderProvider.newBuilder().setClusterId(2).setAsyncDelay(0).clock(clock)
+                    
.setPrevNoPropCacheFeature(createFeature(prevNoPropCacheEnabled))
+                    .setDocumentStore(cds2).build();
+
+            // now simulate any write and count how many times
+            // find() had to be invoked (basically to read a previous doc)
+            // if there's more than 95, then that's considered expensive
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v0");
+                assertNoPreviousDocsRead(c, 95);
+                // prior to the fix it would have been:
+                // assertPreviousDocsRead(c, 95);
+            }
+
+            // while the merge in the other thread isn't done, we can repeat 
this
+            // and it's still slow
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v1");
+                assertNoPreviousDocsRead(c, 95);
+                // prior to the fix it would have been:
+                // assertPreviousDocsRead(c, 95);
+            }
+
+            // and again, you get the point
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v2");
+                assertNoPreviousDocsRead(c, 95);
+                // prior to the fix it would have been:
+                // assertPreviousDocsRead(c, 95);
+            }
+
+            // doing just another update without any bg work won't fix 
anything yet
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v3");
+                assertNoPreviousDocsRead(c, 95);
+                // prior to the fix it would have been:
+                // assertPreviousDocsRead(c, 95);
+            }
+            // neither does doing just an update on ns2
+            ns2.runBackgroundOperations();
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v4");
+                assertNoPreviousDocsRead(c, 95);
+                // prior to the fix it would have been:
+                // assertPreviousDocsRead(c, 95);
+            }
+
+            // all of the above asserts are "easy" to avoid, as the
+            // revision was indeed not committed yet, and is newly added.
+            // that's a situation that can be caught.
+            // It gets a bit more difficult once the revision is committed
+            // but not yet visible. That is hard to catch, as it is legitimate
+            // for a revision to be split away while for another cluster node
+            // the only remaining revision is not yet visible (that fact
+            // might give a hint on how this can be fixed, but it is still 
hard).
+
+            // so, as soon as we release the other thread and let it finish
+            // its commit, our updates will see the revision being committed
+            // (as side-effect of the merge on root - otherwise there's hardly
+            // a reason to re-resolve the root doc), and subsequently go
+            // into expensive previous document scanning (so the test fails 
below).
+            continueSemaphore.release();
+            // let's make sure that other thread is really done
+            mergeThread.join(5000);
+            assertFalse(mergeThread.isAlive());
+
+            // let's check behavior after ns1 finished the commit
+            // but before it did a backgroundWrite
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v5");
+                // with a prevdoc_noprop cache (on root) there will
+                // still be one expensive round of previous document
+                // scans - only thereafter that is cached.
+                // to account for that : we now expect many previous
+                // docs were read:
+                assertPreviousDocsRead(c, 95);
+            }
+
+            // repeating it again should now make use of the prevdoc_noprop 
cache
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v6");
+                assertNoPreviousDocsRead(c, 95);
+                // prior to the fix it would have been:
+                // assertPreviousDocsRead(c, 95);
+            }
+
+            // neither does just doing an update on ns1
+            ns1.runBackgroundOperations();
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v7");
+                assertNoPreviousDocsRead(c, 95);
+                // prior to the fix it would have been:
+                // assertPreviousDocsRead(c, 95);
+            }
+
+            // just doing ns1 THEN ns2 will do the trick
+            ns2.runBackgroundOperations();
+            {
+                int c = getNodesFindCountOfAnUpdate(cds2, ns2, "v8");
+                // this worked irrespective of the fix:
+                assertNoPreviousDocsRead(c, 95);
+            }
+        } finally {
+            // in case anyone is still waiting (eg in a test failure case) :
+            continueSemaphore.release();
+            if (ns2 != null) {
+                ns2.dispose();
+            }
+        }
+
+        // a bit simplistic, but that's one way to reproduce the bug
+    }
+
+    private void assertNoPreviousDocsRead(int c, int threshold) {
+        assertTrue("c expected smaller than " + threshold + ", is: " + c, c < 
threshold);
+    }
+
+    // unused with fix for OAK-11184 - reproducing it requires this though (ie 
leaving FTR)
+    @SuppressWarnings("unused")
+    private void assertPreviousDocsRead(int c, int threshold) {
+        assertTrue("c expected larger than " + threshold + ", is: " + c, c > 
threshold);
+    }
+
+    private int getNodesFindCountOfAnUpdate(CountingDocumentStore cds, 
DocumentNodeStore ns2, String newValue)
+            throws CommitFailedException {
+        cds.resetCounters();
+        int nodesFindCount;
+        NodeBuilder b2 = ns2.getRoot().builder();
+        b2.setProperty("p", newValue);
+        merge(ns2, b2);
+        nodesFindCount = cds.getNumFindCalls(NODES);
+        cds.resetCounters();
+        return nodesFindCount;
+    }
+
     /**
      *  A blob that blocks on read until unblocked
      */
diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java
 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java
index fdffb47e2d..a62adbed71 100644
--- 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java
+++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java
@@ -33,7 +33,7 @@ public class MemoryDiffCacheTest {
     public void limit() throws Exception {
         DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder()
                 .setCacheSegmentCount(1)
-                .memoryCacheDistribution(0, 0, 0, 99));
+                .memoryCacheDistribution(0, 0, 0, 99, 0));
         RevisionVector from = new RevisionVector(Revision.newRevision(1));
         RevisionVector to = new RevisionVector(Revision.newRevision(1));
         DiffCache.Entry entry = cache.newEntry(from, to, false);
diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/NodeDocumentTest.java
 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/NodeDocumentTest.java
index 0b3f503d81..445d8adc72 100644
--- 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/NodeDocumentTest.java
+++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/NodeDocumentTest.java
@@ -18,6 +18,7 @@ package org.apache.jackrabbit.oak.plugins.document;
 
 import java.lang.ref.ReferenceQueue;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -44,11 +45,15 @@ import 
org.apache.jackrabbit.oak.commons.collections.CollectionUtils;
 import 
org.apache.jackrabbit.oak.plugins.document.VersionGarbageCollector.VersionGCStats;
 import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore;
 import org.apache.jackrabbit.oak.plugins.document.util.Utils;
+import org.apache.jackrabbit.oak.plugins.document.util.UtilsTest;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import static java.lang.System.currentTimeMillis;
 import static java.util.Collections.singletonList;
@@ -81,8 +86,22 @@ import static org.mockito.Mockito.when;
 /**
  * Tests for {@link NodeDocument}.
  */
+@RunWith(Parameterized.class)
 public class NodeDocumentTest {
 
+    @Parameterized.Parameters(name="{index}: prevNoPropEnabled : {0}")
+    public static java.util.Collection<Boolean> params() {
+        return Arrays.asList(true,  false);
+    }
+
+    boolean prevNoPropEnabled;
+
+    long expectedDocCacheSize;
+
+    public NodeDocumentTest(boolean prevNoPropEnabled) {
+        this.prevNoPropEnabled = prevNoPropEnabled;
+    }
+
     @Test
     public void splitCollisions() throws Exception {
         MemoryDocumentStore docStore = new MemoryDocumentStore();
@@ -967,7 +986,7 @@ public class NodeDocumentTest {
 
         for (int i = 0; i < 20; i++) {
             prevDocCalls.clear();
-            String value = doc.getVisibleChanges("p", 
headRevisions.get(i)).iterator().next().getValue();
+            String value = doc.getVisibleChanges("p", headRevisions.get(i), 
null).iterator().next().getValue();
             assertEquals(String.valueOf(numChanges - (i + 1)), value);
             assertTrue("too many calls for previous documents: " + 
prevDocCalls,
                     prevDocCalls.size() <= 3);
@@ -977,6 +996,48 @@ public class NodeDocumentTest {
         ns2.dispose();
     }
 
+    // OAK-11184 : tests correct handling of branch commit as the only
+    // revision left on the main doc (and it has split docs)
+    @Test
+    public void simpleClusterWithBranchCommit() throws Exception {
+        MemoryDocumentStore store = new MemoryDocumentStore();
+        DocumentNodeStore ns1 = createTestStore(store, 1, 0);
+        DocumentNodeStore ns2 = createTestStore(store, 2, 0);
+        ns1.runBackgroundOperations();
+        ns2.runBackgroundOperations();
+        String checkpointA = ns2.checkpoint(Long.MAX_VALUE);
+
+        // let's create an initial state for ns2 to grab a checkpoint with
+        NodeBuilder builder = ns1.getRoot().builder();
+        builder.setProperty("p", "initial");
+        merge(ns1, builder);
+        ns1.runBackgroundOperations();
+        ns2.runBackgroundOperations();
+        String checkpointB = ns2.checkpoint(Long.MAX_VALUE);
+        // let's create many revisions, more than NUM_REVS_THRESHOLD
+        for( int i = 0; i < NodeDocument.NUM_REVS_THRESHOLD; i++ ) {
+            builder = ns1.getRoot().builder();
+            builder.setProperty("p", i);
+            merge(ns1, builder);
+        }
+        // then the last one, let that be a branch commit
+        builder = ns1.getRoot().builder();
+        builder.setProperty("p", "last");
+        TestUtils.persistToBranch(builder);
+        merge(ns1, builder);
+        // now trigger the split and lastRev update
+        ns1.runBackgroundOperations();
+        // let ns2 also see everything
+        ns2.runBackgroundOperations();
+        assertEquals("last", 
ns2.getRoot().getProperty("p").getValue(Type.STRING));
+        ns2.getNodeCache().invalidateAll();
+        assertEquals("last", 
ns2.getRoot().getProperty("p").getValue(Type.STRING));
+        ns2.getNodeCache().invalidateAll();
+        assertFalse(ns2.retrieve(checkpointA).hasProperty("p"));
+        ns2.getNodeCache().invalidateAll();
+        assertEquals("initial", 
ns2.retrieve(checkpointB).getProperty("p").getValue(Type.STRING));
+    }
+
     @Test
     public void tooManyReadsOnGetVisibleChangesWithLongRunningBranchCommit() 
throws Exception {
         int numChanges = 843;
@@ -1034,7 +1095,7 @@ public class NodeDocumentTest {
 
         for (int i = 0; i < 20; i++) {
             prevDocCalls.clear();
-            String value = doc.getVisibleChanges("p", 
headRevisions.get(i)).iterator().next().getValue();
+            String value = doc.getVisibleChanges("p", headRevisions.get(i), 
null).iterator().next().getValue();
             assertEquals(String.valueOf(numChanges - (i + 1)), value);
             assertTrue("too many calls for previous documents ("
                             + prevDocCalls.size() + "): " + prevDocCalls,
@@ -1116,7 +1177,7 @@ public class NodeDocumentTest {
         for (int i = 0; i < 10; i++) {
             int idx = random.nextInt(numChanges);
             Revision r = Iterables.get(doc.getValueMap("p").keySet(), idx);
-            Iterable<Map.Entry<Revision, String>> revs = 
doc.getVisibleChanges("p", new RevisionVector(r));
+            Iterable<Map.Entry<Revision, String>> revs = 
doc.getVisibleChanges("p", new RevisionVector(r), null);
             assertEquals(idx, numChanges - Iterables.size(revs));
         }
         ns.dispose();
@@ -1137,8 +1198,8 @@ public class NodeDocumentTest {
         for (int i = 0; i < 10; i++) {
             int idx = random.nextInt(numChanges);
             RevisionVector r = headRevisions.get(idx);
-            Iterable<Map.Entry<Revision, String>> revs1 = 
doc.getVisibleChanges("p", r);
-            Iterable<Map.Entry<Revision, String>> revs2 = 
doc.getVisibleChanges("p", r);
+            Iterable<Map.Entry<Revision, String>> revs1 = 
doc.getVisibleChanges("p", r, null);
+            Iterable<Map.Entry<Revision, String>> revs2 = 
doc.getVisibleChanges("p", r, null);
             assertEquals(Iterables.size(revs1), Iterables.size(revs2));
             assertEquals(idx, numChanges - Iterables.size(revs1));
         }
@@ -1179,7 +1240,7 @@ public class NodeDocumentTest {
 
         doc = store.find(NODES, id);
         assertNotNull(doc);
-        for (Map.Entry<Revision, String> change : doc.getVisibleChanges("p", 
readRev)) {
+        for (Map.Entry<Revision, String> change : doc.getVisibleChanges("p", 
readRev, null)) {
             assertFalse(readRev.isRevisionNewer(change.getKey()));
         }
     }
@@ -1268,6 +1329,7 @@ public class NodeDocumentTest {
             throws Exception {
         DocumentNodeStore ns = new DocumentMK.Builder()
                 
.setDocumentStore(store).setCommitValueCacheSize(commitValueCacheSize)
+                
.setPrevNoPropCacheFeature(UtilsTest.createFeature(prevNoPropEnabled))
                 .setAsyncDelay(0).setClusterId(clusterId).getNodeStore();
         for (int i = 0; i < numChanges; i++) {
             NodeBuilder builder = ns.getRoot().builder();
diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentNodeStoreBuilderTest.java
 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentNodeStoreBuilderTest.java
index ab5d85bd0e..0c3f82f36b 100644
--- 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentNodeStoreBuilderTest.java
+++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentNodeStoreBuilderTest.java
@@ -114,6 +114,12 @@ public class MongoDocumentNodeStoreBuilderTest {
         assertNull(builder.getDocStoreEmbeddedVerificationFeature());
     }
 
+    @Test
+    public void getPrevNoPropCacheFeatureDisabled() {
+        MongoDocumentNodeStoreBuilder builder = new 
MongoDocumentNodeStoreBuilder();
+        assertNull(builder.getPrevNoPropCacheFeature());
+    }
+
     @Test
     public void collectionCompressionDisabled() {
         MongoDocumentNodeStoreBuilder builder = new 
MongoDocumentNodeStoreBuilder();
diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
index e5f90cc9e8..36fc0143ae 100644
--- 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
+++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
@@ -49,11 +49,14 @@ import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.toggle.Feature;
 import org.apache.jackrabbit.oak.stats.Clock;
+import org.jetbrains.annotations.Nullable;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.slf4j.event.Level;
 
+import static 
org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_MEMORY_CACHE_SIZE;
+import static 
org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE;
 import static 
org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder;
 import static 
org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentNodeStoreBuilder.newRDBDocumentNodeStoreBuilder;
 import static 
org.apache.jackrabbit.oak.plugins.document.util.Utils.isFullGCEnabled;
@@ -310,6 +313,45 @@ public class UtilsTest {
         assertFalse("Embedded Verification is disabled for RDB Document 
Store", embeddedVerificationEnabled);
     }
 
+    @Test
+    public void prevNoPropDisabledByDefault() {
+        assertPrevNoPropDisabled(newDocumentNodeStoreBuilder());
+    }
+
+    @Test
+    public void prevNoPropDisabled() {
+        assertPrevNoPropDisabled(newDocumentNodeStoreBuilder()
+                .setPrevNoPropCacheFeature(createFeature(false)));
+    }
+
+    private void assertPrevNoPropDisabled(DocumentNodeStoreBuilder<?> builder) 
{
+        assertNotNull(builder);
+        @Nullable
+        Feature feature = builder.getPrevNoPropCacheFeature();
+        if (feature != null) {
+            assertFalse(feature.isEnabled());
+        }
+        assertEquals(0, builder.getPrevNoPropCacheSize());
+        assertNull(builder.buildPrevNoPropCache());
+    }
+
+    @Test
+    public void prevNoPropEnabled() {
+        DocumentNodeStoreBuilder<?> b =
+                
newDocumentNodeStoreBuilder().setPrevNoPropCacheFeature(createFeature(true));
+        assertNotNull(b);
+        assertTrue(b.getPrevNoPropCacheFeature().isEnabled());
+        assertEquals(DEFAULT_MEMORY_CACHE_SIZE * 
DEFAULT_PREV_NO_PROP_CACHE_PERCENTAGE / 100,
+                b.getPrevNoPropCacheSize());
+        assertNotNull(b.buildPrevNoPropCache());
+    }
+
+    public static Feature createFeature(boolean enabled) {
+        Feature f = mock(Feature.class);
+        when(f.isEnabled()).thenReturn(enabled);
+        return f;
+    }
+
     @Test
     public void getDepthFromId() throws Exception{
         assertEquals(1, Utils.getDepthFromId("1:/x"));

Reply via email to