Revision: 55378
Author:   aschrijvers
Date:     2015-08-25 14:31:10 +0200 (Tue, 25 Aug 2015)
Log Message:
-----------
REPO-1322 Forward port 10.1: In svn commit 40763 for issue REPO_707, I added an
extra check in CachingMultiReaderQueryFilter#getIndexReaderDocIdSet to detect
whether docIds had been *deleted* in a cached index reader: in that case the
index reader instance itself was unchanged, so the faceted navigation cache
would not be invalidated. Later in REPO_707, however, I removed the caching in
the faceted navigation cache that relied on the index reader instance, which
makes it possible to remove commit 40763. The problem with 40763 is that when a
docId gets deleted in an old index, the authorization query for that index is
recomputed. *However*, this should not be necessary, because the index reader
(a ReadOnlyIndexReader) is always combined, whenever it is used, with a
separate 'deleted' bitset that keeps track of deleted doc ids.
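
For readers following the change, below is a minimal standalone sketch (not part
of this commit; the class and method names are illustrative) of the caching
pattern that getIndexReaderDocIdSet now follows: a cache keyed on index reader
*identity*, a cheap unsynchronized lookup on the hit path, and a synchronized
re-check so the bitset is computed at most once per reader. The IdentityHashMap
below merely stands in for Lucene's WeakIdentityMap used in the real code.

    // Hypothetical sketch, not the Hippo implementation: per-reader bitset caching
    // with a double-checked lookup, mirroring the pattern introduced in this commit.
    import java.util.Collections;
    import java.util.IdentityHashMap;
    import java.util.Map;
    import java.util.function.Function;

    final class PerReaderCache<R, B> {

        // identity-keyed map; stand-in for Lucene's WeakIdentityMap.newConcurrentHashMap()
        private final Map<R, B> cache = Collections.synchronizedMap(new IdentityHashMap<>());

        B get(final R reader, final Function<R, B> computeBitSet) {
            B bits = cache.get(reader);        // fast path: cache hits need no extra locking
            if (bits != null) {
                return bits;
            }
            synchronized (this) {
                bits = cache.get(reader);      // try again after obtaining the lock
                if (bits == null) {
                    bits = computeBitSet.apply(reader);  // expensive: run the query once per reader
                    cache.put(reader, bits);
                }
                return bits;
            }
        }
    }

Because deletions are applied through the separate 'deleted' bitset when the
reader is used, a cached bitset for an unchanged reader instance stays valid,
which is exactly why the numDocs validity check from commit 40763 can be dropped.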

Modified Paths:
--------------
    hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/FacetedNavigationEngineImpl.java
    hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/query/lucene/ServicingSearchIndex.java
    hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/query/lucene/util/CachingMultiReaderQueryFilter.java

Modified: hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/FacetedNavigationEngineImpl.java
===================================================================
--- hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/FacetedNavigationEngineImpl.java    2015-08-25 12:26:47 UTC (rev 55377)
+++ hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/FacetedNavigationEngineImpl.java    2015-08-25 12:31:10 UTC (rev 55378)
@@ -1,5 +1,5 @@
 /*
- *  Copyright 2008-2013 Hippo B.V. (http://www.onehippo.com)
+ *  Copyright 2008-2015 Hippo B.V. (http://www.onehippo.com)
  * 
  *  Licensed under the Apache License, Version 2.0 (the "License");
  *  you may not use this file except in compliance with the License.
@@ -353,13 +353,13 @@
             SetDocIdSetBuilder matchingDocsSetBuilder = new SetDocIdSetBuilder();
 
             BooleanQuery facetsQuery = new FacetsQuery(facetsQueryList, nsMappings).getQuery();
-            matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetsQuery, indexReader));
+            matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetsQuery, indexReader, contextImpl));
 
             BooleanQuery facetRangeQuery = new FacetRangeQuery(rangeQuery, nsMappings, this).getQuery();
-            matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetRangeQuery, indexReader));
+            matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetRangeQuery, indexReader, contextImpl));
 
             BooleanQuery inheritedFilterQuery = new InheritedFilterQuery(inheritedFilter, nsMappings).getQuery();
-            matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(inheritedFilterQuery, indexReader));
+            matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(inheritedFilterQuery, indexReader, contextImpl));
 
             org.apache.lucene.search.Query initialLuceneQuery = null;
             if (initialQuery != null && initialQuery.scopes != null && initialQuery.scopes.length > 0) {
@@ -372,7 +372,7 @@
                     }
                 }
             }
-            matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(initialLuceneQuery, indexReader));
+            matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(initialLuceneQuery, indexReader, contextImpl));
 
             FacetFiltersQuery facetFiltersQuery = null;
             if (initialQuery != null && initialQuery.facetFilters != null) {
@@ -400,7 +400,7 @@
                    // Not a search involving scoring, thus compute bitsets for facetFiltersQuery & freeSearchInjectedSort
                    if (facetFiltersQuery != null) {
                        if (facetFiltersQuery.isPlainLuceneQuery()) {
-                           matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetFiltersQuery.getQuery(), indexReader));
+                           matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetFiltersQuery.getQuery(), indexReader, contextImpl));
                        } else {
                            
matchingDocsSetBuilder.add(filterDocIdSetJackRabbitQuery(facetFiltersQuery.getQuery(),
 indexReader));
                        }
@@ -440,7 +440,7 @@
                      * facetPropExists: the node must have the property as facet
                      */
 
-                    matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(new FacetPropExistsQuery(propertyName).getQuery(), indexReader));
+                    matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(new FacetPropExistsQuery(propertyName).getQuery(), indexReader, contextImpl));
 
                     matchingDocs = matchingDocsSetBuilder.toBitSet();
                     cardinality = (int) matchingDocs.cardinality();
@@ -478,7 +478,7 @@
                 if (!hitsRequested.isResultRequested()) {
                     // No search with SCORING involved, this everything can be done with BitSet's
                     if (facetFiltersQuery != null && facetFiltersQuery.getQuery().clauses().size() > 0) {
-                        matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetFiltersQuery.getQuery(), indexReader));
+                        matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetFiltersQuery.getQuery(), indexReader, contextImpl));
                     }
                     
                     if (openQuery != null) {
@@ -571,7 +571,7 @@
                         } else {
                             // because we have at least one explicit sort, scoring can be skipped. We can use cached bitsets combined with a match all query
                             if (facetFiltersQuery != null) {
-                                matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetFiltersQuery.getQuery(), indexReader));
+                                matchingDocsSetBuilder.add(filterDocIdSetPlainLuceneQuery(facetFiltersQuery.getQuery(), indexReader, contextImpl));
                             }
                             if (openQuery != null) {
                                 QueryAndSort queryAndSort = openQuery.getLuceneQueryAndSort(contextImpl);
@@ -805,19 +805,18 @@
 
 
     private DocIdSet filterDocIdSetPlainLuceneQuery(final org.apache.lucene.search.Query query,
-                                    final IndexReader indexReader) throws IOException {
+                                                    final IndexReader indexReader, final ContextImpl contextImpl) throws IOException {
         if ((query instanceof BooleanQuery) && ((BooleanQuery)query).clauses().size() == 0) {
             // no constraints. Return null
             return null;
         }
         Filter queryFilter = filterCache.getIfPresent(query);
-        if (queryFilter != null) {
+        if (queryFilter == null) {
+            queryFilter = new CachingMultiReaderQueryFilter(query, contextImpl.session.getUserID());
+            filterCache.put(query, queryFilter);
+        } else {
             log.debug("For query '{}' getting queryFilter from cache", query);
         }
-        else {
-            queryFilter = new CachingMultiReaderQueryFilter(query);
-            filterCache.put(query, queryFilter);
-        }
         return queryFilter.getDocIdSet(indexReader);
     }
 

Modified: hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/query/lucene/ServicingSearchIndex.java
===================================================================
--- hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/query/lucene/ServicingSearchIndex.java    2015-08-25 12:26:47 UTC (rev 55377)
+++ hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/query/lucene/ServicingSearchIndex.java    2015-08-25 12:31:10 UTC (rev 55378)
@@ -1,5 +1,5 @@
 /*
- *  Copyright 2008-2013 Hippo B.V. (http://www.onehippo.com)
+ *  Copyright 2008-2015 Hippo B.V. (http://www.onehippo.com)
  *
  *  Licensed under the Apache License, Version 2.0 (the "License");
  *  you may not use this file except in compliance with the License.
@@ -175,7 +175,7 @@
             // the same filter twice or more: This only happens for the first unique userID or after a change in
             // authorization. Any way, storing it needlessly twice or more only for the same userID under concurrency is
             // much preferable over introducing synchronization
-            filter = new CachingMultiReaderQueryFilter(query);
+            filter = new CachingMultiReaderQueryFilter(query, session.getUserID());
             cache.put(session.getUserID(), filter);
         }
         return filter;

Modified: hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/query/lucene/util/CachingMultiReaderQueryFilter.java
===================================================================
--- hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/query/lucene/util/CachingMultiReaderQueryFilter.java    2015-08-25 12:26:47 UTC (rev 55377)
+++ hippo-cms7/repository/trunk/engine/src/main/java/org/hippoecm/repository/query/lucene/util/CachingMultiReaderQueryFilter.java    2015-08-25 12:31:10 UTC (rev 55378)
@@ -1,5 +1,5 @@
 /*
- *  Copyright 2012-2013 Hippo B.V. (http://www.onehippo.com)
+ *  Copyright 2012-2015 Hippo B.V. (http://www.onehippo.com)
  *
  *  Licensed under the Apache License, Version 2.0 (the "License");
  *  you may not use this file except in compliance with the License.
@@ -33,16 +33,19 @@
 
     private static final Logger log = LoggerFactory.getLogger(CachingMultiReaderQueryFilter.class);
 
-    private final WeakIdentityMap<IndexReader, ValidityBitSet> cache = WeakIdentityMap.newConcurrentHashMap();
+    private final WeakIdentityMap<IndexReader, OpenBitSet> cache = WeakIdentityMap.newConcurrentHashMap();
 
     private final Query query;
+    // userId of the jcr session triggering this CachingMultiReaderQueryFilter : Required only for logging purposes
+    private final String userId;
 
     /**
      * @param query only plain Lucene queries are allowed here, as Jackrabbit Query implementations are very specific,
      *              keep references to index readers, need multi index readers, etc etc
      */
-    public CachingMultiReaderQueryFilter(final Query query) {
+    public CachingMultiReaderQueryFilter(final Query query, final String userId) {
         this.query = query;
+        this.userId = userId;
     }
 
     public Query getQuery() {
@@ -59,39 +62,38 @@
             int[] maxDocs = new int[indexReaders.length];
             for (int i = 0; i < indexReaders.length; i++) {
                 IndexReader subReader = indexReaders[i];
-                docIdSets[i] = getIndexReaderDocIdSet(subReader, subReader);
+                docIdSets[i] = getIndexReaderDocIdSet(subReader);
                 maxDocs[i] = subReader.maxDoc();
             }
 
             return new MultiDocIdSet(docIdSets, maxDocs);
         }
-        log.warn("MultiIndexReader was expected but not found. Do not dissect the reader but use it as one instead");
+        log.warn("IndexReader of type MultiIndexReader expected for userId '{}' but reader of type '{}' was found. " +
+                "Do not dissect the reader but use it as is.", userId, reader.getClass().getName());
 
-        return getIndexReaderDocIdSet(reader, reader);
+        return getIndexReaderDocIdSet(reader);
     }
 
-    private DocIdSet getIndexReaderDocIdSet(final IndexReader reader, IndexReader cacheKey) throws IOException {
+    private DocIdSet getIndexReaderDocIdSet(final IndexReader reader) throws IOException {
 
-        ValidityBitSet validityBitSet = cache.get(cacheKey);
-        if (validityBitSet != null) {
-            // unfortunately, Jackrabbit can return a ReadOnlyIndexReader which is the same instance as used previously,
-            // but still happened to be changed through it's ***deleted*** bitset : This is a optimisation.
-            // See AbstractIndex#getReadOnlyIndexReader. This is why even though we use an IDENTITY as cachekey, we
-            // now still need to check whether the cached bit set is really still valid. We can only do this by checking
-            // numDocs as when a doc id gets deleted in the ReadOnlyIndexReader, numDocs decreases
-            if (reader.numDocs() == validityBitSet.numDocs) {
-                log.debug("Return cached bitSet for reader '{}'", reader);
-                return validityBitSet.bitSet;
-            } else {
-                log.debug("ReadOnlyIndexReader '{}' deleted bitset got changed. Cached entry not valid any more", reader);
-                cache.remove(cacheKey);
+        OpenBitSet docIdSet = cache.get(reader);
+        if (docIdSet != null) {
+            log.debug("For userId '{}' return cached bitSet for reader '{}'", userId, reader);
+            return docIdSet;
             }
+        synchronized (this) {
+            // try again after obtaining the lock
+            docIdSet = cache.get(reader);
+            if (docIdSet != null) {
+                log.debug("For userId '{}' return cached bitSet for reader '{}'", userId, reader);
+                return docIdSet;
         }
-        // no synchronization needed: worst case scenario two concurrent thread do it both
-        OpenBitSet docIdSet = createDocIdSet(reader);
-        cache.put(cacheKey, new ValidityBitSet(reader.numDocs(), docIdSet));
+            log.debug("For userId '{}' could not find a cached bitSet for reader '{}'", userId, reader);
+            docIdSet = createDocIdSet(reader);
+            cache.put(reader, docIdSet);
         return docIdSet;
     }
+    }
 
     private OpenBitSet createDocIdSet(IndexReader reader) throws IOException {
         final OpenBitSet bits = new OpenBitSet(reader.maxDoc());
@@ -98,13 +100,13 @@
 
         long start = System.currentTimeMillis();
         new IndexSearcher(reader).search(query, new AbstractHitCollector() {
-
             @Override
             public final void collect(int doc, float score) {
                 bits.set(doc);  // set bit for hit
             }
         });
-        log.info("Creating CachingMultiReaderQueryFilter doc id set took {} ms.", String.valueOf(System.currentTimeMillis() - start));
+        log.info("For userId '{}', creating CachingMultiReaderQueryFilter doc id set took {} ms.", userId,
+                String.valueOf(System.currentTimeMillis() - start));
         return bits;
     }
 

_______________________________________________
Hippocms-svn mailing list
Hippocms-svn@lists.onehippo.org
https://lists.onehippo.org/mailman/listinfo/hippocms-svn
