jpountz commented on a change in pull request #916: LUCENE-8213: Asynchronous 
Caching in LRUQueryCache
URL: https://github.com/apache/lucene-solr/pull/916#discussion_r334586442
 
 

 ##########
 File path: lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java
 ##########
 @@ -1691,4 +1964,184 @@ public void testBulkScorerLocking() throws Exception {
     t.start();
     t.join();
   }
+
+  // Verifies that a query still executes and ends up cached even when the
+  // searcher's ExecutorService rejects every submitted task (presumably the
+  // caching then happens on the caller thread — confirm in LRUQueryCache).
+  public void testRejectedExecution() throws IOException {
+    // Executor whose submit/execute always throws RejectedExecutionException.
+    ExecutorService service = new TestIndexSearcher.RejectingMockExecutor();
+    Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+
+    // The same Document/StringField instance is reused for each added doc.
+    Document doc = new Document();
+    StringField f = new StringField("color", "blue", Store.NO);
+    doc.add(f);
+    w.addDocument(doc);
+    f.setStringValue("red");
+    w.addDocument(doc);
+    f.setStringValue("green");
+    w.addDocument(doc);
+    final DirectoryReader reader = w.getReader();
+
+    final Query red = new TermQuery(new Term("color", "red"));
+
+    IndexSearcher searcher = new IndexSearcher(reader, service);
+
+    // Cache everything so the search below must attempt to cache the query.
+    final LRUQueryCache queryCache = new LRUQueryCache(2, 100000, context -> 
true);
+
+    searcher.setQueryCache(queryCache);
+    searcher.setQueryCachingPolicy(ALWAYS_CACHE);
+
+    // To ensure that failing ExecutorService still allows query to run
+    // successfully
+
+    searcher.search(new ConstantScoreQuery(red), 1);
+    // The query must have been cached despite the rejecting executor.
+    assertEquals(Collections.singletonList(red), queryCache.cachedQueries());
+
+    reader.close();
+    w.close();
+    dir.close();
+    service.shutdown();
+  }
+
+  // Simulates the index readers being closed while a search that runs one
+  // slice per leaf is in flight, and expects the search to fail with
+  // AlreadyClosedException.
+  public void testClosedReaderExecution() throws IOException {
+    // Executor that blocks task execution until the latch is released below.
+    CountDownLatch latch = new CountDownLatch(1);
+    ExecutorService service = new BlockedMockExecutor(latch);
+
+    Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+
+    // Commit every 10 iterations so the index ends up with several segments
+    // (and therefore several leaves/slices for the searcher).
+    for (int i = 0; i < 100; i++) {
+      Document doc = new Document();
+      StringField f = new StringField("color", "blue", Store.NO);
+      doc.add(f);
+      w.addDocument(doc);
+      f.setStringValue("red");
+      w.addDocument(doc);
+      f.setStringValue("green");
+      w.addDocument(doc);
+
+      if (i % 10 == 0) {
+        w.commit();
+      }
+    }
+
+    final DirectoryReader reader = w.getReader();
+
+    final Query red = new TermQuery(new Term("color", "red"));
+
+    // One slice per leaf, so every leaf is searched through the executor.
+    IndexSearcher searcher = new IndexSearcher(reader, service) {
+      @Override
+      protected LeafSlice[] slices(List<LeafReaderContext> leaves) {
+        ArrayList<LeafSlice> slices = new ArrayList<>();
+        for (LeafReaderContext ctx : leaves) {
+          slices.add(new LeafSlice(Arrays.asList(ctx)));
+        }
+        return slices.toArray(LeafSlice[]::new);
+      }
+    };
+
+    final LRUQueryCache queryCache = new LRUQueryCache(2, 100000, context -> 
true);
+
+    searcher.setQueryCache(queryCache);
+    searcher.setQueryCachingPolicy(ALWAYS_CACHE);
+
+    // To ensure that failing ExecutorService still allows query to run
+    // successfully
+
+    // Separate pool whose task closes the leaf readers (and the top reader)
+    // shortly after the search below starts.
+    ExecutorService tempService = new ThreadPoolExecutor(2, 2, 0L, 
TimeUnit.MILLISECONDS,
+        new LinkedBlockingQueue<Runnable>(),
+        new NamedThreadFactory("TestLRUQueryCache"));
+
+    tempService.submit(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(100);
+          List<LeafReaderContext> leaves = searcher.leafContexts;
+
+          for (LeafReaderContext leafReaderContext : leaves) {
+            leafReaderContext.reader().close();
+          }
+
+          reader.close();
+        } catch (Exception e) {
+          // NOTE(review): wrapping only e.getMessage() drops the original
+          // exception and its stack trace; prefer new RuntimeException(e).
+          throw new RuntimeException(e.getMessage());
+        }
+
+        // Unblock the BlockedMockExecutor once the readers are closed.
+        latch.countDown();
+
+      }
+    });
+
+    // Searching over the now-closed readers must fail.
+    expectThrows(AlreadyClosedException.class, () -> searcher.search(new 
ConstantScoreQuery(red), 1));
 
 Review comment:
   Actually, the interesting case is the one where the search succeeds but the 
caching fails — can we simulate that case? Maybe we could create a searcher 
with a single leaf to force IndexSearcher to run the search on the current 
thread, so that we would know that the only thing running in the thread pool 
is the caching? Or maybe we could wrap the cache in order to force it to use a 
special BlockedMockExecutor instead of the Executor of the IndexSearcher?

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@lucene.apache.org
For additional commands, e-mail: issues-help@lucene.apache.org

Reply via email to