sammccall created this revision.
sammccall added reviewers: hokein, ilya-biryukov.
Herald added subscribers: cfe-commits, ioeric, jkorous-apple, klimek.

Don't actually start building ASTs for new revisions until either:

- 500ms have passed since the last revision, or
- we actually need the revision for something (or to unblock the queue)

In practice, this avoids the "first keystroke results in diagnostics" problem.
This is awkward to test directly, so the accompanying unit test is fairly coarse.
The debouncing can be observed nicely by capturing a trace, though.
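
For reviewers, here is a rough usage sketch of the new parameter. It mirrors the
unit test: testPath()/getInputs() are test helpers rather than TUScheduler API,
and the file contents are made up.

  // Construct a scheduler with an explicit 500ms debounce (also the default).
  TUScheduler S(getDefaultAsyncThreadsCount(),
                /*StorePreamblesInMemory=*/true,
                /*ASTParsedCallback=*/nullptr,
                /*UpdateDebounce=*/std::chrono::milliseconds(500));
  auto Path = testPath("foo.cpp");
  // With WantDiagnostics::Auto, this update is held for up to 500ms. If the
  // next update arrives first, it is dropped and its callback never fires.
  S.update(Path, getInputs(Path, "int x;"), WantDiagnostics::Auto,
           [](std::vector<DiagWithFixIts> Diags) { /* likely dropped */ });
  // This update supersedes the previous one and is built once the debounce
  // expires (or earlier if something needs the AST).
  S.update(Path, getInputs(Path, "int x; int y;"), WantDiagnostics::Auto,
           [](std::vector<DiagWithFixIts> Diags) { /* diagnostics arrive */ });

Reads, and updates that explicitly want diagnostics (WantDiagnostics::Yes), are
never delayed by the debounce.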


Repository:
  rCTE Clang Tools Extra

https://reviews.llvm.org/D43648

Files:
  clangd/TUScheduler.cpp
  clangd/TUScheduler.h
  unittests/clangd/TUSchedulerTests.cpp

Index: unittests/clangd/TUSchedulerTests.cpp
===================================================================
--- unittests/clangd/TUSchedulerTests.cpp
+++ unittests/clangd/TUSchedulerTests.cpp
@@ -118,6 +118,28 @@
   EXPECT_EQ(2, CallbackCount);
 }
 
+TEST_F(TUSchedulerTests, Debounce) {
+  std::atomic<int> CallbackCount(0);
+  {
+    TUScheduler S(getDefaultAsyncThreadsCount(),
+                  /*StorePreamblesInMemory=*/true,
+                  /*ASTParsedCallback=*/nullptr,
+                  /*UpdateDebounce=*/std::chrono::milliseconds(50));
+    auto Path = testPath("foo.cpp");
+    S.update(Path, getInputs(Path, "auto (debounced)"), WantDiagnostics::Auto,
+             [&](std::vector<DiagWithFixIts> Diags) {
+               ADD_FAILURE() << "auto should have been debounced and canceled";
+             });
+    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+    S.update(Path, getInputs(Path, "auto (timed out)"), WantDiagnostics::Auto,
+             [&](std::vector<DiagWithFixIts> Diags) { ++CallbackCount; });
+    std::this_thread::sleep_for(std::chrono::milliseconds(60));
+    S.update(Path, getInputs(Path, "auto (shut down)"), WantDiagnostics::Auto,
+             [&](std::vector<DiagWithFixIts> Diags) { ++CallbackCount; });
+  }
+  EXPECT_EQ(2, CallbackCount);
+}
+
 TEST_F(TUSchedulerTests, ManyUpdates) {
   const int FilesCount = 3;
   const int UpdatesPerFile = 10;
Index: clangd/TUScheduler.h
===================================================================
--- clangd/TUScheduler.h
+++ clangd/TUScheduler.h
@@ -17,6 +17,7 @@
 
 namespace clang {
 namespace clangd {
+
 /// Returns a number of a default async threads to use for TUScheduler.
 /// Returned value is always >= 1 (i.e. will not cause requests to be processed
 /// synchronously).
@@ -46,10 +47,13 @@
 /// and scheduling tasks.
 /// Callbacks are run on a threadpool and it's appropriate to do slow work in
 /// them. Each task has a name, used for tracing (should be UpperCamelCase).
+/// FIXME(sammccall): pull out a scheduler options struct.
 class TUScheduler {
 public:
   TUScheduler(unsigned AsyncThreadsCount, bool StorePreamblesInMemory,
-              ASTParsedCallback ASTCallback);
+              ASTParsedCallback ASTCallback,
+              std::chrono::steady_clock::duration UpdateDebounce =
+                  std::chrono::milliseconds(500));
   ~TUScheduler();
 
   /// Returns estimated memory usage for each of the currently open files.
@@ -101,6 +105,7 @@
   // asynchronously.
   llvm::Optional<AsyncTaskRunner> PreambleTasks;
   llvm::Optional<AsyncTaskRunner> WorkerThreads;
+  std::chrono::steady_clock::duration UpdateDebounce;
 };
 } // namespace clangd
 } // namespace clang
Index: clangd/TUScheduler.cpp
===================================================================
--- clangd/TUScheduler.cpp
+++ clangd/TUScheduler.cpp
@@ -54,6 +54,7 @@
 
 namespace clang {
 namespace clangd {
+using std::chrono::steady_clock;
 namespace {
 class ASTWorkerHandle;
 
@@ -69,17 +70,18 @@
 /// worker.
 class ASTWorker {
   friend class ASTWorkerHandle;
-  ASTWorker(llvm::StringRef File, Semaphore &Barrier, CppFile AST,
-            bool RunSync);
+  ASTWorker(llvm::StringRef File, Semaphore &Barrier, CppFile AST, bool RunSync,
+            steady_clock::duration UpdateDebounce);
 
 public:
   /// Create a new ASTWorker and return a handle to it.
   /// The processing thread is spawned using \p Tasks. However, when \p Tasks
   /// is null, all requests will be processed on the calling thread
   /// synchronously instead. \p Barrier is acquired when processing each
   /// request, it is be used to limit the number of actively running threads.
   static ASTWorkerHandle Create(llvm::StringRef File, AsyncTaskRunner *Tasks,
-                                Semaphore &Barrier, CppFile AST);
+                                Semaphore &Barrier, CppFile AST,
+                                steady_clock::duration UpdateDebounce);
   ~ASTWorker();
 
   void update(ParseInputs Inputs, WantDiagnostics,
@@ -101,12 +103,19 @@
   /// Adds a new task to the end of the request queue.
   void startTask(llvm::StringRef Name, UniqueFunction<void()> Task,
                  llvm::Optional<WantDiagnostics> UpdateType);
+  /// Determines the next action to perform.
+  /// All actions that should never run are discarded.
+  /// If the next action is ready, returns None to indicate this.
+  /// Otherwise returns a deadline for when it should be ready.
+  /// scheduleLocked() is called again at the deadline, or if requests arrive.
+  llvm::Optional<Deadline> scheduleLocked();
   /// Should the first task in the queue be skipped instead of run?
   bool shouldSkipHeadLocked() const;
 
   struct Request {
     UniqueFunction<void()> Action;
     std::string Name;
+    steady_clock::time_point AddTime;
     Context Ctx;
     llvm::Optional<WantDiagnostics> UpdateType;
   };
@@ -127,6 +136,8 @@
   bool Done;                           /* GUARDED_BY(Mutex) */
   std::deque<Request> Requests;        /* GUARDED_BY(Mutex) */
   mutable std::condition_variable RequestsCV;
+  // Time to wait after an update to see whether another update obsoletes it.
+  steady_clock::duration UpdateDebounce;
 };
 
 /// A smart-pointer-like class that points to an active ASTWorker.
@@ -172,20 +183,21 @@
 };
 
 ASTWorkerHandle ASTWorker::Create(llvm::StringRef File, AsyncTaskRunner *Tasks,
-                                  Semaphore &Barrier, CppFile AST) {
-  std::shared_ptr<ASTWorker> Worker(
-      new ASTWorker(File, Barrier, std::move(AST), /*RunSync=*/!Tasks));
+                                  Semaphore &Barrier, CppFile AST,
+                                  steady_clock::duration UpdateDebounce) {
+  std::shared_ptr<ASTWorker> Worker(new ASTWorker(
+      File, Barrier, std::move(AST), /*RunSync=*/!Tasks, UpdateDebounce));
   if (Tasks)
     Tasks->runAsync("worker:" + llvm::sys::path::filename(File),
                     [Worker]() { Worker->run(); });
 
   return ASTWorkerHandle(std::move(Worker));
 }
 
 ASTWorker::ASTWorker(llvm::StringRef File, Semaphore &Barrier, CppFile AST,
-                     bool RunSync)
+                     bool RunSync, steady_clock::duration UpdateDebounce)
     : File(File), RunSync(RunSync), Barrier(Barrier), AST(std::move(AST)),
-      Done(false) {
+      Done(false), UpdateDebounce(UpdateDebounce) {
   if (RunSync)
     return;
 }
@@ -275,8 +287,8 @@
   {
     std::lock_guard<std::mutex> Lock(Mutex);
     assert(!Done && "running a task after stop()");
-    Requests.push_back(
-        {std::move(Task), Name, Context::current().clone(), UpdateType});
+    Requests.push_back({std::move(Task), Name, steady_clock::now(),
+                        Context::current().clone(), UpdateType});
   }
   RequestsCV.notify_all();
 }
@@ -286,17 +298,33 @@
     Request Req;
     {
       std::unique_lock<std::mutex> Lock(Mutex);
-      RequestsCV.wait(Lock, [&]() { return Done || !Requests.empty(); });
-      if (Requests.empty()) {
-        assert(Done);
-        return;
+      while (auto Deadline = scheduleLocked()) {
+        if (Done) {
+          if (Requests.empty())
+            return;
+          else     // Even though Done is set, finish pending requests.
+            break; // However, skip delays to shut down fast.
+        }
+
+        // Tracing: we have a next request, attribute this sleep to it.
+        Optional<WithContext> Ctx;
+        Optional<trace::Span> Tracer;
+        if (!Requests.empty()) {
+          Ctx.emplace(Requests.front().Ctx.clone());
+          Tracer.emplace("Debounce");
+          SPAN_ATTACH(*Tracer, "next_request", Requests.front().Name);
+          if (*Deadline)
+            SPAN_ATTACH(*Tracer, "sleep_ms",
+                        std::chrono::duration_cast<std::chrono::milliseconds>(
+                            **Deadline - steady_clock::now())
+                            .count());
+        }
+
+        if (*Deadline)
+          RequestsCV.wait_until(Lock, **Deadline);
+        else
+          RequestsCV.wait(Lock);
       }
-      // Even when Done is true, we finish processing all pending requests
-      // before exiting the processing loop.
-
-      while (shouldSkipHeadLocked())
-        Requests.pop_front();
-      assert(!Requests.empty() && "skipped the whole queue");
       Req = std::move(Requests.front());
       // Leave it on the queue for now, so waiters don't see an empty queue.
     } // unlock Mutex
@@ -316,6 +344,26 @@
   }
 }
 
+Optional<Deadline> ASTWorker::scheduleLocked() {
+  if (Requests.empty())
+    return Deadline(); // Wait forever for new requests.
+  while (shouldSkipHeadLocked())
+    Requests.pop_front();
+  assert(!Requests.empty() && "skipped the whole queue");
+  // Some updates aren't dead yet, but never end up being used.
+  // e.g. the first keystroke is live until obsoleted by the second.
+  // We debounce "maybe-unused" writes, sleeping 500ms in case they become dead.
+  // But don't delay reads (including updates where diagnostics are needed).
+  for (const auto &R : Requests)
+    if (R.UpdateType == None || R.UpdateType == WantDiagnostics::Yes)
+      return None;
+  // Front request needs to be debounced, so determine when we're ready.
+  Deadline D(Requests.front().AddTime + UpdateDebounce);
+  if (*D <= steady_clock::now())
+    return None;
+  return D;
+}
+
 // Returns true if Requests.front() is a dead update that can be skipped.
 bool ASTWorker::shouldSkipHeadLocked() const {
   assert(!Requests.empty());
@@ -369,10 +417,12 @@
 
 TUScheduler::TUScheduler(unsigned AsyncThreadsCount,
                          bool StorePreamblesInMemory,
-                         ASTParsedCallback ASTCallback)
+                         ASTParsedCallback ASTCallback,
+                         steady_clock::duration UpdateDebounce)
     : StorePreamblesInMemory(StorePreamblesInMemory),
       PCHOps(std::make_shared<PCHContainerOperations>()),
-      ASTCallback(std::move(ASTCallback)), Barrier(AsyncThreadsCount) {
+      ASTCallback(std::move(ASTCallback)), Barrier(AsyncThreadsCount),
+      UpdateDebounce(UpdateDebounce) {
   if (0 < AsyncThreadsCount) {
     PreambleTasks.emplace();
     WorkerThreads.emplace();
@@ -408,7 +458,8 @@
     // Create a new worker to process the AST-related tasks.
     ASTWorkerHandle Worker = ASTWorker::Create(
         File, WorkerThreads ? WorkerThreads.getPointer() : nullptr, Barrier,
-        CppFile(File, StorePreamblesInMemory, PCHOps, ASTCallback));
+        CppFile(File, StorePreamblesInMemory, PCHOps, ASTCallback),
+        UpdateDebounce);
     FD = std::unique_ptr<FileData>(new FileData{Inputs, std::move(Worker)});
   } else {
     FD->Inputs = Inputs;