aaronpuchert updated this revision to Diff 162562.
aaronpuchert added a comment.

Use Locked flag to determine whether to unlock on destruction

Instead of unlocking whichever underlying mutexes are still held on destruction
and silently ignoring those that aren't, we now use the status of the scoped
capability itself to decide whether to unlock the underlying mutexes.

This way we attempt to unlock the underlying mutexes even if the user has
unlocked them manually, warning when one is no longer held, and we do not
attempt to unlock once the scoped capability has been released, so a mutex that
is manually acquired again is reported as still held at the end of the function.
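
For illustration, a minimal sketch of the two situations this changes, modeled
on the added tests (test6/test7) and assuming ReleasableMutexLock is a scoped
capability wrapping mu_ as declared in warn-thread-safety-analysis.cpp; the
function names here are only illustrative:

  void manualUnlockThenDestroy() {
    ReleasableMutexLock rlock(&mu_);
    mu_.Unlock();    // release the underlying mutex directly
  } // warning: releasing mutex 'mu_' that was not held
    // (rlock was never released, so its destructor still tries to unlock)

  void releaseThenManualLock() {
    ReleasableMutexLock rlock(&mu_);
    rlock.Release();
    mu_.Lock();      // reacquire the underlying mutex manually
  } // warning: mutex 'mu_' is still held at the end of function
    // (rlock was already released, so its destructor does not unlock)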


Repository:
  rC Clang

https://reviews.llvm.org/D51187

Files:
  lib/Analysis/ThreadSafety.cpp
  test/SemaCXX/warn-thread-safety-analysis.cpp

Index: test/SemaCXX/warn-thread-safety-analysis.cpp
===================================================================
--- test/SemaCXX/warn-thread-safety-analysis.cpp
+++ test/SemaCXX/warn-thread-safety-analysis.cpp
@@ -1729,7 +1729,7 @@
     MutexLock mulock_a(&mu1);
     MutexLock mulock_b(&mu1); // \
       // expected-warning {{acquiring mutex 'mu1' that is already held}}
-  }
+  } // expected-warning {{releasing mutex 'mu1' that was not held}}
 
   void foo4() {
     MutexLock mulock1(&mu1), mulock2(&mu2);
@@ -2580,6 +2580,8 @@
   void test3();
   void test4();
   void test5();
+  void test6();
+  void test7();
 };
 
 
@@ -2605,18 +2607,29 @@
 void Foo::test4() {
   ReleasableMutexLock rlock(&mu_);
   rlock.Release();
-  rlock.Release();  // expected-warning {{releasing mutex 'mu_' that was not held}}
+  rlock.Release();  // expected-warning {{releasing mutex 'rlock' that was not held}}
 }
 
 void Foo::test5() {
   ReleasableMutexLock rlock(&mu_);
   if (c) {
     rlock.Release();
   }
   // no warning on join point for managed lock.
-  rlock.Release();  // expected-warning {{releasing mutex 'mu_' that was not held}}
+  rlock.Release();  // expected-warning {{releasing mutex 'rlock' that was not held}}
 }
 
+void Foo::test6() {
+  ReleasableMutexLock rlock(&mu_);
+  mu_.Unlock();
+} // expected-warning {{releasing mutex 'mu_' that was not held}}
+
+void Foo::test7() {
+  ReleasableMutexLock rlock(&mu_);
+  rlock.Release();
+  mu_.Lock(); // expected-note {{mutex acquired here}}
+} // expected-warning {{mutex 'mu_' is still held at the end of function}}
+
 
 } // end namespace ReleasableScopedLock
 
@@ -2704,37 +2717,33 @@
 void doubleUnlock() {
   RelockableExclusiveMutexLock scope(&mu);
   scope.Unlock();
-  scope.Unlock(); // expected-warning {{releasing mutex 'mu' that was not held}}
+  scope.Unlock(); // expected-warning {{releasing mutex 'scope' that was not held}}
 }
 
 void doubleLock1() {
   RelockableExclusiveMutexLock scope(&mu);
-  scope.Lock(); // expected-warning {{acquiring mutex 'mu' that is already held}}
+  scope.Lock(); // expected-warning {{acquiring mutex 'scope' that is already held}}
 }
 
 void doubleLock2() {
   RelockableExclusiveMutexLock scope(&mu);
   scope.Unlock();
   scope.Lock();
-  scope.Lock(); // expected-warning {{acquiring mutex 'mu' that is already held}}
+  scope.Lock(); // expected-warning {{acquiring mutex 'scope' that is already held}}
 }
 
 void directUnlock() {
   RelockableExclusiveMutexLock scope(&mu);
   mu.Unlock();
-  // Debatable that there is no warning. Currently we don't track in the scoped
-  // object whether it is active, but just check if the contained locks can be
-  // reacquired. Here they can, because mu has been unlocked manually.
-  scope.Lock();
-}
+  scope.Lock(); // expected-warning {{acquiring mutex 'scope' that is already held}}
+} // expected-warning {{releasing mutex 'mu' that was not held}}
 
 void directRelock() {
   RelockableExclusiveMutexLock scope(&mu);
   scope.Unlock();
-  mu.Lock();
-  // Similarly debatable that there is no warning.
-  scope.Unlock();
-}
+  mu.Lock(); // expected-note {{mutex acquired here}}
+  scope.Unlock(); // expected-warning {{releasing mutex 'scope' that was not held}}
+} // expected-warning {{mutex 'mu' is still held at the end of function}}
 
 // Doesn't make a lot of sense, just making sure there is no crash.
 void destructLock() {
Index: lib/Analysis/ThreadSafety.cpp
===================================================================
--- lib/Analysis/ThreadSafety.cpp
+++ lib/Analysis/ThreadSafety.cpp
@@ -144,11 +144,11 @@
                                 ThreadSafetyHandler &Handler) const = 0;
   virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                           const FactEntry &entry, ThreadSafetyHandler &Handler,
-                          StringRef DiagKind) const = 0;
+                          StringRef DiagKind) = 0;
   virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                             const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                             bool FullyRemove, ThreadSafetyHandler &Handler,
-                            StringRef DiagKind) const = 0;
+                            StringRef DiagKind) = 0;
 
   // Return true if LKind >= LK, where exclusive > shared
   bool isAtLeast(LockKind LK) {
@@ -877,15 +877,14 @@
   }
 
   void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
-                  ThreadSafetyHandler &Handler,
-                  StringRef DiagKind) const override {
+                  ThreadSafetyHandler &Handler, StringRef DiagKind) override {
     Handler.handleDoubleLock(DiagKind, entry.toString(), entry.loc());
   }
 
   void handleUnlock(FactSet &FSet, FactManager &FactMan,
                     const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                     bool FullyRemove, ThreadSafetyHandler &Handler,
-                    StringRef DiagKind) const override {
+                    StringRef DiagKind) override {
     FSet.removeLock(FactMan, Cp);
     if (!Cp.negative()) {
       FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
@@ -897,6 +896,7 @@
 class ScopedLockableFactEntry : public FactEntry {
 private:
   SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+  bool Locked = true; // Are the UnderlyingMutexes currently locked?
 
 public:
   ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
@@ -923,8 +923,11 @@
   }
 
   void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
-                  ThreadSafetyHandler &Handler,
-                  StringRef DiagKind) const override {
+                  ThreadSafetyHandler &Handler, StringRef DiagKind) override {
+    if (Locked)
+      return Handler.handleDoubleLock(DiagKind, entry.toString(), entry.loc());
+    Locked = true;
+
     for (const auto *UnderlyingMutex : UnderlyingMutexes) {
       CapabilityExpr UnderCp(UnderlyingMutex, false);
 
@@ -942,30 +945,28 @@
   void handleUnlock(FactSet &FSet, FactManager &FactMan,
                     const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                     bool FullyRemove, ThreadSafetyHandler &Handler,
-                    StringRef DiagKind) const override {
+                    StringRef DiagKind) override {
     assert(!Cp.negative() && "Managing object cannot be negative.");
-    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
-      CapabilityExpr UnderCp(UnderlyingMutex, false);
-      auto UnderEntry = llvm::make_unique<LockableFactEntry>(
-          !UnderCp, LK_Exclusive, UnlockLoc);
 
-      if (FullyRemove) {
-        // We're destroying the managing object.
-        // Remove the underlying mutex if it exists; but don't warn.
+    if (Locked) {
+      for (const auto *UnderlyingMutex : UnderlyingMutexes) {
+        CapabilityExpr UnderCp(UnderlyingMutex, false);
+
+        // We're releasing the underlying mutexes.  Warn on dual release.
         if (FSet.findLock(FactMan, UnderCp)) {
           FSet.removeLock(FactMan, UnderCp);
-          FSet.addLock(FactMan, std::move(UnderEntry));
-        }
-      } else {
-        // We're releasing the underlying mutex, but not destroying the
-        // managing object.  Warn on dual release.
-        if (!FSet.findLock(FactMan, UnderCp)) {
+          FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                    !UnderCp, LK_Exclusive, UnlockLoc));
+        } else {
           Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
                                         UnlockLoc);
         }
-        FSet.removeLock(FactMan, UnderCp);
-        FSet.addLock(FactMan, std::move(UnderEntry));
       }
+      Locked = false;
+    } else {
+      // Unlocking an already unlocked scoped capability on destruction is fine.
+      if (!FullyRemove)
+        Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
     }
     if (FullyRemove)
       FSet.removeLock(FactMan, Cp);
@@ -1294,7 +1295,7 @@
   if (Cp.shouldIgnore())
     return;
 
-  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
+  FactEntry *LDat = FSet.findLock(FactMan, Cp);
   if (!LDat) {
     Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
     return;