Alexander_Droste updated this revision to Diff 49023.
Alexander_Droste marked 2 inline comments as done.
Alexander_Droste added a comment.

- fixed checkUnmatchedWaits (added ErrorNode)
- added a non-fatal error node for double nonblocking
- renamed the BugReporter variable to BReporter
- added a description of why a custom RequestMap is used
- collapsed PathSensitiveChecker into MPIChecker
- moved MPI-Checker to the optin package
- moved `sourceRange()` to MemRegion.cpp/h
- removed Utility.cpp/h
- put everything into clang::ento
- removed '––––' separators from comments
- added a test case checking for multiple missing nonblocking calls on the same line
  (`// expected-warning-re 1+{{Request {{.*}} has..`)
- removed some of the MPI classifications/identifiers


http://reviews.llvm.org/D12761

Files:
  lib/StaticAnalyzer/Checkers/CMakeLists.txt
  lib/StaticAnalyzer/Checkers/Checkers.td
  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
  lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
  test/Analysis/MPIChecker.cpp

Index: test/Analysis/MPIChecker.cpp
===================================================================
--- test/Analysis/MPIChecker.cpp
+++ test/Analysis/MPIChecker.cpp
@@ -0,0 +1,276 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=optin.mpi.MPI-Checker -verify %s
+
+// MPI-Checker makes no assumptions about details of an MPI implementation.
+// Typedefs and constants are compared as strings.
+
+#define NULL 0
+
+// mock types
+typedef int MPI_Datatype;
+typedef int MPI_Comm;
+typedef int MPI_Request;
+typedef int MPI_Status;
+typedef int MPI_Op;
+typedef int int8_t;
+typedef int uint8_t;
+typedef int uint16_t;
+typedef int int64_t;
+namespace std { template<class T> struct complex { T real; T imag; }; }
+
+// mock constants
+#define MPI_DATATYPE_NULL 0
+#define MPI_CHAR 0
+#define MPI_BYTE 0
+#define MPI_INT 0
+#define MPI_LONG 0
+#define MPI_LONG_DOUBLE 0
+#define MPI_UNSIGNED 0
+#define MPI_INT8_T 0
+#define MPI_UINT8_T 0
+#define MPI_UINT16_T 0
+#define MPI_C_LONG_DOUBLE_COMPLEX 0
+#define MPI_FLOAT 0
+#define MPI_DOUBLE 0
+#define MPI_CXX_BOOL 0
+#define MPI_CXX_FLOAT_COMPLEX 0
+#define MPI_CXX_DOUBLE_COMPLEX 0
+#define MPI_CXX_LONG_DOUBLE_COMPLEX 0
+#define MPI_IN_PLACE 0
+#define MPI_COMM_WORLD 0
+#define MPI_STATUS_IGNORE 0
+#define MPI_STATUSES_IGNORE 0
+#define MPI_SUM 0
+
+int MPI_Comm_size(MPI_Comm, int *);
+int MPI_Comm_rank(MPI_Comm, int *);
+int MPI_Send(const void *, int, MPI_Datatype, int, int, MPI_Comm);
+int MPI_Recv(void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status *);
+int MPI_Isend(const void *, int, MPI_Datatype, int, int, MPI_Comm,
+    MPI_Request *);
+int MPI_Irecv(void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request *);
+int MPI_Wait(MPI_Request *, MPI_Status *);
+int MPI_Waitall(int, MPI_Request[], MPI_Status[]);
+int MPI_Reduce(const void *, void *, int, MPI_Datatype, MPI_Op, int, MPI_Comm);
+int MPI_Ireduce(const void *, void *, int, MPI_Datatype, MPI_Op, int, MPI_Comm,
+    MPI_Request *);
+int MPI_Bcast(void *, int count, MPI_Datatype, int, MPI_Comm);
+
+// Integration tests
+
+void matchedWait1() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank >= 0) {
+    MPI_Request sendReq1, recvReq1;
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 3, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 3, MPI_COMM_WORLD, &recvReq1);
+
+    MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+    MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+  }
+} // no error
+
+void matchedWait2() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank >= 0) {
+    MPI_Request sendReq1, recvReq1;
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 4, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 4, MPI_COMM_WORLD, &recvReq1);
+    MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+    MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+  }
+} // no error
+
+void matchedWait3() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank >= 0) {
+    MPI_Request sendReq1, recvReq1;
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 5, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 5, MPI_COMM_WORLD, &recvReq1);
+
+    if (rank > 1000) {
+      MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+      MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+    } else {
+      MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+      MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+    }
+  }
+} // no error
+
+void missingWait1() { // Check missing wait for dead region.
+  int rank = 0;
+  double buf = 0;
+  MPI_Request sendReq1;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &sendReq1); // expected-note{{Request is previously used by nonblocking call here.}}
+} // expected-warning{{Request 'sendReq1' has no matching wait.}}
+
+
+MPI_Request sendReq1;
+void missingWait2() { // Check missing wait for global variable.
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &sendReq1); // expected-note{{Request is previously used by nonblocking call here.}}
+} // expected-warning{{Request 'sendReq1' has no matching wait.}}
+
+void missingWait3() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank == 0) {
+  } else {
+    MPI_Request sendReq1, recvReq1;
+
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 2, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 2, MPI_COMM_WORLD, &recvReq1); // expected-warning{{Request 'sendReq1' has no matching wait.}}
+    MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+  }
+}
+
+void doubleNonblocking() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank == 1) {
+  } else {
+    MPI_Request sendReq1;
+
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 6, MPI_COMM_WORLD, &sendReq1); // expected-note{{Request is previously used by nonblocking call here.}}
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 6, MPI_COMM_WORLD, &sendReq1); // expected-warning{{Double nonblocking on request 'sendReq1'.}}
+    MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+  }
+}
+
+void doubleNonblocking2() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Request req;
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req); // expected-note{{Request is previously used by nonblocking call here.}}
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req); // expected-warning{{Double nonblocking on request 'req'.}}
+  MPI_Wait(&req, MPI_STATUS_IGNORE);
+}
+
+void doubleNonblocking3() {
+  typedef struct { MPI_Request req; } ReqStruct;
+
+  ReqStruct rs;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &rs.req); // expected-note{{Request is previously used by nonblocking call here.}}
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &rs.req); // expected-warning{{Double nonblocking on request 'rs.req'.}}
+  MPI_Wait(&rs.req, MPI_STATUS_IGNORE);
+}
+
+void doubleNonblocking4() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Request req;
+  for (int i = 0; i < 2; ++i) {
+    MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req); // expected-warning{{Double nonblocking on request 'req'.}}
+  }
+  MPI_Wait(&req, MPI_STATUS_IGNORE);
+}
+
+void missingNonBlocking() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request sendReq1[10][10][10];
+  MPI_Wait(&sendReq1[1][7][9], MPI_STATUS_IGNORE); // expected-warning{{Request 'sendReq1[1][7][9]' has no matching nonblocking call.}}
+}
+
+void missingNonBlockingMultiple() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request sendReq[4];
+  for (int i = 0; i < 4; ++i) {
+    MPI_Wait(&sendReq[i], MPI_STATUS_IGNORE); // expected-warning-re 1+{{Request {{.*}} has no matching nonblocking call.}}
+  }
+}
+
+void noDoubleRequestUsage() {
+  typedef struct {
+    MPI_Request req;
+    MPI_Request req2;
+  } ReqStruct;
+
+  ReqStruct rs;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req2);
+  MPI_Wait(&rs.req, MPI_STATUS_IGNORE);
+  MPI_Wait(&rs.req2, MPI_STATUS_IGNORE);
+}
+
+void noDoubleRequestUsage2() {
+  typedef struct {
+    MPI_Request req[2];
+    MPI_Request req2;
+  } ReqStruct;
+
+  ReqStruct rs;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req[0]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req[1]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req2);
+  MPI_Wait(&rs.req[0], MPI_STATUS_IGNORE);
+  MPI_Wait(&rs.req[1], MPI_STATUS_IGNORE);
+  MPI_Wait(&rs.req2, MPI_STATUS_IGNORE);
+}
+
+void nestedRequest() {
+  typedef struct {
+    MPI_Request req[2];
+    MPI_Request req2;
+  } ReqStruct;
+
+  ReqStruct rs;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req[0]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req[1]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req2);
+  MPI_Waitall(2, rs.req, MPI_STATUSES_IGNORE);
+  MPI_Wait(&rs.req2, MPI_STATUS_IGNORE);
+}
+
+void singleRequestInWaitall() {
+  MPI_Request r;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &r);
+  MPI_Waitall(1, &r, MPI_STATUSES_IGNORE);
+}
Index: lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
===================================================================
--- lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
+++ lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
@@ -0,0 +1,67 @@
+//===-- MPITypes.h - Functionality to model MPI concepts --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides definitions, to model the schema of MPI point-to-point
+/// functions and MPI requests. The mpi::Request class defines a wrapper
+/// class, in order to make MPI requests trackable for path-sensitive analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPITYPES_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPITYPES_H
+
+#include "MPIFunctionClassifier.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+// for path-sensitive analysis
+namespace clang {
+namespace ento {
+namespace mpi {
+
+class Request {
+public:
+  enum State : unsigned char { Nonblocking, Wait };
+
+  Request(State UT) : CurrentState{UT} {}
+
+  void Profile(llvm::FoldingSetNodeID &Id) const {
+    Id.AddInteger(CurrentState);
+  }
+
+  bool operator==(const Request &ToCompare) const {
+    return CurrentState == ToCompare.CurrentState;
+  }
+
+  const State CurrentState;
+};
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+// The RequestMap stores MPI requests which are identified by their memory
+// region. Requests are used in MPI to complete nonblocking operations with
+// wait operations. A custom map implementation is used, in order to make it
+// available in an arbitrary amount of translation units.
+struct RequestMap {};
+typedef llvm::ImmutableMap<const clang::ento::MemRegion *,
+                           clang::ento::mpi::Request> RequestMapImpl;
+namespace clang {
+namespace ento {
+template <>
+struct ProgramStateTrait<RequestMap>
+    : public ProgramStatePartialTrait<RequestMapImpl> {
+  static void *GDMIndex() { static int index = 0; return &index; }
+};
+} // end of namespace: ento
+} // end of namespace: clang
+
+#endif
Index: lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
===================================================================
--- lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
+++ lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
@@ -0,0 +1,97 @@
+//===-- MPIFunctionClassifier.h - classifies MPI functions -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functionality to identify and classify MPI functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+class MPIFunctionClassifier {
+public:
+  MPIFunctionClassifier(AnalysisManager &AM) { identifierInit(AM); }
+
+  // general identifiers
+  bool isMPIType(const IdentifierInfo *const IdentInfo) const;
+  bool isNonBlockingType(const IdentifierInfo *const IdentInfo) const;
+
+  // point-to-point identifiers
+  bool isPointToPointType(const IdentifierInfo *const IdentInfo) const;
+
+  // collective identifiers
+  bool isCollectiveType(const IdentifierInfo *const IdentInfo) const;
+  bool isCollToColl(const IdentifierInfo *const IdentInfo) const;
+  bool isScatterType(const IdentifierInfo *const IdentInfo) const;
+  bool isGatherType(const IdentifierInfo *const IdentInfo) const;
+  bool isAllgatherType(const IdentifierInfo *const IdentInfo) const;
+  bool isAlltoallType(const IdentifierInfo *const IdentInfo) const;
+  bool isReduceType(const IdentifierInfo *const IdentInfo) const;
+  bool isBcastType(const IdentifierInfo *const IdentInfo) const;
+
+  // additional identifiers
+  bool isMPI_Wait(const IdentifierInfo *const IdentInfo) const;
+  bool isMPI_Waitall(const IdentifierInfo *const IdentInfo) const;
+  bool isWaitType(const IdentifierInfo *const IdentInfo) const;
+
+private:
+  // Initializes function identifiers, to recognize them during analysis.
+  void identifierInit(AnalysisManager &AM);
+  void initPointToPointIdentifiers(AnalysisManager &AM);
+  void initCollectiveIdentifiers(AnalysisManager &AM);
+  void initAdditionalIdentifiers(AnalysisManager &AM);
+
+  // The containers are used, to enable classification of MPI-functions during
+  // analysis.
+  llvm::SmallVector<IdentifierInfo *, 12> MPINonBlockingTypes;
+
+  llvm::SmallVector<IdentifierInfo *, 10> MPIPointToPointTypes;
+  llvm::SmallVector<IdentifierInfo *, 16> MPICollectiveTypes;
+
+  llvm::SmallVector<IdentifierInfo *, 4> MPIPointToCollTypes;
+  llvm::SmallVector<IdentifierInfo *, 4> MPICollToPointTypes;
+  llvm::SmallVector<IdentifierInfo *, 6> MPICollToCollTypes;
+
+  llvm::SmallVector<IdentifierInfo *, 32> MPIType;
+
+  // point-to-point functions
+  IdentifierInfo *IdentInfo_MPI_Send{nullptr}, *IdentInfo_MPI_Isend{nullptr},
+      *IdentInfo_MPI_Ssend{nullptr}, *IdentInfo_MPI_Issend{nullptr},
+      *IdentInfo_MPI_Bsend{nullptr}, *IdentInfo_MPI_Ibsend{nullptr},
+      *IdentInfo_MPI_Rsend{nullptr}, *IdentInfo_MPI_Irsend{nullptr},
+      *IdentInfo_MPI_Recv{nullptr}, *IdentInfo_MPI_Irecv{nullptr};
+
+  // collective functions
+  IdentifierInfo *IdentInfo_MPI_Scatter{nullptr},
+      *IdentInfo_MPI_Iscatter{nullptr}, *IdentInfo_MPI_Gather{nullptr},
+      *IdentInfo_MPI_Igather{nullptr}, *IdentInfo_MPI_Allgather{nullptr},
+      *IdentInfo_MPI_Iallgather{nullptr}, *IdentInfo_MPI_Bcast{nullptr},
+      *IdentInfo_MPI_Ibcast{nullptr}, *IdentInfo_MPI_Reduce{nullptr},
+      *IdentInfo_MPI_Ireduce{nullptr}, *IdentInfo_MPI_Allreduce{nullptr},
+      *IdentInfo_MPI_Iallreduce{nullptr}, *IdentInfo_MPI_Alltoall{nullptr},
+      *IdentInfo_MPI_Ialltoall{nullptr}, *IdentInfo_MPI_Barrier{nullptr};
+
+  // additional functions
+  IdentifierInfo *IdentInfo_MPI_Comm_rank{nullptr},
+      *IdentInfo_MPI_Comm_size{nullptr}, *IdentInfo_MPI_Wait{nullptr},
+      *IdentInfo_MPI_Waitall{nullptr};
+};
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+#endif
Index: lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
===================================================================
--- lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
+++ lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
@@ -0,0 +1,291 @@
+//===-- MPIFunctionClassifier.cpp - classifies MPI functions ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functionality to identify and classify MPI functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MPIFunctionClassifier.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+void MPIFunctionClassifier::identifierInit(AnalysisManager &AM) {
+  // Initialize function identifiers.
+  initPointToPointIdentifiers(AM);
+  initCollectiveIdentifiers(AM);
+  initAdditionalIdentifiers(AM);
+}
+
+void MPIFunctionClassifier::initPointToPointIdentifiers(
+    clang::ento::AnalysisManager &AM) {
+  ASTContext &ASTCtx = AM.getASTContext();
+
+  // Copy identifiers into the correct classification containers.
+  IdentInfo_MPI_Send = &ASTCtx.Idents.get("MPI_Send");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Send);
+  MPIType.push_back(IdentInfo_MPI_Send);
+  assert(IdentInfo_MPI_Send);
+
+  IdentInfo_MPI_Isend = &ASTCtx.Idents.get("MPI_Isend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Isend);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Isend);
+  MPIType.push_back(IdentInfo_MPI_Isend);
+  assert(IdentInfo_MPI_Isend);
+
+  IdentInfo_MPI_Ssend = &ASTCtx.Idents.get("MPI_Ssend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Ssend);
+  MPIType.push_back(IdentInfo_MPI_Ssend);
+  assert(IdentInfo_MPI_Ssend);
+
+  IdentInfo_MPI_Issend = &ASTCtx.Idents.get("MPI_Issend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Issend);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Issend);
+  MPIType.push_back(IdentInfo_MPI_Issend);
+  assert(IdentInfo_MPI_Issend);
+
+  IdentInfo_MPI_Bsend = &ASTCtx.Idents.get("MPI_Bsend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Bsend);
+  MPIType.push_back(IdentInfo_MPI_Bsend);
+  assert(IdentInfo_MPI_Bsend);
+
+  IdentInfo_MPI_Ibsend = &ASTCtx.Idents.get("MPI_Ibsend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Ibsend);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Ibsend);
+  MPIType.push_back(IdentInfo_MPI_Ibsend);
+  assert(IdentInfo_MPI_Ibsend);
+
+  IdentInfo_MPI_Rsend = &ASTCtx.Idents.get("MPI_Rsend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Rsend);
+  MPIType.push_back(IdentInfo_MPI_Rsend);
+  assert(IdentInfo_MPI_Rsend);
+
+  IdentInfo_MPI_Irsend = &ASTCtx.Idents.get("MPI_Irsend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Irsend);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Irsend);
+  MPIType.push_back(IdentInfo_MPI_Irsend);
+
+  IdentInfo_MPI_Recv = &ASTCtx.Idents.get("MPI_Recv");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Recv);
+  MPIType.push_back(IdentInfo_MPI_Recv);
+  assert(IdentInfo_MPI_Recv);
+
+  IdentInfo_MPI_Irecv = &ASTCtx.Idents.get("MPI_Irecv");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Irecv);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Irecv);
+  MPIType.push_back(IdentInfo_MPI_Irecv);
+  assert(IdentInfo_MPI_Irecv);
+}
+
+void MPIFunctionClassifier::initCollectiveIdentifiers(AnalysisManager &AM) {
+  ASTContext &ASTCtx = AM.getASTContext();
+
+  // Copy identifiers into the correct classification containers.
+  IdentInfo_MPI_Scatter = &ASTCtx.Idents.get("MPI_Scatter");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Scatter);
+  MPIPointToCollTypes.push_back(IdentInfo_MPI_Scatter);
+  MPIType.push_back(IdentInfo_MPI_Scatter);
+  assert(IdentInfo_MPI_Scatter);
+
+  IdentInfo_MPI_Iscatter = &ASTCtx.Idents.get("MPI_Iscatter");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Iscatter);
+  MPIPointToCollTypes.push_back(IdentInfo_MPI_Iscatter);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Iscatter);
+  MPIType.push_back(IdentInfo_MPI_Iscatter);
+  assert(IdentInfo_MPI_Iscatter);
+
+  IdentInfo_MPI_Gather = &ASTCtx.Idents.get("MPI_Gather");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Gather);
+  MPICollToPointTypes.push_back(IdentInfo_MPI_Gather);
+  MPIType.push_back(IdentInfo_MPI_Gather);
+  assert(IdentInfo_MPI_Gather);
+
+  IdentInfo_MPI_Igather = &ASTCtx.Idents.get("MPI_Igather");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Igather);
+  MPICollToPointTypes.push_back(IdentInfo_MPI_Igather);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Igather);
+  MPIType.push_back(IdentInfo_MPI_Igather);
+  assert(IdentInfo_MPI_Igather);
+
+  IdentInfo_MPI_Allgather = &ASTCtx.Idents.get("MPI_Allgather");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Allgather);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Allgather);
+  MPIType.push_back(IdentInfo_MPI_Allgather);
+  assert(IdentInfo_MPI_Allgather);
+
+  IdentInfo_MPI_Iallgather = &ASTCtx.Idents.get("MPI_Iallgather");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Iallgather);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Iallgather);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Iallgather);
+  MPIType.push_back(IdentInfo_MPI_Iallgather);
+  assert(IdentInfo_MPI_Iallgather);
+
+  IdentInfo_MPI_Bcast = &ASTCtx.Idents.get("MPI_Bcast");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Bcast);
+  MPIPointToCollTypes.push_back(IdentInfo_MPI_Bcast);
+  MPIType.push_back(IdentInfo_MPI_Bcast);
+  assert(IdentInfo_MPI_Bcast);
+
+  IdentInfo_MPI_Ibcast = &ASTCtx.Idents.get("MPI_Ibcast");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Ibcast);
+  MPIPointToCollTypes.push_back(IdentInfo_MPI_Ibcast);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Ibcast);
+  MPIType.push_back(IdentInfo_MPI_Ibcast);
+  assert(IdentInfo_MPI_Ibcast);
+
+  IdentInfo_MPI_Reduce = &ASTCtx.Idents.get("MPI_Reduce");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Reduce);
+  MPICollToPointTypes.push_back(IdentInfo_MPI_Reduce);
+  MPIType.push_back(IdentInfo_MPI_Reduce);
+  assert(IdentInfo_MPI_Reduce);
+
+  IdentInfo_MPI_Ireduce = &ASTCtx.Idents.get("MPI_Ireduce");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Ireduce);
+  MPICollToPointTypes.push_back(IdentInfo_MPI_Ireduce);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Ireduce);
+  MPIType.push_back(IdentInfo_MPI_Ireduce);
+  assert(IdentInfo_MPI_Ireduce);
+
+  IdentInfo_MPI_Allreduce = &ASTCtx.Idents.get("MPI_Allreduce");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Allreduce);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Allreduce);
+  MPIType.push_back(IdentInfo_MPI_Allreduce);
+  assert(IdentInfo_MPI_Allreduce);
+
+  IdentInfo_MPI_Iallreduce = &ASTCtx.Idents.get("MPI_Iallreduce");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Iallreduce);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Iallreduce);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Iallreduce);
+  MPIType.push_back(IdentInfo_MPI_Iallreduce);
+  assert(IdentInfo_MPI_Iallreduce);
+
+  IdentInfo_MPI_Alltoall = &ASTCtx.Idents.get("MPI_Alltoall");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Alltoall);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Alltoall);
+  MPIType.push_back(IdentInfo_MPI_Alltoall);
+  assert(IdentInfo_MPI_Alltoall);
+
+  IdentInfo_MPI_Ialltoall = &ASTCtx.Idents.get("MPI_Ialltoall");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Ialltoall);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Ialltoall);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Ialltoall);
+  MPIType.push_back(IdentInfo_MPI_Ialltoall);
+  assert(IdentInfo_MPI_Ialltoall);
+}
+
+void MPIFunctionClassifier::initAdditionalIdentifiers(AnalysisManager &AM) {
+  ASTContext &ASTCtx = AM.getASTContext();
+
+  IdentInfo_MPI_Comm_rank = &ASTCtx.Idents.get("MPI_Comm_rank");
+  MPIType.push_back(IdentInfo_MPI_Comm_rank);
+  assert(IdentInfo_MPI_Comm_rank);
+
+  IdentInfo_MPI_Comm_size = &ASTCtx.Idents.get("MPI_Comm_size");
+  MPIType.push_back(IdentInfo_MPI_Comm_size);
+  assert(IdentInfo_MPI_Comm_size);
+
+  IdentInfo_MPI_Wait = &ASTCtx.Idents.get("MPI_Wait");
+  MPIType.push_back(IdentInfo_MPI_Wait);
+  assert(IdentInfo_MPI_Wait);
+
+  IdentInfo_MPI_Waitall = &ASTCtx.Idents.get("MPI_Waitall");
+  MPIType.push_back(IdentInfo_MPI_Waitall);
+  assert(IdentInfo_MPI_Waitall);
+
+  IdentInfo_MPI_Barrier = &ASTCtx.Idents.get("MPI_Barrier");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Barrier);
+  MPIType.push_back(IdentInfo_MPI_Barrier);
+  assert(IdentInfo_MPI_Barrier);
+}
+
+// general identifiers
+bool MPIFunctionClassifier::isMPIType(const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPIType, IdentInfo);
+}
+
+bool MPIFunctionClassifier::isNonBlockingType(
+    const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPINonBlockingTypes, IdentInfo);
+}
+
+// point-to-point identifiers
+bool MPIFunctionClassifier::isPointToPointType(
+    const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPIPointToPointTypes, IdentInfo);
+}
+
+// collective identifiers
+bool MPIFunctionClassifier::isCollectiveType(
+    const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPICollectiveTypes, IdentInfo);
+}
+
+bool MPIFunctionClassifier::isCollToColl(
+    const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPICollToCollTypes, IdentInfo);
+}
+
+bool MPIFunctionClassifier::isScatterType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Scatter ||
+         IdentInfo == IdentInfo_MPI_Iscatter;
+}
+
+bool MPIFunctionClassifier::isGatherType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Gather ||
+         IdentInfo == IdentInfo_MPI_Igather ||
+         IdentInfo == IdentInfo_MPI_Allgather ||
+         IdentInfo == IdentInfo_MPI_Iallgather;
+}
+
+bool MPIFunctionClassifier::isAllgatherType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Allgather ||
+         IdentInfo == IdentInfo_MPI_Iallgather;
+}
+
+bool MPIFunctionClassifier::isAlltoallType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Alltoall ||
+         IdentInfo == IdentInfo_MPI_Ialltoall;
+}
+
+bool MPIFunctionClassifier::isBcastType(const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Bcast || IdentInfo == IdentInfo_MPI_Ibcast;
+}
+
+bool MPIFunctionClassifier::isReduceType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Reduce ||
+         IdentInfo == IdentInfo_MPI_Ireduce ||
+         IdentInfo == IdentInfo_MPI_Allreduce ||
+         IdentInfo == IdentInfo_MPI_Iallreduce;
+}
+
+// additional identifiers
+bool MPIFunctionClassifier::isMPI_Wait(const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Wait;
+}
+
+bool MPIFunctionClassifier::isMPI_Waitall(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Waitall;
+}
+
+bool MPIFunctionClassifier::isWaitType(const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Wait || IdentInfo == IdentInfo_MPI_Waitall;
+}
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
Index: lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
===================================================================
--- lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
+++ lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
@@ -0,0 +1,122 @@
+//===-- MPIChecker.h - Verify MPI API usage- --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the main class of MPI-Checker which serves as an entry
+/// point. It is created once for each translation unit analysed.
+/// The checker defines path-sensitive checks, to verify correct usage of the
+/// MPI API.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPICHECKER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPICHECKER_H
+
+#include "MPIBugReporter.h"
+#include "MPIFunctionClassifier.h"
+#include "MPITypes.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+class MPIChecker
+    : public Checker<check::PreCall, check::DeadSymbols, check::EndAnalysis> {
+public:
+  // path-sensitive callbacks
+  void checkPreCall(const CallEvent &CE, CheckerContext &Ctx) const {
+    dynamicInit(Ctx);
+    checkUnmatchedWaits(CE, Ctx);
+    checkDoubleNonblocking(CE, Ctx);
+  }
+
+  void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &Ctx) const {
+    dynamicInit(Ctx);
+    checkMissingWaits(SymReaper, Ctx);
+  }
+
+  void checkEndAnalysis(ExplodedGraph &G, BugReporter &, ExprEngine &) const {
+    checkMissingWaitsGlobals(G);
+  }
+
+  void dynamicInit(CheckerContext &Ctx) const {
+    if (IsInitialized)
+      return;
+    const_cast<std::unique_ptr<MPIFunctionClassifier> &>(FuncClassifier)
+        .reset(new MPIFunctionClassifier{Ctx.getAnalysisManager()});
+
+    const_cast<std::unique_ptr<MPIBugReporter> &>(BReporter).reset(
+        new MPIBugReporter{Ctx.getBugReporter(), *this, *FuncClassifier});
+
+    const_cast<bool &>(IsInitialized) = true;
+  }
+
+  /// Checks if a request is used by nonblocking calls multiple times
+  /// in sequence without intermediate wait. The check contains a guard,
+  /// in order to only inspect nonblocking functions.
+  ///
+  /// \param PreCallEvent MPI call to verify
+  void checkDoubleNonblocking(const clang::ento::CallEvent &PreCallEvent,
+                              clang::ento::CheckerContext &Ctx) const;
+
+  /// Checks if a request is used by a wait multiple times in sequence without
+  /// intermediate nonblocking call or if the request used by the wait
+  /// function was not used at all before. The check contains a guard,
+  /// in order to only inspect wait functions.
+  ///
+  /// \param PreCallEvent MPI call to verify
+  void checkUnmatchedWaits(const clang::ento::CallEvent &PreCallEvent,
+                           clang::ento::CheckerContext &Ctx) const;
+
+  /// Check if a nonblocking call is not matched by a wait.
+  /// If a memory region is not alive and the last function using the
+  /// request was a nonblocking call, this is rated as a missing wait.
+  void checkMissingWaits(clang::ento::SymbolReaper &SymReaper,
+                         clang::ento::CheckerContext &Ctx) const;
+
+  /// Check if a nonblocking call is not matched by a wait. This function
+  /// invokes the verification based on global request variables. Therefore,
+  /// all end nodes of a graph built for a translation unit are inspected.
+  /// \param G Graph constructed during path-sensitive analysis for the
+  /// analysed translation unit.
+  void checkMissingWaitsGlobals(clang::ento::ExplodedGraph &G) const;
+
+private:
+  /// Collects all memory regions of a request(array) used by a wait
+  /// function. If the wait function uses a single request, this is a single
+  /// region. For wait functions using multiple requests, multiple regions
+  /// representing elements in the array are collected.
+  ///
+  /// \param ReqRegions vector the regions get pushed into
+  /// \param MR top most region to iterate
+  /// \param CE MPI wait call using the request(s)
+  void allRegionsUsedByWait(
+      llvm::SmallVector<const clang::ento::MemRegion *, 2> &ReqRegions,
+      const clang::ento::MemRegion *const MR, const clang::ento::CallEvent &CE,
+      clang::ento::CheckerContext &Ctx) const;
+
+  /// Returns the memory region used by a wait function.
+  /// Distinguishes between MPI_Wait and MPI_Waitall.
+  ///
+  /// \param CE MPI wait call
+  const clang::ento::MemRegion *
+  topRegionUsedByWait(const clang::ento::CallEvent &CE) const;
+
+  const std::unique_ptr<MPIFunctionClassifier> FuncClassifier;
+  const std::unique_ptr<MPIBugReporter> BReporter;
+  bool IsInitialized{false};
+};
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+#endif
Index: lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
===================================================================
--- lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -0,0 +1,208 @@
+//===-- MPIChecker.cpp - Checker Entry Point Class --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the main class of MPI-Checker which serves as an entry
+/// point. It is created once for each translation unit analysed.
+/// The checker defines path-sensitive checks, to verify correct usage of the
+/// MPI API.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MPIChecker.h"
+#include "../ClangSACheckers.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+void MPIChecker::checkDoubleNonblocking(const CallEvent &PreCallEvent,
+                                        CheckerContext &Ctx) const {
+  if (!FuncClassifier->isNonBlockingType(PreCallEvent.getCalleeIdentifier())) {
+    return;
+  }
+  const MemRegion *const MR =
+      PreCallEvent.getArgSVal(PreCallEvent.getNumArgs() - 1).getAsRegion();
+  // The request argument may not be backed by a region (unknown value);
+  // bail out before dyn_cast/isa, which assert on null pointers.
+  if (!MR)
+    return;
+  const ElementRegion *const ER = dyn_cast<ElementRegion>(MR);
+
+  // The region must be typed, in order to reason about it.
+  if (!isa<TypedRegion>(MR) || (ER && !isa<TypedRegion>(ER->getSuperRegion())))
+    return;
+
+  ProgramStateRef State = Ctx.getState();
+  const Request *const Req = State->get<RequestMap>(MR);
+
+  // Double nonblocking detected: the request is reused by a second
+  // nonblocking call without an intermediate wait.
+  if (Req && Req->CurrentState == Request::State::Nonblocking) {
+    static CheckerProgramPointTag Tag("MPI-Checker", "DoubleNonblocking");
+    ExplodedNode *ErrorNode = Ctx.generateNonFatalErrorNode(State, &Tag);
+    // The node may be cached out; in that case no report is emitted.
+    if (!ErrorNode)
+      return;
+    // Attach the diagnostic to the tagged error node, not the predecessor.
+    BReporter->reportDoubleNonblocking(PreCallEvent, *Req, MR, ErrorNode);
+  }
+  // No error: record the request as used by a nonblocking call.
+  else {
+    State = State->set<RequestMap>(MR, Request::State::Nonblocking);
+    Ctx.addTransition(State);
+  }
+}
+
+void MPIChecker::checkUnmatchedWaits(const CallEvent &PreCallEvent,
+                                     CheckerContext &Ctx) const {
+  if (!FuncClassifier->isWaitType(PreCallEvent.getCalleeIdentifier()))
+    return;
+  const MemRegion *const MR = topRegionUsedByWait(PreCallEvent);
+  // Check for null BEFORE any cast; dyn_cast/isa assert on null pointers.
+  if (!MR)
+    return;
+  const ElementRegion *const ER = dyn_cast<ElementRegion>(MR);
+
+  // The region must be typed, in order to reason about it.
+  if (!isa<TypedRegion>(MR) || (ER && !isa<TypedRegion>(ER->getSuperRegion())))
+    return;
+
+  llvm::SmallVector<const MemRegion *, 2> ReqRegions;
+  allRegionsUsedByWait(ReqRegions, MR, PreCallEvent, Ctx);
+  if (ReqRegions.empty())
+    return;
+
+  ProgramStateRef State = Ctx.getState();
+  static CheckerProgramPointTag Tag("MPI-Checker", "UnmatchedWait");
+  ExplodedNode *ErrorNode{nullptr};
+
+  // Check all request regions used by the wait function.
+  for (const auto &ReqRegion : ReqRegions) {
+    const Request *const Req = State->get<RequestMap>(ReqRegion);
+    State = State->set<RequestMap>(ReqRegion, Request::State::Wait);
+    if (!Req) {
+      if (!ErrorNode) {
+        ErrorNode = Ctx.generateNonFatalErrorNode(State, &Tag);
+        // The node may be cached out; then no report can be attached.
+        if (!ErrorNode)
+          return;
+        State = ErrorNode->getState();
+      }
+      // A wait has no matching nonblocking call.
+      BReporter->reportUnmatchedWait(PreCallEvent, ReqRegion, ErrorNode);
+    }
+  }
+
+  // Transition to commit the Wait states recorded above.
+  if (!ErrorNode) {
+    Ctx.addTransition(State);
+  } else {
+    Ctx.addTransition(State, ErrorNode);
+  }
+}
+
+void MPIChecker::checkMissingWaits(SymbolReaper &SymReaper,
+                                   CheckerContext &Ctx) const {
+  if (!SymReaper.hasDeadSymbols())
+    return;
+
+  ProgramStateRef State = Ctx.getState();
+
+  // Fetch the request map once; it is an immutable map, so iterating it
+  // while deriving new states from it below is safe.
+  const auto &ReqMap = State->get<RequestMap>();
+  if (ReqMap.isEmpty())
+    return;
+
+  static CheckerProgramPointTag Tag("MPI-Checker", "MissingWait");
+  ExplodedNode *ErrorNode{nullptr};
+
+  // Remove requests whose region died; report those still in nonblocking
+  // state, since they were never matched by a wait in scope.
+  for (const auto &Req : ReqMap) {
+    if (!SymReaper.isLiveRegion(Req.first)) {
+      if (Req.second.CurrentState == Request::State::Nonblocking) {
+
+        if (!ErrorNode) {
+          ErrorNode = Ctx.generateNonFatalErrorNode(State, &Tag);
+          // The node may be cached out; then no report can be emitted.
+          if (!ErrorNode)
+            return;
+          State = ErrorNode->getState();
+        }
+        BReporter->reportMissingWait(Req.second, Req.first, ErrorNode);
+      }
+      State = State->remove<RequestMap>(Req.first);
+    }
+  }
+
+  // Transition to update the state regarding removed requests.
+  if (ErrorNode) {
+    Ctx.addTransition(State, ErrorNode);
+  } else {
+    Ctx.addTransition(State);
+  }
+}
+
+void MPIChecker::checkMissingWaitsGlobals(ExplodedGraph &G) const {
+  // Inspect every end-of-path node of the exploded graph and report each
+  // request that is still in nonblocking state there, i.e. a (global)
+  // request that was never matched by a wait.
+  for (auto I = G.eop_begin(), E = G.eop_end(); I != E; ++I) {
+    const ProgramStateRef State = (*I)->getState();
+    const auto &Requests = State->get<RequestMap>();
+    for (const auto &Entry : Requests) {
+      if (Entry.second.CurrentState == Request::State::Nonblocking)
+        BReporter->reportMissingWait(Entry.second, Entry.first, *I);
+    }
+  }
+}
+
+const MemRegion *MPIChecker::topRegionUsedByWait(const CallEvent &CE) const {
+  // MPI_Wait takes the request as its first argument; MPI_Waitall takes the
+  // request array as its second. Any other call uses no request region.
+  const IdentifierInfo *const Id = CE.getCalleeIdentifier();
+
+  if (FuncClassifier->isMPI_Wait(Id))
+    return CE.getArgSVal(0).getAsRegion();
+  if (FuncClassifier->isMPI_Waitall(Id))
+    return CE.getArgSVal(1).getAsRegion();
+  return nullptr;
+}
+
+void MPIChecker::allRegionsUsedByWait(
+    llvm::SmallVector<const MemRegion *, 2> &ReqRegions,
+    const MemRegion *const MR, const CallEvent &CE, CheckerContext &Ctx) const {
+
+  MemRegionManager *const RegionManager = MR->getMemRegionManager();
+
+  if (FuncClassifier->isMPI_Waitall(CE.getCalleeIdentifier())) {
+    const MemRegion *SuperRegion{nullptr};
+    if (const ElementRegion *const ER = MR->getAs<ElementRegion>()) {
+      SuperRegion = ER->getSuperRegion();
+    }
+
+    // A single request is passed to MPI_Waitall.
+    if (!SuperRegion) {
+      ReqRegions.push_back(MR);
+      return;
+    }
+
+    // Element type of the request array; needed both for the extent query
+    // and for constructing the per-element regions below.
+    const QualType ElemType = CE.getArgExpr(1)->getType()->getPointeeType();
+
+    const auto &Size = Ctx.getStoreManager().getSizeInElements(
+        Ctx.getState(), SuperRegion, ElemType);
+    const auto CI = Size.getAs<nonloc::ConcreteInt>();
+    // If the array extent is not a concrete integer (e.g. a symbolic size),
+    // the element count is unknown. Fall back to the top region instead of
+    // dereferencing an empty Optional.
+    if (!CI) {
+      ReqRegions.push_back(MR);
+      return;
+    }
+    const llvm::APSInt &ArrSize = CI->getValue();
+
+    // Collect one region per array element used by the waitall call.
+    for (size_t i = 0; i < ArrSize; ++i) {
+      const NonLoc Idx = Ctx.getSValBuilder().makeArrayIndex(i);
+
+      const ElementRegion *const ER = RegionManager->getElementRegion(
+          ElemType, Idx, SuperRegion, Ctx.getASTContext());
+
+      ReqRegions.push_back(ER->getAs<MemRegion>());
+    }
+  } else if (FuncClassifier->isMPI_Wait(CE.getCalleeIdentifier())) {
+    ReqRegions.push_back(MR);
+  }
+}
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+// Registers the checker for static analysis. Invoked by the checker
+// registry when 'optin.mpi.MPI-Checker' is enabled (see Checkers.td).
+void clang::ento::registerMPIChecker(CheckerManager &MGR) {
+  MGR.registerChecker<clang::ento::mpi::MPIChecker>();
+}
Index: lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
===================================================================
--- lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
+++ lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
@@ -0,0 +1,113 @@
+//===-- MPIBugReporter.h - bug reporter -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines prefabricated reports which are emitted in
+/// case of MPI related bugs, detected by AST-based and path-sensitive analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIBUGREPORTER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIBUGREPORTER_H
+
+#include "MPIFunctionClassifier.h"
+#include "MPITypes.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+/// Emits the prefabricated MPI-Checker diagnostics (double nonblocking,
+/// missing wait, unmatched wait) through the analyzer's BugReporter.
+class MPIBugReporter {
+public:
+  MPIBugReporter(BugReporter &BR, const CheckerBase &CB,
+                 const MPIFunctionClassifier &FC)
+      : BReporter{BR}, FuncClassifier{FC} {
+    UnmatchedWaitBugType.reset(new BugType(&CB, "Unmatched wait", MPIError));
+    DoubleNonblockingBugType.reset(
+        new BugType(&CB, "Double nonblocking", MPIError));
+    MissingWaitBugType.reset(new BugType(&CB, "Missing wait", MPIError));
+  }
+
+  /// Report duplicate request use by nonblocking calls without intermediate
+  /// wait.
+  ///
+  /// \param MPICallEvent MPI call that caused the double nonblocking
+  /// \param Req request that was used by two nonblocking calls in sequence
+  /// \param RequestRegion memory region of the request
+  /// \param ExplNode node in the graph the bug appeared at
+  void reportDoubleNonblocking(const CallEvent &MPICallEvent,
+                               const Request &Req,
+                               const MemRegion *const RequestRegion,
+                               const ExplodedNode *const ExplNode) const;
+
+  /// Report a missing wait for a nonblocking call. A missing wait report
+  /// is emitted if a nonblocking call is not matched in the scope of a
+  /// function.
+  ///
+  /// \param Req request that is not matched by a wait
+  /// \param RequestRegion memory region of the request
+  /// \param ExplNode node in the graph the bug appeared at
+  void reportMissingWait(const Request &Req,
+                         const MemRegion *const RequestRegion,
+                         const ExplodedNode *const ExplNode) const;
+
+  /// Report a wait on a request that has not been used at all before.
+  ///
+  /// \param CE wait call that uses the request
+  /// \param RequestRegion memory region of the request
+  /// \param ExplNode node in the graph the bug appeared at
+  void reportUnmatchedWait(const CallEvent &CE,
+                           const MemRegion *const RequestRegion,
+                           const ExplodedNode *const ExplNode) const;
+
+private:
+  /// Category string shared by all MPI bug types.
+  const std::string MPIError{"MPI Error"};
+
+  // path-sensitive bug types
+  std::unique_ptr<BugType> UnmatchedWaitBugType;
+  std::unique_ptr<BugType> MissingWaitBugType;
+  std::unique_ptr<BugType> DoubleNonblockingBugType;
+
+  BugReporter &BReporter;
+  const MPIFunctionClassifier &FuncClassifier;
+
+  /// Bug visitor class to find the node where the request region was previously
+  /// used in order to include it into the BugReport path.
+  class RequestNodeVisitor : public BugReporterVisitorImpl<RequestNodeVisitor> {
+  public:
+    RequestNodeVisitor(const MemRegion *const MemoryRegion,
+                       const std::string &ErrText,
+                       const MPIFunctionClassifier &FC)
+        : RequestRegion(MemoryRegion), ErrorText{ErrText}, FuncClassifier{FC} {}
+
+    void Profile(llvm::FoldingSetNodeID &ID) const override {
+      // Uniquely identify this visitor kind via the address of a static.
+      static int X = 0;
+      ID.AddPointer(&X);
+      ID.AddPointer(RequestRegion);
+    }
+
+    PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+                                   const ExplodedNode *PrevN,
+                                   BugReporterContext &BRC,
+                                   BugReport &BR) override;
+
+  private:
+    const MemRegion *const RequestRegion;
+    // Set once the prior use of the request was located; stops the visit.
+    bool IsNodeFound{false};
+    std::string ErrorText;
+    const MPIFunctionClassifier &FuncClassifier;
+  };
+};
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+#endif
Index: lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
===================================================================
--- lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
+++ lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
@@ -0,0 +1,145 @@
+//===-- MPIBugReporter.cpp - bug reporter -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines prefabricated reports which are emitted in
+/// case of MPI related bugs, detected by AST-based and path-sensitive analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MPIBugReporter.h"
+#include "MPIChecker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+void MPIBugReporter::reportDoubleNonblocking(
+    const CallEvent &MPICallEvent, const ento::mpi::Request &Req,
+    const MemRegion *const RequestRegion,
+    const ExplodedNode *const ExplNode) const {
+
+  const std::string ErrorText{"Double nonblocking on request " +
+                              RequestRegion->getVariableName() + ". "};
+
+  auto Report = llvm::make_unique<BugReport>(*DoubleNonblockingBugType,
+                                             ErrorText, ExplNode);
+
+  // Highlight the offending call and, when derivable, the request variable.
+  Report->addRange(MPICallEvent.getSourceRange());
+  SourceRange Range = RequestRegion->sourceRange();
+  if (Range.isValid())
+    Report->addRange(Range);
+
+  // Walk the path backwards to point at the first nonblocking use.
+  Report->addVisitor(llvm::make_unique<RequestNodeVisitor>(
+      RequestRegion, "Request is previously used by nonblocking call here. ",
+      FuncClassifier));
+  Report->markInteresting(RequestRegion);
+
+  BReporter.emitReport(std::move(Report));
+}
+
+void MPIBugReporter::reportMissingWait(
+    const ento::mpi::Request &Req, const MemRegion *const RequestRegion,
+    const ExplodedNode *const ExplNode) const {
+  // Compose the diagnostic text naming the unmatched request variable.
+  const std::string ErrorText = "Request " +
+                                RequestRegion->getVariableName() +
+                                " has no matching wait. ";
+
+  auto Report =
+      llvm::make_unique<BugReport>(*MissingWaitBugType, ErrorText, ExplNode);
+
+  // Attach the request's source range when one can be derived.
+  const SourceRange Range = RequestRegion->sourceRange();
+  if (Range.isValid())
+    Report->addRange(Range);
+
+  // Let the visitor point out the nonblocking call that set up the request.
+  Report->addVisitor(llvm::make_unique<RequestNodeVisitor>(
+      RequestRegion, "Request is previously used by nonblocking call here. ",
+      FuncClassifier));
+  Report->markInteresting(RequestRegion);
+
+  BReporter.emitReport(std::move(Report));
+}
+
+void MPIBugReporter::reportUnmatchedWait(
+    const CallEvent &CE, const clang::ento::MemRegion *const RequestRegion,
+    const ExplodedNode *const ExplNode) const {
+  // Diagnostic: a wait on a request never used by a nonblocking call.
+  const std::string ErrorText = "Request " +
+                                RequestRegion->getVariableName() +
+                                " has no matching nonblocking call. ";
+
+  auto Report =
+      llvm::make_unique<BugReport>(*UnmatchedWaitBugType, ErrorText, ExplNode);
+
+  // Highlight both the wait call and, when valid, the request variable.
+  Report->addRange(CE.getSourceRange());
+  const SourceRange Range = RequestRegion->sourceRange();
+  if (Range.isValid())
+    Report->addRange(Range);
+
+  BReporter.emitReport(std::move(Report));
+}
+
+PathDiagnosticPiece *MPIBugReporter::RequestNodeVisitor::VisitNode(
+    const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+    BugReport &BR) {
+
+  // Only one event piece is wanted; stop once the node was found.
+  if (IsNodeFound)
+    return nullptr;
+
+  if (Optional<PreStmt> SP = N->getLocation().getAs<PreStmt>()) {
+    if (const CallExpr *CE = clang::dyn_cast<CallExpr>(SP->getStmt())) {
+
+      // Indirect calls (e.g. through a function pointer) have no direct
+      // callee and cannot be one of the MPI calls of interest.
+      const FunctionDecl *const Callee = CE->getDirectCallee();
+      if (!Callee)
+        return nullptr;
+
+      auto FuncIdentifier = Callee->getIdentifier();
+      CallEventManager &Mgr =
+          N->getState()->getStateManager().getCallEventManager();
+      // Find previous call that uses the request.
+      CallEventRef<> Call =
+          Mgr.getSimpleCall(CE, N->getState(), N->getLocationContext());
+
+      bool ReqUsedByCall{false};
+      // Nonblocking calls carry the request as their last argument.
+      if (FuncClassifier.isNonBlockingType(FuncIdentifier)) {
+        SVal SV = Call->getArgSVal(Call->getNumArgs() - 1);
+        if (SV.getAsRegion() == RequestRegion) {
+          ReqUsedByCall = true;
+        }
+      }
+      // MPI_Waitall: the request may be the array itself or an element of it.
+      else if (FuncClassifier.isMPI_Waitall(FuncIdentifier)) {
+        const MemRegion *const OtherReqRegion =
+            Call->getArgSVal(1).getAsRegion();
+        // Guard against a null region before the subregion query.
+        if (OtherReqRegion &&
+            (OtherReqRegion == RequestRegion ||
+             RequestRegion->isSubRegionOf(OtherReqRegion))) {
+          ReqUsedByCall = true;
+        }
+      }
+      // MPI_Wait passes the request as its first argument.
+      else if (FuncClassifier.isMPI_Wait(FuncIdentifier)) {
+        if (Call->getArgSVal(0).getAsRegion() == RequestRegion) {
+          ReqUsedByCall = true;
+        }
+      }
+
+      if (ReqUsedByCall) {
+        // Do not include further locations.
+        IsNodeFound = true;
+        PathDiagnosticLocation Pos(CE, BRC.getSourceManager(),
+                                   N->getLocationContext());
+
+        return new PathDiagnosticEventPiece(Pos, ErrorText);
+      }
+    }
+  }
+
+  return nullptr;
+}
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
Index: lib/StaticAnalyzer/Checkers/Checkers.td
===================================================================
--- lib/StaticAnalyzer/Checkers/Checkers.td
+++ lib/StaticAnalyzer/Checkers/Checkers.td
@@ -72,6 +72,8 @@
 def LocalizabilityAlpha : Package<"localizability">, InPackage<CocoaAlpha>;
 def LocalizabilityOptIn : Package<"localizability">, InPackage<CocoaOptIn>;
 
+def MPI : Package<"mpi">, InPackage<OptIn>;
+
 def LLVM : Package<"llvm">;
 def Debug : Package<"debug">;
 
@@ -577,6 +579,12 @@
   DescFile<"LocalizationChecker.cpp">;
 }
 
+let ParentPackage = MPI in {
+def MPIChecker : Checker<"MPI-Checker">,
+  HelpText<"Checks MPI code">,
+  DescFile<"MPIChecker.cpp">;
+} // end "optin.mpi"
+
 //===----------------------------------------------------------------------===//
 // Checkers for LLVM development.
 //===----------------------------------------------------------------------===//
Index: lib/StaticAnalyzer/Checkers/CMakeLists.txt
===================================================================
--- lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -46,6 +46,9 @@
   MallocChecker.cpp
   MallocOverflowSecurityChecker.cpp
   MallocSizeofChecker.cpp
+  MPI-Checker/MPIBugReporter.cpp
+  MPI-Checker/MPIChecker.cpp
+  MPI-Checker/MPIFunctionClassifier.cpp
   NSAutoreleasePoolChecker.cpp
   NSErrorChecker.cpp
   NoReturnFunctionChecker.cpp
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to