Hi!

This patch fixes two bugs in the -fopenmp-simd support.  One is that
in C++ #pragma omp parallel master would actually create OMP_PARALLEL
in the IL, which is a big no-no for -fopenmp-simd; we should be creating
only the constructs -fopenmp-simd handles (mainly OMP_SIMD, and OMP_LOOP,
which is gimplified as simd in that case, plus declare simd/reduction and
ordered simd).

The other bug was that the #pragma omp master taskloop simd combined
construct contains simd and thus should be recognized as #pragma omp simd
(with only the simd-applicable clauses), but because master wasn't included
in omp_pragmas_simd, we'd ignore it completely instead.

Bootstrapped/regtested on x86_64-linux and i686-linux, committed to trunk,
queued for backporting.

2020-12-08  Jakub Jelinek  <ja...@redhat.com>

        PR c++/98187
        * c-pragma.c (omp_pragmas): Remove "master".
        (omp_pragmas_simd): Add "master".

        * parser.c (cp_parser_omp_parallel): For parallel master with
        -fopenmp-simd only, just call cp_parser_omp_master instead of
        wrapping it in OMP_PARALLEL.

        * c-c++-common/gomp/pr98187.c: New test.

--- gcc/c-family/c-pragma.c.jj  2020-11-09 23:01:10.874738865 +0100
+++ gcc/c-family/c-pragma.c     2020-12-07 21:19:59.319988185 +0100
@@ -1317,7 +1317,6 @@ static const struct omp_pragma_def omp_p
   { "depobj", PRAGMA_OMP_DEPOBJ },
   { "end", PRAGMA_OMP_END_DECLARE_TARGET },
   { "flush", PRAGMA_OMP_FLUSH },
-  { "master", PRAGMA_OMP_MASTER },
   { "requires", PRAGMA_OMP_REQUIRES },
   { "section", PRAGMA_OMP_SECTION },
   { "sections", PRAGMA_OMP_SECTIONS },
@@ -1333,6 +1332,7 @@ static const struct omp_pragma_def omp_p
   { "distribute", PRAGMA_OMP_DISTRIBUTE },
   { "for", PRAGMA_OMP_FOR },
   { "loop", PRAGMA_OMP_LOOP },
+  { "master", PRAGMA_OMP_MASTER },
   { "ordered", PRAGMA_OMP_ORDERED },
   { "parallel", PRAGMA_OMP_PARALLEL },
   { "scan", PRAGMA_OMP_SCAN },
--- gcc/cp/parser.c.jj  2020-12-04 21:39:14.418768272 +0100
+++ gcc/cp/parser.c     2020-12-07 21:22:23.400385209 +0100
@@ -40491,6 +40491,9 @@ cp_parser_omp_parallel (cp_parser *parse
          cclauses = cclauses_buf;
 
          cp_lexer_consume_token (parser->lexer);
+         if (!flag_openmp)  /* flag_openmp_simd  */
+           return cp_parser_omp_master (parser, pragma_tok, p_name, mask,
+                                        cclauses, if_p);
          block = begin_omp_parallel ();
          save = cp_parser_begin_omp_structured_block (parser);
          tree ret = cp_parser_omp_master (parser, pragma_tok, p_name, mask,
--- gcc/testsuite/c-c++-common/gomp/pr98187.c.jj        2020-12-07 21:25:38.108218964 +0100
+++ gcc/testsuite/c-c++-common/gomp/pr98187.c   2020-12-07 21:25:28.960320755 +0100
@@ -0,0 +1,109 @@
+/* PR c++/98187 */
+/* { dg-do compile } */
+/* { dg-options "-fopenmp-simd -O2 -fdump-tree-gimple" } */
+/* { dg-final { scan-tree-dump-times "#pragma omp simd" 17 "gimple" } } */
+
+void
+foo (int *p)
+{
+  int i;
+  #pragma omp distribute parallel for
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp distribute parallel for simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp distribute simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+}
+
+void
+bar (int *p)
+{
+  int i;
+  #pragma omp for simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp master taskloop
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp master taskloop simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp parallel for
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp parallel for simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp parallel loop
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp parallel master
+  p[0]++;
+  #pragma omp parallel master taskloop
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp parallel master taskloop simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp parallel sections
+  {
+    p[0]++;
+    #pragma omp section
+    p[1]++;
+    #pragma omp section
+    p[2]++;
+  }
+  #pragma omp target parallel
+  #pragma omp master
+  p[0]++;
+  #pragma omp target parallel for
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp target parallel for simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp target parallel loop
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp target teams private (i)
+  i = 0;
+  #pragma omp target teams distribute
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp target teams distribute parallel for
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp target teams distribute parallel for simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp target teams distribute simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp target teams loop
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp target simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp taskloop simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp teams distribute
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp teams distribute parallel for
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp teams distribute parallel for simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp teams distribute simd
+  for (i = 0; i < 64; i++)
+    p[i]++;
+  #pragma omp teams loop
+  for (i = 0; i < 64; i++)
+    p[i]++;
+}

        Jakub

Reply via email to