Hi,

For code:
```
u64
test_rlwinm_lowpart_mask (u32 v)
{
  u32 v1 = ((v << N) | (v >> (32 - N))) & 0xfffff00;
  return (u64)v1;
}
```
We generate "rlwinm 3,3,4,4,23; rldicl 3,3,0,32" instead of "rlwinm 3,3,4,4,23".
Here the "rlwinm" already clears the high 32 bits, so the "rldicl" is redundant.

Similarly, the code below implements exactly the functionality of "rlwinm".
```
u64
test_rlwinm_mask (u32 v)
{
  u32 v1 = ((v << N) | (v >> (32 - N)));
  u64 v2 = (u64) v1 | ((u64) v1 << 32);
  return v2 & 0xffffffffe0000003ULL;
}
```
We generate
"rotlwi 3,3,4; sldi 9,3,32; add 3,9,3; rldicl 3,3,35,27; rldicl 3,3,29,0"
instead of "rlwinm 3,3,4,30,2".

This patch optimizes these two kinds of code to use just one "rlwinm" insn.

Bootstrap and regtests pass on ppc64{,le}.
Is this patch ok for trunk (or next stage1)?


BR,
Jeff (Jiufu)


gcc/ChangeLog:

        * config/rs6000/predicates.md (lowpart_subreg_operand): New
        define_predicate.
        * config/rs6000/rs6000.md (rlwinm_lowpart_mask): New define_insn.
        (rlwinm_mask_<code>): New define_insn.

gcc/testsuite/ChangeLog:

        * gcc.target/powerpc/rlwinm-0.c: Update expected instruction counts.
        * gcc.target/powerpc/rlwinm_3.c: New test.

---
 gcc/config/rs6000/predicates.md             | 18 ++++++++
 gcc/config/rs6000/rs6000.md                 | 34 +++++++++++++++
 gcc/testsuite/gcc.target/powerpc/rlwinm-0.c |  6 +--
 gcc/testsuite/gcc.target/powerpc/rlwinm_3.c | 47 +++++++++++++++++++++
 4 files changed, 102 insertions(+), 3 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/powerpc/rlwinm_3.c

diff --git a/gcc/config/rs6000/predicates.md b/gcc/config/rs6000/predicates.md
index 52c65534e51..3cc51d797c6 100644
--- a/gcc/config/rs6000/predicates.md
+++ b/gcc/config/rs6000/predicates.md
@@ -1290,6 +1290,24 @@ (define_predicate "mma_disassemble_output_operand"
   return vsx_register_operand (op, mode);
})
 
+;; Return 1 if this operand is a REG or a lowpart SUBREG of a REG.
+(define_predicate "lowpart_subreg_operand"
+  (match_code "subreg,reg")
+{
+  if (REG_P (op))
+    return 1;
+
+  rtx inner_reg = SUBREG_REG (op);
+  if (!REG_P (inner_reg))
+    return 0;
+
+  machine_mode inner_mode = GET_MODE (inner_reg);
+  if (SUBREG_BYTE (op) != subreg_lowpart_offset (mode, inner_mode))
+    return 0;
+
+  return 1;
+})
+
 ;; Return true if operand is an operator used in rotate-and-mask instructions.
 (define_predicate "rotate_mask_operator"
   (match_code "rotate,ashift,lshiftrt"))
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 4a7812fa592..a5d2b6ea000 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -4325,6 +4325,44 @@ (define_insn "*rotldi3_insert_7"
  [(set_attr "type" "insert")
   (set_attr "size" "64")])
 
+;; A 32-bit rotate-and-mask zero-extended to DImode.  Since the mask has no
+;; bits above bit 31, a single rlwinm suffices (no trailing rldicl needed).
+(define_insn "rlwinm_lowpart_mask"
+  [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+     (and:DI
+       (subreg:DI
+         (match_operator:SI 4 "rotate_mask_operator"
+           [(match_operand:SI 1 "lowpart_subreg_operand" "r")
+            (match_operand:SI 2 "const_int_operand" "n")]) 0)
+       (match_operand:DI 3 "const_int_operand" "n")))]
+  "TARGET_POWERPC64 && (UINTVAL (operands[3]) >> 32) == 0
+   && rs6000_is_valid_shift_mask (operands[3], operands[4], SImode)"
+{
+  return rs6000_insn_for_shift_mask (SImode, operands, false);
+}
+  [(set_attr "type" "shift")])
+
+;; rlwinm with the rotated SImode value placed into both halves of the
+;; DImode result; the mask must have all of its high 32 bits set.
+(define_insn "rlwinm_mask_<code>"
+  [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+     (and:DI
+       (plus_ior_xor:DI
+        (ashift:DI
+          (subreg:DI
+            (match_operator:SI 4 "rotate_mask_operator"
+              [(match_operand:SI 1 "lowpart_subreg_operand" "r")
+               (match_operand:SI 2 "const_int_operand" "n")]) 0)
+          (const_int 32))
+        (zero_extend:DI (match_dup 4)))
+       (match_operand:DI 3 "const_int_operand" "n")))]
+  "TARGET_POWERPC64
+   && (UINTVAL (operands[3]) & 0xffffffff80000001ULL) == 0xffffffff80000001ULL
+   && rs6000_is_valid_mask (operands[3], NULL, NULL, SImode)"
+{
+  return rs6000_insn_for_shift_mask (SImode, operands, false);
+}
+  [(set_attr "type" "shift")])
 
 ; This handles the important case of multiple-precision shifts.  There is
 ; no canonicalization rule for ASHIFT vs. LSHIFTRT, so two patterns.
diff --git a/gcc/testsuite/gcc.target/powerpc/rlwinm-0.c b/gcc/testsuite/gcc.target/powerpc/rlwinm-0.c
index 4f4fca2d8ef..50ff01e1925 100644
--- a/gcc/testsuite/gcc.target/powerpc/rlwinm-0.c
+++ b/gcc/testsuite/gcc.target/powerpc/rlwinm-0.c
@@ -2,12 +2,12 @@
 /* { dg-options "-O2" } */
 
 /* { dg-final { scan-assembler-times {(?n)^\s+[a-z]} 6739 { target ilp32 } } } */
-/* { dg-final { scan-assembler-times {(?n)^\s+[a-z]} 9716 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {(?n)^\s+[a-z]} 8164 { target lp64 } } } */
 /* { dg-final { scan-assembler-times {(?n)^\s+blr} 3375 } } */
-/* { dg-final { scan-assembler-times {(?n)^\s+rldicl} 3081 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {(?n)^\s+rldicl} 1538 { target lp64 } } } */
 
 /* { dg-final { scan-assembler-times {(?n)^\s+rlwinm} 3197 { target ilp32 } } } */
-/* { dg-final { scan-assembler-times {(?n)^\s+rlwinm} 3093 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {(?n)^\s+rlwinm} 3084 { target lp64 } } } */
 /* { dg-final { scan-assembler-times {(?n)^\s+rotlwi} 154 } } */
 /* { dg-final { scan-assembler-times {(?n)^\s+srwi} 13 { target ilp32 } } } */
 /* { dg-final { scan-assembler-times {(?n)^\s+srdi} 13 { target lp64 } } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/rlwinm_3.c b/gcc/testsuite/gcc.target/powerpc/rlwinm_3.c
new file mode 100644
index 00000000000..65dcd69ace2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/rlwinm_3.c
@@ -0,0 +1,47 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -save-temps" } */
+
+typedef unsigned long long u64;
+typedef unsigned int u32;
+#define NOINLINE __attribute__ ((noinline))
+#define V (0x9753)
+
+#define MASK 0xffffffffe0000003ULL
+#define N 4
+#define LMASK 0xfffff00
+
+u64 NOINLINE
+test_rlwinm_lowpart_mask (u32 v)
+{
+  u32 v1 = ((v << N) | (v >> (32 - N))) & LMASK;
+  return (u64)v1;
+}
+
+u64 NOINLINE
+test_rlwinm_mask (u32 v)
+{
+  u32 v1 = ((v << N) | (v >> (32 - N)));
+  u64 v2 = (u64) v1 | ((u64) v1 << 32);
+  return v2 & MASK;
+}
+
+/* { dg-final { scan-assembler-times {\mrlwinm\M} 2 { target has_arch_ppc64 } } } */
+
+#define RLWINM_L(v, n)                                                         \
+  ((((v & 0xffffffffLL) << n) | ((v & 0xffffffffLL) >> (32 - n)))              \
+   & 0xffffffffLL)
+#define RLWINM_MASK(v, n, m) (((RLWINM_L (v, n) << 32) | (RLWINM_L (v, n))) & m)
+
+u64 v_low_mask = RLWINM_MASK (V, N, LMASK);
+u64 v_mask = RLWINM_MASK (V, N, MASK);
+
+int
+main ()
+{
+  u64 v = V;
+  if (test_rlwinm_lowpart_mask (v) != v_low_mask
+      || test_rlwinm_mask (v) != v_mask)
+    __builtin_abort ();
+
+  return 0;
+}
-- 
2.17.1

Reply via email to