Hello,
On 15 Aug 20:35, Uros Bizjak wrote:
> On Fri, Aug 15, 2014 at 1:56 PM, Kirill Yukhin <kirill.yuk...@gmail.com> 
> wrote:
> Again, please split insn pattern to avoid:
> 
> +  "TARGET_SSE2
> +   && <mask_mode512bit_condition>
> +   && ((<MODE>mode != V16HImode && <MODE>mode != V8HImode)
> +       || TARGET_AVX512BW
> +       || !<mask_applied>)"
> 
> insn constraints. The insn constraint should use baseline TARGET_* and
> mode iterator should use TARGET_* that results in "baseline TARGET_ &&
> iterator TARGET_" for certain mode. If these are properly used, then
> there is no need to use <MODE>mode checks in the insn constraint.
I've refactored the pattern. It should be mentioned, though, that it still
uses <MODE>mode checks implicitly.
The problem is that masking is allowed either for ZMM, or for XMM/YMM with
TARGET_AVX512VL, assuming TARGET_AVX512F is on in both cases.
That is what "mask_mode512bit_condition" checks:
  (define_subst_attr "mask_mode512bit_condition" "mask" "1" "(<MODE_SIZE> == 64 || TARGET_AVX512VL)")

So, this pattern:
(define_insn "<shift_insn><mode>3<mask_name>"
  [(set (match_operand:VI2_AVX2_AVX512BW 0 "register_operand" "=x,v")
        (any_lshift:VI2_AVX2_AVX512BW
          (match_operand:VI2_AVX2_AVX512BW 1 "register_operand" "0,v")
          (match_operand:SI 2 "nonmemory_operand" "xN,vN")))]
  "TARGET_SSE2 && <mask_mode512bit_condition> && <mask_avx512bw_condition>"

is expanded, for (V16HI, mask, ashift), to:
(define_insn ("ashlv16hi3_mask")
     [
        (set (match_operand:V16HI 0 ("register_operand") ("=x,v"))
            (vec_merge:V16HI (ashift:V16HI (match_operand:V16HI 1 ("register_operand") ("0,v"))
                    (match_operand:SI 2 ("nonmemory_operand") ("xN,vN")))
                (match_operand:V16HI 3 ("vector_move_operand") ("0C,0C"))
                (match_operand:HI 4 ("register_operand") ("Yk,Yk"))))
    ] ("(TARGET_AVX512F) && ((TARGET_SSE2 && (32 == 64 || TARGET_AVX512VL) && TARGET_AVX512BW) && (TARGET_AVX2))")

(TARGET_AVX512F) comes from the `mask' subst, (32 == 64 || TARGET_AVX512VL) is
what I described above, (TARGET_AVX512BW) comes from the corresponding subst
attr, and TARGET_AVX2 comes from the mode iterator.
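
For illustration only (not part of the patch), here is a rough C sketch of the
ISA requirements those terms encode at the intrinsic level; the function names
are made up, the intrinsics and target attributes are the usual ones from
immintrin.h, with AVX512F spelled out explicitly as the baseline:

#include <immintrin.h>

/* Unmasked 256-bit word shift: AVX2 alone is enough, matching the
   V16HI "TARGET_AVX2" entry of the mode iterator.  */
__attribute__((target ("avx2")))
__m256i
shift_words (__m256i a, __m128i count)
{
  return _mm256_sll_epi16 (a, count);
}

/* Masked 256-bit word shift: the HI element size needs AVX512BW and the
   256-bit vector length needs AVX512VL, i.e. <mask_avx512bw_condition>
   and <mask_mode512bit_condition>.  */
__attribute__((target ("avx512f,avx512bw,avx512vl")))
__m256i
shift_words_masked (__m256i src, __mmask16 k, __m256i a, __m128i count)
{
  return _mm256_mask_sll_epi16 (src, k, a, count);
}

/* Masked 512-bit word shift: MODE_SIZE == 64, so AVX512VL is not needed
   and AVX512BW on top of the AVX512F baseline is enough.  */
__attribute__((target ("avx512f,avx512bw")))
__m512i
shift_words_512 (__m512i src, __mmask32 k, __m512i a, __m128i count)
{
  return _mm512_mask_sll_epi16 (src, k, a, count);
}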

Bootstrapped and avx512-regtested.

gcc/
        * config/i386/sse.md
        (define_mode_iterator VI248_AVX2): Delete.
        (define_mode_iterator VI2_AVX2_AVX512BW): New.
        (define_mode_iterator VI48_AVX2): Ditto.
        (define_insn "<shift_insn><mode>3<mask_name>"): Add masking.
        Split into two similar patterns, which use different mode
        iterators: VI2_AVX2_AVX512BW and VI48_AVX2.
        * config/i386/subst.md (define_subst_attr "mask_avx512bw_condition"):
        New.

--
Thanks, K

diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 6a5faee..5b1e5c1 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -347,9 +347,11 @@
    (V16HI "TARGET_AVX2") V8HI
    (V8SI "TARGET_AVX2") V4SI])
 
-(define_mode_iterator VI248_AVX2
-  [(V16HI "TARGET_AVX2") V8HI
-   (V8SI "TARGET_AVX2") V4SI
+(define_mode_iterator VI2_AVX2_AVX512BW
+  [(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])
+
+(define_mode_iterator VI48_AVX2
+  [(V8SI "TARGET_AVX2") V4SI
    (V4DI "TARGET_AVX2") V2DI])
 
 (define_mode_iterator VI248_AVX2_8_AVX512F
@@ -8585,15 +8587,34 @@
        (const_string "0")))
    (set_attr "mode" "<sseinsnmode>")])
 
-(define_insn "<shift_insn><mode>3"
-  [(set (match_operand:VI248_AVX2 0 "register_operand" "=x,x")
-       (any_lshift:VI248_AVX2
-         (match_operand:VI248_AVX2 1 "register_operand" "0,x")
-         (match_operand:SI 2 "nonmemory_operand" "xN,xN")))]
-  "TARGET_SSE2"
+(define_insn "<shift_insn><mode>3<mask_name>"
+  [(set (match_operand:VI2_AVX2_AVX512BW 0 "register_operand" "=x,v")
+       (any_lshift:VI2_AVX2_AVX512BW
+         (match_operand:VI2_AVX2_AVX512BW 1 "register_operand" "0,v")
+         (match_operand:SI 2 "nonmemory_operand" "xN,vN")))]
+  "TARGET_SSE2 && <mask_mode512bit_condition> && <mask_avx512bw_condition>"
+  "@
+   p<vshift><ssemodesuffix>\t{%2, %0|%0, %2}
+   vp<vshift><ssemodesuffix>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
+  [(set_attr "isa" "noavx,avx")
+   (set_attr "type" "sseishft")
+   (set (attr "length_immediate")
+     (if_then_else (match_operand 2 "const_int_operand")
+       (const_string "1")
+       (const_string "0")))
+   (set_attr "prefix_data16" "1,*")
+   (set_attr "prefix" "orig,vex")
+   (set_attr "mode" "<sseinsnmode>")])
+
+(define_insn "<shift_insn><mode>3<mask_name>"
+  [(set (match_operand:VI48_AVX2 0 "register_operand" "=x,v")
+       (any_lshift:VI48_AVX2
+         (match_operand:VI48_AVX2 1 "register_operand" "0,v")
+         (match_operand:SI 2 "nonmemory_operand" "xN,vN")))]
+  "TARGET_SSE2 && <mask_mode512bit_condition>"
   "@
    p<vshift><ssemodesuffix>\t{%2, %0|%0, %2}
-   vp<vshift><ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+   vp<vshift><ssemodesuffix>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
   [(set_attr "isa" "noavx,avx")
    (set_attr "type" "sseishft")
    (set (attr "length_immediate")
diff --git a/gcc/config/i386/subst.md b/gcc/config/i386/subst.md
index edb0965..b05cb17 100644
--- a/gcc/config/i386/subst.md
+++ b/gcc/config/i386/subst.md
@@ -56,6 +56,7 @@
 (define_subst_attr "mask_operand_arg34" "mask" "" ", operands[3], operands[4]")
 (define_subst_attr "mask_mode512bit_condition" "mask" "1" "(<MODE_SIZE> == 64 || TARGET_AVX512VL)")
 (define_subst_attr "mask_avx512vl_condition" "mask" "1" "TARGET_AVX512VL")
+(define_subst_attr "mask_avx512bw_condition" "mask" "1" "TARGET_AVX512BW")
 (define_subst_attr "store_mask_constraint" "mask" "vm" "v")
 (define_subst_attr "store_mask_predicate" "mask" "nonimmediate_operand" "register_operand")
 (define_subst_attr "mask_prefix" "mask" "vex" "evex")
 
