Another mode iterator renaming, this time to make way for partial/unpacked
float modes.
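
For reference, the renamed SVE_PARTIAL_I iterator covers only the partial
integer modes (VNx8QI VNx4QI VNx2QI, VNx4HI VNx2HI, VNx2SI).  As a rough
illustration (register numbers are made up, not taken from the patch), one
instance of the predicated SXT pattern below, with SVE_FULL_HSDI=VNx8HI and
SVE_PARTIAL_I=VNx8QI, emits:

    sxtb    z0.h, p0/m, z1.h    // <Vesize>=b from VNx8QI, <Vetype>=h from VNx8HI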

Tested on aarch64-linux-gnu and applied as r278339.

Richard


2019-11-16  Richard Sandiford  <richard.sandif...@arm.com>

gcc/
        * config/aarch64/iterators.md (SVE_PARTIAL): Rename to...
        (SVE_PARTIAL_I): ...this.
        * config/aarch64/aarch64-sve.md: Apply the above renaming throughout.

Index: gcc/config/aarch64/iterators.md
===================================================================
--- gcc/config/aarch64/iterators.md     2019-11-16 10:50:39.014190116 +0000
+++ gcc/config/aarch64/iterators.md     2019-11-16 10:52:49.537270154 +0000
@@ -339,10 +339,10 @@ (define_mode_iterator SVE_FULL_S [VNx4SI
 ;; Fully-packed SVE vector modes that have 64-bit elements.
 (define_mode_iterator SVE_FULL_D [VNx2DI VNx2DF])
 
-;; All partial SVE modes.
-(define_mode_iterator SVE_PARTIAL [VNx2QI
-                                  VNx4QI VNx2HI
-                                  VNx8QI VNx4HI VNx2SI])
+;; All partial SVE integer modes.
+(define_mode_iterator SVE_PARTIAL_I [VNx8QI VNx4QI VNx2QI
+                                    VNx4HI VNx2HI
+                                    VNx2SI])
 
 ;; Modes involved in extending or truncating SVE data, for 8 elements per
 ;; 128-bit block.
Index: gcc/config/aarch64/aarch64-sve.md
===================================================================
--- gcc/config/aarch64/aarch64-sve.md   2019-11-16 10:50:39.014190116 +0000
+++ gcc/config/aarch64/aarch64-sve.md   2019-11-16 10:52:49.537270154 +0000
@@ -2818,33 +2818,33 @@ (define_insn "@cond_<optab><mode>"
 ;; -------------------------------------------------------------------------
 
 ;; Predicated SXT[BHW].
-(define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL:mode>"
+(define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
   [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w")
        (unspec:SVE_FULL_HSDI
          [(match_operand:<VPRED> 1 "register_operand" "Upl")
           (sign_extend:SVE_FULL_HSDI
-            (truncate:SVE_PARTIAL
+            (truncate:SVE_PARTIAL_I
               (match_operand:SVE_FULL_HSDI 2 "register_operand" "w")))]
          UNSPEC_PRED_X))]
   "TARGET_SVE && (~<narrower_mask> & <self_mask>) == 0"
-  "sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, 
%2.<SVE_FULL_HSDI:Vetype>"
+  "sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, 
%2.<SVE_FULL_HSDI:Vetype>"
 )
 
 ;; Predicated SXT[BHW] with merging.
-(define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL:mode>"
+(define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
   [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w, ?&w")
        (unspec:SVE_FULL_HSDI
          [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
           (sign_extend:SVE_FULL_HSDI
-            (truncate:SVE_PARTIAL
+            (truncate:SVE_PARTIAL_I
               (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")))
          (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
          UNSPEC_SEL))]
   "TARGET_SVE && (~<narrower_mask> & <self_mask>) == 0"
   "@
-   sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, 
%2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, 
%2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, 
%1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0, %3\;sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, 
%2.<SVE_FULL_HSDI:Vetype>"
+   sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, 
%2.<SVE_FULL_HSDI:Vetype>
+   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, 
%2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>,
 %1/m, %2.<SVE_FULL_HSDI:Vetype>
+   movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, 
%1/m, %2.<SVE_FULL_HSDI:Vetype>"
   [(set_attr "movprfx" "*,yes,yes")]
 )
 