> Am 23.08.2019 um 12:43 schrieb Richard Sandiford <richard.sandif...@arm.com>:
> 
> Ilya Leoshkevich <i...@linux.ibm.com> writes:
>> @@ -3819,6 +3820,82 @@ can_compare_p (enum rtx_code code, machine_mode mode,
>>   return 0;
>> }
>> 
>> +/* can_vector_compare_p presents fake rtx binary operations to the 
>> back-end
>> +   in order to determine its capabilities.  In order to avoid creating fake
>> +   operations on each call, values from previous calls are cached in a 
>> global
>> +   cached_binops hash_table.  It contains rtxes, which can be looked up 
>> using
>> +   binop_keys.  */
>> +
>> +struct binop_key {
>> +  enum rtx_code code;        /* Operation code.  */
>> +  machine_mode value_mode;   /* Result mode.     */
>> +  machine_mode cmp_op_mode;  /* Operand mode.    */
>> +};
>> +
>> +struct binop_hasher : pointer_hash_mark<rtx>, ggc_cache_remove<rtx> {
>> +  typedef rtx value_type;
>> +  typedef binop_key compare_type;
>> +
>> +  static hashval_t
>> +  hash (enum rtx_code code, machine_mode value_mode, machine_mode 
>> cmp_op_mode)
>> +  {
>> +    inchash::hash hstate (0);
>> +    hstate.add_int (code);
>> +    hstate.add_int (value_mode);
>> +    hstate.add_int (cmp_op_mode);
>> +    return hstate.end ();
>> +  }
>> +
>> +  static hashval_t
>> +  hash (const rtx &ref)
>> +  {
>> +    return hash (GET_CODE (ref), GET_MODE (ref), GET_MODE (XEXP (ref, 0)));
>> +  }
>> +
>> +  static bool
>> +  equal (const rtx &ref1, const binop_key &ref2)
>> +  {
>> +    return (GET_CODE (ref1) == ref2.code)
>> +       && (GET_MODE (ref1) == ref2.value_mode)
>> +       && (GET_MODE (XEXP (ref1, 0)) == ref2.cmp_op_mode);
>> +  }
>> +};
>> +
>> +static GTY ((cache)) hash_table<binop_hasher> *cached_binops;
>> +
>> +static rtx
>> +get_cached_binop (enum rtx_code code, machine_mode value_mode,
>> +              machine_mode cmp_op_mode)
>> +{
>> +  if (!cached_binops)
>> +    cached_binops = hash_table<binop_hasher>::create_ggc (1024);
>> +  binop_key key = { code, value_mode, cmp_op_mode };
>> +  hashval_t hash = binop_hasher::hash (code, value_mode, cmp_op_mode);
>> +  rtx *slot = cached_binops->find_slot_with_hash (key, hash, INSERT);
>> +  if (!*slot)
>> +    *slot = gen_rtx_fmt_ee (code, value_mode, gen_reg_rtx (cmp_op_mode),
>> +                        gen_reg_rtx (cmp_op_mode));
>> +  return *slot;
>> +}
> 
> Sorry, I didn't mean anything this complicated.  I just meant that
> we should have a single cached rtx that we can change via PUT_CODE and
> PUT_MODE_RAW for each new query, rather than allocating a new rtx each
> time.
> 
> Something like:
> 
> static GTY ((cache)) rtx cached_binop;
> 
> rtx
> get_cached_binop (machine_mode mode, rtx_code code, machine_mode op_mode)
> {
>  if (cached_binop)
>    {
>      PUT_CODE (cached_binop, code);
>      PUT_MODE_RAW (cached_binop, mode);
>      PUT_MODE_RAW (XEXP (cached_binop, 0), op_mode);
>      PUT_MODE_RAW (XEXP (cached_binop, 1), op_mode);
>    }
>  else
>    {
>      rtx reg1 = gen_raw_REG (op_mode, LAST_VIRTUAL_REGISTER + 1);
>      rtx reg2 = gen_raw_REG (op_mode, LAST_VIRTUAL_REGISTER + 2);
>      cached_binop = gen_rtx_fmt_ee (code, mode, reg1, reg2);
>    }
>  return cached_binop;
> }
> 
> Thanks,
> Richard

Oh, I must have completely missed the point: the cache is only for
storage, and stored values themselves don't really matter.

To make rtx usable with GTY ((cache)) I had to do:

--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -4427,4 +4427,9 @@ extern void gt_ggc_mx (rtx &);
 extern void gt_pch_nx (rtx &);
 extern void gt_pch_nx (rtx &, gt_pointer_operator, void *);

+inline void
+gt_cleare_cache (rtx)
+{
+}
+
 #endif /* ! GCC_RTL_H */

Does that look ok?

Another thing that might turn out to be important: in your first
suggestion you use

 if (insn_operand_predicate_fn pred = insn_data[icode].operand[3].predicate)
   {
     machine_mode cmp_mode = insn_data[icode].operand[3].mode;

instead of simply insn_operand_matches - is there any difference?

Reply via email to