On 14.09.2013 23:54, Richard Henderson wrote:
> This merges the implementation of tcg_out_addi and tcg_out_subi.
> 
> Signed-off-by: Richard Henderson <r...@twiddle.net>
> ---
>  tcg/aarch64/tcg-target.c | 79 +++++++++++++++++++-----------------------------
>  1 file changed, 31 insertions(+), 48 deletions(-)
> 
> diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
> index 0e7b67b..56625a9 100644
> --- a/tcg/aarch64/tcg-target.c
> +++ b/tcg/aarch64/tcg-target.c
> @@ -211,6 +211,12 @@ typedef enum {
>      INSN_EOR    = 0x4a000000,
>      INSN_ANDS   = 0x6a000000,
>  
> +    /* Add/subtract immediate instructions */
> +    INSN_ADDI  = 0x11000000,
> +    INSN_ADDSI = 0x31000000,
> +    INSN_SUBI  = 0x51000000,
> +    INSN_SUBSI = 0x71000000,
> +
>      /* Add/subtract shifted register instructions */
>      INSN_ADD    = 0x0b000000,
>      INSN_ADDS   = 0x2b000000,
> @@ -326,6 +332,22 @@ static inline void tcg_fmt_Rdnm(TCGContext *s, AArch64Insn insn, TCGType sf,
>      tcg_out32(s, insn | sf << 31 | rm << 16 | rn << 5 | rd);
>  }
>  
> +/* This function is used for the Arithmetic (immediate) instruction group.
> +   The value of AIMM must be appropriate for encoding in the shift+imm12
> +   fields.  */
> +static inline void tcg_fmt_Rdn_aimm(TCGContext *s, AArch64Insn insn,
> +                                    TCGType sf, TCGReg rd, TCGReg rn,
> +                                    unsigned int aimm)

Since we are emitting an instruction, a tcg_out_* name seems better to me: 'tcg_out_arith_imm'?
Also, here we have the same issue as before: it might not be immediately obvious
which AArch64Insn values are OK to pass.
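
Something like the following is what I had in mind (just a sketch, reusing the
body of tcg_fmt_Rdn_aimm from the hunk above; the assert over the INSN_* values
is only my guess at the set of opcodes you intend callers to pass):

static inline void tcg_out_arith_imm(TCGContext *s, AArch64Insn insn,
                                     TCGType sf, TCGReg rd, TCGReg rn,
                                     unsigned int aimm)
{
    /* Make the accepted subset explicit: only the add/subtract
       immediate opcodes make sense with this encoding.  */
    assert(insn == INSN_ADDI || insn == INSN_ADDSI
           || insn == INSN_SUBI || insn == INSN_SUBSI);

    if (aimm > 0xfff) {
        /* Anything wider than 12 bits must fit the imm12, LSL #12 form.  */
        assert((aimm & 0xfff) == 0);
        aimm >>= 12;
        assert(aimm <= 0xfff);
        aimm |= 1 << 12;  /* apply LSL 12 */
    }
    tcg_out32(s, insn | sf << 31 | aimm << 10 | rn << 5 | rd);
}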

> +{
> +    if (aimm > 0xfff) {
> +        assert((aimm & 0xfff) == 0);
> +        aimm >>= 12;
> +        assert(aimm <= 0xfff);
> +        aimm |= 1 << 12;  /* apply LSL 12 */
> +    }
> +    tcg_out32(s, insn | sf << 31 | aimm << 10 | rn << 5 | rd);
> +}
> +
>  static inline void tcg_out_ldst_9(TCGContext *s,
>                                    enum aarch64_ldst_op_data op_data,
>                                    enum aarch64_ldst_op_type op_type,
> @@ -742,46 +764,6 @@ static inline void tcg_out_uxt(TCGContext *s, int s_bits,
>      tcg_out_ubfm(s, 0, rd, rn, 0, bits);
>  }
>  
> -static inline void tcg_out_addi(TCGContext *s, TCGType ext,
> -                                TCGReg rd, TCGReg rn, unsigned int aimm)
> -{
> -    /* add immediate aimm unsigned 12bit value (with LSL 0 or 12) */
> -    /* using ADD 0x11000000 | (ext) | (aimm << 10) | (rn << 5) | rd */
> -    unsigned int base = ext ? 0x91000000 : 0x11000000;
> -
> -    if (aimm <= 0xfff) {
> -        aimm <<= 10;
> -    } else {
> -        /* we can only shift left by 12, on assert we cannot represent */
> -        assert(!(aimm & 0xfff));
> -        assert(aimm <= 0xfff000);
> -        base |= 1 << 22; /* apply LSL 12 */
> -        aimm >>= 2;
> -    }
> -
> -    tcg_out32(s, base | aimm | (rn << 5) | rd);
> -}
> -
> -static inline void tcg_out_subi(TCGContext *s, TCGType ext,
> -                                TCGReg rd, TCGReg rn, unsigned int aimm)
> -{
> -    /* sub immediate aimm unsigned 12bit value (with LSL 0 or 12) */
> -    /* using SUB 0x51000000 | (ext) | (aimm << 10) | (rn << 5) | rd */
> -    unsigned int base = ext ? 0xd1000000 : 0x51000000;
> -
> -    if (aimm <= 0xfff) {
> -        aimm <<= 10;
> -    } else {
> -        /* we can only shift left by 12, on assert we cannot represent */
> -        assert(!(aimm & 0xfff));
> -        assert(aimm <= 0xfff000);
> -        base |= 1 << 22; /* apply LSL 12 */
> -        aimm >>= 2;
> -    }
> -
> -    tcg_out32(s, base | aimm | (rn << 5) | rd);
> -}
> -
>  static inline void tcg_out_nop(TCGContext *s)
>  {
>      tcg_out32(s, 0xd503201f);
> @@ -899,9 +881,9 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg,
>                   (TARGET_LONG_BITS - TARGET_PAGE_BITS) + s_bits,
>                   (TARGET_LONG_BITS - TARGET_PAGE_BITS));
>      /* Add any "high bits" from the tlb offset to the env address into X2,
> -       to take advantage of the LSL12 form of the addi instruction.
> +       to take advantage of the LSL12 form of the ADDI instruction.
>         X2 = env + (tlb_offset & 0xfff000) */
> -    tcg_out_addi(s, 1, TCG_REG_X2, base, tlb_offset & 0xfff000);
> +    tcg_fmt_Rdn_aimm(s, INSN_ADDI, 1, TCG_REG_X2, base, tlb_offset & 0xfff000);
>      /* Merge the tlb index contribution into X2.
>         X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */
>      tcg_fmt_Rdnm_lsl(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
> @@ -1510,9 +1492,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
>          tcg_out_store_pair(s, TCG_REG_FP, r, r + 1, idx);
>      }
>  
> -    /* make stack space for TCG locals */
> -    tcg_out_subi(s, 1, TCG_REG_SP, TCG_REG_SP,
> -                 frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN);
> +    /* Make stack space for TCG locals.  */
> +    tcg_fmt_Rdn_aimm(s, INSN_SUBI, 1, TCG_REG_SP, TCG_REG_SP,
> +                     frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN);
> +
>      /* inform TCG about how to find TCG locals with register, offset, size */
>      tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
>                    CPU_TEMP_BUF_NLONGS * sizeof(long));
> @@ -1529,9 +1512,9 @@ static void tcg_target_qemu_prologue(TCGContext *s)
>  
>      tb_ret_addr = s->code_ptr;
>  
> -    /* remove TCG locals stack space */
> -    tcg_out_addi(s, 1, TCG_REG_SP, TCG_REG_SP,
> -                 frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN);
> +    /* Remove TCG locals stack space.  */
> +    tcg_fmt_Rdn_aimm(s, INSN_ADDI, 1, TCG_REG_SP, TCG_REG_SP,
> +                     frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN);
>  
>      /* restore registers x19..x28.
>         FP must be preserved, so it still points to callee_saved area */
> 

