Sign-bit repetitions are necessarily all identical, whether they are 1s or 0s. Bitwise operations preserve the relative quantity of the repetitions: the result has at least as many repeated sign bits as the lesser of the two inputs, so the intersection of the operands' sign masks is a valid sign mask for the result.
Reviewed-by: Philippe Mathieu-Daudé <f4...@amsat.org> Signed-off-by: Richard Henderson <richard.hender...@linaro.org> --- tcg/optimize.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tcg/optimize.c b/tcg/optimize.c index 06b178aa4a..71e97ccd99 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -952,6 +952,13 @@ static bool fold_and(OptContext *ctx, TCGOp *op) z2 = arg_info(op->args[2])->z_mask; ctx->z_mask = z1 & z2; + /* + * Sign repetitions are perforce all identical, whether they are 1 or 0. + * Bitwise operations preserve the relative quantity of the repetitions. + */ + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + /* * Known-zeros does not imply known-ones. Therefore unless * arg2 is constant, we can't infer affected bits from it. @@ -987,6 +994,8 @@ static bool fold_andc(OptContext *ctx, TCGOp *op) } ctx->z_mask = z1; + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; return fold_masks(ctx, op); } @@ -1278,6 +1287,9 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op) fold_xi_to_not(ctx, op, 0)) { return true; } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; return false; } @@ -1465,6 +1477,8 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op) ctx->z_mask = arg_info(op->args[3])->z_mask | arg_info(op->args[4])->z_mask; + ctx->s_mask = arg_info(op->args[3])->s_mask + & arg_info(op->args[4])->s_mask; if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { uint64_t tv = arg_info(op->args[3])->val; @@ -1553,6 +1567,9 @@ static bool fold_nand(OptContext *ctx, TCGOp *op) fold_xi_to_not(ctx, op, -1)) { return true; } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; return false; } @@ -1582,6 +1599,9 @@ static bool fold_nor(OptContext *ctx, TCGOp *op) fold_xi_to_not(ctx, op, 0)) { return true; } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; return false; 
} @@ -1591,6 +1611,8 @@ static bool fold_not(OptContext *ctx, TCGOp *op) return true; } + ctx->s_mask = arg_info(op->args[1])->s_mask; + /* Because of fold_to_not, we want to always return true, via finish. */ finish_folding(ctx, op); return true; @@ -1606,6 +1628,8 @@ static bool fold_or(OptContext *ctx, TCGOp *op) ctx->z_mask = arg_info(op->args[1])->z_mask | arg_info(op->args[2])->z_mask; + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; return fold_masks(ctx, op); } @@ -1617,6 +1641,9 @@ static bool fold_orc(OptContext *ctx, TCGOp *op) fold_xi_to_not(ctx, op, 0)) { return true; } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; return false; } @@ -1881,6 +1908,8 @@ static bool fold_xor(OptContext *ctx, TCGOp *op) ctx->z_mask = arg_info(op->args[1])->z_mask | arg_info(op->args[2])->z_mask; + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; return fold_masks(ctx, op); } -- 2.25.1