Hi! These builtins are perfect candidates for bitwise CCP: the bytes are preserved, just byte-swapped.
Noticed this while wondering why we haven't optimized the f9 function in another PR, bswap64 zero extended from 32-bits, later casted to 32-bit unsigned int is 0. Bootstrapped/regtested on x86_64-linux and i686-linux, ok for GCC10? 2019-02-23 Jakub Jelinek <ja...@redhat.com> PR tree-optimization/89475 * tree-ssa-ccp.c (evaluate_stmt): Handle BUILT_IN_BSWAP{16,32,64} calls. * gcc.dg/tree-ssa/pr89475.c: New test. --- gcc/tree-ssa-ccp.c.jj 2019-01-01 12:37:17.078976247 +0100 +++ gcc/tree-ssa-ccp.c 2019-02-23 22:35:23.888343273 +0100 @@ -1960,6 +1960,35 @@ evaluate_stmt (gimple *stmt) break; } + case BUILT_IN_BSWAP16: + case BUILT_IN_BSWAP32: + case BUILT_IN_BSWAP64: + val = get_value_for_expr (gimple_call_arg (stmt, 0), true); + if (val.lattice_val == UNDEFINED) + break; + else if (val.lattice_val == CONSTANT + && val.value + && TREE_CODE (val.value) == INTEGER_CST) + { + tree type = TREE_TYPE (gimple_call_lhs (stmt)); + int prec = TYPE_PRECISION (type); + wide_int wval = wi::to_wide (val.value); + val.value + = wide_int_to_tree (type, + wide_int::from (wval, prec, + UNSIGNED).bswap ()); + val.mask + = widest_int::from (wide_int::from (val.mask, prec, + UNSIGNED).bswap (), + UNSIGNED); + if (wi::sext (val.mask, prec) != -1) + break; + } + val.lattice_val = VARYING; + val.value = NULL_TREE; + val.mask = -1; + break; + default:; } } --- gcc/testsuite/gcc.dg/tree-ssa/pr89475.c.jj 2019-02-23 18:58:23.035845645 +0100 +++ gcc/testsuite/gcc.dg/tree-ssa/pr89475.c 2019-02-23 18:59:38.462607598 +0100 @@ -0,0 +1,104 @@ +/* PR tree-optimization/89475 */ +/* { dg-do compile { target { ilp32 || lp64 } } } */ +/* { dg-options "-O2 -fdump-tree-optimized" } */ +/* { dg-final { scan-tree-dump-not "link_error " "optimized" } } */ + +void link_error (void); + +unsigned short +f0 (unsigned short x) +{ + x &= 0xaa55; + x = __builtin_bswap16 (x); + if (x & 0xaa55) + link_error (); + return x; +} + +unsigned short +f1 (unsigned short x) +{ + x &= 0x55aa; + x = __builtin_bswap16 (x); + if (x 
& 0x55aa) + link_error (); + return x; +} + +unsigned int +f2 (unsigned int x) +{ + x &= 0x55aa5aa5U; + x = __builtin_bswap32 (x); + if (x & 0x5aa555aaU) + link_error (); + return x; +} + +unsigned long long int +f3 (unsigned long long int x) +{ + x &= 0x55aa5aa544cc2211ULL; + x = __builtin_bswap64 (x); + if (x & 0xeedd33bb5aa555aaULL) + link_error (); + return x; +} + +unsigned short +f4 (unsigned short x) +{ + x = __builtin_bswap32 (x); + if (x != 0) + link_error (); + return x; +} + +unsigned int +f5 (unsigned int x) +{ + x = __builtin_bswap64 (x); + if (x != 0) + link_error (); + return x; +} + +unsigned short +f6 (unsigned short x) +{ + x |= 0xaa55; + x = __builtin_bswap16 (x); + if ((x | 0xaa55) != 0xffff) + link_error (); + return x; +} + +unsigned short +f7 (unsigned short x) +{ + x |= 0x55aa; + x = __builtin_bswap16 (x); + if ((x | 0x55aa) != 0xffff) + link_error (); + return x; +} + +unsigned int +f8 (unsigned int x) +{ + x |= 0x55aa5aa5U; + x = __builtin_bswap32 (x); + if ((x | 0x5aa555aaU) != 0xffffffffU) + link_error (); + return x; +} + +unsigned long long int +f9 (unsigned long long int x) +{ + x |= 0x55aa5aa544cc2211ULL; + x = __builtin_bswap64 (x); + if ((x | 0xeedd33bb5aa555aaULL) != 0xffffffffffffffffULL) + link_error (); + return x; +} Jakub