https://gcc.gnu.org/bugzilla/show_bug.cgi?id=118907
Bug ID: 118907
Summary: ICF optimises bit selection differently based on
declaration order
Product: gcc
Version: 15.0
Status: UNCONFIRMED
Severity: normal
Priority: P3
Component: ipa
Assignee: unassigned at gcc dot gnu.org
Reporter: sjames at gcc dot gnu.org
CC: arsen at gcc dot gnu.org
Target Milestone: ---
This comes from
https://old.reddit.com/r/cpp_questions/comments/1ilww3q/gcc_bit_manipulation_code_generation_seems_buggy/.
godbolt: https://godbolt.org/z/GnbKzd33s
```
#include <cstdint>
#include <bit>
using namespace std;
using Bitboard = uint64_t;
// Sets one bit per pack element: folds (1 << square) over '|' and ORs the
// result into bitboard. NOTE(review): kept byte-identical — this is an ICF
// reproducer, so the exact expression shape matters.
template <typename ...Squares>
Bitboard setSquare(Bitboard bitboard, Squares... squares) {
return bitboard | ((1ull << squares) | ...);
}
// Variant 1: forwards the four squares through the variadic setSquare template.
Bitboard setSquare1(Bitboard bitboard, uint32_t a, uint32_t b, uint32_t c,
uint32_t d) {
return setSquare(bitboard, a, b, c, d);
}
// Variant 2: same result, written as four sequential |= statements.
Bitboard setSquare2(Bitboard bitboard, uint32_t a, uint32_t b, uint32_t c,
uint32_t d) {
bitboard |= 1ull << a;
bitboard |= 1ull << b;
bitboard |= 1ull << c;
bitboard |= 1ull << d;
return bitboard;
}
// Variant 3: single expression with the four shift terms grouped by an extra
// set of parentheses before ORing with bitboard. One of the two functions the
// report says ICF treats differently (see setSquare4, identical but ungrouped).
Bitboard setSquare3(Bitboard bitboard, uint32_t a, uint32_t b, uint32_t c,
uint32_t d) {
return bitboard | ((1ULL << a) | (1ULL << b) | (1ULL << c) | (1ULL << d));
}
// Variant 4: same as setSquare3 but left-associated — no extra grouping
// parentheses. The other half of the pair the report says ICF optimises
// differently.
Bitboard setSquare4(Bitboard bitboard, uint32_t a, uint32_t b, uint32_t c,
uint32_t d) {
return bitboard | (1ULL << a) | (1ULL << b) | (1ULL << c) | (1ULL << d);
}
// Variant 5: setSquare3's grouped expression, routed through a named local
// before returning.
Bitboard setSquare5(Bitboard bitboard, uint32_t a, uint32_t b, uint32_t c,
uint32_t d) {
Bitboard result = bitboard | ((1ULL << a) | (1ULL << b) | (1ULL << c) |
(1ULL << d));
return result;
}
// Variant 6: setSquare4's left-associated expression, routed through a named
// local before returning.
Bitboard setSquare6(Bitboard bitboard, uint32_t a, uint32_t b, uint32_t c,
uint32_t d) {
Bitboard result = bitboard | (1ULL << a) | (1ULL << b) | (1ULL << c) |
(1ULL << d);
return result;
}
```
With `-std=c++20 -O2`, ICF causes `setSquare3` and `setSquare4` to be
optimised differently, even though all six functions compute the same result.