On 1/10/26 03:50, Philippe Mathieu-Daudé wrote:
+uint64_t ldm_p(const void *ptr, MemOp mop)
+{
+    const unsigned size = memop_size(mop);
+    uint64_t val;
+    uint8_t *pval = (uint8_t *)&val;
+
+    if (HOST_BIG_ENDIAN) {
+        pval += sizeof(val) - size;
+    }
+
+    __builtin_memcpy(pval, ptr, size);
+    if (unlikely(mop & MO_BSWAP)) {
+        switch (size) {
+        case sizeof(uint16_t):
+            val = __builtin_bswap16(val);
+            break;
+        case sizeof(uint32_t):
+            val = __builtin_bswap32(val);
+            break;
+        case sizeof(uint64_t):
+            val = __builtin_bswap64(val);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
+    return val;
+}
I'm not fond of the pointer arithmetic or the code structure.
Perhaps better as
switch (mop & (MO_BSWAP | MO_SIZE)) {
case MO_LEUW:
    return lduw_le_p(ptr);
case MO_BEUW:
    return lduw_be_p(ptr);
...
default:
    g_assert_not_reached();
}
which would hopefully compile to host endian-swapping load insns like
.L1:
        mov     (ptr), %eax
        ret
.L2:
        movbe   (ptr), %eax
        ret
.L3:
        mov     (ptr), %rax
        ret
.L4:
        movbe   (ptr), %rax
        ret
etc.
And the default case assert makes sure that we're not passing garbage, or an
MO_SIZE that's too large.
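
For reference, a filled-out version of that switch could look roughly like the
sketch below (untested; it assumes the usual MO_UB/MO_LEUW/.../MO_BEUQ
combinations from include/exec/memop.h and the ld*_p helpers from
include/qemu/bswap.h):

uint64_t ldm_p(const void *ptr, MemOp mop)
{
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        /* Assumes callers pass plain MO_UB, with no byte-swap bit set. */
        return ldub_p(ptr);
    case MO_LEUW:
        return lduw_le_p(ptr);
    case MO_BEUW:
        return lduw_be_p(ptr);
    case MO_LEUL:
        /* ldl_*_p returns int; the cast avoids sign-extending bit 31. */
        return (uint32_t)ldl_le_p(ptr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(ptr);
    case MO_LEUQ:
        return ldq_le_p(ptr);
    case MO_BEUQ:
        return ldq_be_p(ptr);
    default:
        /* Reject garbage MemOp values and sizes wider than 64 bits. */
        g_assert_not_reached();
    }
}

The casts on the 32-bit loads are only needed if ldl_*_p still returns a
signed int; the 64-bit helpers already return uint64_t.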
r~