https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102125

            Bug ID: 102125
           Summary: (ARM Cortex-M3 and newer) missed optimization: memcpy
                    generates unneeded operations
           Product: gcc
           Version: 10.2.1
            Status: UNCONFIRMED
          Severity: normal
          Priority: P3
         Component: c
          Assignee: unassigned at gcc dot gnu.org
          Reporter: jankowski938 at gmail dot com
  Target Milestone: ---

```
#include <stdint.h>
#include <string.h>

uint64_t bar64(const uint8_t *rData1)
{
    uint64_t buffer;
    memcpy(&buffer, rData1, sizeof(buffer));
    return buffer;
}
```

compiler options:
-Ox -mthumb -mcpu=cortex-mY

where x : 2, 3, s and Y : 3, 4, 7 (i.e. -O2/-O3/-Os with -mcpu=cortex-m3/cortex-m4/cortex-m7)
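
For example, assuming the arm-none-eabi-gcc cross compiler and the source saved as test.c, the assembly below can be reproduced with:
arm-none-eabi-gcc -O2 -mthumb -mcpu=cortex-m3 -S -o - test.c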

```
bar64:
        sub     sp, sp, #8
        mov     r2, r0
        ldr     r0, [r0]  @ unaligned
        ldr     r1, [r2, #4]      @ unaligned
        mov     r3, sp
        stmia   r3!, {r0, r1}
        ldrd    r0, [sp]
        add     sp, sp, #8
        bx      lr
```

It would be enough to emit the two unaligned word loads directly into the return registers (Cortex-M3 and newer support unaligned word loads, and a uint64_t is returned in r0/r1, so the round trip through the stack slot is redundant):

```
        mov     r3, r0
        ldr     r0, [r0]  @ unaligned
        ldr     r1, [r3, #4]      @ unaligned
        bx      lr
```

The 32-bit memcpy is optimized correctly (see bar32 below).

Full example code:

```
#include <stdint.h>
#include <string.h>

uint64_t foo64(const uint8_t *rData1)
{
    uint64_t buffer;
    buffer = (((uint64_t)rData1[7]) << 56) | (((uint64_t)rData1[6]) << 48) |
             (((uint64_t)rData1[5]) << 40) | (((uint64_t)rData1[4]) << 32) |
             (((uint64_t)rData1[3]) << 24) | (((uint64_t)rData1[2]) << 16) |
             (((uint64_t)rData1[1]) << 8)  | rData1[0];
    return buffer;
}

uint64_t bar64(const uint8_t *rData1)
{
    uint64_t buffer;
    memcpy(&buffer, rData1, sizeof(buffer));
    return buffer;
}

uint32_t foo32(const uint8_t *rData1)
{
    uint32_t buffer;
    buffer = (((uint32_t)rData1[3]) << 24) | (((uint32_t)rData1[2]) << 16) |
             (((uint32_t)rData1[1]) << 8)  | rData1[0];
    return buffer;
}

uint32_t bar32(const uint8_t *rData1)
{
    uint32_t buffer;
    memcpy(&buffer, rData1, sizeof(buffer));
    return buffer;
}
```
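
On a little-endian target such as Cortex-M, the shift-based and memcpy-based variants return the same value; a minimal host-side sanity check, assuming a hosted environment and compiled together with the functions above, could look like:

```
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Prototypes for the functions defined above. */
uint64_t foo64(const uint8_t *rData1);
uint64_t bar64(const uint8_t *rData1);
uint32_t foo32(const uint8_t *rData1);
uint32_t bar32(const uint8_t *rData1);

int main(void)
{
    /* 9-byte buffer so that data + 1 gives a deliberately unaligned
       pointer with 8 readable bytes behind it. */
    uint8_t data[9] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
    const uint8_t *p = data + 1;

    /* On a little-endian target the byte-assembly and memcpy forms agree. */
    assert(foo64(p) == bar64(p));
    assert(foo32(p) == bar32(p));

    printf("64-bit: 0x%016llx\n", (unsigned long long)bar64(p));
    printf("32-bit: 0x%08lx\n", (unsigned long)bar32(p));
    return 0;
}
```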

compiler output:
```
foo64:
        mov     r3, r0
        ldr     r0, [r0]  @ unaligned
        ldr     r1, [r3, #4]      @ unaligned
        bx      lr
bar64:
        sub     sp, sp, #8
        mov     r2, r0
        ldr     r0, [r0]  @ unaligned
        ldr     r1, [r2, #4]      @ unaligned
        mov     r3, sp
        stmia   r3!, {r0, r1}
        ldrd    r0, [sp]
        add     sp, sp, #8
        bx      lr
foo32:
        ldr     r0, [r0]  @ unaligned
        bx      lr
bar32:
        ldr     r0, [r0]  @ unaligned
        bx      lr
```

Clang compiles the same code without this overhead:

https://godbolt.org/z/P7G7Whxqz
