Hi,
This patch fixes the constant load pattern for MVE; it was not
correctly accounting for label + offset cases.
Added a test that ICE'd before, and removed the scan-assembler directives
from the mve_vector* tests as they were too fragile.
Bootstrapped on arm-linux-gnueabihf and regression tested on arm-none-eabi.
Is this OK for trunk?
gcc/ChangeLog:
2020-04-07 Andre Vieira
* config/arm/arm.c (output_move_neon): Deal with label + offset
cases.
* config/arm/mve.md (*mve_mov): Handle const vectors.
gcc/testsuite/ChangeLog:
2020-04-07 Andre Vieira
* gcc.target/arm/mve/intrinsics/mve_load_from_array.c: New test.
* gcc.target/arm/mve/intrinsics/mve_vector_float.c: Remove
scan-assembler.
* gcc.target/arm/mve/intrinsics/mve_vector_float1.c: Likewise.
* gcc.target/arm/mve/intrinsics/mve_vector_int1.c: Likewise.
* gcc.target/arm/mve/intrinsics/mve_vector_int2.c: Likewise.
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index
d5207e0d8f07f9be5265fc6d175c148c6cdd53cb..1af9d5cf145f6d01e364a1afd7ceb3df5da86c9a
100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -20122,52 +20122,43 @@ output_move_neon (rtx *operands)
break;
}
/* Fall through. */
-case LABEL_REF:
case PLUS:
+ addr = XEXP (addr, 0);
+ /* Fall through. */
+case LABEL_REF:
{
int i;
int overlap = -1;
- if (TARGET_HAVE_MVE && !BYTES_BIG_ENDIAN
- && GET_CODE (addr) != LABEL_REF)
+ for (i = 0; i < nregs; i++)
{
- sprintf (buff, "v%srw.32\t%%q0, %%1", load ? "ld" : "st");
- ops[0] = reg;
- ops[1] = mem;
- output_asm_insn (buff, ops);
- }
- else
- {
- for (i = 0; i < nregs; i++)
+ /* We're only using DImode here because it's a convenient
+ size. */
+ ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * i);
+ ops[1] = adjust_address (mem, DImode, 8 * i);
+ if (reg_overlap_mentioned_p (ops[0], mem))
{
- /* We're only using DImode here because it's a convenient
- size. */
- ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * i);
- ops[1] = adjust_address (mem, DImode, 8 * i);
- if (reg_overlap_mentioned_p (ops[0], mem))
- {
- gcc_assert (overlap == -1);
- overlap = i;
- }
- else
- {
- if (TARGET_HAVE_MVE && GET_CODE (addr) == LABEL_REF)
- sprintf (buff, "v%sr.64\t%%P0, %%1", load ? "ld" : "st");
- else
- sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
- output_asm_insn (buff, ops);
- }
+ gcc_assert (overlap == -1);
+ overlap = i;
}
- if (overlap != -1)
+ else
{
- ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * overlap);
- ops[1] = adjust_address (mem, SImode, 8 * overlap);
if (TARGET_HAVE_MVE && GET_CODE (addr) == LABEL_REF)
- sprintf (buff, "v%sr.32\t%%P0, %%1", load ? "ld" : "st");
+ sprintf (buff, "v%sr.64\t%%P0, %%1", load ? "ld" : "st");
else
sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
output_asm_insn (buff, ops);
}
}
+ if (overlap != -1)
+ {
+ ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * overlap);
+ ops[1] = adjust_address (mem, SImode, 8 * overlap);
+ if (TARGET_HAVE_MVE && GET_CODE (addr) == LABEL_REF)
+ sprintf (buff, "v%sr.32\t%%P0, %%1", load ? "ld" : "st");
+ else
+ sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
+ output_asm_insn (buff, ops);
+ }
return "";
}
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index
d1028f4542b4972b4080e46544c86d625d77383a..10abc3fae3709891346b63213afb1fe3754af41a
100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -695,9 +695,9 @@ (define_insn "*mve_mov"
case 2:
return "vmov\t%Q0, %R0, %e1 @ \;vmov\t%J0, %K0, %f1";
case 4:
- if ((TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (mode))
- || (MEM_P (operands[1])
- && GET_CODE (XEXP (operands[1], 0)) == LABEL_REF))
+ if (MEM_P (operands[1])
+ && (GET_CODE (XEXP (operands[1], 0)) == LABEL_REF
+ || GET_CODE (XEXP (operands[1], 0)) == CONST))
return output_move_neon (operands);
else
return "vldrb.8 %q0, %E1";
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_load_from_array.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_load_from_array.c
new file mode 100644
index