This is OK, thanks for catching the pasto! Only... > diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c > index 430d562..b8cb871 100644 > --- a/gcc/config/i386/i386.c > +++ b/gcc/config/i386/i386.c > @@ -3974,10 +3974,10 @@ ix86_option_override_internal (bool main_args_p, > if (flag_expensive_optimizations > && !(opts_set->x_target_flags & MASK_VZEROUPPER)) > opts->x_target_flags |= MASK_VZEROUPPER; > - if (!ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL] > + if (!ix86_tune_features[X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL] > && !(opts_set->x_target_flags & MASK_AVX256_SPLIT_UNALIGNED_LOAD)) > opts->x_target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD; > - if (!ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL] > + if (!ix86_tune_features[X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL] > && !(opts_set->x_target_flags & MASK_AVX256_SPLIT_UNALIGNED_STORE)) > opts->x_target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE; > /* Enable 128-bit AVX instruction generation > @@ -16576,7 +16576,7 @@ ix86_avx256_split_vector_move_misalign (rtx > op0, rtx op1) > > if (MEM_P (op1)) > { > - if (TARGET_AVX256_SPLIT_UNALIGNED_LOAD) > + if (!TARGET_AVX2 && TARGET_AVX256_SPLIT_UNALIGNED_LOAD) > { > rtx r = gen_reg_rtx (mode); > m = adjust_address (op1, mode, 0); > @@ -16596,7 +16596,7 @@ ix86_avx256_split_vector_move_misalign (rtx > op0, rtx op1) > } > else if (MEM_P (op0)) > { > - if (TARGET_AVX256_SPLIT_UNALIGNED_STORE) > + if (!TARGET_AVX2 && TARGET_AVX256_SPLIT_UNALIGNED_STORE)
I would add an explanatory comment on those two changes. Shall we also disable argument accumulation for cores? It seems we won't solve the IRA issues, right? Honza