Re: [patch] Fix PR101188 wrong code from postreload

2023-06-10 Thread Jeff Law via Gcc-patches



On 6/3/23 12:38, Georg-Johann Lay wrote:


    }

  note_stores (insn, move2add_note_store, insn);


The point is that in the continue block, the effect of the insn is
recorded even if !success; it is just the computed effect of the code.

Moreover, "next" is REG = REG + CONST_INT, so there are no REG_INC
notes, no?
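
For reference, an illustrative sketch (made-up RTL, not taken from the PR): a REG_INC note is only attached to an insn whose pattern contains an auto-increment address, whereas a plain register-plus-constant addition carries no such MEM:

   (set (reg:QI 24) (mem:QI (post_inc:HI (reg:HI 26))))   ;; gets a REG_INC note for r26
   (set (reg:HI 26) (plus:HI (reg:HI 26) (const_int 2)))  ;; plain add, no REG_INC note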

Also, I don't have any testcases that break other than the one
that has a clobber of a GPR along with the pointer addition.

I tried some "smart" solutions before, but all failed for some
reason, so I resorted to something that fixes the bug, and
*only* fixes the bug, and which clearly has no side effects
other than fixing the bug (I have to do everything remotely on
the compile farm).  If a more elaborate fix is needed that also
catches other PRs, then I would like to hand this over to a
postreload maintainer, please.

Of particular importance for your case would be the note_stores call. 
But I could well see other targets needing the search for REG_INC 
notes as well as stack pushes.


If I'm right, then wouldn't it be better to factor that blob of code 
above into its own function, then use it before the "continue" rather 
than implementing a custom scan for CLOBBERs?


I cannot answer that.  Maybe the authors of the code have some ideas.
Here's what I was thinking.  I don't have the bits here to build newlib 
or a simulator, so if you could give it a test it'd be appreciated.


I suspect the note_stores call is the important one in here since as you 
note we're dealing with simple arithmetic and I wouldn't expect to have 
REG_INC notes or SP autoincs in that scenario.
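
As a side note, here is a minimal sketch of why the note_stores call covers the CLOBBER case (illustrative only, not part of the patch; show_store is a made-up name): note_stores walks PATTERN (insn) and invokes its callback for every SET and CLOBBER it finds, so move2add_note_store gets to see a clobbered register and can drop its stale tracking without a hand-written scan for CLOBBERs.

/* Illustrative callback only; in postreload.cc this role is played by
   move2add_note_store.  */
static void
show_store (rtx dest, const_rtx setter, void *data ATTRIBUTE_UNUSED)
{
  if (GET_CODE (setter) == CLOBBER && REG_P (dest))
    {
      /* DEST is a clobbered register; any value move2add has recorded
         for it is stale at this point and would be invalidated here.  */
    }
}

/* Would be wired up as: note_stores (insn, show_store, NULL);  */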


Jeff

diff --git a/gcc/postreload.cc b/gcc/postreload.cc
index 20e138b4fa8..856b7d6e22c 100644
--- a/gcc/postreload.cc
+++ b/gcc/postreload.cc
@@ -1899,6 +1899,79 @@ move2add_use_add3_insn (scalar_int_mode mode, rtx reg, rtx sym, rtx off,
   return changed;
 }
 
+/* Perform any invalidations necessary for INSN.  */
+
+void
+reload_cse_move2add_invalidate (rtx_insn *insn)
+{
+  for (rtx note = REG_NOTES (insn); note; note = XEXP (note, 1))
+    {
+      if (REG_NOTE_KIND (note) == REG_INC
+          && REG_P (XEXP (note, 0)))
+        {
+          /* Reset the information about this register.  */
+          int regno = REGNO (XEXP (note, 0));
+          if (regno < FIRST_PSEUDO_REGISTER)
+            {
+              move2add_record_mode (XEXP (note, 0));
+              reg_mode[regno] = VOIDmode;
+            }
+        }
+    }
+
+  /* There are no REG_INC notes for SP autoinc.  */
+  subrtx_var_iterator::array_type array;
+  FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
+    {
+      rtx mem = *iter;
+      if (mem
+          && MEM_P (mem)
+          && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
+        {
+          if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
+            reg_mode[STACK_POINTER_REGNUM] = VOIDmode;
+        }
+    }
+
+  note_stores (insn, move2add_note_store, insn);
+
+  /* If INSN is a conditional branch, we try to extract an
+     implicit set out of it.  */
+  if (any_condjump_p (insn))
+    {
+      rtx cnd = fis_get_condition (insn);
+
+      if (cnd != NULL_RTX
+          && GET_CODE (cnd) == NE
+          && REG_P (XEXP (cnd, 0))
+          && !reg_set_p (XEXP (cnd, 0), insn)
+          /* The following two checks, which are also in
+             move2add_note_store, are intended to reduce the
+             number of calls to gen_rtx_SET to avoid memory
+             allocation if possible.  */
+          && SCALAR_INT_MODE_P (GET_MODE (XEXP (cnd, 0)))
+          && REG_NREGS (XEXP (cnd, 0)) == 1
+          && CONST_INT_P (XEXP (cnd, 1)))
+        {
+          rtx implicit_set = gen_rtx_SET (XEXP (cnd, 0), XEXP (cnd, 1));
+          move2add_note_store (SET_DEST (implicit_set), implicit_set, insn);
+        }
+    }
+
+  /* If this is a CALL_INSN, all call used registers are stored with
+     unknown values.  */
+  if (CALL_P (insn))
+    {
+      function_abi callee_abi = insn_callee_abi (insn);
+      for (int i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
+        if (reg_mode[i] != VOIDmode
+            && reg_mode[i] != BLKmode
+            && callee_abi.clobbers_reg_p (reg_mode[i], i))
+          /* Reset the information about this register.  */
+          reg_mode[i] = VOIDmode;
+    }
+}
+
 /* Convert move insns with constant inputs to additions if they are cheaper.
Return true if any changes were made.  */
 static bool
@@ -1921,7 +1994,7 @@ reload_cse_move2add (rtx_insn *first)
   move2add_luid = 2;
   for (insn = first; insn; insn = NEXT_INSN (insn), move2add_luid++)
     {
-      rtx set, note;
+      rtx set;
 
       if (LABEL_P (insn))
         {
@@ -2041,6 +2114,12 @@ reload_cse_move2add (rtx_insn *first)
                     delete_insn (insn);
                   changed |= success;
                   insn = next;
+                  /* Make sure to perform any invalidations related to
+                     NEXT/INSN since we're going to bypass the normal
+                     flow with the continue below.
+
+                     Do this before recording the new mode/offset.  */
+                  reload_cse_move2add_invalidate (insn);
                   move2add_record_mode (reg);
                   reg_offset[regno]
                     = trunc_int_for_mode (added_offset + base_offset,
@@ -2094,74 +2173,7 @@ reload_cse_move2add (rtx_insn *first)
 	

Re: [patch] Fix PR101188 wrong code from postreload

2023-06-05 Thread Jeff Law via Gcc-patches




On 6/5/23 09:06, Georg-Johann Lay wrote:



Am 03.06.23 um 17:53 schrieb Jeff Law:



On 6/2/23 02:46, Georg-Johann Lay wrote:

There is the following bug in postreload that can be traced back
to v5 at least:

In postreload.cc::reload_cse_move2add() there is a loop over all
insns.  If it encounters a SET, the next insn is analyzed to see
whether it is a single_set.

After next has been analyzed, it continues with

   if (success)
 delete_insn (insn);
   changed |= success;
   insn = next; // This effectively skips analysis of next.
   move2add_record_mode (reg);
   reg_offset[regno]
 = trunc_int_for_mode (added_offset + base_offset,
   mode);
   continue; // for continues with insn = NEXT_INSN (insn).

So it records the effect of next, but not the clobbers that
next might have.  This is a problem if next clobbers a GPR,
as can happen on avr.  In a later round, the pass may then
use a value from a (partially) clobbered reg.

The patch records the effects of potential clobbers.

Bootstrapped and reg-tested on x86_64.  Also tested on avr where
the bug popped up.  The testcase discriminates on avr, and for now
I am not aware of any other target that's affected by the bug.

The change is not intrusive and fixes wrong code, so I'd like
to backport it.

Ok to apply?

Johann

rtl-optimization/101188: Don't bypass clobbers of some insns that are
optimized or are optimization candidates.

gcc/
 PR rtl-optimization/101188
 * postreload.cc (reload_cse_move2add): Record clobbers of next
 insn using move2add_note_store.

gcc/testsuite/
 PR rtl-optimization/101188
 * gcc.c-torture/execute/pr101188.c: New test.
If I understand the code correctly, isn't the core of the problem that 
we "continue" rather than executing the rest of the code in the loop. 
In particular the continue bypasses this chunk of code:



 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
  if (REG_NOTE_KIND (note) == REG_INC
  && REG_P (XEXP (note, 0)))
    {
  /* Reset the information about this register.  */
  int regno = REGNO (XEXP (note, 0));
  if (regno < FIRST_PSEUDO_REGISTER)
    {
  move2add_record_mode (XEXP (note, 0));
  reg_mode[regno] = VOIDmode;
    }
    }
    }

  /* There are no REG_INC notes for SP autoinc.  */
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
    {
  rtx mem = *iter;
  if (mem
  && MEM_P (mem)
  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
    {
  if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
    reg_mode[STACK_POINTER_REGNUM] = VOIDmode;
    }
    }

  note_stores (insn, move2add_note_store, insn);


Of particular importance for your case would be the note_stores call. 
But I could well see other targets needing the search for REG_INC 
notes as well as stack pushes.


If I'm right, then wouldn't it be better to factor that blob of code 
above into its own function, then use it before the "continue" rather 
than implementing a custom scan for CLOBBERs?


It also raises the question of whether the other case immediately above the code
I quoted needs similar adjustment.  It doesn't do the insn = next, but 
it does bypass the search for autoinc memory references and the 
note_stores call.



Jeff


So if I understand you correctly, this means that my patch is declined?
I wouldn't go that far.  I need to review your questions/comments in 
detail and decide if we want to fix the problem more generally or go 
with a more targeted fix.


jeff


Re: [patch] Fix PR101188 wrong code from postreload

2023-06-05 Thread Georg-Johann Lay




Am 03.06.23 um 17:53 schrieb Jeff Law:



On 6/2/23 02:46, Georg-Johann Lay wrote:

There is the following bug in postreload that can be traced back
to v5 at least:

In postreload.cc::reload_cse_move2add() there is a loop over all
insns.  If it encounters a SET, the next insn is analyzed to see
whether it is a single_set.

After next has been analyzed, it continues with

   if (success)
 delete_insn (insn);
   changed |= success;
   insn = next; // This effectively skips analysis of next.
   move2add_record_mode (reg);
   reg_offset[regno]
 = trunc_int_for_mode (added_offset + base_offset,
   mode);
   continue; // for continues with insn = NEXT_INSN (insn).

So it records the effect of next, but not the clobbers that
next might have.  This is a problem if next clobbers a GPR,
as can happen on avr.  In a later round, the pass may then
use a value from a (partially) clobbered reg.

The patch records the effects of potential clobbers.

Bootstrapped and reg-tested on x86_64.  Also tested on avr where
the bug popped up.  The testcase discriminates on avr, and for now
I am not aware of any other target that's affected by the bug.

The change is not intrusive and fixes wrong code, so I'd like
to backport it.

Ok to apply?

Johann

rtl-optimization/101188: Don't bypass clobbers of some insns that are
optimized or are optimization candidates.

gcc/
 PR rtl-optimization/101188
 * postreload.cc (reload_cse_move2add): Record clobbers of next
 insn using move2add_note_store.

gcc/testsuite/
 PR rtl-optimization/101188
 * gcc.c-torture/execute/pr101188.c: New test.
If I understand the code correctly, isn't the core of the problem that 
we "continue" rather than executing the rest of the code in the loop. In 
particular the continue bypasses this chunk of code:



 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
  if (REG_NOTE_KIND (note) == REG_INC
  && REG_P (XEXP (note, 0)))
    {
  /* Reset the information about this register.  */
  int regno = REGNO (XEXP (note, 0));
  if (regno < FIRST_PSEUDO_REGISTER)
    {
  move2add_record_mode (XEXP (note, 0));
  reg_mode[regno] = VOIDmode;
    }
    }
    }

  /* There are no REG_INC notes for SP autoinc.  */
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
    {
  rtx mem = *iter;
  if (mem
  && MEM_P (mem)
  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
    {
  if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
    reg_mode[STACK_POINTER_REGNUM] = VOIDmode;
    }
    }

  note_stores (insn, move2add_note_store, insn);


Of particular importance for your case would be the note_stores call. 
But I could well see other targets needing the search for REG_INC notes 
as well as stack pushes.


If I'm right, then wouldn't it be better to factor that blob of code 
above into its own function, then use it before the "continue" rather 
than implementing a custom scan for CLOBBERs?


It also raises the question of whether the other case immediately above the code I
quoted needs similar adjustment.  It doesn't do the insn = next, but it 
does bypass the search for autoinc memory references and the note_stores 
call.



Jeff


So if I understand you correctly, this means that my patch is declined?

Johann


Re: [patch] Fix PR101188 wrong code from postreload

2023-06-03 Thread Georg-Johann Lay




Am 03.06.23 um 17:53 schrieb Jeff Law:



On 6/2/23 02:46, Georg-Johann Lay wrote:

There is the following bug in postreload that can be traced back
to v5 at least:

In postreload.cc::reload_cse_move2add() there is a loop over all
insns.  If it encounters a SET, the next insn is analyzed to see
whether it is a single_set.

After next has been analyzed, it continues with

   if (success)
 delete_insn (insn);
   changed |= success;
   insn = next; // This effectively skips analysis of next.
   move2add_record_mode (reg);
   reg_offset[regno]
 = trunc_int_for_mode (added_offset + base_offset,
   mode);
   continue; // for continues with insn = NEXT_INSN (insn).

So it records the effect of next, but not the clobbers that
next might have.  This is a problem if next clobbers a GPR,
as can happen on avr.  In a later round, the pass may then
use a value from a (partially) clobbered reg.

The patch records the effects of potential clobbers.

Bootstrapped and reg-tested on x86_64.  Also tested on avr where
the bug popped up.  The testcase discriminates on avr, and for now
I am not aware of any other target that's affected by the bug.

The change is not intrusive and fixes wrong code, so I'd like
to backport it.

Ok to apply?

Johann

rtl-optimization/101188: Don't bypass clobbers of some insns that are
optimized or are optimization candidates.

gcc/
 PR rtl-optimization/101188
 * postreload.cc (reload_cse_move2add): Record clobbers of next
 insn using move2add_note_store.

gcc/testsuite/
 PR rtl-optimization/101188
 * gcc.c-torture/execute/pr101188.c: New test.
If I understand the code correctly, isn't the core of the problem that 
we "continue" rather than executing the rest of the code in the loop. In 
particular the continue bypasses this chunk of code:



 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
  if (REG_NOTE_KIND (note) == REG_INC
  && REG_P (XEXP (note, 0)))
    {
  /* Reset the information about this register.  */
  int regno = REGNO (XEXP (note, 0));
  if (regno < FIRST_PSEUDO_REGISTER)
    {
  move2add_record_mode (XEXP (note, 0));
  reg_mode[regno] = VOIDmode;
    }
    }
    }

  /* There are no REG_INC notes for SP autoinc.  */
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
    {
  rtx mem = *iter;
  if (mem
  && MEM_P (mem)
  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
    {
  if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
    reg_mode[STACK_POINTER_REGNUM] = VOIDmode;
    }
    }

  note_stores (insn, move2add_note_store, insn);


The point is that in the continue block, the effect of the insn is
recorded even if !success; it is just the computed effect of the code.

Moreover, "next" is REG = REG + CONST_INT, so there are no REG_INC
notes, no?

Also, I don't have any testcases that break other than the one
that has a clobber of a GPR along with the pointer addition.

I tried some "smart" solutions before, but all failed for some
reason, so I resorted to something that fixes the bug, and
*only* fixes the bug, and which clearly has no side effects
other than fixing the bug (I have to do everything remotely on
the compile farm).  If a more elaborate fix is needed that also
catches other PRs, then I would like to hand this over to a
postreload maintainer, please.

Of particular importance for your case would be the note_stores call. 
But I could well see other targets needing the search for REG_INC notes 
as well as stack pushes.


If I'm right, then wouldn't it be better to factor that blob of code 
above into its own function, then use it before the "continue" rather 
than implementing a custom scan for CLOBBERs?


I cannot answer that.  Maybe the authors of the code have some ideas.

Johann

It also raises the question of whether the other case immediately above the code I
quoted needs similar adjustment.  It doesn't do the insn = next, but it 
does bypass the search for autoinc memory references and the note_stores 
call.


Jeff


Re: [patch] Fix PR101188 wrong code from postreload

2023-06-03 Thread Jeff Law via Gcc-patches




On 6/2/23 02:46, Georg-Johann Lay wrote:

There is the following bug in postreload that can be traced back
to v5 at least:

In postreload.cc::reload_cse_move2add() there is a loop over all
insns.  If it encounters a SET, the next insn is analyzed to see
whether it is a single_set.

After next has been analyzed, it continues with

   if (success)
 delete_insn (insn);
   changed |= success;
   insn = next; // This effectively skips analysis of next.
   move2add_record_mode (reg);
   reg_offset[regno]
 = trunc_int_for_mode (added_offset + base_offset,
   mode);
   continue; // for continues with insn = NEXT_INSN (insn).

So it records the effect of next, but not the clobbers that
next might have.  This is a problem if next clobbers a GPR,
as can happen on avr.  In a later round, the pass may then
use a value from a (partially) clobbered reg.

The patch records the effects of potential clobbers.

Bootstrapped and reg-tested on x86_64.  Also tested on avr where
the bug popped up.  The testcase discriminates on avr, and for now
I am not aware of any other target that's affected by the bug.

The change is not intrusive and fixes wrong code, so I'd like
to backport it.

Ok to apply?

Johann

rtl-optimization/101188: Don't bypass clobbers of some insns that are
optimized or are optimization candidates.

gcc/
 PR rtl-optimization/101188
 * postreload.cc (reload_cse_move2add): Record clobbers of next
 insn using move2add_note_store.

gcc/testsuite/
 PR rtl-optimization/101188
 * gcc.c-torture/execute/pr101188.c: New test.
If I understand the code correctly, isn't the core of the problem that 
we "continue" rather than executing the rest of the code in the loop. 
In particular the continue bypasses this chunk of code:



 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
{
  if (REG_NOTE_KIND (note) == REG_INC
  && REG_P (XEXP (note, 0)))
{
  /* Reset the information about this register.  */
  int regno = REGNO (XEXP (note, 0));
  if (regno < FIRST_PSEUDO_REGISTER)
{
  move2add_record_mode (XEXP (note, 0));
  reg_mode[regno] = VOIDmode;
}
}
}

  /* There are no REG_INC notes for SP autoinc.  */
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
{
  rtx mem = *iter;
  if (mem
  && MEM_P (mem)
  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
{
  if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
reg_mode[STACK_POINTER_REGNUM] = VOIDmode;
}
}

  note_stores (insn, move2add_note_store, insn);


Of particular importance for your case would be the note_stores call. 
But I could well see other targets needing the search for REG_INC notes 
as well as stack pushes.


If I'm right, then wouldn't it be better to factor that blob of code 
above into its own function, then use it before the "continue" rather 
than implementing a custom scan for CLOBBERs?


It also raises the question of whether the other case immediately above the code I
quoted needs similar adjustment.  It doesn't do the insn = next, but it 
does bypass the search for autoinc memory references and the note_stores 
call.





Jeff



[patch] Fix PR101188 wrong code from postreload

2023-06-02 Thread Georg-Johann Lay

There is the following bug in postreload that can be traced back
to v5 at least:

In postreload.cc::reload_cse_move2add() there is a loop over all
insns.  If it encounters a SET, the next insn is analyzed to see
whether it is a single_set.

After next has been analyzed, it continues with

  if (success)
delete_insn (insn);
  changed |= success;
  insn = next; // This effectively skips analysis of next.
  move2add_record_mode (reg);
  reg_offset[regno]
= trunc_int_for_mode (added_offset + base_offset,
  mode);
  continue; // for continues with insn = NEXT_INSN (insn).

So it records the effect of next, but not the clobbers that
next might have.  This is a problem if next clobbers a GPR,
as can happen on avr.  In a later round, the pass may then
use a value from a (partially) clobbered reg.
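
To make the failure mode concrete, here is a hypothetical avr-flavoured
sequence (illustrative only, not taken from the PR's dump; register
numbers are invented):

   insn:  (set (reg:HI 26) (const_int 1234))
   next:  (parallel [(set (reg:HI 26)
                          (plus:HI (reg:HI 26) (const_int 63)))
                     (clobber (reg:QI 21))])

After the pair has been analyzed, the loop does insn = next and
continue, so the new value of r26 is recorded, but the clobber of r21
is never processed; stale information about r21 survives and may be
used in a later round.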

The patch records the effects of potential clobbers.

Bootstrapped and reg-tested on x86_64.  Also tested on avr where
the bug popped up.  The testcase discriminates on avr, and for now
I am not aware of any other target that's affected by the bug.

The change is not intrusive and fixes wrong code, so I'd like
to backport it.

Ok to apply?

Johann

rtl-optimization/101188: Don't bypass clobbers of some insns that are
optimized or are optimization candidates.

gcc/
PR rtl-optimization/101188
* postreload.cc (reload_cse_move2add): Record clobbers of next
insn using move2add_note_store.

gcc/testsuite/
PR rtl-optimization/101188
* gcc.c-torture/execute/pr101188.c: New test.


diff --git a/gcc/postreload.cc b/gcc/postreload.cc
index fb392651e1b..2de3e2ea780 100644
--- a/gcc/postreload.cc
+++ b/gcc/postreload.cc
@@ -2033,6 +2033,14 @@ reload_cse_move2add (rtx_insn *first)
                   if (success)
                     delete_insn (insn);
                   changed |= success;
+                  // By setting "insn = next" below, we are bypassing the
+                  // side-effects of next, see PR101188.  Do them by hand
+                  subrtx_iterator::array_type array;
+                  FOR_EACH_SUBRTX (iter, array, PATTERN (next), NONCONST)
+                    {
+                      if (GET_CODE (*iter) == CLOBBER)
+                        move2add_note_store (XEXP (*iter, 0), *iter, next);
+                    }
                   insn = next;
                   move2add_record_mode (reg);
                   reg_offset[regno]
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr101188.c b/gcc/testsuite/gcc.c-torture/execute/pr101188.c
new file mode 100644
index 000..4817c69347c
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr101188.c
@@ -0,0 +1,59 @@
+typedef __UINT8_TYPE__ uint8_t;
+typedef __UINT16_TYPE__ uint16_t;
+
+typedef uint8_t (*fn1)(void *a);
+typedef void (*fn2)(void *a, int *arg);
+
+struct S
+{
+  uint8_t buffer[64];
+  uint16_t n;
+  fn2 f2;
+  void *a;
+  fn1 f1;
+};
+
+volatile uint16_t x;
+
+void __attribute__((__noinline__,__noclone__))
+foo (uint16_t n)
+{
+  x = n;
+}
+
+void __attribute__((__noinline__,__noclone__))
+testfn (struct S *self)
+{
+  int arg;
+
+  foo (self->n);
+  self->n++;
+  self->f2 (self->a, &arg);
+  self->buffer[0] = self->f1 (self->a);
+}
+
+static unsigned char myfn2_called = 0;
+
+static void
+myfn2 (void *a, int *arg)
+{
+  myfn2_called = 1;
+}
+
+static uint8_t
+myfn1 (void *a)
+{
+  return 0;
+}
+
+int main (void)
+{
+  struct S s;
+  s.n = 0;
+  s.f2 = myfn2;
+  s.f1 = myfn1;
+  testfn (&s);
+  if (myfn2_called != 1)
+    __builtin_abort();
+  return 0;
+}