I can add a check in the inferred range manager for atomic loads to
resolve this PR.
The IL sequence tends to look like:
_1 = &this_2(D)->b;
__atomic_load_8 (_1, 0);
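For context, that IL comes from source along the lines of the testcase in the
attached patch, where the atomic load goes through 'this':

  struct d {
    long a;
    long b;
    struct d *e () {
      // The load dereferences 'this', which is what should let us infer
      // that 'this' (this_2 in the IL above) is non-zero.
      __atomic_load_n (&b, 0);
      return this;
    }
  };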
Just want to make sure I get this right since memory operations are not
my strong suit.
Am I right that the first argument to the atomic load (_1) is non-null, and
that the base of the ADDR_EXPR on the RHS of _1's defining statement
(this_2) is also non-zero?
The attached patch scavenges a little code from
fold_using_range::range_of_address that I think works... but perhaps
there is a more efficient way to do this?  Is this likely to work OK and
be safe, or are there additional checks I need to be doing?
And I suppose there is an entire range of atomic operations this applies
to?  Certainly atomic_store should qualify...
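If the list grows, one way to keep the switch manageable might be a small
predicate along these lines (purely a hypothetical sketch, name and placement
made up; the point is just that __atomic_store_N also dereferences its first
argument, so the same argument-0 logic should apply):

  // Hypothetical helper: return true if CALL is an atomic builtin that
  // dereferences its first argument (loads now, presumably stores too).
  static bool
  atomic_deref_arg0_p (gcall *call)
  {
    switch (gimple_call_combined_fn (call))
      {
      case CFN_BUILT_IN_ATOMIC_LOAD_1:
      case CFN_BUILT_IN_ATOMIC_LOAD_2:
      case CFN_BUILT_IN_ATOMIC_LOAD_4:
      case CFN_BUILT_IN_ATOMIC_LOAD_8:
      case CFN_BUILT_IN_ATOMIC_LOAD_16:
      case CFN_BUILT_IN_ATOMIC_STORE_1:
      case CFN_BUILT_IN_ATOMIC_STORE_2:
      case CFN_BUILT_IN_ATOMIC_STORE_4:
      case CFN_BUILT_IN_ATOMIC_STORE_8:
      case CFN_BUILT_IN_ATOMIC_STORE_16:
        return true;
      default:
        return false;
      }
  }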
Bootstraps on x86_64-pc-linux-gnu with no regressions, but I'm not sure
that is a very thorough test for this...
Andrew
From 2c0ab831d2a472a0049a72252dfe4da2f7d975de Mon Sep 17 00:00:00 2001
From: Andrew MacLeod <[email protected]>
Date: Thu, 6 Nov 2025 17:47:55 -0500
Subject: [PATCH 8/8] Add non-null support to atomic loads
Atomic loads are not recognized as dereferences by inferred ranges.
PR tree-optimization/102829
gcc/
* gimple-range-infer.cc (gimple_infer_range): Add atomic non-zero
load support.
gcc/testsuite/
* g++.dg/pr102829.C: New.
---
gcc/gimple-range-infer.cc | 50 +++++++++++++++++++++++++++++++++
gcc/testsuite/g++.dg/pr102829.C | 35 +++++++++++++++++++++++
2 files changed, 85 insertions(+)
create mode 100644 gcc/testsuite/g++.dg/pr102829.C
diff --git a/gcc/gimple-range-infer.cc b/gcc/gimple-range-infer.cc
index 612f66626af..c519438c54a 100644
--- a/gcc/gimple-range-infer.cc
+++ b/gcc/gimple-range-infer.cc
@@ -249,6 +249,56 @@ gimple_infer_range::gimple_infer_range (gimple *s, range_query *q,
walk_stmt_load_store_ops (s, (void *)this, non_null_loadstore,
non_null_loadstore);
+  // Handle atomic loads.
+  if (gimple_call_builtin_p (s))
+    {
+      gcall *call = as_a <gcall *> (s);
+      switch (gimple_call_combined_fn (call))
+        {
+        case CFN_BUILT_IN_ATOMIC_LOAD_1:
+        case CFN_BUILT_IN_ATOMIC_LOAD_2:
+        case CFN_BUILT_IN_ATOMIC_LOAD_4:
+        case CFN_BUILT_IN_ATOMIC_LOAD_8:
+        case CFN_BUILT_IN_ATOMIC_LOAD_16:
+          {
+            // Treat the first load argument like a dereference.
+            tree op1 = gimple_call_arg (call, 0);
+            if (TREE_CODE (op1) == SSA_NAME)
+              {
+                // The argument is non-zero.
+                add_nonzero (op1);
+                // Look at the right hand side of the argument definition,
+                // and if it is an address calculation, the base is non-zero.
+                gimple *def_stmt = SSA_NAME_DEF_STMT (op1);
+                if (gimple_code (def_stmt) == GIMPLE_ASSIGN
+                    && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
+                  {
+                    tree expr = gimple_assign_rhs1 (def_stmt);
+                    poly_int64 bitsize, bitpos;
+                    tree offset;
+                    machine_mode mode;
+                    int unsignedp, reversep, volatilep;
+                    tree base = get_inner_reference (TREE_OPERAND (expr, 0),
+                                                     &bitsize, &bitpos,
+                                                     &offset, &mode,
+                                                     &unsignedp,
+                                                     &reversep,
+                                                     &volatilep);
+                    if (base != NULL_TREE
+                        && TREE_CODE (base) == MEM_REF
+                        && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
+                      {
+                        tree ssa = TREE_OPERAND (base, 0);
+                        add_nonzero (ssa);
+                      }
+                  }
+              }
+            break;
+          }
+        default:
+          break;
+        }
+    }
// Gated by flag.
if (!use_rangeops)
return;
diff --git a/gcc/testsuite/g++.dg/pr102829.C b/gcc/testsuite/g++.dg/pr102829.C
new file mode 100644
index 00000000000..7f377cad299
--- /dev/null
+++ b/gcc/testsuite/g++.dg/pr102829.C
@@ -0,0 +1,35 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-evrp" } */
+
+struct d {
+  long a;
+  long b;
+
+  struct d *e() {
+    __atomic_load_n(&b, 0);
+    return this;
+  }
+};
+
+d *j;
+
+void call ();
+
+void k()
+{
+  auto l = j->e();
+  if (l)
+    call();
+}
+
+
+long y;
+long z;
+void m (long *p)
+{
+  __atomic_load_n(p, 0);
+  if (p)
+    call ();
+}
+
+/* { dg-final { scan-tree-dump-not "if" "evrp" } } */
--
2.45.0