On 10/29/2013 02:51 AM, Bernd Edlinger wrote:
> On Mon, 28 Oct 2013 21:29:24, Sandra Loosemore wrote:
>> On 10/28/2013 03:20 AM, Bernd Edlinger wrote:
>>> I have attached an update to your patch that should
>>> a) fix the recursion problem, and
>>> b) restrict -fstrict-volatile-bitfields so that it does not violate
>>> the C++ memory model.
Here's a new version of the update patch.
>> Alternatively, if strict_volatile_bitfield_p returns false but
>> flag_strict_volatile_bitfields > 0, then always force to word_mode and
>> change the -fstrict-volatile-bitfields documentation to indicate that's
>> the fallback if the insertion/extraction cannot be done in the declared
>> mode, rather than claiming that it tries to do the same thing as if
>> -fstrict-volatile-bitfields were not enabled at all.
I decided that this approach was more expedient, after all.
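
To make the fallback concrete (my illustration, not part of the patch): on a
strict-alignment target such as ARM, the packed struct below leaves the field
without the natural alignment of its declared SImode type, so the single
declared-mode access that -fstrict-volatile-bitfields asks for is impossible,
and the new code falls back to word_mode handling instead of generating a
faulting access.

  /* Hypothetical example: the packed attribute reduces the struct's
     alignment to 1, so x.f cannot be written with one naturally
     aligned 32-bit (SImode) access on a strict-alignment target.  */
  struct __attribute__ ((packed)) s
  {
    char c;
    volatile unsigned int f : 8;
  };

  extern struct s x;

  void write_f (void) { x.f = 3; }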
I've tested this patch (in conjunction with my already-approved but
not-yet-applied patch) on mainline for arm-none-eabi, x86_64-linux-gnu,
and mips-linux-gnu. I also backported the entire series to GCC 4.8 and
tested there on arm-none-eabi and x86_64-linux-gnu. OK to apply?
-Sandra
2013-10-30  Bernd Edlinger  <bernd.edlin...@hotmail.de>
            Sandra Loosemore  <san...@codesourcery.com>

        PR middle-end/23623
        PR middle-end/48784
        PR middle-end/56341
        PR middle-end/56997

gcc/
        * expmed.c (strict_volatile_bitfield_p): Add bitregion_start
        and bitregion_end parameters.  Test for compliance with C++
        memory model.
        (store_bit_field): Adjust call to strict_volatile_bitfield_p.
        Add fallback logic for cases where -fstrict-volatile-bitfields
        is supposed to apply, but cannot.
        (extract_bit_field): Likewise.
        * doc/invoke.texi (Code Gen Options): Better document fallback for
        -fstrict-volatile-bitfields.

gcc/testsuite/
        * gcc.dg/pr23623.c: Update to test interaction with C++
        memory model.
diff -u gcc/doc/invoke.texi gcc/doc/invoke.texi
--- gcc/doc/invoke.texi (working copy)
+++ gcc/doc/invoke.texi (working copy)
@@ -21659,7 +21659,8 @@
 In some cases, such as when the @code{packed} attribute is applied to a
 structure field, it may not be possible to access the field with a single
 read or write that is correctly aligned for the target machine.  In this
-case GCC falls back to generating multiple accesses rather than code that
+case GCC falls back to generating either multiple accesses
+or an access in a larger mode, rather than code that
 will fault or truncate the result at run time.

 The default value of this option is determined by the application binary
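
To illustrate the two fallback shapes the new sentence describes (a sketch
under assumed conditions: little-endian, byte-addressed, strict-alignment
target; not actual compiler output): the misaligned field below may be read
by assembling bytes, or by one wider access that is aligned.

  /* Hypothetical example of a packed, misaligned 16-bit volatile
     bitfield; TAG pushes VAL to byte offset 1.  */
  struct __attribute__ ((packed)) q
  {
    char tag;
    volatile unsigned short val : 16;
  };

  /* Roughly what "multiple accesses" means here: assemble the value
     from two byte loads instead of one faulting halfword load.
     BASE points at a struct q; little-endian assumed.  */
  static unsigned short
  read_val (volatile unsigned char *base)
  {
    return (unsigned short) (base[1] | (base[2] << 8));
  }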
diff -u gcc/testsuite/gcc.dg/pr23623.c gcc/testsuite/gcc.dg/pr23623.c
--- gcc/testsuite/gcc.dg/pr23623.c (revision 0)
+++ gcc/testsuite/gcc.dg/pr23623.c (revision 0)
@@ -8,16 +8,19 @@
 extern struct
 {
   unsigned int b : 1;
+  unsigned int : 31;
 } bf1;

 extern volatile struct
 {
   unsigned int b : 1;
+  unsigned int : 31;
 } bf2;

 extern struct
 {
   volatile unsigned int b : 1;
+  volatile unsigned int : 31;
 } bf3;

 void writeb(void)
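
A note on why the anonymous ': 31' members are added (my reading of the
patch): without them, the representative for a lone 1-bit field can be as
narrow as QImode, so the bit region ends at bit 7 and the new memory-model
check would veto the single SImode access the test expects; padding the
bitfield out to a full 32 bits widens the region to cover the whole int.
Conversely, a case the new check is meant to reject might look like this
(hypothetical, not part of the test):

  /* The bit region for B stops before C, which is a distinct memory
     location under the C++11 model, so a covering SImode access would
     touch bits outside the region and -fstrict-volatile-bitfields
     must step aside.  */
  extern volatile struct
  {
    unsigned int b : 1;  /* narrow bit region */
    unsigned char c;     /* separate memory location */
  } bf4;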
diff -u gcc/expmed.c gcc/expmed.c
--- gcc/expmed.c (working copy)
+++ gcc/expmed.c (working copy)
@@ -416,12 +416,17 @@
 }

 /* Return true if -fstrict-volatile-bitfields applies to an access of OP0
-   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.  */
+   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
+   Return false if the access would touch memory outside the range
+   BITREGION_START to BITREGION_END for conformance to the C++ memory
+   model.  */

 static bool
 strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                             unsigned HOST_WIDE_INT bitnum,
-                            enum machine_mode fieldmode)
+                            enum machine_mode fieldmode,
+                            unsigned HOST_WIDE_INT bitregion_start,
+                            unsigned HOST_WIDE_INT bitregion_end)
 {
   unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);

@@ -448,6 +453,12 @@
           && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
     return false;

+  /* Check for cases where the C++ memory model applies.  */
+  if (bitregion_end != 0
+      && (bitnum - bitnum % modesize < bitregion_start
+          || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
+    return false;
+
   return true;
 }

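
As a sanity check on the arithmetic (my example values, not from the patch):
with a QImode representative, the bit region is bits 0-7 inclusive, while an
SImode access starting at bit 0 covers bits 0-31, so the predicate must
return false. A standalone sketch of the same computation:

  /* Mirrors the new range check with assumed example values; not GCC
     code.  Note that bitregion_end is an inclusive bit index, hence
     the "- 1" on the last bit of the access.  */
  #include <stdio.h>

  int main (void)
  {
    unsigned long bitnum = 0, modesize = 32;              /* SImode field */
    unsigned long bitregion_start = 0, bitregion_end = 7; /* QImode region */

    unsigned long access_first = bitnum - bitnum % modesize;
    unsigned long access_last = access_first + modesize - 1;

    int rejected = bitregion_end != 0
                   && (access_first < bitregion_start
                       || access_last > bitregion_end);
    printf ("covering access rejected: %d\n", rejected);  /* prints 1 */
    return 0;
  }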
@@ -904,7 +915,8 @@
                  rtx value)
 {
   /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
-  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode))
+  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode,
+                                  bitregion_start, bitregion_end))
     {
       /* Storing any naturally aligned field can be done with a simple
@@ -923,6 +935,14 @@
         store_fixed_bit_field (str_rtx, bitsize, bitnum, 0, 0, value);
       return;
     }
+  else if (MEM_P (str_rtx)
+           && MEM_VOLATILE_P (str_rtx)
+           && flag_strict_volatile_bitfields > 0)
+    /* This is a case where -fstrict-volatile-bitfields doesn't apply
+       because we can't do a single access in the declared mode of the field.
+       Since the incoming STR_RTX has already been adjusted to that mode,
+       fall back to word mode for subsequent logic.  */
+    str_rtx = adjust_address (str_rtx, word_mode, 0);

   /* Under the C++0x memory model, we must not touch bits outside the
      bit region.  Adjust the address to start at the beginning of the
@@ -1695,7 +1715,7 @@
   else
     mode1 = tmode;

-  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1))
+  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1, 0, 0))
     {
       rtx result;

@@ -1709,6 +1729,14 @@
                                        target, unsignedp);
       return convert_extracted_bit_field (result, mode, tmode, unsignedp);
     }
+  else if (MEM_P (str_rtx)
+           && MEM_VOLATILE_P (str_rtx)
+           && flag_strict_volatile_bitfields > 0)
+    /* This is a case where -fstrict-volatile-bitfields doesn't apply
+       because we can't do a single access in the declared mode of the field.
+       Since the incoming STR_RTX has already been adjusted to that mode,
+       fall back to word mode for subsequent logic.  */
+    str_rtx = adjust_address (str_rtx, word_mode, 0);

   return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
                               target, mode, tmode, true);
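
One asymmetry worth noting: extract_bit_field has no bit-region parameters,
so the predicate is called with 0, 0 on the read path and the new
memory-model check can only fire for stores. A minimal read that would take
the new extract-path fallback (hypothetical example; strict-alignment target
assumed, as above):

  /* As in the store case, the packed attribute makes a single aligned
     SImode read of y.f impossible, so the read falls back to word_mode
     handling instead of faulting.  */
  struct __attribute__ ((packed)) s2
  {
    char c;
    volatile unsigned int f : 8;
  };

  extern struct s2 y;

  unsigned int read_f (void) { return y.f; }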