Hi,
This patch fixes the definitions of the vld1* intrinsics
to take a const-qualified argument.
I've tested this on aarch64-none-elf with no regressions.
Is this OK to commit?
Thanks,
James Greenhalgh
---
2013-01-07  James Greenhalgh  <james.greenhalgh@arm.com>
* config/aarch64/arm_neon.h (vld1_dup_*): Make argument const.
(vld1q_dup_*): Likewise.
(vld1_*): Likewise.
(vld1q_*): Likewise.
(vld1_lane_*): Likewise.
(vld1q_lane_*): Likewise.
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index e8fafa6..21fa428 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8387,7 +8387,7 @@ vhsubq_u32 (uint32x4_t a, uint32x4_t b)
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vld1_dup_f32 (float32_t * a)
+vld1_dup_f32 (const float32_t * a)
{
float32x2_t result;
__asm__ ("ld1r {%0.2s},[%1]"
@@ -8398,7 +8398,7 @@ vld1_dup_f32 (float32_t * a)
}
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vld1_dup_f64 (float64_t * a)
+vld1_dup_f64 (const float64_t * a)
{
float64x1_t result;
__asm__ ("ld1 {%0.1d},[%1]"
@@ -8409,7 +8409,7 @@ vld1_dup_f64 (float64_t * a)
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vld1_dup_p8 (poly8_t * a)
+vld1_dup_p8 (const poly8_t * a)
{
poly8x8_t result;
__asm__ ("ld1r {%0.8b},[%1]"
@@ -8420,7 +8420,7 @@ vld1_dup_p8 (poly8_t * a)
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vld1_dup_p16 (poly16_t * a)
+vld1_dup_p16 (const poly16_t * a)
{
poly16x4_t result;
__asm__ ("ld1r {%0.4h},[%1]"
@@ -8431,7 +8431,7 @@ vld1_dup_p16 (poly16_t * a)
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vld1_dup_s8 (int8_t * a)
+vld1_dup_s8 (const int8_t * a)
{
int8x8_t result;
__asm__ ("ld1r {%0.8b},[%1]"
@@ -8442,7 +8442,7 @@ vld1_dup_s8 (int8_t * a)
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vld1_dup_s16 (int16_t * a)
+vld1_dup_s16 (const int16_t * a)
{
int16x4_t result;
__asm__ ("ld1r {%0.4h},[%1]"
@@ -8453,7 +8453,7 @@ vld1_dup_s16 (int16_t * a)
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vld1_dup_s32 (int32_t * a)
+vld1_dup_s32 (const int32_t * a)
{
int32x2_t result;
__asm__ ("ld1r {%0.2s},[%1]"
@@ -8464,7 +8464,7 @@ vld1_dup_s32 (int32_t * a)
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vld1_dup_s64 (int64_t * a)
+vld1_dup_s64 (const int64_t * a)
{
int64x1_t result;
__asm__ ("ld1 {%0.1d},[%1]"
@@ -8475,7 +8475,7 @@ vld1_dup_s64 (int64_t * a)
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vld1_dup_u8 (uint8_t * a)
+vld1_dup_u8 (const uint8_t * a)
{
uint8x8_t result;
__asm__ ("ld1r {%0.8b},[%1]"
@@ -8486,7 +8486,7 @@ vld1_dup_u8 (uint8_t * a)
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vld1_dup_u16 (uint16_t * a)
+vld1_dup_u16 (const uint16_t * a)
{
uint16x4_t result;
__asm__ ("ld1r {%0.4h},[%1]"
@@ -8497,7 +8497,7 @@ vld1_dup_u16 (uint16_t * a)
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vld1_dup_u32 (uint32_t * a)
+vld1_dup_u32 (const uint32_t * a)
{
uint32x2_t result;
__asm__ ("ld1r {%0.2s},[%1]"
@@ -8508,7 +8508,7 @@ vld1_dup_u32 (uint32_t * a)
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vld1_dup_u64 (uint64_t * a)
+vld1_dup_u64 (const uint64_t * a)
{
uint64x1_t result;
__asm__ ("ld1 {%0.1d},[%1]"
@@ -8519,7 +8519,7 @@ vld1_dup_u64 (uint64_t * a)
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vld1_f32 (float32_t * a)
+vld1_f32 (const float32_t * a)
{
float32x2_t result;
__asm__ ("ld1 {%0.2s},[%1]"
@@ -8530,7 +8530,7 @@ vld1_f32 (float32_t * a)
}
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vld1_f64 (float64_t * a)
+vld1_f64 (const float64_t * a)
{
float64x1_t result;
__asm__ ("ld1 {%0.1d},[%1]"
@@ -8544,7 +8544,7 @@ vld1_f64 (float64_t * a)
__extension__ \
({ \
float32x2_t b_ = (b);\
- float32_t * a_ = (a);\
+ const float32_t * a_ = (a); \
float32x2_t result; \
__asm__ ("ld1 {%0.s}[%3],[%1]" \
: "=w"(result) \
@@ -8557,7 +8557,7 @@ vld1_f64 (float64_t * a)
__extension__ \
({