Package: zoneminder Version: 1.26.5-2 Severity: serious Tags: patch Hi,
zoneminder FTBFS on powerpc, powerpcspe and ppc64 like this: ... powerpc-linux-gnu-g++ -DHAVE_CONFIG_H -I. -I.. -I/usr/include -I/usr/include -D__STDC_CONSTANT_MACROS -Wall -finline-functions -fomit-frame-pointer -I/usr/include -D__STDC_CONSTANT_MACROS -D_FORTIFY_SOURCE=2 -D__STDC_CONSTANT_MACROS -D__STDC_CONSTANT_MACROS -g -O2 -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -DZM_FFMPEG_CVS -DHAVE_LIBCRYPTO -MT zm_image.o -MD -MP -MF .deps/zm_image.Tpo -c -o zm_image.o zm_image.cpp zm_image.cpp: In member function 'bool Image::ReadRaw(const char*)': zm_image.cpp:597:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare] if ( statbuf.st_size != size ) ^ zm_image.cpp: At global scope: zm_image.cpp:2991:165: error: __attribute__((__target__("sse2"))) is invalid __attribute__((noinline,__target__("sse2"))) void sse2_fastblend(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count, double blendpercent) { ^ zm_image.cpp:3333:147: error: __attribute__((__target__("sse2"))) is invalid __attribute__((noinline,__target__("sse2"))) void sse2_delta8_gray8(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { ^ ... There is already a patch to disable *SSE*-specific code on non-Intel hardware, but there are still some unconditional __attribute__((__target__("sse2"))) annotations left. I am not sure why it only affects powerpc systems. I am attaching a patch that fixes it. Roland -- System Information: Debian Release: 7.0 APT prefers unreleased APT policy: (500, 'unreleased'), (500, 'unstable') Architecture: powerpcspe (ppc) Kernel: Linux 3.9.0-dirty (SMP w/2 CPU cores) Locale: LANG=en_GB.UTF-8, LC_CTYPE=en_GB.UTF-8 (charmap=UTF-8) (ignored: LC_ALL set to en_GB.UTF-8) Shell: /bin/sh linked to /bin/dash
Index: zoneminder-1.26.5/src/zm_image.cpp =================================================================== --- zoneminder-1.26.5.orig/src/zm_image.cpp 2014-01-24 13:48:35.000000000 +0100 +++ zoneminder-1.26.5/src/zm_image.cpp 2014-01-24 14:29:36.888257874 +0100 @@ -2988,7 +2988,10 @@ /************************************************* BLEND FUNCTIONS *************************************************/ -__attribute__((noinline,__target__("sse2"))) void sse2_fastblend(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count, double blendpercent) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("sse2"))) +#endif +void sse2_fastblend(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count, double blendpercent) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) static uint32_t divider = 0; static uint32_t clearmask = 0; @@ -3330,7 +3333,10 @@ } /* Grayscale SSE2 */ -__attribute__((noinline,__target__("sse2"))) void sse2_delta8_gray8(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("sse2"))) +#endif +void sse2_delta8_gray8(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -3358,7 +3364,10 @@ } /* RGB32: RGBA SSE2 */ -__attribute__((noinline,__target__("sse2"))) void sse2_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("sse2"))) +#endif +void sse2_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ 
__volatile__ ( @@ -3413,7 +3422,10 @@ } /* RGB32: BGRA SSE2 */ -__attribute__((noinline,__target__("sse2"))) void sse2_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("sse2"))) +#endif +void sse2_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -3468,7 +3480,10 @@ } /* RGB32: ARGB SSE2 */ -__attribute__((noinline,__target__("sse2"))) void sse2_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("sse2"))) +#endif +void sse2_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -3524,7 +3539,10 @@ } /* RGB32: ABGR SSE2 */ -__attribute__((noinline,__target__("sse2"))) void sse2_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("sse2"))) +#endif +void sse2_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -3580,7 +3598,10 @@ } /* RGB32: RGBA SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { 
#if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -3632,7 +3653,10 @@ } /* RGB32: BGRA SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -3684,7 +3708,10 @@ } /* RGB32: ARGB SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -3737,7 +3764,10 @@ } /* RGB32: ABGR SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -3989,7 +4019,10 @@ } /* RGBA to grayscale SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_convert_rgba_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) 
+#endif +void ssse3_convert_rgba_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __asm__ __volatile__ ( @@ -4035,7 +4068,10 @@ } /* Converts a YUYV image into grayscale by extracting the Y channel */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_convert_yuyv_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_convert_yuyv_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) unsigned long i = 0; @@ -4652,7 +4688,10 @@ } /* Grayscale SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_gray8(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_deinterlace_4field_gray8(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) union { @@ -4785,7 +4824,10 @@ } /* RGBA SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_rgba(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_deinterlace_4field_rgba(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __attribute__((aligned(16))) static const uint8_t movemask2[16] = {1,1,1,1,1,0,0,2,9,9,9,9,9,8,8,10}; @@ 
-4965,7 +5007,10 @@ } /* BGRA SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_bgra(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_deinterlace_4field_bgra(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __attribute__((aligned(16))) static const uint8_t movemask2[16] = {1,1,1,1,1,2,2,0,9,9,9,9,9,10,10,8}; @@ -5145,7 +5190,10 @@ } /* ARGB SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_argb(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_deinterlace_4field_argb(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __attribute__((aligned(16))) static const uint8_t movemask2[16] = {2,2,2,2,2,1,1,3,10,10,10,10,10,9,9,11}; @@ -5325,7 +5373,10 @@ } /* ABGR SSSE3 */ -__attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_abgr(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("ssse3"))) +#endif +void ssse3_deinterlace_4field_abgr(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) __attribute__((aligned(16))) static const uint8_t movemask2[16] = {2,2,2,2,2,3,3,1,10,10,10,10,10,11,11,9}; Index: 
zoneminder-1.26.5/src/zm_utils.cpp =================================================================== --- zoneminder-1.26.5.orig/src/zm_utils.cpp 2013-12-16 16:42:36.000000000 +0100 +++ zoneminder-1.26.5/src/zm_utils.cpp 2014-01-24 14:40:48.361170257 +0100 @@ -192,7 +192,10 @@ /* SSE2 aligned memory copy. Useful for big copying of aligned memory like image buffers in ZM */ /* For platforms without SSE2 we will use standard x86 asm memcpy or glibc's memcpy() */ -__attribute__((noinline,__target__("sse2"))) void* sse2_aligned_memcpy(void* dest, const void* src, size_t bytes) { +#if defined(__i386__) || defined(__x86_64__) +__attribute__((noinline,__target__("sse2"))) +#endif +void* sse2_aligned_memcpy(void* dest, const void* src, size_t bytes) { #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE)) if(bytes > 128) { unsigned int remainder = bytes % 128;