On Ubuntu 21.10 (ppc64le), building `raid6test` with gcc (Ubuntu
11.2.0-7ubuntu2) 11.2.0 fails with the linker error below.

    $ cd lib/raid6/test
    $ make
    […]
    gcc -I.. -I ../../../include -g -O2 -I../../../arch/powerpc/include -DCONFIG_ALTIVEC -o raid6test test.c raid6.a
    /usr/bin/ld: raid6.a(algos.o):/dev/shm/linux/lib/raid6/test/algos.c:28: multiple definition of `raid6_call'; /scratch/local/ccHnUnID.o:/dev/shm/linux/lib/raid6/test/test.c:22: first defined here
    collect2: error: ld returned 1 exit status
    make: *** [Makefile:74: raid6test] Error 1

GCC 10 changed the default from `-fcommon` to `-fno-common`, so the two
tentative definitions of `raid6_call` (in `test.c` and `algos.c`) are no
longer merged at link time. Renaming the variable in `test.c` to
`raid6_call2` works around that.
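
The clash is easy to reproduce outside the kernel tree; a minimal
two-file sketch (hypothetical files `a.c` and `b.c`) hits the same
linker error:

    /* a.c */
    struct raid6_calls { const char *name; };
    struct raid6_calls raid6_call;      /* tentative definition */

    /* b.c */
    struct raid6_calls { const char *name; };
    struct raid6_calls raid6_call;      /* second tentative definition */

    int main(void) { return 0; }

    $ gcc a.c b.c            # GCC >= 10 (-fno-common): multiple definition
    $ gcc -fcommon a.c b.c   # pre-GCC-10 default: common symbols merged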

The resulting binary terminates with a segmentation fault:

    $ ./raid6test
    using recovery intx1
    Segmentation fault (core dumped)
    $ dmesg | tail -3
    [75519.758988] raid6test[1891185]: segfault (11) at 0 nip 0 lr 708aa3fe197c code 1 in libc.so.6[708aa3ca0000+260000]
    [75519.759006] raid6test[1891185]: code: XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
    [75519.759024] raid6test[1891185]: code: XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
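
The `nip 0` suggests execution branched to address 0, so the rename may
just trade the link error for a call through a function pointer that is
never initialized. Since the linker error shows `algos.c:28` already
defines the object, an alternative to renaming could be to only declare
it in `test.c`; a sketch, untested here:

    /* lib/raid6/test/test.c (sketch): reference the single definition
       in algos.c instead of creating a second object */
    extern struct raid6_calls raid6_call;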

Cc: Matt Brown <matthew.brown....@gmail.com>
Signed-off-by: Paul Menzel <pmen...@molgen.mpg.de>
---
 lib/raid6/test/test.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c
index a3cf071941ab..937d2a8bb294 100644
--- a/lib/raid6/test/test.c
+++ b/lib/raid6/test/test.c
@@ -19,7 +19,7 @@
 #define NDISKS         16      /* Including P and Q */
 
 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
-struct raid6_calls raid6_call;
+struct raid6_calls raid6_call2;
 
 char *dataptrs[NDISKS];
 char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
@@ -71,7 +71,7 @@ static int test_disks(int i, int j)
                erra = errb = 0;
        } else {
                printf("algo=%-8s  faila=%3d(%c)  failb=%3d(%c)  %s\n",
-                      raid6_call.name,
+                      raid6_call2.name,
                       i, disk_type(i),
                       j, disk_type(j),
                       (!erra && !errb) ? "OK" :
@@ -107,30 +107,30 @@ int main(int argc, char *argv[])
                        if ((*algo)->valid && !(*algo)->valid())
                                continue;
 
-                       raid6_call = **algo;
+                       raid6_call2 = **algo;
 
                        /* Nuke syndromes */
                        memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
 
                        /* Generate assumed good syndrome */
-                       raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
+                       raid6_call2.gen_syndrome(NDISKS, PAGE_SIZE,
                                                (void **)&dataptrs);
 
                        for (i = 0; i < NDISKS-1; i++)
                                for (j = i+1; j < NDISKS; j++)
                                        err += test_disks(i, j);
 
-                       if (!raid6_call.xor_syndrome)
+                       if (!raid6_call2.xor_syndrome)
                                continue;
 
                        for (p1 = 0; p1 < NDISKS-2; p1++)
                                for (p2 = p1; p2 < NDISKS-2; p2++) {
 
                                        /* Simulate rmw run */
-                                       raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+                                       raid6_call2.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
                                                                (void **)&dataptrs);
                                        makedata(p1, p2);
-                                       raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+                                       raid6_call2.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
                                                                 (void **)&dataptrs);
 
                                        for (i = 0; i < NDISKS-1; i++)
-- 
2.34.1
