Using the latest git pycuda (with its included boost) and the CUDA 3.2 driver
and toolkit, installation seems to work with 64-bit Python on Snow Leopard.
Both test_driver and test_cumath pass. However, not all of the tests in
test_gpuarray do; the output I get is below. I am using the Python 2.6.4 that
ships with Sage.
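
For reference, this is roughly how I build and run the tests against Sage's
Python (the exact path to Sage's python binary is my guess from the egg paths
in the tracebacks below; adjust to your install):

    cd pycuda
    /Applications/sage/local/bin/python2.6 configure.py
    make
    sudo make install
    cd test
    /Applications/sage/local/bin/python2.6 test_gpuarray.py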

================================================= test session starts ==================================================
python: platform darwin -- Python 2.6.4 -- pytest-1.2.1
test object 1: test_gpuarray.py

test_gpuarray.py .F.F...F..F.....F.......F.F......F.

======================================================= FAILURES =======================================================
_______________________________________________ TestGPUArray.test_random _______________________________________________

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()

        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/tools.py:503:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_gpuarray.TestGPUArray instance at 0x10526aa28>

    @mark_cuda_test
    def test_random(self):
        from pycuda.curandom import rand as curand

        if has_double_support():
            dtypes = [numpy.float32, numpy.float64]
        else:
            dtypes = [numpy.float32]

        for dtype in dtypes:
            a = curand((10, 100), dtype=dtype).get()

>           assert (0 <= a).all()
E           assert False
E            +  where False = 0 <= array([[  1.00000000e+00,   2.00000000e+00,   3.00000000e+00,\n          4.0000...   1.18008837e-38,   4.59205507e-41,\n          1.46936794e-38]], dtype=float32).all()

test_gpuarray.py:255: AssertionError
_______________________________________________ TestGPUArray.test_minmax _______________________________________________

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()

        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/tools.py:503:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_gpuarray.TestGPUArray instance at 0x1052acf80>

    @mark_cuda_test
    def test_minmax(self):
        from pycuda.curandom import rand as curand

        if has_double_support():
            dtypes = [numpy.float64, numpy.float32, numpy.int32]
        else:
            dtypes = [numpy.float32, numpy.int32]

        for what in ["min", "max"]:
            for dtype in dtypes:
                a_gpu = curand((200000,), dtype)
                a = a_gpu.get()

                op_a = getattr(numpy, what)(a)
                op_a_gpu = getattr(gpuarray, what)(a_gpu).get()

>               assert op_a_gpu == op_a, (op_a_gpu, op_a, dtype, what)
E               AssertionError: (array(-3.4028234663852886e+38, dtype=float32), nan, <type 'numpy.float32'>, 'min')

test_gpuarray.py:450: AssertionError
____________________________________________ TestGPUArray.test_complex_bits ____________________________________________

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()

        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/tools.py:503:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_gpuarray.TestGPUArray instance at 0x1052b4cb0>

    @mark_cuda_test
    def test_complex_bits(self):
        from pycuda.curandom import rand as curand

        if has_double_support():
            dtypes = [numpy.complex64, numpy.complex128]
        else:
            dtypes = [numpy.complex64]

        n = 20
        for tp in dtypes:
            dtype = numpy.dtype(tp)
>           from pytools import match_precision
E           ImportError: cannot import name match_precision

test_gpuarray.py:591: ImportError
____________________________________________ TestGPUArray.test_subset_minmax ____________________________________________

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()

        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/tools.py:503:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_gpuarray.TestGPUArray instance at 0x1052bbb48>

    @mark_cuda_test
    def test_subset_minmax(self):
        from pycuda.curandom import rand as curand

        l_a = 200000
        gran = 5
        l_m = l_a - l_a // gran + 1

        if has_double_support():
            dtypes = [numpy.float64, numpy.float32, numpy.int32]
        else:
            dtypes = [numpy.float32, numpy.int32]

        for dtype in dtypes:
            a_gpu = curand((l_a,), dtype)
            a = a_gpu.get()

            meaningful_indices_gpu = gpuarray.zeros(l_m, dtype=numpy.int32)
            meaningful_indices = meaningful_indices_gpu.get()
            j = 0
            for i in range(len(meaningful_indices)):
                meaningful_indices[i] = j
                j = j + 1
                if j % gran == 0:
                    j = j + 1

            meaningful_indices_gpu = gpuarray.to_gpu(meaningful_indices)
            b = a[meaningful_indices]

            min_a = numpy.min(b)
            min_a_gpu = gpuarray.subset_min(meaningful_indices_gpu, a_gpu).get()

>           assert min_a_gpu == min_a
E           assert array(-3.4024076769884137e+38, dtype=float32) == nan

test_gpuarray.py:484: AssertionError
________________________________________________ TestGPUArray.test_sum _________________________________________________

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()

        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/tools.py:503:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_gpuarray.TestGPUArray instance at 0x1052bbe18>

    @mark_cuda_test
    def test_sum(self):
        from pycuda.curandom import rand as curand
        a_gpu = curand((200000,))
        a = a_gpu.get()

        sum_a = numpy.sum(a)

        from pycuda.reduction import get_sum_kernel
        sum_a_gpu = gpuarray.sum(a_gpu).get()

>       assert abs(sum_a_gpu-sum_a)/abs(sum_a) < 1e-4
E       assert (abs((array(nan, dtype=float32) - nan)) / abs(nan)) < 0.0001

test_gpuarray.py:431: AssertionError
____________________________________________ TestGPUArray.test_if_positive _____________________________________________

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()

        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/tools.py:503:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_gpuarray.TestGPUArray instance at 0x1052c0758>

    @mark_cuda_test
    def test_if_positive(self):
        from pycuda.curandom import rand as curand

        l = 20
        a_gpu = curand((l,))
        b_gpu = curand((l,))
        a = a_gpu.get()
        b = b_gpu.get()

        import pycuda.gpuarray as gpuarray

>       max_a_b_gpu = gpuarray.maximum(a_gpu, b_gpu)

test_gpuarray.py:530:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

a = <[LogicError("cuMemcpyDtoH failed: invalid value") raised in repr()] SafeRepr object at 0x10207b6c8>
b = <[LogicError("cuMemcpyDtoH failed: invalid value") raised in repr()] SafeRepr object at 0x10207b6c8>
out = <[LogicError("cuMemcpyDtoH failed: invalid value") raised in repr()] SafeRepr object at 0x10207b6c8>
stream = None

    def f(a, b, out=None, stream=None):
        if out is None:
            out = empty_like(a)

        func = elementwise.get_binary_minmax_kernel(which,
>               a.dtype, b.dtype, out.dtype)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/gpuarray.py:956:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

func = 'maxf', dtype_x = dtype('float32'), dtype_y = dtype('float32'), dtype_z = dtype('float32')

    def get_binary_minmax_kernel(func, dtype_x, dtype_y, dtype_z):
        if not numpy.float64 in [dtype_x, dtype_y]:
            func = func +"f"

>       from pytools import any
E       ImportError: cannot import name any

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/elementwise.py:371:
ImportError
_______________________________________________ TestGPUArray.test_slice ________________________________________________

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()

        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/tools.py:503:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_gpuarray.TestGPUArray instance at 0x1062728c0>

    @mark_cuda_test
    def test_slice(self):
        from pycuda.curandom import rand as curand

        l = 20000
        a_gpu = curand((l,))
        a = a_gpu.get()

        from random import randrange
        for i in range(200):
            start = randrange(l)
            end = randrange(start, l)

            a_gpu_slice = a_gpu[start:end]
            a_slice = a[start:end]

>           assert la.norm(a_gpu_slice.get()-a_slice) == 0
E           assert nan == 0
E            +  where nan = <function norm at 0x1004e35f0>((array([-0.72050297, -0.52672142,         NaN, ..., -0.06777093,\n               NaN, -0.63302296], dtype=float32) - array([-0.72050297, -0.52672142,         NaN, ..., -0.06777093,\n               NaN, -0.63302296], dtype=float32)))
E            +    where <function norm at 0x1004e35f0> = la.norm
E            +    and   array([-0.72050297, -0.52672142,         NaN, ..., -0.06777093,\n               NaN, -0.63302296], dtype=float32) = array([-0.72050297, -0.52672142,         NaN, ..., -0.06777093,\n               NaN, -0.63302296], dtype=float32).get()

test_gpuarray.py:516: AssertionError
________________________________________________ TestGPUArray.test_dot _________________________________________________

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()

        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/Applications/sage/local/lib/python2.6/site-packages/pycuda-0.94.1-py2.6-macosx-10.4-i386.egg/pycuda/tools.py:503:

_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_gpuarray.TestGPUArray instance at 0x106276d88>

    @mark_cuda_test
    def test_dot(self):
        from pycuda.curandom import rand as curand
        a_gpu = curand((200000,))
        a = a_gpu.get()
        b_gpu = curand((200000,))
        b = b_gpu.get()

        dot_ab = numpy.dot(a, b)

        dot_ab_gpu = gpuarray.dot(a_gpu, b_gpu).get()

>       assert abs(dot_ab_gpu-dot_ab)/abs(dot_ab) < 1e-4
E       assert (abs((array(nan, dtype=float32) - nan)) / abs(nan)) < 0.0001

test_gpuarray.py:498: AssertionError
========================================= 8 failed, 27 passed in 12.51 seconds =========================================
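
Two things stand out to me in the failures above. The test_complex_bits and
test_if_positive failures are plain ImportErrors (match_precision and any
from pytools), which may simply mean my pytools is older than what git pycuda
expects. Every other failure comes down to curand handing back NaNs or
denormal garbage instead of uniform random numbers. Here is a minimal sanity
check I would run to separate the two issues; it uses the same
pycuda.curandom.rand call as the failing tests, plus the standard
pycuda.autoinit idiom for context setup:

    import numpy
    import pycuda.autoinit  # creates a context on the default device
    from pycuda.curandom import rand as curand

    # same shape and dtype the failing tests draw from
    a = curand((200000,), dtype=numpy.float32).get()

    print "any NaN: ", numpy.isnan(a).any()
    print "min/max: ", a.min(), a.max()
    print "in [0,1):", ((0 <= a) & (a < 1)).all()

If the array already contains NaNs here, the minmax/sum/dot/slice failures
are all downstream of the random-number generation rather than separate bugs.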

On Tue, Sep 21, 2010 at 02:40, Alan <alanwil...@gmail.com> wrote:

> Dear all,
>
> I did on my MBP SL 10.6.4:
>
>
> #pycuda 21/09/10
>
> wget -c
> http://pypi.python.org/packages/source/p/pycuda/pycuda-0.94.1.tar.gz
>
> tar xvfz pycuda-0.94.1.tar.gz
>
> cd pycuda-0.94.1
>
> /usr/bin/python2.6 configure.py
>
> make
>
> sudo make install
>
> cd test
>
> /usr/bin/python2.6 test_driver.py
>
> /usr/bin/python2.6 test_gpuarray.py
>
> /usr/bin/python2.6 test_cumath.py
>
> ALL TESTS PASSED!!!
>
> I did also with /sw/bin/python2.6 and /sw/bin/python2.7 and everything
> worked.
>
>
> Many thanks,
>
>
> Alan
>
>
>
> On 21 September 2010 08:03, Alan <alanwil...@gmail.com> wrote:
>
>> Dear Andreas,
>>
>> It seems that we're finally getting there. You may not remember, but I've
>> been pursuing this issue since the very beginning (I may even have started
>> this discussion), though I haven't had time for it lately.
>>
>> For your next pycuda and pyopencl releases, I would appreciate having
>> 64-bit support available out of the box on Mac, along with updated
>> instructions for installing and testing.
>>
>> Many thanks to you all guys who cracked this issue.
>>
>> Alan
>>
>> On 21 September 2010 00:53, Andreas Kloeckner <li...@informa.tiker.net> wrote:
>>
>>> Hi Art, Min, Bryan,
>>>
>>> On Mon, 20 Sep 2010 15:25:24 -0700, Art <grenan...@gmail.com> wrote:
>>> > Thanks for posting the fork. I used your modification to compiler.py
>>> (my
>>> > original one was incorrect) and I built a 64-bit only version of pycuda
>>> and
>>> > all tests under tests/ passed for the first time. I also was able to
>>> call
>>> > cublas and cufft using something similar to parret [1].
>>>
>>> Thanks very much for getting to the bottom of this pesky problem! (Or
>>> that's at least what it seems like to me--right?) I've pulled both Min's
>>> and Bryan's fixes into PyCUDA's git.
>>>
>>> Thanks again,
>>> Andreas
>>>
>>>
>>>
>>
>>
>> --
>> Alan Wilter S. da Silva, D.Sc. - CCPN Research Associate
>> Department of Biochemistry, University of Cambridge.
>> 80 Tennis Court Road, Cambridge CB2 1GA, UK.
>> >>http://www.bio.cam.ac.uk/~awd28<<
>>
>
>
>
> --
> Alan Wilter S. da Silva, D.Sc. - CCPN Research Associate
> Department of Biochemistry, University of Cambridge.
> 80 Tennis Court Road, Cambridge CB2 1GA, UK.
> >>http://www.bio.cam.ac.uk/~awd28<<
>
_______________________________________________
PyCUDA mailing list
PyCUDA@tiker.net
http://lists.tiker.net/listinfo/pycuda
