Author: Matti Picus <matti.pi...@gmail.com>
Branch: cpyext-ext
Changeset: r83978:181c8b1f5467
Date: 2016-04-27 15:45 +0300
http://bitbucket.org/pypy/pypy/changeset/181c8b1f5467/

Log:    merge cpyext-for-merge back into branch

diff too long, truncating to 2000 out of 6948 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,4 @@
 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
 bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
 
     overly detailed
 
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+   or create branch vendor/stdlib-3-*
 2. upgrade the files there
+   2a. remove lib-python/2.7/ or lib-python/3/
+   2b. copy the files from the cpython repo
+   2c. hg add lib-python/2.7/ or lib-python/3/
+   2d. hg remove --after
+   2e. show copied files in cpython repo by running `hg diff --git -r v<old> -r v<new> Lib | grep '^copy \(from\|to\)'`
+   2f. fix copies / renames manually by running `hg copy --after <from> <to>` for each copied file
 3. update stdlib-version.txt with the output of hg id from the cpython repo
 4. commit
-5. update to default/py3k
+5. update to default / py3k
 6. create an integration branch for the new stdlib
    (just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
 8. commit
 10. fix issues
 11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py
--- a/lib_pypy/syslog.py
+++ b/lib_pypy/syslog.py
@@ -51,6 +51,8 @@
     # if log is not opened, open it now
     if not _S_log_open:
         openlog()
+    if isinstance(message, unicode):
+        message = str(message)
     lib.syslog(priority, "%s", message)
 
 @builtinify
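
The syslog.py hunk above coerces unicode messages to byte strings before they
reach the C-level call. A minimal Python 2 sketch of the same pattern, assuming
ASCII-only text; ``_as_bytes`` is a hypothetical helper and not part of the
patch::

    def _as_bytes(message):
        # mirror lib_pypy/syslog.py: the C-level function wants a byte string,
        # so coerce unicode first (str() assumes ASCII-only content)
        if isinstance(message, unicode):
            message = str(message)
        return message

    assert _as_bytes(u"hello") == "hello"
    assert _as_bytes("already bytes") == "already bytes"
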
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -204,15 +204,6 @@
         BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
                    default=False),
 
-        BoolOption("withprebuiltchar",
-                   "use prebuilt single-character string objects",
-                   default=False),
-
-        BoolOption("sharesmallstr",
-                   "always reuse the prebuilt string objects "
-                   "(the empty string and potentially single-char strings)",
-                   default=False),
-
         BoolOption("withspecialisedtuple",
                    "use specialised tuples",
                    default=False),
@@ -222,39 +213,14 @@
                    default=False,
                    requires=[("objspace.honor__builtins__", False)]),
 
-        BoolOption("withmapdict",
-                   "make instances really small but slow without the JIT",
-                   default=False,
-                   requires=[("objspace.std.getattributeshortcut", True),
-                             ("objspace.std.withtypeversion", True),
-                       ]),
-
-        BoolOption("withrangelist",
-                   "enable special range list implementation that does not "
-                   "actually create the full list until the resulting "
-                   "list is mutated",
-                   default=False),
         BoolOption("withliststrategies",
                    "enable optimized ways to store lists of primitives ",
                    default=True),
 
-        BoolOption("withtypeversion",
-                   "version type objects when changing them",
-                   cmdline=None,
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
-
-        BoolOption("withmethodcache",
-                   "try to cache method lookups",
-                   default=False,
-                   requires=[("objspace.std.withtypeversion", True),
-                             ("translation.rweakref", True)]),
         BoolOption("withmethodcachecounter",
                    "try to cache methods and provide a counter in __pypy__. "
                    "for testing purposes only.",
-                   default=False,
-                   requires=[("objspace.std.withmethodcache", True)]),
+                   default=False),
         IntOption("methodcachesizeexp",
                   " 2 ** methodcachesizeexp is the size of the of the method 
cache ",
                   default=11),
@@ -265,22 +231,10 @@
         BoolOption("optimized_list_getitem",
                    "special case the 'list[integer]' expressions",
                    default=False),
-        BoolOption("getattributeshortcut",
-                   "track types that override __getattribute__",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
         BoolOption("newshortcut",
                    "cache and shortcut calling __new__ from builtin types",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
+                   default=False),
 
-        BoolOption("withidentitydict",
-                   "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
      ]),
 ])
 
@@ -296,15 +250,10 @@
     """
     # all the good optimizations for PyPy should be listed here
     if level in ['2', '3', 'jit']:
-        config.objspace.std.suggest(withrangelist=True)
-        config.objspace.std.suggest(withmethodcache=True)
-        config.objspace.std.suggest(withprebuiltchar=True)
         config.objspace.std.suggest(intshortcut=True)
         config.objspace.std.suggest(optimized_list_getitem=True)
-        config.objspace.std.suggest(getattributeshortcut=True)
         #config.objspace.std.suggest(newshortcut=True)
         config.objspace.std.suggest(withspecialisedtuple=True)
-        config.objspace.std.suggest(withidentitydict=True)
         #if not IS_64_BITS:
         #    config.objspace.std.suggest(withsmalllong=True)
 
@@ -317,16 +266,13 @@
     # memory-saving optimizations
     if level == 'mem':
         config.objspace.std.suggest(withprebuiltint=True)
-        config.objspace.std.suggest(withrangelist=True)
-        config.objspace.std.suggest(withprebuiltchar=True)
-        config.objspace.std.suggest(withmapdict=True)
+        config.objspace.std.suggest(withliststrategies=True)
         if not IS_64_BITS:
             config.objspace.std.suggest(withsmalllong=True)
 
     # extra optimizations with the JIT
     if level == 'jit':
         config.objspace.std.suggest(withcelldict=True)
-        config.objspace.std.suggest(withmapdict=True)
 
 
 def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py 
b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
 
     assert conf.objspace.usemodules.gc
 
-    conf.objspace.std.withmapdict = True
-    assert conf.objspace.std.withtypeversion
-    conf = get_pypy_config()
-    conf.objspace.std.withtypeversion = False
-    py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
 def test_conflicting_gcrootfinder():
     conf = get_pypy_config()
     conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
 def test_set_pypy_opt_level():
     conf = get_pypy_config()
     set_pypy_opt_level(conf, '2')
-    assert conf.objspace.std.getattributeshortcut
+    assert conf.objspace.std.intshortcut
     conf = get_pypy_config()
     set_pypy_opt_level(conf, '0')
-    assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
-    conf = get_pypy_config()
-    conf.translation.rweakref = False
-    set_pypy_opt_level(conf, '3')
-
-    assert not conf.objspace.std.withtypeversion
-    assert not conf.objspace.std.withmethodcache
+    assert not conf.objspace.std.intshortcut
 
 def test_check_documentation():
     def check_file_exists(fn):
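
The pypyoption.py and test_pypyoption.py hunks above drop the never-used
objspace.std options; the remaining ones can still be exercised with the same
helpers the test imports. A small sketch, assuming it is run with a PyPy source
checkout on the path::

    from pypy.config.pypyoption import get_pypy_config, set_pypy_opt_level

    conf = get_pypy_config()
    set_pypy_opt_level(conf, '2')                 # the "good optimizations" level
    print conf.objspace.std.intshortcut           # True: still suggested at level 2
    print conf.objspace.std.withliststrategies    # True: on by default
    # options removed in this branch (withmapdict, withmethodcache,
    # withrangelist, ...) no longer exist on conf.objspace.std at all
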
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -102,15 +102,15 @@
 
     apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
     libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
-    tk-dev
+    tk-dev libgc-dev
 
 For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
 
 On Fedora::
 
-    yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
-    lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
-    (XXX plus the Febora version of libgdbm-dev and tk-dev)
+    dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+    lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+    gdbm-devel
 
 For the optional lzma module on PyPy3 you will also need ``xz-devel``.
 
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt 
b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt 
b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt 
b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``.  This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt 
b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt 
b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt 
b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt 
b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt 
b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the 
Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt 
b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/interpreter-optimizations.rst 
b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
 Dictionary Optimizations
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
 
-Multi-dicts are a special implementation of dictionaries.  It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime.  Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that makes it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
 
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled; by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
 
-This is now the default implementation of dictionaries in the Python interpreter.
 
+Identity Dicts
++++++++++++++++
 
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``.  This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
 +++++++++++++
 
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually the memory behaviour
+should be mostly like that of using ``__slots__``).
 
 The idea is the following: Most instances of the same class have very similar
 attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
 dicts:
 the representation of the instance dict contains only a list of values.
 
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
 
 
 List Optimizations
@@ -114,8 +120,8 @@
 created. This gives the memory and speed behaviour of ``xrange`` and the generality
 of use of ``range``, and makes ``xrange`` essentially useless.
 
-You can enable this feature with the :config:`objspace.std.withrangelist`
-option.
+This feature is enabled by default as part of the
+:config:`objspace.std.withliststrategies` option.
 
 
 User Class Optimizations
@@ -133,8 +139,7 @@
 base classes is changed). On subsequent lookups the cached version can be used,
 as long as the instance did not shadow any of its classes attributes.
 
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
 
 
 Interpreter Optimizations
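
The rewritten sections above describe dict strategies, identity dicts, map
dicts and range lists as always-on features. A small app-level sketch of the
observable behaviour, assuming a PyPy 2 interpreter where ``__pypy__.strategy()``
is available (it is used by PyPy's own tests); on CPython the import fails and
the placeholder is printed instead::

    try:
        from __pypy__ import strategy          # PyPy-only introspection helper
    except ImportError:
        strategy = lambda obj: '<not running on PyPy>'

    d = {1: 'a', 2: 'b'}
    print strategy(d)        # e.g. an int-key strategy: faster hashing/comparison

    d['x'] = 'c'             # mixing key types falls back to the general strategy
    print strategy(d)

    r = range(1000)          # a "range list": stores only start/stop/step
    print strategy(r)
    r.append(1000)           # mutation switches it to an ordinary list strategy
    print strategy(r)
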
diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst
--- a/pypy/doc/introduction.rst
+++ b/pypy/doc/introduction.rst
@@ -1,16 +1,22 @@
 What is PyPy?
 =============
 
-In common parlance, PyPy has been used to mean two things.  The first is the
-:ref:`RPython translation toolchain <rpython:index>`, which is a framework for generating
-dynamic programming language implementations.  And the second is one
-particular implementation that is so generated --
-an implementation of the Python_ programming language written in
-Python itself.  It is designed to be flexible and easy to experiment with.
+Historically, PyPy has been used to mean two things.  The first is the
+:ref:`RPython translation toolchain <rpython:index>` for generating
+interpreters for dynamic programming languages.  And the second is one
+particular implementation of Python_ produced with it. Because RPython
+uses the same syntax as Python, this generated version became known as a
+Python interpreter written in Python. It is designed to be flexible and
+easy to experiment with.
 
-This double usage has proven to be confusing, and we are trying to move
-away from using the word PyPy to mean both things.  From now on we will
-try to use PyPy to only mean the Python implementation, and say the
+To make it more clear, we start with source code written in RPython,
+apply the RPython translation toolchain, and end up with PyPy as a
+binary executable. This executable is the Python interpreter.
+
+This double usage has proven to be confusing, so we've moved away from using
+the word PyPy to mean both the toolchain and the generated interpreter.  Now
+we use the word PyPy to refer to the Python implementation, and explicitly
+mention the
 :ref:`RPython translation toolchain <rpython:index>` when we mean the framework.
 
 Some older documents, presentations, papers and videos will still have the old
diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst
--- a/pypy/doc/release-5.1.0.rst
+++ b/pypy/doc/release-5.1.0.rst
@@ -3,10 +3,17 @@
 ========
 
 We have released PyPy 5.1, about a month after PyPy 5.0.
-We encourage all users of PyPy to update to this version. Apart from the usual
-bug fixes, there is an ongoing effort to improve the warmup time and memory
-usage of JIT-related metadata, and we now fully support the IBM s390x 
-architecture.
+
+This release includes more improvements to warmup time and memory
+requirements. We have seen about a 20% reduction in memory requirements and up
+to a 30% improvement in warmup time; more details are in the `blog post`_.
+
+We also now have `full support for the IBM s390x`_. Since this support is in
+`RPython`_, any dynamic language written using RPython, like PyPy, will
+automagically be supported on that architecture.  
+
+We updated cffi_ to 1.6, and continue to improve support for the wider
+Python ecosystem using the PyPy interpreter.
 
 You can download the PyPy 5.1 release here:
 
@@ -26,6 +33,9 @@
 .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
 .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
 .. _`numpy`: https://bitbucket.org/pypy/numpy
+.. _cffi: https://cffi.readthedocs.org
+.. _`full support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html
+.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html
 
 What is PyPy?
 =============
@@ -46,7 +56,7 @@
   
   * big- and little-endian variants of **PPC64** running Linux,
 
-  * **s960x** running Linux
+  * **s390x** running Linux
 
 .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
 .. _`dynamic languages`: http://pypyjs.org
@@ -74,6 +84,8 @@
   * Fix a corner case in the JIT
 
   * Fix edge cases in the cpyext refcounting-compatible semantics
+    (more work on cpyext compatibility is coming in the ``cpyext-ext``
+    branch, but isn't ready yet)
 
   * Try harder to not emit NEON instructions on ARM processors without NEON
     support
@@ -92,11 +104,17 @@
 
   * Fix sandbox startup (a regression in 5.0)
 
+  * Fix possible segfault for classes with mangled mro or __metaclass__
+
+  * Fix isinstance(deque(), Hashable) on the pure python deque
+
+  * Fix an issue with forkpty()
+
  * Issues reported with our previous release were resolved_ after reports from users on
     our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
     #pypy
 
-* Numpy:
+* Numpy_:
 
   * Implemented numpy.where for a single argument
 
@@ -108,6 +126,8 @@
     functions exported from libpypy.so are declared in pypy_numpy.h, which is
     included only when building our fork of numpy
 
+  * Add broadcast
+
 * Performance improvements:
 
   * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting
@@ -119,14 +139,18 @@
   * Remove the forced minor collection that occurs when rewriting the
     assembler at the start of the JIT backend
 
+  * Port the resource module to cffi
+
 * Internal refactorings:
 
   * Use a simpler logger to speed up translation
 
   * Drop vestiges of Python 2.5 support in testing
 
+  * Update rpython functions with ones needed for py3k
+
 .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html
-.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html
+.. _Numpy: https://bitbucket.org/pypy/numpy
 
 Please update, and continue to help us make PyPy better.
 
diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst
--- a/pypy/doc/whatsnew-5.1.0.rst
+++ b/pypy/doc/whatsnew-5.1.0.rst
@@ -60,3 +60,13 @@
 Remove old unneeded numpy headers; what is left is only for testing. Also
 generate pypy_numpy.h which exposes functions to directly use micronumpy
 ndarray and ufuncs
+
+.. branch: rposix-for-3
+
+Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
+This updates the underlying rpython functions with the ones needed for the 
+py3k branch
+ 
+.. branch: numpy_broadcast
+
+Add broadcast to micronumpy
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,14 +3,43 @@
 =========================
 
 .. this is a revision shortly after release-5.1
-.. startrev: 2180e1eaf6f6
+.. startrev: aa60332382a1
 
-.. branch: rposix-for-3
+.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046
 
-Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
-This updates the underlying rpython functions with the ones needed for the 
-py3k branch
- 
-.. branch: numpy_broadcast
+.. branch: gcheader-decl
 
-Add broadcast to micronumpy
+Reduce the size of generated C sources.
+
+
+.. branch: remove-objspace-options
+
+Remove a number of options from the build process that were never tested and
+never set. Fix a performance bug in the method cache.
+
+.. branch: bitstring
+
+JIT: use bitstrings to compress the lists of read or written descrs
+that we attach to EffectInfo.  Fixes a problem we had in
+remove-objspace-options.
+
+.. branch: cpyext-for-merge
+Update cpyext C-API support:
+  - allow c-snippet tests to be run with -A so we can verify we are compatible
+  - fix many edge cases exposed by fixing tests to run with -A
+  - issequence() logic matches cpython
+  - make PyStringObject and PyUnicodeObject field names compatible with cpython
+  - add preliminary support for PyDateTime_*
+  - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy,
+    PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile,
+    PyAnySet_CheckExact, PyUnicode_Concat
+  - improve support for PyGILState_Ensure, PyGILState_Release, and thread
+    primitives; also find a case where CPython allows thread creation before
+    PyEval_InitThreads is run, and disallow it on PyPy
+  - create a PyObject-specific list strategy
+  - rewrite slot assignment for typeobjects
+  - improve tracking of PyObject to rpython object mapping
+  - support tp_as_{number, sequence, mapping, buffer} slots
+After this branch, we are almost able to support upstream numpy via cpyext, so
+we created (yet another) fork of numpy at github.com/pypy/numpy with the needed
+changes
diff --git a/pypy/interpreter/executioncontext.py 
b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -214,6 +214,7 @@
             self._trace(frame, 'exception', None, operationerr)
         #operationerr.print_detailed_traceback(self.space)
 
+    @jit.dont_look_inside
     @specialize.arg(1)
     def sys_exc_info(self, for_hidden=False):
         """Implements sys.exc_info().
@@ -225,15 +226,7 @@
         # NOTE: the result is not the wrapped sys.exc_info() !!!
 
         """
-        frame = self.gettopframe()
-        while frame:
-            if frame.last_exception is not None:
-                if ((for_hidden or not frame.hide()) or
-                        frame.last_exception is
-                            get_cleared_operation_error(self.space)):
-                    return frame.last_exception
-            frame = frame.f_backref()
-        return None
+        return self.gettopframe()._exc_info_unroll(self.space, for_hidden)
 
     def set_sys_exc_info(self, operror):
         frame = self.gettopframe_nohidden()
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -114,6 +114,7 @@
                 e.write_unraisable(self.space, "new_code_hook()")
 
     def _initialize(self):
+        from pypy.objspace.std.mapdict import init_mapdict_cache
         if self.co_cellvars:
             argcount = self.co_argcount
             assert argcount >= 0     # annotator hint
@@ -149,9 +150,7 @@
 
         self._compute_flatcall()
 
-        if self.space.config.objspace.std.withmapdict:
-            from pypy.objspace.std.mapdict import init_mapdict_cache
-            init_mapdict_cache(self)
+        init_mapdict_cache(self)
 
     def _init_ready(self):
         "This is a hook for the vmprof module, which overrides this method."
@@ -163,7 +162,10 @@
         # When translating PyPy, freeze the file name
         #     <builtin>/lastdirname/basename.py
         # instead of freezing the complete translation-time path.
-        filename = self.co_filename.lstrip('<').rstrip('>')
+        filename = self.co_filename
+        if filename.startswith('<builtin>'):
+            return
+        filename = filename.lstrip('<').rstrip('>')
         if filename.lower().endswith('.pyc'):
             filename = filename[:-1]
         basename = os.path.basename(filename)
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -4,7 +4,7 @@
 from rpython.rlib import jit
 from rpython.rlib.debug import make_sure_not_resized, check_nonneg
 from rpython.rlib.jit import hint
-from rpython.rlib.objectmodel import we_are_translated, instantiate
+from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated
 from rpython.rlib.rarithmetic import intmask, r_uint
 from rpython.tool.pairtype import extendabletype
 
@@ -12,7 +12,8 @@
 from pypy.interpreter.argument import Arguments
 from pypy.interpreter.astcompiler import consts
 from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import (
+    OperationError, get_cleared_operation_error, oefmt)
 from pypy.interpreter.executioncontext import ExecutionContext
 from pypy.interpreter.nestedscope import Cell
 from pypy.tool import stdlib_opcode
@@ -870,6 +871,22 @@
             return space.wrap(self.builtin is not space.builtin)
         return space.w_False
 
+    @jit.unroll_safe
+    @specialize.arg(2)
+    def _exc_info_unroll(self, space, for_hidden=False):
+        """Return the most recent OperationError being handled in the
+        call stack
+        """
+        frame = self
+        while frame:
+            last = frame.last_exception
+            if last is not None:
+                if last is get_cleared_operation_error(self.space):
+                    break
+                if for_hidden or not frame.hide():
+                    return last
+            frame = frame.f_backref()
+        return None
 
 # ____________________________________________________________
 
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -739,25 +739,16 @@
         unroller = SContinueLoop(startofloop)
         return self.unrollstack_and_jump(unroller)
 
-    @jit.unroll_safe
     def RAISE_VARARGS(self, nbargs, next_instr):
         space = self.space
         if nbargs == 0:
-            frame = self
-            while frame:
-                if frame.last_exception is not None:
-                    operror = frame.last_exception
-                    break
-                frame = frame.f_backref()
-            else:
-                raise OperationError(space.w_TypeError,
-                    space.wrap("raise: no active exception to re-raise"))
-            if operror.w_type is space.w_None:
-                raise OperationError(space.w_TypeError,
-                    space.wrap("raise: the exception to re-raise was cleared"))
+            last_operr = self._exc_info_unroll(space, for_hidden=True)
+            if last_operr is None:
+                raise oefmt(space.w_TypeError,
+                            "No active exception to reraise")
             # re-raise, no new traceback obj will be attached
-            self.last_exception = operror
-            raise RaiseWithExplicitTraceback(operror)
+            self.last_exception = last_operr
+            raise RaiseWithExplicitTraceback(last_operr)
 
         w_value = w_traceback = space.w_None
         if nbargs >= 3:
@@ -951,8 +942,7 @@
     def LOAD_ATTR(self, nameindex, next_instr):
         "obj.attributename"
         w_obj = self.popvalue()
-        if (self.space.config.objspace.std.withmapdict
-            and not jit.we_are_jitted()):
+        if not jit.we_are_jitted():
             from pypy.objspace.std.mapdict import LOAD_ATTR_caching
             w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex)
         else:
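
The pyframe.py and pyopcode.py hunks above move the "walk the frame chain for
the exception currently being handled" logic into ``_exc_info_unroll()``, shared
by ``sys_exc_info()`` and bare ``raise``. A small app-level sketch of the
behaviour being preserved (plain Python 2, nothing PyPy-specific)::

    import sys

    def current_exc_type():
        # no except block is active in this frame; the interpreter walks the
        # calling frames, which is what _exc_info_unroll() implements
        return sys.exc_info()[0]

    def reraise():
        raise            # bare raise: re-raise the exception found on the stack

    try:
        try:
            1 / 0
        except ZeroDivisionError:
            assert current_exc_type() is ZeroDivisionError
            reraise()
    except ZeroDivisionError:
        print 'reraised as expected'
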
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -98,175 +98,51 @@
 # reason is that it is missing a place to store the __dict__, the slots,
 # the weakref lifeline, and it typically has no interp-level __del__.
 # So we create a few interp-level subclasses of W_XxxObject, which add
-# some combination of features.
-#
-# We don't build 2**4 == 16 subclasses for all combinations of requested
-# features, but limit ourselves to 6, chosen a bit arbitrarily based on
-# typical usage (case 1 is the most common kind of app-level subclasses;
-# case 2 is the memory-saving kind defined with __slots__).
-#
-#  +----------------------------------------------------------------+
-#  | NOTE: if withmapdict is enabled, the following doesn't apply!  |
-#  | Map dicts can flexibly allow any slots/__dict__/__weakref__ to |
-#  | show up only when needed.  In particular there is no way with  |
-#  | mapdict to prevent some objects from being weakrefable.        |
-#  +----------------------------------------------------------------+
-#
-#     dict   slots   del   weakrefable
-#
-# 1.    Y      N      N         Y          UserDictWeakref
-# 2.    N      Y      N         N          UserSlots
-# 3.    Y      Y      N         Y          UserDictWeakrefSlots
-# 4.    N      Y      N         Y          UserSlotsWeakref
-# 5.    Y      Y      Y         Y          UserDictWeakrefSlotsDel
-# 6.    N      Y      Y         Y          UserSlotsWeakrefDel
-#
-# Note that if the app-level explicitly requests no dict, we should not
-# provide one, otherwise storing random attributes on the app-level
-# instance would unexpectedly work.  We don't care too much, though, if
-# an object is weakrefable when it shouldn't really be.  It's important
-# that it has a __del__ only if absolutely needed, as this kills the
-# performance of the GCs.
-#
-# Interp-level inheritance is like this:
-#
-#        W_XxxObject base
-#             /   \
-#            1     2
-#           /       \
-#          3         4
-#         /           \
-#        5             6
+# some combination of features. This is done using mapdict.
 
-def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots,
-                                    needsdel=False, weakrefable=False):
+# We need two subclasses of the app-level type: one to add mapdict, and then
+# one on top of it to add __del__, so that objects without a __del__ do not
+# slow down the GC.
+
+def get_unique_interplevel_subclass(config, cls, needsdel=False):
     "NOT_RPYTHON: initialization-time only"
     if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
         needsdel = False
     assert cls.typedef.acceptable_as_base_class
-    key = config, cls, hasdict, wants_slots, needsdel, weakrefable
+    key = config, cls, needsdel
     try:
         return _subclass_cache[key]
     except KeyError:
-        subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel,
-                             weakrefable)
+        # XXX can save a class if cls already has a __del__
+        if needsdel:
+            cls = get_unique_interplevel_subclass(config, cls, False)
+        subcls = _getusercls(config, cls, needsdel)
         assert key not in _subclass_cache
         _subclass_cache[key] = subcls
         return subcls
 get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
 _subclass_cache = {}
 
-def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable):
+def _getusercls(config, cls, wants_del, reallywantdict=False):
+    from rpython.rlib import objectmodel
+    from pypy.objspace.std.mapdict import (BaseUserClassMapdict,
+            MapdictDictSupport, MapdictWeakrefSupport,
+            _make_storage_mixin_size_n)
     typedef = cls.typedef
-    if wants_dict and typedef.hasdict:
-        wants_dict = False
-    if config.objspace.std.withmapdict and not typedef.hasdict:
-        # mapdict only works if the type does not already have a dict
-        if wants_del:
-            parentcls = get_unique_interplevel_subclass(config, cls, True, True,
-                                                        False, True)
-            return _usersubclswithfeature(config, parentcls, "del")
-        return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots")
-    # Forest of if's - see the comment above.
+    name = cls.__name__ + "User"
+
+    mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()]
+    if reallywantdict or not typedef.hasdict:
+        # the type has no dict, mapdict to provide the dict
+        mixins_needed.append(MapdictDictSupport)
+        name += "Dict"
+    if not typedef.weakrefable:
+        # the type does not support weakrefs yet, mapdict to provide weakref
+        # support
+        mixins_needed.append(MapdictWeakrefSupport)
+        name += "Weakrefable"
     if wants_del:
-        if wants_dict:
-            # case 5.  Parent class is 3.
-            parentcls = get_unique_interplevel_subclass(config, cls, True, True,
-                                                        False, True)
-        else:
-            # case 6.  Parent class is 4.
-            parentcls = get_unique_interplevel_subclass(config, cls, False, True,
-                                                        False, True)
-        return _usersubclswithfeature(config, parentcls, "del")
-    elif wants_dict:
-        if wants_slots:
-            # case 3.  Parent class is 1.
-            parentcls = get_unique_interplevel_subclass(config, cls, True, False,
-                                                        False, True)
-            return _usersubclswithfeature(config, parentcls, "slots")
-        else:
-            # case 1 (we need to add weakrefable unless it's already in 'cls')
-            if not typedef.weakrefable:
-                return _usersubclswithfeature(config, cls, "user", "dict", "weakref")
-            else:
-                return _usersubclswithfeature(config, cls, "user", "dict")
-    else:
-        if weakrefable and not typedef.weakrefable:
-            # case 4.  Parent class is 2.
-            parentcls = get_unique_interplevel_subclass(config, cls, False, True,
-                                                        False, False)
-            return _usersubclswithfeature(config, parentcls, "weakref")
-        else:
-            # case 2 (if the base is already weakrefable, case 2 == case 4)
-            return _usersubclswithfeature(config, cls, "user", "slots")
-
-def _usersubclswithfeature(config, parentcls, *features):
-    key = config, parentcls, features
-    try:
-        return _usersubclswithfeature_cache[key]
-    except KeyError:
-        subcls = _builduserclswithfeature(config, parentcls, *features)
-        _usersubclswithfeature_cache[key] = subcls
-        return subcls
-_usersubclswithfeature_cache = {}
-_allusersubcls_cache = {}
-
-def _builduserclswithfeature(config, supercls, *features):
-    "NOT_RPYTHON: initialization-time only"
-    name = supercls.__name__
-    name += ''.join([name.capitalize() for name in features])
-    body = {}
-    #print '..........', name, '(', supercls.__name__, ')'
-
-    def add(Proto):
-        for key, value in Proto.__dict__.items():
-            if (not key.startswith('__') and not key.startswith('_mixin_')
-                    or key == '__del__'):
-                if hasattr(value, "func_name"):
-                    value = func_with_new_name(value, value.func_name)
-                body[key] = value
-
-    if (config.objspace.std.withmapdict and "dict" in features):
-        from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin
-        add(BaseMapdictObject)
-        add(ObjectMixin)
-        body["user_overridden_class"] = True
-        features = ()
-
-    if "user" in features:     # generic feature needed by all subcls
-
-        class Proto(object):
-            user_overridden_class = True
-
-            def getclass(self, space):
-                return promote(self.w__class__)
-
-            def setclass(self, space, w_subtype):
-                # only used by descr_set___class__
-                self.w__class__ = w_subtype
-
-            def user_setup(self, space, w_subtype):
-                self.space = space
-                self.w__class__ = w_subtype
-                self.user_setup_slots(w_subtype.layout.nslots)
-
-            def user_setup_slots(self, nslots):
-                assert nslots == 0
-        add(Proto)
-
-    if "weakref" in features:
-        class Proto(object):
-            _lifeline_ = None
-            def getweakref(self):
-                return self._lifeline_
-            def setweakref(self, space, weakreflifeline):
-                self._lifeline_ = weakreflifeline
-            def delweakref(self):
-                self._lifeline_ = None
-        add(Proto)
-
-    if "del" in features:
-        parent_destructor = getattr(supercls, '__del__', None)
+        name += "Del"
+        parent_destructor = getattr(cls, '__del__', None)
         def call_parent_del(self):
             assert isinstance(self, subcls)
             parent_destructor(self)
@@ -281,57 +157,16 @@
                 if parent_destructor is not None:
                     self.enqueue_for_destruction(self.space, call_parent_del,
                                                  'internal destructor of ')
-        add(Proto)
+        mixins_needed.append(Proto)
 
-    if "slots" in features:
-        class Proto(object):
-            slots_w = []
-            def user_setup_slots(self, nslots):
-                if nslots > 0:
-                    self.slots_w = [None] * nslots
-            def setslotvalue(self, index, w_value):
-                self.slots_w[index] = w_value
-            def delslotvalue(self, index):
-                if self.slots_w[index] is None:
-                    return False
-                self.slots_w[index] = None
-                return True
-            def getslotvalue(self, index):
-                return self.slots_w[index]
-        add(Proto)
-
-    if "dict" in features:
-        base_user_setup = supercls.user_setup.im_func
-        if "user_setup" in body:
-            base_user_setup = body["user_setup"]
-        class Proto(object):
-            def getdict(self, space):
-                return self.w__dict__
-
-            def setdict(self, space, w_dict):
-                self.w__dict__ = check_new_dictionary(space, w_dict)
-
-            def user_setup(self, space, w_subtype):
-                self.w__dict__ = space.newdict(
-                    instance=True)
-                base_user_setup(self, space, w_subtype)
-
-        add(Proto)
-
-    subcls = type(name, (supercls,), body)
-    _allusersubcls_cache[subcls] = True
+    class subcls(cls):
+        user_overridden_class = True
+        for base in mixins_needed:
+            objectmodel.import_from_mixin(base)
+    del subcls.base
+    subcls.__name__ = name
     return subcls
 
-# a couple of helpers for the Proto classes above, factored out to reduce
-# the translated code size
-def check_new_dictionary(space, w_dict):
-    if not space.isinstance_w(w_dict, space.w_dict):
-        raise OperationError(space.w_TypeError,
-                space.wrap("setting dictionary to a non-dict"))
-    from pypy.objspace.std import dictmultiobject
-    assert isinstance(w_dict, dictmultiobject.W_DictMultiObject)
-    return w_dict
-check_new_dictionary._dont_inline_ = True
 
 # ____________________________________________________________
 
diff --git a/pypy/module/__builtin__/functional.py 
b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -87,7 +87,7 @@
 
     howmany = get_len_of_range(space, start, stop, step)
 
-    if space.config.objspace.std.withrangelist:
+    if space.config.objspace.std.withliststrategies:
         return range_withspecialized_implementation(space, start,
                                                     step, howmany)
     res_w = [None] * howmany
@@ -99,7 +99,7 @@
 
 
 def range_withspecialized_implementation(space, start, step, length):
-    assert space.config.objspace.std.withrangelist
+    assert space.config.objspace.std.withliststrategies
     from pypy.objspace.std.listobject import make_range_list
     return make_range_list(space, start, step, length)
 
diff --git a/pypy/module/__builtin__/interp_classobj.py 
b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -185,12 +185,19 @@
 
 class Cache:
     def __init__(self, space):
-        from pypy.interpreter.typedef import _usersubclswithfeature
-        # evil
-        self.cls_without_del = _usersubclswithfeature(
-                space.config, W_InstanceObject, "dict", "weakref")
-        self.cls_with_del = _usersubclswithfeature(
-                space.config, self.cls_without_del, "del")
+        from pypy.interpreter.typedef import _getusercls
+
+        if hasattr(space, 'is_fake_objspace'):
+            # hack: with the fake objspace, we don't want to see typedef's
+            # _getusercls() at all
+            self.cls_without_del = W_InstanceObject
+            self.cls_with_del = W_InstanceObject
+            return
+
+        self.cls_without_del = _getusercls(
+                space.config, W_InstanceObject, False, reallywantdict=True)
+        self.cls_with_del = _getusercls(
+                space.config, W_InstanceObject, True, reallywantdict=True)
 
 
 def class_descr_call(space, w_self, __args__):
diff --git a/pypy/module/__builtin__/test/test_builtin.py 
b/pypy/module/__builtin__/test/test_builtin.py
--- a/pypy/module/__builtin__/test/test_builtin.py
+++ b/pypy/module/__builtin__/test/test_builtin.py
@@ -748,10 +748,6 @@
         raises(TypeError, delattr, A(), 42)
 
 
-class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr):
-    spaceconfig = {"objspace.std.getattributeshortcut": True}
-
-
 class TestInternal:
     def test_execfile(self, space):
         fn = str(udir.join('test_execfile'))
diff --git a/pypy/module/__builtin__/test/test_classobj.py 
b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -1118,8 +1118,7 @@
         assert getattr(c, u"x") == 1
 
 
-class AppTestOldStyleMapDict(AppTestOldstyle):
-    spaceconfig = {"objspace.std.withmapdict": True}
+class AppTestOldStyleMapDict:
 
     def setup_class(cls):
         if cls.runappdirect:
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -110,9 +110,8 @@
                                  'interp_magic.method_cache_counter')
             self.extra_interpdef('reset_method_cache_counter',
                                  'interp_magic.reset_method_cache_counter')
-            if self.space.config.objspace.std.withmapdict:
-                self.extra_interpdef('mapdict_cache_counter',
-                                     'interp_magic.mapdict_cache_counter')
+            self.extra_interpdef('mapdict_cache_counter',
+                                 'interp_magic.mapdict_cache_counter')
         PYC_MAGIC = get_pyc_magic(self.space)
         self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC)
         try:
diff --git a/pypy/module/__pypy__/interp_magic.py 
b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -37,17 +37,15 @@
     cache = space.fromcache(MethodCache)
     cache.misses = {}
     cache.hits = {}
-    if space.config.objspace.std.withmapdict:
-        cache = space.fromcache(MapAttrCache)
-        cache.misses = {}
-        cache.hits = {}
+    cache = space.fromcache(MapAttrCache)
+    cache.misses = {}
+    cache.hits = {}
 
 @unwrap_spec(name=str)
 def mapdict_cache_counter(space, name):
     """Return a tuple (index_cache_hits, index_cache_misses) for lookups
     in the mapdict cache with the given attribute name."""
     assert space.config.objspace.std.withmethodcachecounter
-    assert space.config.objspace.std.withmapdict
     cache = space.fromcache(MapAttrCache)
     return space.newtuple([space.newint(cache.hits.get(name, 0)),
                            space.newint(cache.misses.get(name, 0))])
diff --git a/pypy/module/__pypy__/test/test_special.py 
b/pypy/module/__pypy__/test/test_special.py
--- a/pypy/module/__pypy__/test/test_special.py
+++ b/pypy/module/__pypy__/test/test_special.py
@@ -1,8 +1,7 @@
 import py
 
 class AppTest(object):
-    spaceconfig = {"objspace.usemodules.select": False,
-                   "objspace.std.withrangelist": True}
+    spaceconfig = {"objspace.usemodules.select": False}
 
     def setup_class(cls):
         if cls.runappdirect:
@@ -61,6 +60,7 @@
         import __pypy__
         import sys
 
+        result = [False]
         @__pypy__.hidden_applevel
         def test_hidden_with_tb():
             def not_hidden(): 1/0
@@ -69,9 +69,11 @@
                 assert sys.exc_info() == (None, None, None)
                 tb = __pypy__.get_hidden_tb()
                 assert tb.tb_frame.f_code.co_name == 'not_hidden'
-                return True
+                result[0] = True
+                raise
             else: return False
-        assert test_hidden_with_tb()
+        raises(ZeroDivisionError, test_hidden_with_tb)
+        assert result[0]
 
     def test_lookup_special(self):
         from __pypy__ import lookup_special
diff --git a/pypy/module/_cffi_backend/__init__.py 
b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -46,6 +46,7 @@
         '_get_types': 'func._get_types',
         '_get_common_types': 'func._get_common_types',
         'from_buffer': 'func.from_buffer',
+        'gcp': 'func.gcp',
 
         'string': 'func.string',
         'unpack': 'func.unpack',
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py 
b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -1773,14 +1773,14 @@
 
     def test_introspect_order(self):
         ffi, lib = self.prepare("""
-            union aaa { int a; }; typedef struct ccc { int a; } b;
-            union g   { int a; }; typedef struct cc  { int a; } bbb;
-            union aa  { int a; }; typedef struct a   { int a; } bb;
+            union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;
+            union CFFIg   { int a; }; typedef struct CFFIcc  { int a; } CFFIbbb;
+            union CFFIaa  { int a; }; typedef struct CFFIa   { int a; } CFFIbb;
         """, "test_introspect_order", """
-            union aaa { int a; }; typedef struct ccc { int a; } b;
-            union g   { int a; }; typedef struct cc  { int a; } bbb;
-            union aa  { int a; }; typedef struct a   { int a; } bb;
+            union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;
+            union CFFIg   { int a; }; typedef struct CFFIcc  { int a; } CFFIbbb;
+            union CFFIaa  { int a; }; typedef struct CFFIa   { int a; } CFFIbb;
         """)
-        assert ffi.list_types() == (['b', 'bb', 'bbb'],
-                                        ['a', 'cc', 'ccc'],
-                                        ['aa', 'aaa', 'g'])
+        assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
+                                    ['CFFIa', 'CFFIcc', 'CFFIccc'],
+                                    ['CFFIaa', 'CFFIaaa', 'CFFIg'])
diff --git a/pypy/module/_cffi_backend/wrapper.py 
b/pypy/module/_cffi_backend/wrapper.py
--- a/pypy/module/_cffi_backend/wrapper.py
+++ b/pypy/module/_cffi_backend/wrapper.py
@@ -92,7 +92,8 @@
         return ctype._call(self.fnptr, args_w)
 
     def descr_repr(self, space):
-        return space.wrap("<FFIFunctionWrapper for %s()>" % (self.fnname,))
+        doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname)
+        return space.wrap("<FFIFunctionWrapper '%s'>" % (doc,))
 
     def descr_get_doc(self, space):
         doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname)
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -150,7 +150,7 @@
         target.chmod(0444) # make the file read-only, to make sure that nobody
                            # edits it by mistake
 
-def copy_header_files(dstdir):
+def copy_header_files(dstdir, copy_numpy_headers):
     # XXX: 20 lines of code to recursively copy a directory, really??
     assert dstdir.check(dir=True)
     headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl')
@@ -158,6 +158,18 @@
         headers.append(udir.join(name))
     _copy_header_files(headers, dstdir)
 
+    if copy_numpy_headers:
+        try:
+            dstdir.mkdir('numpy')
+        except py.error.EEXIST:
+            pass
+        numpy_dstdir = dstdir / 'numpy'
+
+        numpy_include_dir = include_dir / 'numpy'
+        numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl')
+        _copy_header_files(numpy_headers, numpy_dstdir)
+
+
 class NotSpecified(object):
     pass
 _NOT_SPECIFIED = NotSpecified()
@@ -1345,7 +1357,7 @@
 
     setup_init_functions(eci, translating=True)
     trunk_include = pypydir.dirpath() / 'include'
-    copy_header_files(trunk_include)
+    copy_header_files(trunk_include, use_micronumpy)
 
 def init_static_data_translated(space):
     builder = space.fromcache(StaticObjectBuilder)
diff --git a/pypy/module/cpyext/include/listobject.h 
b/pypy/module/cpyext/include/listobject.h
--- a/pypy/module/cpyext/include/listobject.h
+++ b/pypy/module/cpyext/include/listobject.h
@@ -1,2 +1,1 @@
 #define PyList_GET_ITEM PyList_GetItem
-#define PyList_SET_ITEM PyList_SetItem
diff --git a/pypy/module/cpyext/include/numpy/README 
b/pypy/module/cpyext/include/numpy/README
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/include/numpy/README
@@ -0,0 +1,8 @@
+Headers for the micronumpy multiarray and umath modules,
+as used by https://bitbucket.org/pypy/numpy. They are needed by
+downstream packages that depend on numpy, like matplotlib, but can
+be slightly incompatible with traditional numpy C-API use cases.
+
+The trick to including these headers is in get_include, located in
+numpy/lib/utils.py. They will be ignored by an upstream build of numpy,
+since the <site-packages>/numpy/core/include path will be used instead.
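
The README above relies on numpy's ``get_include()`` hook so that downstream
packages pick these headers up. A sketch of the usual setup.py usage (standard
numpy and distutils API, not specific to this branch; ``mymod`` is a
hypothetical extension name)::

    # setup.py for a package compiling against the numpy C API
    import numpy
    from distutils.core import setup, Extension

    ext = Extension('mymod',
                    sources=['mymod.c'],
                    include_dirs=[numpy.get_include()])   # the get_include hook

    setup(name='mymod', version='0.1', ext_modules=[ext])
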
diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h 
b/pypy/module/cpyext/include/numpy/__multiarray_api.h
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h
@@ -0,0 +1,11 @@
+
+
+typedef struct {
+        PyObject_HEAD
+        npy_bool obval;
+} PyBoolScalarObject;
+
+static int import_array(){return 0;};
+static int _import_array(){return 0;};
+static int _import_math(){return 0;};
+
diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h 
b/pypy/module/cpyext/include/numpy/arrayobject.h
--- a/pypy/module/cpyext/include/numpy/arrayobject.h
+++ b/pypy/module/cpyext/include/numpy/arrayobject.h
@@ -1,6 +1,8 @@
 
-/* NDArray object interface - S. H. Muller, 2013/07/26 */
-/* For testing ndarrayobject only */
+/* NDArray object interface - S. H. Muller, 2013/07/26 
+ * It will be copied by numpy/core/setup.py by install_data to
+ * site-packages/numpy/core/includes/numpy  
+*/
 
 #ifndef Py_NDARRAYOBJECT_H
 #define Py_NDARRAYOBJECT_H
@@ -8,8 +10,14 @@
 extern "C" {
 #endif
 
+#include "pypy_numpy.h"
+#include "old_defines.h"
 #include "npy_common.h"
-#include "ndarraytypes.h"
+#include "__multiarray_api.h"
+
+#define NPY_UNUSED(x) x
+#define PyArray_MAX(a,b) (((a)>(b))?(a):(b))
+#define PyArray_MIN(a,b) (((a)<(b))?(a):(b))
 
 /* fake PyArrayObject so that code that doesn't do direct field access works */
 #define PyArrayObject PyObject
@@ -17,18 +25,206 @@
 
 PyAPI_DATA(PyTypeObject) PyArray_Type;
 
-#define PyArray_SimpleNew _PyArray_SimpleNew
-#define PyArray_ZEROS _PyArray_ZEROS
-#define PyArray_FILLWBYTE _PyArray_FILLWBYTE
 
 #define NPY_MAXDIMS 32
 
-/* functions defined in ndarrayobject.c*/
+#ifndef NDARRAYTYPES_H
+typedef struct {
+    npy_intp *ptr;
+    int len;
+} PyArray_Dims;
+
+/* data types copied from numpy/ndarraytypes.h 
+ * keep numbers in sync with micronumpy.interp_dtype.DTypeCache
+ */
+enum NPY_TYPES {    NPY_BOOL=0,
+                    NPY_BYTE, NPY_UBYTE,
+                    NPY_SHORT, NPY_USHORT,
+                    NPY_INT, NPY_UINT,
+                    NPY_LONG, NPY_ULONG,
+                    NPY_LONGLONG, NPY_ULONGLONG,
+                    NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
+                    NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,
+                    NPY_OBJECT=17,
+                    NPY_STRING, NPY_UNICODE,
+                    NPY_VOID,
+                    /*
+                     * New 1.6 types appended, may be integrated
+                     * into the above in 2.0.
+                     */
+                    NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF,
+
+                    NPY_NTYPES,
+                    NPY_NOTYPE,
+                    NPY_CHAR,      /* special flag */
+                    NPY_USERDEF=256,  /* leave room for characters */
+
+                    /* The number of types not including the new 1.6 types */
+                    NPY_NTYPES_ABI_COMPATIBLE=21
+};
+
+#define PyTypeNum_ISBOOL(type)      ((type) == NPY_BOOL)
+#define PyTypeNum_ISINTEGER(type)  (((type) >= NPY_BYTE) && \
+                                    ((type) <= NPY_ULONGLONG))
+#define PyTypeNum_ISFLOAT(type)   ((((type) >= NPY_FLOAT) && \
+                                    ((type) <= NPY_LONGDOUBLE)) || \
+                                    ((type) == NPY_HALF))
+#define PyTypeNum_ISCOMPLEX(type)  (((type) >= NPY_CFLOAT) && \
+                                    ((type) <= NPY_CLONGDOUBLE))
+
+#define PyArray_ISBOOL(arr)    (PyTypeNum_ISBOOL(PyArray_TYPE(arr)))
+#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr)))
+#define PyArray_ISFLOAT(arr)   (PyTypeNum_ISFLOAT(PyArray_TYPE(arr)))
+#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr)))
+
+
+/* flags */
+#define NPY_ARRAY_C_CONTIGUOUS    0x0001
+#define NPY_ARRAY_F_CONTIGUOUS    0x0002
+#define NPY_ARRAY_OWNDATA         0x0004
+#define NPY_ARRAY_FORCECAST       0x0010
+#define NPY_ARRAY_ENSURECOPY      0x0020
+#define NPY_ARRAY_ENSUREARRAY     0x0040
+#define NPY_ARRAY_ELEMENTSTRIDES  0x0080
+#define NPY_ARRAY_ALIGNED         0x0100
+#define NPY_ARRAY_NOTSWAPPED      0x0200
+#define NPY_ARRAY_WRITEABLE       0x0400
+#define NPY_ARRAY_UPDATEIFCOPY    0x1000
+
+#define NPY_ARRAY_BEHAVED      (NPY_ARRAY_ALIGNED | \
+                                NPY_ARRAY_WRITEABLE)
+#define NPY_ARRAY_BEHAVED_NS   (NPY_ARRAY_ALIGNED | \
+                                NPY_ARRAY_WRITEABLE | \
+                                NPY_ARRAY_NOTSWAPPED)
+#define NPY_ARRAY_CARRAY       (NPY_ARRAY_C_CONTIGUOUS | \
+                                NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_CARRAY_RO    (NPY_ARRAY_C_CONTIGUOUS | \
+                                NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_FARRAY       (NPY_ARRAY_F_CONTIGUOUS | \
+                                NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_FARRAY_RO    (NPY_ARRAY_F_CONTIGUOUS | \
+                                NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_DEFAULT      (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_IN_ARRAY     (NPY_ARRAY_CARRAY_RO)
+#define NPY_ARRAY_OUT_ARRAY    (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_INOUT_ARRAY  (NPY_ARRAY_CARRAY | \
+                                NPY_ARRAY_UPDATEIFCOPY)
+#define NPY_ARRAY_IN_FARRAY    (NPY_ARRAY_FARRAY_RO)
+#define NPY_ARRAY_OUT_FARRAY   (NPY_ARRAY_FARRAY)
+#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \
+                                NPY_ARRAY_UPDATEIFCOPY)
+
+#define NPY_ARRAY_UPDATE_ALL   (NPY_ARRAY_C_CONTIGUOUS | \
+                                NPY_ARRAY_F_CONTIGUOUS | \
+                                NPY_ARRAY_ALIGNED)
+
+#define NPY_FARRAY NPY_ARRAY_FARRAY
+#define NPY_CARRAY NPY_ARRAY_CARRAY
+
+#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags))
+
+#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE)
+#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED)
+
+#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)
+
+#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) &&       \
+                                    PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY)
+#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO)
+#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY)
+#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO)
+#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED)
+#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED)
+
+#define PyArray_ISONESEGMENT(arr)  (1)
+#define PyArray_ISNOTSWAPPED(arr)  (1)
+#define PyArray_ISBYTESWAPPED(arr) (0)
+
+#endif
+
+#define NPY_INT8      NPY_BYTE
+#define NPY_UINT8     NPY_UBYTE
+#define NPY_INT16     NPY_SHORT
+#define NPY_UINT16    NPY_USHORT
+#define NPY_INT32     NPY_INT
+#define NPY_UINT32    NPY_UINT
+#define NPY_INT64     NPY_LONG
+#define NPY_UINT64    NPY_ULONG
+#define NPY_FLOAT32   NPY_FLOAT
+#define NPY_FLOAT64   NPY_DOUBLE
+#define NPY_COMPLEX32 NPY_CFLOAT
+#define NPY_COMPLEX64 NPY_CDOUBLE
+
+
+/* functions */
+#ifndef PyArray_NDIM
+
+#define PyArray_Check      _PyArray_Check
+#define PyArray_CheckExact _PyArray_CheckExact
+#define PyArray_FLAGS      _PyArray_FLAGS
+
+#define PyArray_NDIM       _PyArray_NDIM
+#define PyArray_DIM        _PyArray_DIM
+#define PyArray_STRIDE     _PyArray_STRIDE
+#define PyArray_SIZE       _PyArray_SIZE
+#define PyArray_ITEMSIZE   _PyArray_ITEMSIZE
+#define PyArray_NBYTES     _PyArray_NBYTES
+#define PyArray_TYPE       _PyArray_TYPE
+#define PyArray_DATA       _PyArray_DATA
+
+#define PyArray_Size PyArray_SIZE
+#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr))
+
+#define PyArray_FromAny _PyArray_FromAny
+#define PyArray_FromObject _PyArray_FromObject
+#define PyArray_ContiguousFromObject PyArray_FromObject
+#define PyArray_ContiguousFromAny PyArray_FromObject
+
+#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj)
+#define PyArray_FROM_OTF(obj, typenum, requirements) \
+        PyArray_FromObject(obj, typenum, 0, 0)
+
+#define PyArray_New _PyArray_New
+#define PyArray_SimpleNew _PyArray_SimpleNew
+#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData
+#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning
+
+#define PyArray_EMPTY(nd, dims, type_num, fortran) \
+        PyArray_SimpleNew(nd, dims, type_num)
 
 PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val);
 PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran);
 
+#define PyArray_FILLWBYTE _PyArray_FILLWBYTE
+#define PyArray_ZEROS _PyArray_ZEROS
 
+#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL)
+
+/* Don't use these in loops! */
+
+#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \
+                                         (i)*PyArray_STRIDE(obj,0)))
+
+#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \
+                                            (i)*PyArray_STRIDE(obj,0) + \
+                                            (j)*PyArray_STRIDE(obj,1)))
+
+#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \
+                                            (i)*PyArray_STRIDE(obj,0) + \
+                                            (j)*PyArray_STRIDE(obj,1) + \
+                                            (k)*PyArray_STRIDE(obj,2)))
+
+#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \
+                                            (i)*PyArray_STRIDE(obj,0) + \
+                                            (j)*PyArray_STRIDE(obj,1) + \
+                                            (k)*PyArray_STRIDE(obj,2) + \
+                                            (l)*PyArray_STRIDE(obj,3)))
+
+#endif
 
 #ifdef __cplusplus
 }
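
Taken together, the arrayobject.h additions above give downstream C code the
familiar access macros: type and flag predicates layered on
PyArray_TYPE/PyArray_FLAGS, and the PyArray_GETPTR* helpers layered on
PyArray_BYTES/PyArray_STRIDE. A hedged usage sketch (the function name is
illustrative, not part of the changeset):

    #include <Python.h>
    #include <numpy/arrayobject.h>

    /* Sum a 2-D float64 array using only macros from arrayobject.h.
     * PyArray_GETPTR2 recomputes the address from the strides on every
     * call, which is why the header warns against using it in hot loops;
     * the NPY_ARRAY_* flags can be tested via e.g. PyArray_ISCARRAY(arr). */
    static double sum2d(PyObject *arr)
    {
        npy_intp i, j, n, m;
        double total = 0.0;

        if (!PyArray_Check(arr) || PyArray_TYPE(arr) != NPY_DOUBLE ||
            PyArray_NDIM(arr) != 2)
            return 0.0;

        n = PyArray_DIM(arr, 0);
        m = PyArray_DIM(arr, 1);
        for (i = 0; i < n; i++)
            for (j = 0; j < m; j++)
                total += *(double *)PyArray_GETPTR2(arr, i, j);
        return total;
    }
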
diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h
--- a/pypy/module/cpyext/include/numpy/ndarraytypes.h
+++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h
@@ -1,9 +1,69 @@
 #ifndef NDARRAYTYPES_H
 #define NDARRAYTYPES_H
 
-/* For testing ndarrayobject only */
+#include "numpy/npy_common.h"
+//#include "npy_endian.h"
+//#include "npy_cpu.h"
+//#include "utils.h"
 
-#include "numpy/npy_common.h"
+// for pypy - numpy has lots of typedefs
+// for pypy - make life easier, less backward-compatibility support
+#define NPY_1_8_API_VERSION 0x00000008
+#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION
+#undef NPY_1_8_API_VERSION
+
+#define NPY_ENABLE_SEPARATE_COMPILATION 1
+#define NPY_VISIBILITY_HIDDEN 
+
+#ifdef NPY_ENABLE_SEPARATE_COMPILATION
+        #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN
+#else
+        #define NPY_NO_EXPORT static
+#endif
+
+/* Only use thread if configured in config and python supports it */
+#if defined WITH_THREAD && !NPY_NO_SMP
+        #define NPY_ALLOW_THREADS 1
+#else
+        #define NPY_ALLOW_THREADS 0
+#endif
+
+
+
+/*
+ * There are several places in the code where an array of dimensions
+ * is allocated statically.  This is the size of that static
+ * allocation.
+ *
+ * The array creation itself could have arbitrary dimensions but all
+ * the places where static allocation is used would need to be changed
+ * to dynamic (including inside of several structures)
+ */
+
+#define NPY_MAXDIMS 32
+#define NPY_MAXARGS 32
+
+/* Used for Converter Functions "O&" code in ParseTuple */
+#define NPY_FAIL 0
+#define NPY_SUCCEED 1
+
+/*
+ * Binary compatibility version number.  This number is increased
+ * whenever the C-API is changed such that binary compatibility is
+ * broken, i.e. whenever a recompile of extension modules is needed.
+ */
+#define NPY_VERSION NPY_ABI_VERSION
+
+/*
+ * Minor API version.  This number is increased whenever a change is
+ * made to the C-API -- whether it breaks binary compatibility or not.
+ * Some changes, such as adding a function pointer to the end of the
+ * function table, can be made without breaking binary compatibility.
+ * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION)
+ * would be increased.  Whenever binary compatibility is broken, both
+ * NPY_VERSION and NPY_FEATURE_VERSION should be increased.
+ */
+#define NPY_FEATURE_VERSION NPY_API_VERSION
 
 enum NPY_TYPES {    NPY_BOOL=0,
                     NPY_BYTE, NPY_UBYTE,
@@ -31,6 +91,18 @@
                     NPY_NTYPES_ABI_COMPATIBLE=21
 };
 
+/* basetype array priority */
+#define NPY_PRIORITY 0.0
+
+/* default subtype priority */
+#define NPY_SUBTYPE_PRIORITY 1.0
+
+/* default scalar priority */
+#define NPY_SCALAR_PRIORITY -1000000.0
+
+/* How many floating point types are there (excluding half) */
+#define NPY_NUM_FLOATTYPE 3
+
 /*
  * These characters correspond to the array type and the struct
  * module
@@ -85,6 +157,27 @@
 };
 
 typedef enum {
+        NPY_QUICKSORT=0,
+        NPY_HEAPSORT=1,
+        NPY_MERGESORT=2
+} NPY_SORTKIND;
+#define NPY_NSORTS (NPY_MERGESORT + 1)
+
+
+typedef enum {
+        NPY_INTROSELECT=0,
+} NPY_SELECTKIND;
+#define NPY_NSELECTS (NPY_INTROSELECT + 1)
+
+
+typedef enum {
+        NPY_SEARCHLEFT=0,
+        NPY_SEARCHRIGHT=1
+} NPY_SEARCHSIDE;
+#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1)
+
+
+typedef enum {
         NPY_NOSCALAR=-1,
         NPY_BOOL_SCALAR,
         NPY_INTPOS_SCALAR,
@@ -93,6 +186,7 @@
         NPY_COMPLEX_SCALAR,
         NPY_OBJECT_SCALAR
 } NPY_SCALARKIND;
+#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1)
 
 /* For specifying array memory layout or iteration order */
 typedef enum {
@@ -106,6 +200,729 @@
         NPY_KEEPORDER=2
 } NPY_ORDER;
 
+/* For specifying allowed casting in operations which support it */
+typedef enum {
+        /* Only allow identical types */
+        NPY_NO_CASTING=0,
+        /* Allow identical and byte swapped types */
+        NPY_EQUIV_CASTING=1,
+        /* Only allow safe casts */
+        NPY_SAFE_CASTING=2,
+        /* Allow safe casts or casts within the same kind */
+        NPY_SAME_KIND_CASTING=3,
+        /* Allow any casts */
+        NPY_UNSAFE_CASTING=4,
+
+        /*
+         * Temporary internal definition only, will be removed in upcoming
+         * release, see below
+         * */
+        NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100,
+} NPY_CASTING;
+
+typedef enum {
+        NPY_CLIP=0,
+        NPY_WRAP=1,
+        NPY_RAISE=2
+} NPY_CLIPMODE;
+
+/* The special not-a-time (NaT) value */
+#define NPY_DATETIME_NAT NPY_MIN_INT64
+
+/*
+ * Upper bound on the length of a DATETIME ISO 8601 string
+ *   YEAR: 21 (64-bit year)
+ *   MONTH: 3
+ *   DAY: 3
+ *   HOURS: 3
+ *   MINUTES: 3
+ *   SECONDS: 3
+ *   ATTOSECONDS: 1 + 3*6
+ *   TIMEZONE: 5
+ *   NULL TERMINATOR: 1
+ */
+#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1)
+
+typedef enum {
+        NPY_FR_Y = 0,  /* Years */
+        NPY_FR_M = 1,  /* Months */
+        NPY_FR_W = 2,  /* Weeks */
+        /* Gap where 1.6 NPY_FR_B (value 3) was */
+        NPY_FR_D = 4,  /* Days */
+        NPY_FR_h = 5,  /* hours */
+        NPY_FR_m = 6,  /* minutes */
+        NPY_FR_s = 7,  /* seconds */
+        NPY_FR_ms = 8, /* milliseconds */
+        NPY_FR_us = 9, /* microseconds */
+        NPY_FR_ns = 10,/* nanoseconds */
+        NPY_FR_ps = 11,/* picoseconds */
+        NPY_FR_fs = 12,/* femtoseconds */
+        NPY_FR_as = 13,/* attoseconds */
+        NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */
+} NPY_DATETIMEUNIT;
+
+/*
+ * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS
+ *       is technically one more than the actual number of units.
+ */
+#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1)
+#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC
+
+/*
+ * Business day conventions for mapping invalid business
+ * days to valid business days.
+ */
+typedef enum {
+    /* Go forward in time to the following business day. */
+    NPY_BUSDAY_FORWARD,
+    NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD,
+    /* Go backward in time to the preceding business day. */
+    NPY_BUSDAY_BACKWARD,
+    NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD,
+    /*
+     * Go forward in time to the following business day, unless it
+     * crosses a month boundary, in which case go backward
+     */
+    NPY_BUSDAY_MODIFIEDFOLLOWING,
+    /*
+     * Go backward in time to the preceding business day, unless it
+     * crosses a month boundary, in which case go forward.
+     */
+    NPY_BUSDAY_MODIFIEDPRECEDING,
+    /* Produce a NaT for non-business days. */
+    NPY_BUSDAY_NAT,
+    /* Raise an exception for non-business days. */
+    NPY_BUSDAY_RAISE
+} NPY_BUSDAY_ROLL;
+
+/************************************************************
+ * NumPy Auxiliary Data for inner loops, sort functions, etc.
+ ************************************************************/
+
+/*
+ * When creating an auxiliary data struct, this should always appear
+ * as the first member, like this:
+ *
+ * typedef struct {
+ *     NpyAuxData base;
+ *     double constant;
+ * } constant_multiplier_aux_data;
+ */
+typedef struct NpyAuxData_tag NpyAuxData;
+
+/* Function pointers for freeing or cloning auxiliary data */
+typedef void (NpyAuxData_FreeFunc) (NpyAuxData *);
+typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *);
+
+struct NpyAuxData_tag {
+    NpyAuxData_FreeFunc *free;
+    NpyAuxData_CloneFunc *clone;
+    /* To allow for a bit of expansion without breaking the ABI */
+    void *reserved[2];
+};
+
+/* Macros to use for freeing and cloning auxiliary data */
+#define NPY_AUXDATA_FREE(auxdata) \
+    do { \
+        if ((auxdata) != NULL) { \
+            (auxdata)->free(auxdata); \
+        } \
+    } while(0)
+#define NPY_AUXDATA_CLONE(auxdata) \
+    ((auxdata)->clone(auxdata))
+
+#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr);
+#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr);
+
+#define NPY_STRINGIFY(x) #x
+#define NPY_TOSTRING(x) NPY_STRINGIFY(x)
+
+  /*
+   * Macros to define how array, and dimension/strides data is
+   * allocated.
+   */
+
+  /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */
+
+#define NPY_USE_PYMEM 1
+
+#if NPY_USE_PYMEM == 1
+#define PyArray_malloc PyMem_Malloc
+#define PyArray_free PyMem_Free
+#define PyArray_realloc PyMem_Realloc
+#else
+#define PyArray_malloc malloc
+#define PyArray_free free
+#define PyArray_realloc realloc
+#endif
+
+/* Dimensions and strides */
+#define PyDimMem_NEW(size)                                         \
+    ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp)))
+
+#define PyDimMem_FREE(ptr) PyArray_free(ptr)
+
+#define PyDimMem_RENEW(ptr,size)                                   \
+        ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp)))
+
+/* forward declaration */
+struct _PyArray_Descr;
+
+/* These must deal with unaligned and swapped data if necessary */
+typedef PyObject * (PyArray_GetItemFunc) (void *, void *);
+typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *);
+
+typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp,
+                                     npy_intp, int, void *);
+
+typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *);
+typedef npy_bool (PyArray_NonzeroFunc)(void *, void *);
+
+
+/*
+ * These assume aligned and notswapped data -- a buffer will be used
+ * before or contiguous data will be obtained
+ */
+
+typedef int (PyArray_CompareFunc)(const void *, const void *, void *);
+typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *);
+
+typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *,
+                               npy_intp, void *);
+
+typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *,
+                                       void *);
+
+/*
+ * XXX the ignore argument should be removed next time the API version
+ * is bumped. It used to be the separator.
+ */
+typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr,
+                               char *ignore, struct _PyArray_Descr *);
+typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr,
+                                  struct _PyArray_Descr *);
+
+typedef int (PyArray_FillFunc)(void *, npy_intp, void *);
+
+typedef int (PyArray_SortFunc)(void *, npy_intp, void *);
+typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *);
+typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp,
+                                    npy_intp *, npy_intp *,
+                                    void *);
+typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp,
+                                       npy_intp *, npy_intp *,
+                                       void *);
+
+typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *);
+
+typedef int (PyArray_ScalarKindFunc)(void *);
+
+typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min,
+                                    void *max, void *out);
+typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in,
+                                       void *values, npy_intp nv);
+typedef int  (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray,
+                                       npy_intp nindarray, npy_intp n_outer,
+                                       npy_intp m_middle, npy_intp nelem,
+                                       NPY_CLIPMODE clipmode);
+
+typedef struct {
+        npy_intp *ptr;
+        int len;
+} PyArray_Dims;
+
+typedef struct {
+        /*
+         * Functions to cast to most other standard types
+         * Can have some NULL entries. The types
+         * DATETIME, TIMEDELTA, and HALF go into the castdict
+         * even though they are built-in.
+         */
+        PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE];
+
+        /* The next four functions *cannot* be NULL */
+
+        /*
+         * Functions to get and set items with standard Python types
+         * -- not array scalars
+         */
+        PyArray_GetItemFunc *getitem;
+        PyArray_SetItemFunc *setitem;
+
+        /*
+         * Copy and/or swap data.  Memory areas may not overlap
+         * Use memmove first if they might
+         */
+        PyArray_CopySwapNFunc *copyswapn;
+        PyArray_CopySwapFunc *copyswap;
+
+        /*
+         * Function to compare items
+         * Can be NULL
+         */
+        PyArray_CompareFunc *compare;
+
+        /*
+         * Function to select largest
+         * Can be NULL
+         */
+        PyArray_ArgFunc *argmax;
+
+        /*
+         * Function to compute dot product
+         * Can be NULL
+         */
+        PyArray_DotFunc *dotfunc;
+
+        /*
+         * Function to scan an ASCII file and
+         * place a single value plus possible separator
+         * Can be NULL
+         */
+        PyArray_ScanFunc *scanfunc;
+
+        /*
+         * Function to read a single value from a string
+         * and adjust the pointer; Can be NULL
+         */
+        PyArray_FromStrFunc *fromstr;
+
+        /*
+         * Function to determine if data is zero or not
+         * If NULL a default version is
+         * used at Registration time.
+         */
+        PyArray_NonzeroFunc *nonzero;
+
+        /*
+         * Used for arange.
+         * Can be NULL.
+         */
+        PyArray_FillFunc *fill;
+
+        /*
+         * Function to fill arrays with scalar values
+         * Can be NULL
+         */
+        PyArray_FillWithScalarFunc *fillwithscalar;
+
+        /*
+         * Sorting functions
+         * Can be NULL
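
The NpyAuxData section in the ndarraytypes.h hunk above describes the
convention for auxiliary data handed to inner loops and sort functions: the
NpyAuxData header goes first in the struct and carries free/clone function
pointers, so NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE work on any such struct.
A hedged sketch of that convention, with illustrative names only:

    #include <Python.h>
    #include <stdlib.h>
    #include <string.h>
    #include <numpy/ndarraytypes.h>

    /* NpyAuxData must be the first member so a pointer to the struct can
     * also be treated as an NpyAuxData pointer. */
    typedef struct {
        NpyAuxData base;
        double constant;
    } constant_multiplier_aux_data;

    static void constant_multiplier_free(NpyAuxData *data)
    {
        free(data);
    }

    static NpyAuxData *constant_multiplier_clone(NpyAuxData *data)
    {
        NpyAuxData *copy = malloc(sizeof(constant_multiplier_aux_data));
        if (copy != NULL)
            memcpy(copy, data, sizeof(constant_multiplier_aux_data));
        return copy;
    }

    static NpyAuxData *constant_multiplier_new(double constant)
    {
        constant_multiplier_aux_data *aux = calloc(1, sizeof(*aux));
        if (aux == NULL)
            return NULL;
        aux->base.free = constant_multiplier_free;
        aux->base.clone = constant_multiplier_clone;
        aux->constant = constant;
        return (NpyAuxData *)aux;
    }

    /* Callers release it through the macro, which tolerates NULL:
     *     NPY_AUXDATA_FREE(aux);
     */
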