Author: Philip Jenvey <[email protected]>
Branch: py3k
Changeset: r64553:31c7f1d4c308
Date: 2013-05-24 16:55 -0700
http://bitbucket.org/pypy/pypy/changeset/31c7f1d4c308/
Log: merge default
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -163,6 +163,9 @@
$ genreflex MyClass.h
$ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex
+Next, make sure that the library can be found through the dynamic lookup path
+(the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows),
+for example by adding ".".
Now you're ready to use the bindings.
Since the bindings are designed to look pythonistic, it should be
straightforward::
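With the library on the lookup path, a minimal interactive session then looks
roughly like the sketch below (load_reflection_info and the cppyy.gbl namespace
are the API this document describes; a default-constructible MyClass is assumed
for illustration):

    $ pypy-c
    >>>> import cppyy
    >>>> cppyy.load_reflection_info("libMyClassDict.so")   # the dictionary built above
    >>>> myinst = cppyy.gbl.MyClass()                      # use the C++ class from Python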
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -128,14 +128,21 @@
@entrypoint('main', [], c_name='pypy_init_threads')
def pypy_init_threads():
- if space.config.objspace.usemodules.thread:
- os_thread.setup_threads(space)
- rffi.aroundstate.before()
+ if not space.config.objspace.usemodules.thread:
+ return
+ os_thread.setup_threads(space)
+ rffi.aroundstate.before()
@entrypoint('main', [], c_name='pypy_thread_attach')
def pypy_thread_attach():
- if space.config.objspace.usemodules.thread:
- rthread.gc_thread_start()
+ if not space.config.objspace.usemodules.thread:
+ return
+ os_thread.setup_threads(space)
+ os_thread.bootstrapper.acquire(space, None, None)
+ rthread.gc_thread_start()
+ os_thread.bootstrapper.nbthreads += 1
+ os_thread.bootstrapper.release()
+ rffi.aroundstate.before()
w_globals = space.newdict()
space.setitem(w_globals, space.wrap('__builtins__'),
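The @entrypoint functions above are exported as plain C symbols from a
libpypy-c build: an embedder calls pypy_init_threads() once from its main
thread and pypy_thread_attach() from every thread it creates itself before
running interpreter code in it. A rough ctypes sketch of that calling sequence
(the library name, home path and the companion pypy_setup_home entry point are
assumptions used for illustration, not part of this diff):

    import ctypes

    lib = ctypes.CDLL("libpypy-c.so")        # assumes a shared-library PyPy build
    lib.pypy_setup_home(b"/opt/pypy", 1)     # assumed companion entry point
    lib.pypy_init_threads()                  # once, from the main thread
    # ... later, in each externally created thread, before any Python code:
    lib.pypy_thread_attach()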
diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py
--- a/pypy/interpreter/pyparser/future.py
+++ b/pypy/interpreter/pyparser/future.py
@@ -1,308 +1,3 @@
-"""
-This automaton is designed to be invoked on a Python source string
-before the real parser starts working, in order to find all legal
-'from __future__ import blah'. As soon as something is encountered that
-would prevent more future imports, the analysis is aborted.
-The resulting legal futures are available in self.flags after the
-pass has ended.
-
-Invocation is through get_futures(src), which returns a field of flags, one per
-found correct future import.
-
-The flags can then be used to set up the parser.
-All error detection is left to the parser.
-
-The reason we are not using the regular lexer/parser toolchain is that
-we do not want the overhead of generating tokens for entire files just
-to find information that resides in the first few lines of the file.
-Neither do we require sane error messages, as this job is handled by
-the parser.
-
-To make the parsing fast, especially when the module is translated to C,
-the code has been written in a very serial fashion, using an almost
-assembler like style. A further speedup could be achieved by replacing
-the "in" comparisons with explicit numeric comparisons.
-"""
-
-from pypy.interpreter.astcompiler.consts import (
- CO_GENERATOR_ALLOWED, CO_FUTURE_DIVISION, CO_FUTURE_WITH_STATEMENT,
- CO_FUTURE_ABSOLUTE_IMPORT, CO_FUTURE_BARRY_AS_BDFL)
-
-def get_futures(future_flags, source):
- futures = FutureAutomaton(future_flags, source)
- try:
- futures.start()
- except DoneException, e:
- pass
- return futures.flags, (futures.lineno, futures.col_offset)
-
-class DoneException(Exception):
- pass
-
-whitespace = ' \t\f'
-whitespace_or_newline = whitespace + '\n\r'
-letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxyz_'
-alphanumerics = letters + '1234567890'
-
-class FutureAutomaton(object):
- """
- A future statement must appear near the top of the module.
- The only lines that can appear before a future statement are:
-
- * the module docstring (if any),
- * comments,
- * blank lines, and
- * other future statements.
-
- The features recognized by Python 2.5 are "generators",
- "division", "nested_scopes" and "with_statement", "absolute_import".
- "generators", "division" and "nested_scopes" are redundant
- in 2.5 because they are always enabled.
-
- This module parses the input until it encounters something that is
- not recognized as a valid future statement or something that may
- precede a future statement.
- """
-
- def __init__(self, future_flags, string):
- self.future_flags = future_flags
- self.s = string
- self.pos = 0
- self.current_lineno = 1
- self.lineno = -1
- self.line_start_pos = 0
- self.col_offset = 0
- self.docstring_consumed = False
- self.flags = 0
- self.got_features = 0
-
- def getc(self, offset=0):
- try:
- return self.s[self.pos + offset]
- except IndexError:
- raise DoneException
-
- def start(self):
- c = self.getc()
- if c in ("'", '"', "r", "u") and not self.docstring_consumed:
- self.consume_docstring()
- elif c == '\\' or c in whitespace_or_newline:
- self.consume_empty_line()
- elif c == '#':
- self.consume_comment()
- elif c == 'f':
- self.consume_from()
- else:
- return
-
- def atbol(self):
- self.current_lineno += 1
- self.line_start_pos = self.pos
-
- def consume_docstring(self):
- self.docstring_consumed = True
- if self.getc() == "r":
- self.pos += 1
- if self.getc() == "u":
- self.pos += 1
- endchar = self.getc()
- if (self.getc() == self.getc(+1) and
- self.getc() == self.getc(+2)):
- self.pos += 3
- while 1: # Deal with a triple quoted docstring
- c = self.getc()
- if c == '\\':
- self.pos += 1
- self._skip_next_char_from_docstring()
- elif c != endchar:
- self._skip_next_char_from_docstring()
- else:
- self.pos += 1
- if (self.getc() == endchar and
- self.getc(+1) == endchar):
- self.pos += 2
- self.consume_empty_line()
- break
-
- else: # Deal with a single quoted docstring
- self.pos += 1
- while 1:
- c = self.getc()
- self.pos += 1
- if c == endchar:
- self.consume_empty_line()
- return
- elif c == '\\':
- self._skip_next_char_from_docstring()
- elif c in '\r\n':
- # Syntax error
- return
-
- def _skip_next_char_from_docstring(self):
- c = self.getc()
- self.pos += 1
- if c == '\n':
- self.atbol()
- elif c == '\r':
- if self.getc() == '\n':
- self.pos += 1
- self.atbol()
-
- def consume_continuation(self):
- c = self.getc()
- if c in '\n\r':
- self.pos += 1
- self.atbol()
-
- def consume_empty_line(self):
- """
- Called when the remainder of the line can only contain whitespace
- and comments.
- """
- while self.getc() in whitespace:
- self.pos += 1
- if self.getc() == '#':
- self.consume_comment()
- elif self.getc() == ';':
- self.pos += 1
- self.consume_whitespace()
- self.start()
- elif self.getc() in '\\':
- self.pos += 1
- self.consume_continuation()
- self.start()
- elif self.getc() in '\r\n':
- c = self.getc()
- self.pos += 1
- if c == '\r':
- if self.getc() == '\n':
- self.pos += 1
- self.atbol()
- else:
- self.atbol()
- self.start()
-
- def consume_comment(self):
- self.pos += 1
- while self.getc() not in '\r\n':
- self.pos += 1
- self.consume_empty_line()
-
- def consume_from(self):
- col_offset = self.pos - self.line_start_pos
- line = self.current_lineno
- self.pos += 1
- if self.getc() == 'r' and self.getc(+1) == 'o' and self.getc(+2) == 'm':
- self.docstring_consumed = True
- self.pos += 3
- self.consume_mandatory_whitespace()
- if self.s[self.pos:self.pos+10] != '__future__':
- raise DoneException
- self.pos += 10
- self.consume_mandatory_whitespace()
- if self.s[self.pos:self.pos+6] != 'import':
- raise DoneException
- self.pos += 6
- self.consume_whitespace()
- old_got = self.got_features
- try:
- if self.getc() == '(':
- self.pos += 1
- self.consume_whitespace()
- self.set_flag(self.get_name())
- # Set flag corresponding to name
- self.get_more(paren_list=True)
- else:
- self.set_flag(self.get_name())
- self.get_more()
- finally:
- if self.got_features > old_got:
- self.col_offset = col_offset
- self.lineno = line
- self.consume_empty_line()
-
- def consume_mandatory_whitespace(self):
- if self.getc() not in whitespace + '\\':
- raise DoneException
- self.consume_whitespace()
-
- def consume_whitespace(self, newline_ok=False):
- while 1:
- c = self.getc()
- if c in whitespace:
- self.pos += 1
- continue
- elif c == '\\' or newline_ok:
- slash = c == '\\'
- if slash:
- self.pos += 1
- c = self.getc()
- if c == '\n':
- self.pos += 1
- self.atbol()
- continue
- elif c == '\r':
- self.pos += 1
- if self.getc() == '\n':
- self.pos += 1
- self.atbol()
- elif slash:
- raise DoneException
- else:
- return
- else:
- return
-
- def get_name(self):
- if self.getc() not in letters:
- raise DoneException
- p = self.pos
- try:
- while self.getc() in alphanumerics:
- self.pos += 1
- except DoneException:
- # If there's any name at all, we want to call self.set_flag().
- # Something else will get the DoneException again.
- if self.pos == p:
- raise
- end = self.pos
- else:
- end = self.pos
- self.consume_whitespace()
- return self.s[p:end]
-
- def get_more(self, paren_list=False):
- if paren_list and self.getc() == ')':
- self.pos += 1
- return
- if (self.getc() == 'a' and
- self.getc(+1) == 's' and
- self.getc(+2) in whitespace):
- self.get_name()
- self.get_name()
- self.get_more(paren_list=paren_list)
- return
- elif self.getc() != ',':
- return
- else:
- self.pos += 1
- self.consume_whitespace(paren_list)
- if paren_list and self.getc() == ')':
- self.pos += 1
- return # Handles trailing comma inside parenthesis
- self.set_flag(self.get_name())
- self.get_more(paren_list=paren_list)
-
- def set_flag(self, feature):
- self.got_features += 1
- try:
- self.flags |= self.future_flags.compiler_features[feature]
- except KeyError:
- pass
-
-from codeop import PyCF_DONT_IMPLY_DEDENT
-from pypy.interpreter.error import OperationError
-
from pypy.tool import stdlib___future__ as future
class FutureFlags(object):
@@ -328,7 +23,82 @@
flag_names.append(name)
return flag_names
+ def get_compiler_feature(self, name):
+ return self.compiler_features.get(name, 0)
+
futureFlags_2_4 = FutureFlags((2, 4, 4, 'final', 0))
futureFlags_2_5 = FutureFlags((2, 5, 0, 'final', 0))
futureFlags_2_7 = FutureFlags((2, 7, 0, 'final', 0))
futureFlags_3_2 = FutureFlags((3, 2, 0, 'final', 0))
+
+
+class TokenIterator:
+ def __init__(self, tokens):
+ self.tokens = tokens
+ self.index = 0
+ self.next()
+
+ def next(self):
+ index = self.index
+ self.index = index + 1
+ self.tok = self.tokens[index]
+
+ def skip(self, n):
+ if self.tok[0] == n:
+ self.next()
+ return True
+ else:
+ return False
+
+ def skip_name(self, name):
+ from pypy.interpreter.pyparser import pygram
+ if self.tok[0] == pygram.tokens.NAME and self.tok[1] == name:
+ self.next()
+ return True
+ else:
+ return False
+
+ def next_feature_name(self):
+ from pypy.interpreter.pyparser import pygram
+ if self.tok[0] == pygram.tokens.NAME:
+ name = self.tok[1]
+ self.next()
+ if self.skip_name("as"):
+ self.skip(pygram.tokens.NAME)
+ return name
+ else:
+ return ''
+
+ def skip_newlines(self):
+ from pypy.interpreter.pyparser import pygram
+ while self.skip(pygram.tokens.NEWLINE):
+ pass
+
+
+def add_future_flags(future_flags, tokens):
+ from pypy.interpreter.pyparser import pygram
+ it = TokenIterator(tokens)
+ result = 0
+ #
+ # The only things that can precede a future statement are another
+ # future statement and a doc string (only one). This is a very
+ # permissive parsing of the given list of tokens; it relies on
+ # the real parsing done afterwards to give errors.
+ it.skip_newlines()
+ it.skip_name("r") or it.skip_name("u") or it.skip_name("ru")
+ if it.skip(pygram.tokens.STRING):
+ it.skip_newlines()
+
+ while (it.skip_name("from") and
+ it.skip_name("__future__") and
+ it.skip_name("import")):
+ it.skip(pygram.tokens.LPAR) # optionally
+ result |= future_flags.get_compiler_feature(it.next_feature_name())
+ while it.skip(pygram.tokens.COMMA):
+ result |= future_flags.get_compiler_feature(it.next_feature_name())
+ it.skip(pygram.tokens.RPAR) # optionally
+ it.skip(pygram.tokens.SEMI) # optionally
+ it.skip_newlines()
+
+ position = (it.tok[2], it.tok[3])
+ return result, position
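The rewritten future.py works on an already tokenized source: callers run the
tokenizer first and then scan the resulting token list for future imports, as
the pyparse.py hunk below wires up. A minimal sketch of that call pattern,
mirroring the run() helper in the rewritten tests (module paths as in this
diff, so it only runs inside a PyPy source tree):

    from pypy.interpreter.pyparser import pytokenizer, future

    source_lines = 'from __future__ import division\n'.splitlines(True)
    tokens = pytokenizer.generate_tokens(source_lines, 0)
    flags, last_future_import = future.add_future_flags(
        future.futureFlags_2_7, tokens)
    # flags is a bitmask of CO_FUTURE_* constants; last_future_import is the
    # (lineno, column) of the first token after the future imports, stored as
    # compile_info.last_future_import in pyparse.py below.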
diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -139,14 +139,8 @@
raise error.SyntaxError(space.str_w(w_message))
raise
- f_flags, future_info = future.get_futures(self.future_flags, textsrc)
- compile_info.last_future_import = future_info
- compile_info.flags |= f_flags
-
flags = compile_info.flags
- self.grammar = pygram.python_grammar
-
# The tokenizer is very picky about how it wants its input.
source_lines = textsrc.splitlines(True)
if source_lines and not source_lines[-1].endswith("\n"):
@@ -158,7 +152,17 @@
tp = 0
try:
try:
+ # Note: we no longer pass the CO_FUTURE_* to the tokenizer,
+ # which is expected to work independently of them. It's
+ # certainly the case for all futures in Python <= 2.7.
tokens = pytokenizer.generate_tokens(source_lines, flags)
+
+ newflags, last_future_import = (
+ future.add_future_flags(self.future_flags, tokens))
+ compile_info.last_future_import = last_future_import
+ compile_info.flags |= newflags
+ self.grammar = pygram.python_grammar
+
for tp, value, lineno, column, line in tokens:
if self.add_token(tp, value, lineno, column, line):
break
diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_future.py
rename from pypy/interpreter/pyparser/test/test_futureautomaton.py
rename to pypy/interpreter/pyparser/test/test_future.py
--- a/pypy/interpreter/pyparser/test/test_futureautomaton.py
+++ b/pypy/interpreter/pyparser/test/test_future.py
@@ -1,29 +1,26 @@
import py
-import pypy.interpreter.pyparser.future as future
+from pypy.interpreter.pyparser import future, pytokenizer
from pypy.tool import stdlib___future__ as fut
-def run(s):
- f = future.FutureAutomaton(future.futureFlags_2_7, s)
- try:
- f.start()
- except future.DoneException:
- pass
- return f
+def run(s, expected_last_future=None):
+ source_lines = s.splitlines(True)
+ tokens = pytokenizer.generate_tokens(source_lines, 0)
+ expected_last_future = expected_last_future or tokens[-1][2:4]
+ #
+ flags, last_future_import = future.add_future_flags(
+ future.futureFlags_2_7, tokens)
+ assert last_future_import == expected_last_future
+ return flags
def test_docstring():
s = '"Docstring\\" "\nfrom __future__ import division\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 2
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_comment():
s = '# A comment about nothing ;\n'
f = run(s)
- assert f.pos == len(s)
- assert f.lineno == -1
- assert f.col_offset == 0
+ assert f == 0
def test_tripledocstring():
s = '''""" This is a
@@ -31,9 +28,7 @@
breaks in it. It even has a \n"""
'''
f = run(s)
- assert f.pos == len(s)
- assert f.lineno == -1
- assert f.col_offset == 0
+ assert f == 0
def test_escapedquote_in_tripledocstring():
s = '''""" This is a
@@ -41,233 +36,176 @@
breaks in it. \\"""It even has an escaped quote!"""
'''
f = run(s)
- assert f.pos == len(s)
- assert f.lineno == -1
- assert f.col_offset == 0
+ assert f == 0
def test_empty_line():
s = ' \t \f \n \n'
f = run(s)
- assert f.pos == len(s)
- assert f.lineno == -1
- assert f.col_offset == 0
+ assert f == 0
def test_from():
s = 'from __future__ import division\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_froms():
s = 'from __future__ import division, generators, with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_from_as():
s = 'from __future__ import division as b\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_froms_as():
s = 'from __future__ import division as b, generators as c\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_from_paren():
s = 'from __future__ import (division)\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_froms_paren():
s = 'from __future__ import (division, generators)\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_froms_paren_as():
s = 'from __future__ import (division as b, generators,)\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_paren_with_newline():
s = 'from __future__ import (division,\nabsolute_import)\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_ABSOLUTE_IMPORT)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_ABSOLUTE_IMPORT)
+
+def test_paren_with_newline_2():
+ s = 'from __future__ import (\ndivision,\nabsolute_import)\n'
+ f = run(s)
+ assert f == (fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_ABSOLUTE_IMPORT)
def test_multiline():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,)\nfrom __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 4
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_windows_style_lineendings():
s = '"abc" #def\r\n #ghi\r\nfrom __future__ import (division as b, generators,)\r\nfrom __future__ import with_statement\r\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 4
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_mac_style_lineendings():
s = '"abc" #def\r #ghi\rfrom __future__ import (division as b, generators,)\rfrom __future__ import with_statement\r'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 4
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_semicolon():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,); from __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 3
- assert f.col_offset == 55
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
+
+def test_semicolon_2():
+ s = 'from __future__ import division; from foo import bar'
+ f = run(s, expected_last_future=(1, 39))
+ assert f == fut.CO_FUTURE_DIVISION
def test_full_chain():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,); from __future__ import with_statement\n'
- flags, pos = future.get_futures(future.futureFlags_2_5, s)
- assert flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert pos == (3, 55)
+ f = run(s)
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_intervening_code():
s = 'from __future__ import (division as b, generators,)\nfrom sys import modules\nfrom __future__ import with_statement\n'
- flags, pos = future.get_futures(future.futureFlags_2_5, s)
- assert flags & fut.CO_FUTURE_WITH_STATEMENT == 0
- assert pos == (1, 0)
+ f = run(s, expected_last_future=(2, 5))
+ assert f == (fut.CO_FUTURE_DIVISION | fut.CO_GENERATOR_ALLOWED)
def test_nonexisting():
s = 'from __future__ import non_existing_feature\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == 0
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == 0
+
+def test_nonexisting_2():
+ s = 'from __future__ import non_existing_feature, with_statement\n'
+ f = run(s)
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_from_import_abs_import():
s = 'from __future__ import absolute_import\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_ABSOLUTE_IMPORT
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_ABSOLUTE_IMPORT
def test_raw_doc():
s = 'r"Doc"\nfrom __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 2
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_unicode_doc():
s = 'u"Doc"\nfrom __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 2
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_raw_unicode_doc():
s = 'ru"Doc"\nfrom __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_continuation_line():
s = "\\\nfrom __future__ import with_statement\n"
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 2
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_continuation_lines():
s = "\\\n \t\\\nfrom __future__ import with_statement\n"
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 3
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_lots_of_continuation_lines():
s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n"
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 8
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
-# This looks like a bug in cpython parser
-# and would require extensive modifications
-# to future.py in order to emulate the same behaviour
def test_continuation_lines_raise():
- py.test.skip("probably a CPython bug")
s = " \\\n \t\\\nfrom __future__ import with_statement\n"
- try:
- f = run(s)
- except IndentationError, e:
- assert e.args == 'unexpected indent'
- assert f.pos == len(s)
- assert f.flags == 0
- assert f.lineno == -1
- assert f.col_offset == 0
- else:
- raise AssertionError('IndentationError not raised')
- assert f.lineno == 2
- assert f.col_offset == 0
+ f = run(s, expected_last_future=(1, 0))
+ assert f == 0 # because of the INDENT
def test_continuation_lines_in_docstring_single_quoted():
s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom __future__ import division\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 8
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_continuation_lines_in_docstring_triple_quoted():
s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom __future__ import division\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 8
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
+
+def test_blank_lines():
+ s = ('\n\t\n\nfrom __future__ import with_statement'
+ ' \n \n \nfrom __future__ import division')
+ f = run(s)
+ assert f == fut.CO_FUTURE_WITH_STATEMENT | fut.CO_FUTURE_DIVISION
+
+def test_dummy_semicolons():
+ s = ('from __future__ import division;\n'
+ 'from __future__ import with_statement;')
+ f = run(s)
+ assert f == fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_WITH_STATEMENT
diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py
--- a/pypy/interpreter/pyparser/test/test_pyparse.py
+++ b/pypy/interpreter/pyparser/test/test_pyparse.py
@@ -136,6 +136,9 @@
py.test.raises(SyntaxError, self.parse, '0b0l')
py.test.raises(SyntaxError, self.parse, "0b112")
+ def test_print_function(self):
+ self.parse("from __future__ import print_function\nx = print\n")
+
def test_py3k_reject_old_binary_literal(self):
py.test.raises(SyntaxError, self.parse, '0777')
@@ -195,4 +198,3 @@
exc = py.test.raises(SyntaxError, self.parse, input).value
assert exc.msg == ("'ascii' codec can't decode byte 0xc3 "
"in position 16: ordinal not in range(128)")
-
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -304,6 +304,9 @@
'from __future__ import nested_scopes, generators',
'from __future__ import (nested_scopes,\ngenerators)',
'from __future__ import (nested_scopes,\ngenerators,)',
+ 'from __future__ import (\nnested_scopes,\ngenerators)',
+ 'from __future__ import(\n\tnested_scopes,\n\tgenerators)',
+ 'from __future__ import(\n\t\nnested_scopes)',
'from sys import stdin, stderr, stdout',
'from sys import (stdin, stderr,\nstdout)',
'from sys import (stdin, stderr,\nstdout,)',
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -274,7 +274,7 @@
from numpypy import array, dtype
from cPickle import loads, dumps
a = array([1,2,3])
- if self.ptr_size == 8:
+ if self.ptr_size == 8:
assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, '<', None, None, None, -1, -1, 0))
else:
assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0))
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -1704,25 +1704,6 @@
T = lltype.Char
def _coerce(self, space, arr, ofs, dtype, w_items, shape):
- items_w = space.fixedview(w_items)
- for i in range(len(items_w)):
- subdtype = dtype.subdtype
- itemtype = subdtype.itemtype
- if space.len_w(shape) <= 1:
- w_box = itemtype.coerce(space, dtype.subdtype, items_w[i])
- itemtype.store(arr, 0, ofs, w_box)
- ofs += itemtype.get_element_size()
- else:
- size = 1
- for dimension in shape[1:]:
- size *= dimension
- size *= itemtype.get_element_size()
- for w_item in items_w:
- self._coerce(space, arr, ofs, dtype, w_items, shape[1:])
- ofs += size
- return arr
-
- def _coerce(self, space, arr, ofs, dtype, w_items, shape):
# TODO: Make sure the shape and the array match
items_w = space.fixedview(w_items)
subdtype = dtype.subdtype
diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py
--- a/pypy/module/pypyjit/test_pypy_c/test_array.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_array.py
@@ -105,7 +105,6 @@
assert loop.match("""
i10 = int_lt(i6, 1000)
guard_true(i10, descr=...)
- guard_not_invalidated(descr=...)
i11 = int_lt(i6, i7)
guard_true(i11, descr=...)
f13 = getarrayitem_raw(i8, i6, descr=<ArrayF 8>)
@@ -142,7 +141,6 @@
assert loop.match("""
i10 = int_lt(i6, 1000)
guard_true(i10, descr=...)
- guard_not_invalidated(descr=...)
i11 = int_lt(i6, i7)
guard_true(i11, descr=...)
i13 = getarrayitem_raw(i8, i6, descr=<Array. 4>)
diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py
--- a/pypy/module/pypyjit/test_pypy_c/test_call.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_call.py
@@ -339,7 +339,6 @@
loop, = log.loops_by_filename(self.filepath)
# the int strategy is used here
assert loop.match_by_id('append', """
- guard_not_invalidated(descr=...)
i13 = getfield_gc(p8, descr=<FieldS list.length .*>)
i15 = int_add(i13, 1)
# Will be killed by the backend
@@ -487,7 +486,6 @@
assert loop.match("""
i2 = int_lt(i0, i1)
guard_true(i2, descr=...)
- guard_not_invalidated(descr=...)
i3 = force_token()
i4 = int_add(i0, 1)
--TICK--
@@ -587,6 +585,7 @@
""", [1000])
loop, = log.loops_by_id('call')
assert loop.match_by_id('call', '''
+ guard_not_invalidated(descr=...)
i1 = force_token()
''')
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py
--- a/pypy/tool/release/package.py
+++ b/pypy/tool/release/package.py
@@ -73,6 +73,11 @@
rename_pypy_c += '.exe'
binaries = [(pypy_c, rename_pypy_c)]
#
+ builddir = udir.ensure("build", dir=True)
+ pypydir = builddir.ensure(name, dir=True)
+ includedir = basedir.join('include')
+ pypydir.ensure('include', dir=True)
+
if sys.platform == 'win32':
#Don't include a msvcrXX.dll, users should get their own.
#Instructions are provided on the website.
@@ -85,12 +90,22 @@
p = pypy_c.dirpath().join(extra)
if not p.check():
p = py.path.local.sysfind(extra)
- assert p, "%s not found" % (extra,)
+ if not p:
+ print "%s not found, expect trouble if this is a shared build" % (extra,)
+ continue
print "Picking %s" % p
binaries.append((p, p.basename))
- #
- builddir = udir.ensure("build", dir=True)
- pypydir = builddir.ensure(name, dir=True)
+ if pypy_c.dirpath().join("libpypy-c.lib").check():
+ shutil.copyfile(str(pypy_c.dirpath().join("libpypy-c.lib")),
+ str(pypydir.join('include/python27.lib')))
+ print "Picking %s as %s" % (pypy_c.dirpath().join("libpypy-c.lib"),
+ pypydir.join('include/python27.lib'))
+ else:
+ pass
+ # XXX users will complain that they cannot compile cpyext
+ # modules for windows, has the lib moved or are there no
+ # exported functions in the dll so no import library is created?
+
# Careful: to copy lib_pypy, copying just the svn-tracked files
# would not be enough: there are also ctypes_config_cache/_*_cache.py.
shutil.copytree(str(basedir.join('lib-python').join(STDLIB_VER)),
@@ -102,15 +117,10 @@
'*.c', '*.o'))
for file in ['LICENSE', 'README.rst']:
shutil.copy(str(basedir.join(file)), str(pypydir))
- pypydir.ensure('include', dir=True)
- if sys.platform == 'win32':
- shutil.copyfile(str(pypy_c.dirpath().join("libpypy-c.lib")),
- str(pypydir.join('include/python27.lib')))
- # we want to put there all *.h and *.inl from trunk/include
- # and from pypy/_interfaces
- includedir = basedir.join('include')
headers = includedir.listdir('*.h') + includedir.listdir('*.inl')
for n in headers:
+ # we want to put there all *.h and *.inl from trunk/include
+ # and from pypy/_interfaces
shutil.copy(str(n), str(pypydir.join('include')))
#
spdir = pypydir.ensure('site-packages', dir=True)
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -85,7 +85,7 @@
# exported_state is cleared by optimizeopt when the short preamble is
# constructed. If that did not happen the label should not show up
# in a trace that will be used
- assert descr.exported_state is None
+ assert descr.exported_state is None
if not we_are_translated():
op._descr_wref = weakref.ref(op._descr)
op.cleardescr() # clear reference to prevent the history.Stats
@@ -819,7 +819,7 @@
# The history contains new operations to attach as the code for the
# failure of 'resumekey.guard_op'.
- #
+ #
# Attempt to use optimize_bridge(). This may return None in case
# it does not work -- i.e. none of the existing old_loop_tokens match.
new_trace = create_empty_loop(metainterp)
_______________________________________________
pypy-commit mailing list
[email protected]
http://mail.python.org/mailman/listinfo/pypy-commit