7 new commits in pytest:
https://bitbucket.org/hpk42/pytest/commits/0b9d82e69dca/
Changeset: 0b9d82e69dca
User: pfctdayelise
Date: 2013-05-17 10:46:36
Summary: issue #308
first attempt, mark individual parametrize test instances with other marks
(like xfail)
Affected #: 2 files
diff -r 7c468f83e347c21fbed87eeae457f645bfcc7a66 -r
0b9d82e69dcaf5d73e7b9824723fcbe7c6c61038 _pytest/python.py
--- a/_pytest/python.py
+++ b/_pytest/python.py
@@ -4,6 +4,7 @@
import sys
import pytest
from _pytest.main import getfslineno
+from _pytest.mark import MarkDecorator, MarkInfo
from _pytest.monkeypatch import monkeypatch
from py._code.code import TerminalRepr
@@ -565,11 +566,13 @@
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
+ self.keywords = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
+ cs.keywords.update(self.keywords)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
@@ -593,7 +596,7 @@
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
- def setmulti(self, valtype, argnames, valset, id, scopenum=0):
+ def setmulti(self, valtype, argnames, valset, id, keywords, scopenum=0):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
getattr(self, valtype)[arg] = val
@@ -605,6 +608,7 @@
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
+ self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
@@ -673,6 +677,18 @@
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
+ # these marks/keywords will be applied in Function init
+ newkeywords = {}
+ for i, argval in enumerate(argvalues):
+ newkeywords[i] = {}
+ if isinstance(argval, MarkDecorator):
+ # convert into a mark without the test content mixed in
+ newmark = MarkDecorator(argval.markname, argval.args[:-1], argval.kwargs)
+ newkeywords[i] = {newmark.markname: newmark}
+
+ argvalues = [av.args[-1] if isinstance(av, MarkDecorator) else av
+ for av in argvalues]
+
if scope is None:
scope = "subfunction"
scopenum = scopes.index(scope)
@@ -691,7 +707,7 @@
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtype, argnames, valset, ids[i],
- scopenum)
+ newkeywords[i], scopenum)
newcalls.append(newcallspec)
self._calls = newcalls
@@ -908,6 +924,9 @@
for name, val in (py.builtin._getfuncdict(self.obj) or {}).items():
self.keywords[name] = val
+ if callspec:
+ for name, val in callspec.keywords.items():
+ self.keywords[name] = val
if keywords:
for name, val in keywords.items():
self.keywords[name] = val
diff -r 7c468f83e347c21fbed87eeae457f645bfcc7a66 -r
0b9d82e69dcaf5d73e7b9824723fcbe7c6c61038 testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -577,4 +577,175 @@
"*3 passed*"
])
+ @pytest.mark.issue308
+ def test_mark_on_individual_parametrize_instance(self, testdir):
+ s = """
+ import pytest
+ @pytest.mark.foo
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ pytest.mark.bar((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ items = testdir.getitems(s)
+ assert len(items) == 3
+ for item in items:
+ assert 'foo' in item.keywords
+ assert 'bar' not in items[0].keywords
+ assert 'bar' in items[1].keywords
+ assert 'bar' not in items[2].keywords
+
+ @pytest.mark.issue308
+ def test_select_individual_parametrize_instance_based_on_mark(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ pytest.mark.foo((2, 3)),
+ (3, 4),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ testdir.makepyfile(s)
+ rec = testdir.inline_run("-m", 'foo')
+ passed, skipped, fail = rec.listoutcomes()
+ assert len(passed) == 1
+ assert len(skipped) == 0
+ assert len(fail) == 0
+
+ @pytest.mark.xfail("is this important to support??")
+ @pytest.mark.issue308
+ def test_nested_marks_on_individual_parametrize_instance(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ pytest.mark.foo(pytest.mark.bar((1, 3))),
+ (2, 3),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ items = testdir.getitems(s)
+ assert len(items) == 3
+ for mark in ['foo', 'bar']:
+ assert mark not in items[0].keywords
+ assert mark in items[1].keywords
+ assert mark not in items[2].keywords
+
+ @pytest.mark.xfail(reason="is this important to support??")
+ @pytest.mark.issue308
+ def test_nested_marks_on_individual_parametrize_instance(self, testdir):
+ s = """
+ import pytest
+ mastermark = pytest.mark.foo(pytest.mark.bar)
+
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ mastermark((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ items = testdir.getitems(s)
+ assert len(items) == 3
+ for mark in ['foo', 'bar']:
+ assert mark not in items[0].keywords
+ assert mark in items[1].keywords
+ assert mark not in items[2].keywords
+
+ @pytest.mark.issue308
+ def test_simple_xfail_on_individual_parametrize_instance(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ pytest.mark.xfail((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ # xfail is skip??
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ @pytest.mark.issue308
+ def test_xfail_with_arg_on_individual_parametrize_instance(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("sys.version > 0")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ @pytest.mark.issue308
+ def test_xfail_with_kwarg_on_individual_parametrize_instance(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ pytest.mark.xfail(reason="some bug")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ @pytest.mark.issue308
+ def test_xfail_with_arg_and_kwarg_on_individual_parametrize_instance(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("sys.version > 0", reason="some bug")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ @pytest.mark.issue308
+ def test_xfail_is_xpass_on_individual_parametrize_instance(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("input", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
+ (3, 4),
+ ])
+ def test_increment(input, expected):
+ assert input + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ # xpass is fail, obviously :)
+ reprec.assertoutcome(passed=2, failed=1)
+
https://bitbucket.org/hpk42/pytest/commits/3c892457f19e/
Changeset: 3c892457f19e
User: hpk42
Date: 2013-05-17 11:32:52
Summary: Merged hpk42/pytest into default
Affected #: 2 files
diff -r 0b9d82e69dcaf5d73e7b9824723fcbe7c6c61038 -r
3c892457f19ed9c676d7ddf26b7b49e151eb579f CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -20,6 +20,8 @@
- honor --tb style for setup/teardown errors as well. Thanks Maho.
+- fix issue307 - use yaml.safe_load in example, thanks Mark Eichin.
+
Changes between 2.3.4 and 2.3.5
-----------------------------------
diff -r 0b9d82e69dcaf5d73e7b9824723fcbe7c6c61038 -r
3c892457f19ed9c676d7ddf26b7b49e151eb579f doc/en/example/nonpython/conftest.py
--- a/doc/en/example/nonpython/conftest.py
+++ b/doc/en/example/nonpython/conftest.py
@@ -9,7 +9,7 @@
class YamlFile(pytest.File):
def collect(self):
import yaml # we need a yaml parser, e.g. PyYAML
- raw = yaml.load(self.fspath.open())
+ raw = yaml.safe_load(self.fspath.open())
for name, spec in raw.items():
yield YamlItem(name, self, spec)
https://bitbucket.org/hpk42/pytest/commits/c38344905336/
Changeset: c38344905336
User: pfctdayelise
Date: 2013-05-20 04:52:20
Summary: issue #308
address some comments by @hpk42 on 0b9d82e :
- move tests into their own class, rename
- add test showing metafunc.parametrize called in pytest_generate_tests rather
than as decorator
- add test and fix single-argname case
- convert two loops into one in parametrize()
also
- renamed 'input' to 'n', since 'input' is a built-in
Affected #: 2 files
diff -r 0b9d82e69dcaf5d73e7b9824723fcbe7c6c61038 -r
c38344905336154f7dd36cc05f84f1fb26cde9c4 _pytest/python.py
--- a/_pytest/python.py
+++ b/_pytest/python.py
@@ -671,24 +671,27 @@
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
+ # remove any marks applied to individual tests instances
+ # these marks will be applied in Function init
+ newkeywords = {}
+ strippedargvalues = []
+ for i, argval in enumerate(argvalues):
+ if isinstance(argval, MarkDecorator):
+ # convert into a mark without the test content mixed in
+ newmark = MarkDecorator(argval.markname, argval.args[:-1], argval.kwargs)
+ newkeywords[i] = {newmark.markname: newmark}
+ strippedargvalues.append(argval.args[-1])
+ else:
+ newkeywords[i] = {}
+ strippedargvalues.append(argval)
+ argvalues = strippedargvalues
+
if not isinstance(argnames, (tuple, list)):
argnames = (argnames,)
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
- # these marks/keywords will be applied in Function init
- newkeywords = {}
- for i, argval in enumerate(argvalues):
- newkeywords[i] = {}
- if isinstance(argval, MarkDecorator):
- # convert into a mark without the test content mixed in
- newmark = MarkDecorator(argval.markname, argval.args[:-1], argval.kwargs)
- newkeywords[i] = {newmark.markname: newmark}
-
- argvalues = [av.args[-1] if isinstance(av, MarkDecorator) else av
- for av in argvalues]
-
if scope is None:
scope = "subfunction"
scopenum = scopes.index(scope)
diff -r 0b9d82e69dcaf5d73e7b9824723fcbe7c6c61038 -r
c38344905336154f7dd36cc05f84f1fb26cde9c4 testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -577,19 +577,21 @@
"*3 passed*"
])
- @pytest.mark.issue308
- def test_mark_on_individual_parametrize_instance(self, testdir):
+
[email protected]
+class TestMarkersWithParametrization:
+ def test_simple_mark(self, testdir):
s = """
import pytest
@pytest.mark.foo
- @pytest.mark.parametrize(("input", "expected"), [
+ @pytest.mark.parametrize(("n", "expected"), [
(1, 2),
pytest.mark.bar((1, 3)),
(2, 3),
])
- def test_increment(input, expected):
- assert input + 1 == expected
+ def test_increment(n, expected):
+ assert n + 1 == expected
"""
items = testdir.getitems(s)
assert len(items) == 3
@@ -599,18 +601,17 @@
assert 'bar' in items[1].keywords
assert 'bar' not in items[2].keywords
- @pytest.mark.issue308
- def test_select_individual_parametrize_instance_based_on_mark(self, testdir):
+ def test_select_based_on_mark(self, testdir):
s = """
import pytest
- @pytest.mark.parametrize(("input", "expected"), [
+ @pytest.mark.parametrize(("n", "expected"), [
(1, 2),
pytest.mark.foo((2, 3)),
(3, 4),
])
- def test_increment(input, expected):
- assert input + 1 == expected
+ def test_increment(n, expected):
+ assert n + 1 == expected
"""
testdir.makepyfile(s)
rec = testdir.inline_run("-m", 'foo')
@@ -619,19 +620,19 @@
assert len(skipped) == 0
assert len(fail) == 0
- @pytest.mark.xfail("is this important to support??")
- @pytest.mark.issue308
- def test_nested_marks_on_individual_parametrize_instance(self, testdir):
+ @pytest.mark.xfail(reason="is this important to support??")
+ def test_nested_marks(self, testdir):
s = """
import pytest
+ mastermark = pytest.mark.foo(pytest.mark.bar)
- @pytest.mark.parametrize(("input", "expected"), [
+ @pytest.mark.parametrize(("n", "expected"), [
(1, 2),
- pytest.mark.foo(pytest.mark.bar((1, 3))),
+ mastermark((1, 3)),
(2, 3),
])
- def test_increment(input, expected):
- assert input + 1 == expected
+ def test_increment(n, expected):
+ assert n + 1 == expected
"""
items = testdir.getitems(s)
assert len(items) == 3
@@ -640,112 +641,123 @@
assert mark in items[1].keywords
assert mark not in items[2].keywords
- @pytest.mark.xfail(reason="is this important to support??")
- @pytest.mark.issue308
- def test_nested_marks_on_individual_parametrize_instance(self, testdir):
- s = """
- import pytest
- mastermark = pytest.mark.foo(pytest.mark.bar)
-
- @pytest.mark.parametrize(("input", "expected"), [
- (1, 2),
- mastermark((1, 3)),
- (2, 3),
- ])
- def test_increment(input, expected):
- assert input + 1 == expected
- """
- items = testdir.getitems(s)
- assert len(items) == 3
- for mark in ['foo', 'bar']:
- assert mark not in items[0].keywords
- assert mark in items[1].keywords
- assert mark not in items[2].keywords
-
- @pytest.mark.issue308
- def test_simple_xfail_on_individual_parametrize_instance(self, testdir):
+ def test_simple_xfail(self, testdir):
s = """
import pytest
- @pytest.mark.parametrize(("input", "expected"), [
+ @pytest.mark.parametrize(("n", "expected"), [
(1, 2),
pytest.mark.xfail((1, 3)),
(2, 3),
])
- def test_increment(input, expected):
- assert input + 1 == expected
+ def test_increment(n, expected):
+ assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
# xfail is skip??
reprec.assertoutcome(passed=2, skipped=1)
- @pytest.mark.issue308
- def test_xfail_with_arg_on_individual_parametrize_instance(self, testdir):
+ def test_simple_xfail_single_argname(self, testdir):
s = """
import pytest
- @pytest.mark.parametrize(("input", "expected"), [
- (1, 2),
- pytest.mark.xfail("sys.version > 0")((1, 3)),
- (2, 3),
+ @pytest.mark.parametrize("n", [
+ 2,
+ pytest.mark.xfail(3),
+ 4,
])
- def test_increment(input, expected):
- assert input + 1 == expected
+ def test_isEven(n):
+ assert n % 2 == 0
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2, skipped=1)
- @pytest.mark.issue308
- def test_xfail_with_kwarg_on_individual_parametrize_instance(self, testdir):
+ def test_xfail_with_arg(self, testdir):
s = """
import pytest
- @pytest.mark.parametrize(("input", "expected"), [
+ @pytest.mark.parametrize(("n", "expected"), [
(1, 2),
- pytest.mark.xfail(reason="some bug")((1, 3)),
+ pytest.mark.xfail("sys.version > 0")((1, 3)),
(2, 3),
])
- def test_increment(input, expected):
- assert input + 1 == expected
+ def test_increment(n, expected):
+ assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2, skipped=1)
- @pytest.mark.issue308
- def test_xfail_with_arg_and_kwarg_on_individual_parametrize_instance(self, testdir):
+ def test_xfail_with_kwarg(self, testdir):
s = """
import pytest
- @pytest.mark.parametrize(("input", "expected"), [
+ @pytest.mark.parametrize(("n", "expected"), [
(1, 2),
- pytest.mark.xfail("sys.version > 0", reason="some bug")((1, 3)),
+ pytest.mark.xfail(reason="some bug")((1, 3)),
(2, 3),
])
- def test_increment(input, expected):
- assert input + 1 == expected
+ def test_increment(n, expected):
+ assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2, skipped=1)
- @pytest.mark.issue308
- def test_xfail_is_xpass_on_individual_parametrize_instance(self, testdir):
+ def test_xfail_with_arg_and_kwarg(self, testdir):
s = """
import pytest
- @pytest.mark.parametrize(("input", "expected"), [
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("sys.version > 0", reason="some bug")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_passing_is_xpass(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
(1, 2),
pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
(3, 4),
])
- def test_increment(input, expected):
- assert input + 1 == expected
+ def test_increment(n, expected):
+ assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
# xpass is fail, obviously :)
reprec.assertoutcome(passed=2, failed=1)
+ def test_parametrize_called_in_generate_tests(self, testdir):
+ s = """
+ import pytest
+
+
+ def pytest_generate_tests(metafunc):
+ passingTestData = [(1, 2),
+ (2, 3)]
+ failingTestData = [(1, 3),
+ (2, 2)]
+
+ testData = passingTestData + [pytest.mark.xfail(d)
+ for d in failingTestData]
+ metafunc.parametrize(("n", "expected"), testData)
+
+
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=2)
https://bitbucket.org/hpk42/pytest/commits/598409cef71a/
Changeset: 598409cef71a
User: pfctdayelise
Date: 2013-05-20 04:56:30
Summary: ? pull/merge
Affected #: 2 files
diff -r c38344905336154f7dd36cc05f84f1fb26cde9c4 -r
598409cef71aeb988c97bdb83fbf01782ac42b75 CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -20,6 +20,8 @@
- honor --tb style for setup/teardown errors as well. Thanks Maho.
+- fix issue307 - use yaml.safe_load in example, thanks Mark Eichin.
+
Changes between 2.3.4 and 2.3.5
-----------------------------------
diff -r c38344905336154f7dd36cc05f84f1fb26cde9c4 -r
598409cef71aeb988c97bdb83fbf01782ac42b75 doc/en/example/nonpython/conftest.py
--- a/doc/en/example/nonpython/conftest.py
+++ b/doc/en/example/nonpython/conftest.py
@@ -9,7 +9,7 @@
class YamlFile(pytest.File):
def collect(self):
import yaml # we need a yaml parser, e.g. PyYAML
- raw = yaml.load(self.fspath.open())
+ raw = yaml.safe_load(self.fspath.open())
for name, spec in raw.items():
yield YamlItem(name, self, spec)
https://bitbucket.org/hpk42/pytest/commits/9210cf8ca829/
Changeset: 9210cf8ca829
User: pfctdayelise
Date: 2013-05-21 03:12:45
Summary: issue #308
+ docs
Affected #: 3 files
diff -r 598409cef71aeb988c97bdb83fbf01782ac42b75 -r
9210cf8ca82927da6f835cbebddbfe2367ddc817 doc/en/example/markers.txt
--- a/doc/en/example/markers.txt
+++ b/doc/en/example/markers.txt
@@ -185,6 +185,29 @@
in which case it will be applied to all functions and
methods defined in the module.
+.. _`marking individual tests when using parametrize`:
+
+Marking individual tests when using parametrize
+-----------------------------------------------
+
+When using parametrize, applying a mark will make it apply
+to each individual test. However it is also possible to
+apply a marker to an individual test instance::
+
+ import pytest
+
+ @pytest.mark.foo
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.bar((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+
+In this example the mark "foo" will apply to each of the three
+tests, whereas the "bar" mark is only applied to the second test.
+Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with parametrize`.
.. _`adding a custom marker from a plugin`:
diff -r 598409cef71aeb988c97bdb83fbf01782ac42b75 -r
9210cf8ca82927da6f835cbebddbfe2367ddc817 doc/en/parametrize.txt
--- a/doc/en/parametrize.txt
+++ b/doc/en/parametrize.txt
@@ -82,6 +82,18 @@
Note that there ways how you can mark a class or a module,
see :ref:`mark`.
+It is also possible to mark individual test instances within parametrize::
+
+ # content of test_expectation.py
+ import pytest
+ @pytest.mark.parametrize(("input", "expected"), [
+ ("3+5", 8),
+ ("2+4", 6),
+ pytest.mark.xfail(("6*9", 42)),
+ ])
+ def test_eval(input, expected):
+ assert eval(input) == expected
+
.. _`pytest_generate_tests`:
diff -r 598409cef71aeb988c97bdb83fbf01782ac42b75 -r
9210cf8ca82927da6f835cbebddbfe2367ddc817 doc/en/skipping.txt
--- a/doc/en/skipping.txt
+++ b/doc/en/skipping.txt
@@ -176,6 +176,28 @@
======================== 6 xfailed in 0.05 seconds
=========================
+.. _`skip/xfail with parametrize`:
+
+Skip/xfail with parametrize
+---------------------------
+
+It is possible to apply markers like skip and xfail to individual
+test instances when using parametrize:
+
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail((1, 0)),
+ pytest.mark.xfail(reason="some bug")((1, 3)),
+ (2, 3),
+ (3, 4),
+ (4, 5),
+ pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+
Imperative xfail from within a test or setup function
------------------------------------------------------
https://bitbucket.org/hpk42/pytest/commits/2917dfdb26d3/
Changeset: 2917dfdb26d3
User: pfctdayelise
Date: 2013-05-21 03:18:37
Summary: Merged hpk42/pytest into default
Affected #: 3 files
diff -r 9210cf8ca82927da6f835cbebddbfe2367ddc817 -r
2917dfdb26d3152ec0ac40590cbcbe64bb764ec0 CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -11,6 +11,11 @@
when importing markers between modules. Specifying conditions
as strings will remain fully supported.
+- improved doctest counting for doctests in python modules --
+ files without any doctest items will not show up anymore
+ and doctest examples are counted as separate test items.
+ thanks Danilo Bellini.
+
- fix issue245 by depending on the released py-1.4.14
which fixes py.io.dupfile to work with files with no
mode. Thanks Jason R. Coombs.
diff -r 9210cf8ca82927da6f835cbebddbfe2367ddc817 -r
2917dfdb26d3152ec0ac40590cbcbe64bb764ec0 _pytest/doctest.py
--- a/_pytest/doctest.py
+++ b/_pytest/doctest.py
@@ -34,6 +34,14 @@
self.reprlocation.toterminal(tw)
class DoctestItem(pytest.Item):
+ def __init__(self, name, parent, runner=None, dtest=None):
+ super(DoctestItem, self).__init__(name, parent)
+ self.runner = runner
+ self.dtest = dtest
+
+ def runtest(self):
+ self.runner.run(self.dtest)
+
def repr_failure(self, excinfo):
doctest = py.std.doctest
if excinfo.errisinstance((doctest.DocTestFailure,
@@ -76,7 +84,7 @@
return super(DoctestItem, self).repr_failure(excinfo)
def reportinfo(self):
- return self.fspath, None, "[doctest]"
+ return self.fspath, None, "[doctest] %s" % self.name
class DoctestTextfile(DoctestItem, pytest.File):
def runtest(self):
@@ -91,8 +99,8 @@
extraglobs=dict(getfixture=fixture_request.getfuncargvalue),
raise_on_error=True, verbose=0)
-class DoctestModule(DoctestItem, pytest.File):
- def runtest(self):
+class DoctestModule(pytest.File):
+ def collect(self):
doctest = py.std.doctest
if self.fspath.basename == "conftest.py":
module = self.config._conftest.importconftest(self.fspath)
@@ -102,7 +110,11 @@
self.funcargs = {}
self._fixtureinfo = FuncFixtureInfo((), [], {})
fixture_request = FixtureRequest(self)
- failed, tot = doctest.testmod(
- module, raise_on_error=True, verbose=0,
- extraglobs=dict(getfixture=fixture_request.getfuncargvalue),
- optionflags=doctest.ELLIPSIS)
+ doctest_globals = dict(getfixture=fixture_request.getfuncargvalue)
+ # uses internal doctest module parsing mechanism
+ finder = doctest.DocTestFinder()
+ runner = doctest.DebugRunner(verbose=0, optionflags=doctest.ELLIPSIS)
+ for test in finder.find(module, module.__name__,
+ extraglobs=doctest_globals):
+ if test.examples: # skip empty doctests
+ yield DoctestItem(test.name, self, runner, test)
diff -r 9210cf8ca82927da6f835cbebddbfe2367ddc817 -r
2917dfdb26d3152ec0ac40590cbcbe64bb764ec0 testing/test_doctest.py
--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -1,4 +1,4 @@
-from _pytest.doctest import DoctestModule, DoctestTextfile
+from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
import py, pytest
class TestDoctests:
@@ -19,13 +19,61 @@
items, reprec = testdir.inline_genitems(w)
assert len(items) == 1
- def test_collect_module(self, testdir):
+ def test_collect_module_empty(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
+ assert len(items) == 0
+
+ def test_collect_module_single_modulelevel_doctest(self, testdir):
+ path = testdir.makepyfile(whatever='""">>> pass"""')
+ for p in (path, testdir.tmpdir):
+ items, reprec = testdir.inline_genitems(p,
+ '--doctest-modules')
assert len(items) == 1
- assert isinstance(items[0], DoctestModule)
+ assert isinstance(items[0], DoctestItem)
+ assert isinstance(items[0].parent, DoctestModule)
+
+ def test_collect_module_two_doctest_one_modulelevel(self, testdir):
+ path = testdir.makepyfile(whatever="""
+ '>>> x = None'
+ def my_func():
+ ">>> magic = 42 "
+ """)
+ for p in (path, testdir.tmpdir):
+ items, reprec = testdir.inline_genitems(p,
+ '--doctest-modules')
+ assert len(items) == 2
+ assert isinstance(items[0], DoctestItem)
+ assert isinstance(items[1], DoctestItem)
+ assert isinstance(items[0].parent, DoctestModule)
+ assert items[0].parent is items[1].parent
+
+ def test_collect_module_two_doctest_no_modulelevel(self, testdir):
+ path = testdir.makepyfile(whatever="""
+ '# Empty'
+ def my_func():
+ ">>> magic = 42 "
+ def unuseful():
+ '''
+ # This is a function
+ # >>> # it doesn't have any doctest
+ '''
+ def another():
+ '''
+ # This is another function
+ >>> import os # this one does have a doctest
+ '''
+ """)
+ for p in (path, testdir.tmpdir):
+ items, reprec = testdir.inline_genitems(p,
+ '--doctest-modules')
+ assert len(items) == 2
+ assert isinstance(items[0], DoctestItem)
+ assert isinstance(items[1], DoctestItem)
+ assert isinstance(items[0].parent, DoctestModule)
+ assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(test_doc="""
@@ -164,3 +212,47 @@
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
+
+ def test_doctestmodule_three_tests(self, testdir):
+ p = testdir.makepyfile("""
+ '''
+ >>> dir = getfixture('tmpdir')
+ >>> type(dir).__name__
+ 'LocalPath'
+ '''
+ def my_func():
+ '''
+ >>> magic = 42
+ >>> magic - 42
+ 0
+ '''
+ def unuseful():
+ pass
+ def another():
+ '''
+ >>> import os
+ >>> os is os
+ True
+ '''
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec.assertoutcome(passed=3)
+
+ def test_doctestmodule_two_tests_one_fail(self, testdir):
+ p = testdir.makepyfile("""
+ class MyClass:
+ def bad_meth(self):
+ '''
+ >>> magic = 42
+ >>> magic
+ 0
+ '''
+ def nice_meth(self):
+ '''
+ >>> magic = 42
+ >>> magic - 42
+ 0
+ '''
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec.assertoutcome(failed=1, passed=1)
https://bitbucket.org/hpk42/pytest/commits/fff62647d862/
Changeset: fff62647d862
User: hpk42
Date: 2013-05-22 13:36:39
Summary: Merged in pfctdayelise/pytest (pull request #36)
issue 308
Affected #: 5 files
diff -r db9c8ef9f6e01273a0dcc23b9ba55f954aab662a -r
fff62647d8622e95f5f0e28f1e4a25aeeb4b0a8f _pytest/python.py
--- a/_pytest/python.py
+++ b/_pytest/python.py
@@ -4,6 +4,7 @@
import sys
import pytest
from _pytest.main import getfslineno
+from _pytest.mark import MarkDecorator, MarkInfo
from _pytest.monkeypatch import monkeypatch
from py._code.code import TerminalRepr
@@ -565,11 +566,13 @@
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
+ self.keywords = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
+ cs.keywords.update(self.keywords)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
@@ -593,7 +596,7 @@
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
- def setmulti(self, valtype, argnames, valset, id, scopenum=0):
+ def setmulti(self, valtype, argnames, valset, id, keywords, scopenum=0):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
getattr(self, valtype)[arg] = val
@@ -605,6 +608,7 @@
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
+ self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
@@ -667,6 +671,21 @@
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
+ # remove any marks applied to individual tests instances
+ # these marks will be applied in Function init
+ newkeywords = {}
+ strippedargvalues = []
+ for i, argval in enumerate(argvalues):
+ if isinstance(argval, MarkDecorator):
+ # convert into a mark without the test content mixed in
+ newmark = MarkDecorator(argval.markname, argval.args[:-1], argval.kwargs)
+ newkeywords[i] = {newmark.markname: newmark}
+ strippedargvalues.append(argval.args[-1])
+ else:
+ newkeywords[i] = {}
+ strippedargvalues.append(argval)
+ argvalues = strippedargvalues
+
if not isinstance(argnames, (tuple, list)):
argnames = (argnames,)
argvalues = [(val,) for val in argvalues]
@@ -691,7 +710,7 @@
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtype, argnames, valset, ids[i],
- scopenum)
+ newkeywords[i], scopenum)
newcalls.append(newcallspec)
self._calls = newcalls
@@ -908,6 +927,9 @@
for name, val in (py.builtin._getfuncdict(self.obj) or {}).items():
self.keywords[name] = val
+ if callspec:
+ for name, val in callspec.keywords.items():
+ self.keywords[name] = val
if keywords:
for name, val in keywords.items():
self.keywords[name] = val
diff -r db9c8ef9f6e01273a0dcc23b9ba55f954aab662a -r
fff62647d8622e95f5f0e28f1e4a25aeeb4b0a8f doc/en/example/markers.txt
--- a/doc/en/example/markers.txt
+++ b/doc/en/example/markers.txt
@@ -185,6 +185,29 @@
in which case it will be applied to all functions and
methods defined in the module.
+.. _`marking individual tests when using parametrize`:
+
+Marking individual tests when using parametrize
+-----------------------------------------------
+
+When using parametrize, applying a mark will make it apply
+to each individual test. However it is also possible to
+apply a marker to an individual test instance::
+
+ import pytest
+
+ @pytest.mark.foo
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.bar((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+
+In this example the mark "foo" will apply to each of the three
+tests, whereas the "bar" mark is only applied to the second test.
+Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with parametrize`.
.. _`adding a custom marker from a plugin`:
diff -r db9c8ef9f6e01273a0dcc23b9ba55f954aab662a -r
fff62647d8622e95f5f0e28f1e4a25aeeb4b0a8f doc/en/parametrize.txt
--- a/doc/en/parametrize.txt
+++ b/doc/en/parametrize.txt
@@ -82,6 +82,18 @@
Note that there ways how you can mark a class or a module,
see :ref:`mark`.
+It is also possible to mark individual test instances within parametrize::
+
+ # content of test_expectation.py
+ import pytest
+ @pytest.mark.parametrize(("input", "expected"), [
+ ("3+5", 8),
+ ("2+4", 6),
+ pytest.mark.xfail(("6*9", 42)),
+ ])
+ def test_eval(input, expected):
+ assert eval(input) == expected
+
.. _`pytest_generate_tests`:
diff -r db9c8ef9f6e01273a0dcc23b9ba55f954aab662a -r
fff62647d8622e95f5f0e28f1e4a25aeeb4b0a8f doc/en/skipping.txt
--- a/doc/en/skipping.txt
+++ b/doc/en/skipping.txt
@@ -176,6 +176,28 @@
======================== 6 xfailed in 0.05 seconds
=========================
+.. _`skip/xfail with parametrize`:
+
+Skip/xfail with parametrize
+---------------------------
+
+It is possible to apply markers like skip and xfail to individual
+test instances when using parametrize::
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail((1, 0)),
+ pytest.mark.xfail(reason="some bug")((1, 3)),
+ (2, 3),
+ (3, 4),
+ (4, 5),
+ pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+
Imperative xfail from within a test or setup function
------------------------------------------------------
diff -r db9c8ef9f6e01273a0dcc23b9ba55f954aab662a -r
fff62647d8622e95f5f0e28f1e4a25aeeb4b0a8f testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -578,3 +578,186 @@
])
[email protected]
+class TestMarkersWithParametrization:
+ def test_simple_mark(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.foo
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.bar((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ items = testdir.getitems(s)
+ assert len(items) == 3
+ for item in items:
+ assert 'foo' in item.keywords
+ assert 'bar' not in items[0].keywords
+ assert 'bar' in items[1].keywords
+ assert 'bar' not in items[2].keywords
+
+ def test_select_based_on_mark(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.foo((2, 3)),
+ (3, 4),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ rec = testdir.inline_run("-m", 'foo')
+ passed, skipped, fail = rec.listoutcomes()
+ assert len(passed) == 1
+ assert len(skipped) == 0
+ assert len(fail) == 0
+
+ @pytest.mark.xfail(reason="is this important to support??")
+ def test_nested_marks(self, testdir):
+ s = """
+ import pytest
+ mastermark = pytest.mark.foo(pytest.mark.bar)
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ mastermark((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ items = testdir.getitems(s)
+ assert len(items) == 3
+ for mark in ['foo', 'bar']:
+ assert mark not in items[0].keywords
+ assert mark in items[1].keywords
+ assert mark not in items[2].keywords
+
+ def test_simple_xfail(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ # xfail is skip??
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_simple_xfail_single_argname(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize("n", [
+ 2,
+ pytest.mark.xfail(3),
+ 4,
+ ])
+ def test_isEven(n):
+ assert n % 2 == 0
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_with_arg(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("sys.version > 0")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_with_kwarg(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail(reason="some bug")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_with_arg_and_kwarg(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("sys.version > 0", reason="some bug")((1,
3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_passing_is_xpass(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("sys.version > 0", reason="some bug")((2,
3)),
+ (3, 4),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ # xpass is fail, obviously :)
+ reprec.assertoutcome(passed=2, failed=1)
+
+ def test_parametrize_called_in_generate_tests(self, testdir):
+ s = """
+ import pytest
+
+
+ def pytest_generate_tests(metafunc):
+ passingTestData = [(1, 2),
+ (2, 3)]
+ failingTestData = [(1, 3),
+ (2, 2)]
+
+ testData = passingTestData + [pytest.mark.xfail(d)
+ for d in failingTestData]
+ metafunc.parametrize(("n", "expected"), testData)
+
+
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=2)
Repository URL: https://bitbucket.org/hpk42/pytest/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
_______________________________________________
pytest-commit mailing list
[email protected]
http://mail.python.org/mailman/listinfo/pytest-commit