fix pytest

Robert Kaussow 2019-04-11 15:56:20 +02:00
parent 80ea191fa1
commit 10aaa8e7e3
854 changed files with 160940 additions and 42 deletions


@ -73,14 +73,16 @@ class Candidate(object):
version = utils.standards_latest(self.standards)
if self.expected_version:
if isinstance(self, RoleFile):
LOG.warn("%s %s is in a role that contains a meta/main.yml without a declared "
"standards version. "
"Using latest standards version %s" %
(type(self).__name__, self.path, version))
LOG.warning(
"%s %s is in a role that contains a meta/main.yml without a declared "
"standards version. "
"Using latest standards version %s" %
(type(self).__name__, self.path, version))
else:
LOG.warn("%s %s does not present standards version. "
"Using latest standards version %s" %
(type(self).__name__, self.path, version))
LOG.warning(
"%s %s does not present standards version. "
"Using latest standards version %s" %
(type(self).__name__, self.path, version))
else:
LOG.info("%s %s declares standards version %s" %
(type(self).__name__, self.path, version))
@ -125,13 +127,13 @@ class Candidate(object):
err_labels.update(err.to_dict())
if not standard.version:
LOG.warn("{id}Best practice '{name}' not met:\n{path}:{error}".format(
LOG.warning("{id}Best practice '{name}' not met:\n{path}:{error}".format(
id=self._format_id(standard.id),
name=standard.name,
path=self.path,
error=err), extra=flag_extra(err_labels))
elif LooseVersion(standard.version) > LooseVersion(self.version):
LOG.warn("{id}Future standard '{name}' not met:\n{path}:{error}".format(
LOG.warning("{id}Future standard '{name}' not met:\n{path}:{error}".format(
id=self._format_id(standard.id),
name=standard.name,
path=self.path,
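The hunk above replaces the deprecated Logger.warn alias with Logger.warning throughout Candidate. A minimal sketch of the distinction, using a throwaway logger name rather than anything from this repository:

import logging

log = logging.getLogger("example")
log.warning("preferred spelling")  # documented logging API
log.warn("old spelling")           # deprecated alias for warning(), kept only for backwards compatibility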


@ -128,10 +128,10 @@ def _get_warn_handler(json=False):
def _get_info_handler(json=False):
handler = logging.StreamHandler(sys.stderr)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.addFilter(LogFilter(logging.INFO))
handler.setFormatter(MultilineFormatter(info("%(message)s")))
handler.setFormatter(MultilineFormatter(info(CONSOLE_FORMAT)))
if json:
handler.setFormatter(MultilineJsonFormatter(JSON_FORMAT))
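For context, the handler factories above attach one StreamHandler per level; this change sends INFO records to stdout instead of stderr and reuses the shared CONSOLE_FORMAT. A rough stdlib-only sketch of that pattern, where the filter class is a simplified stand-in for the project's LogFilter and the format string stands in for CONSOLE_FORMAT:

import logging
import sys

class OnlyInfo(logging.Filter):
    # pass through INFO records only, so other levels can go to their own handlers
    def filter(self, record):
        return record.levelno == logging.INFO

handler = logging.StreamHandler(sys.stdout)  # INFO now targets stdout, as in the hunk above
handler.setLevel(logging.INFO)
handler.addFilter(OnlyInfo())
handler.setFormatter(logging.Formatter("%(message)s"))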


@ -1,41 +1,42 @@
"""Test logging module."""
from __future__ import print_function
import colorama
import logging
from ansiblelater.utils import info, warn, abort, error, should_do_markup
from ansiblelater import logger
def test_abort(capsys, mocker):
abort('foo')
stdout, _ = capsys.readouterr()
def test_critical(capsys, mocker):
log = logger.get_logger("test_critical")
log.critical("foo")
_, stderr = capsys.readouterr()
print('{}{}{}'.format(colorama.Fore.RED, 'FATAL: foo'.rstrip(),
print("{}{}{}".format(colorama.Fore.RED, "CRITICAL: foo".rstrip(),
colorama.Style.RESET_ALL))
x, _ = capsys.readouterr()
assert x == stdout
assert x == stderr
def test_error(capsys, mocker):
error('foo')
stdout, _ = capsys.readouterr()
log = logger.get_logger("test_error")
log.error("foo")
_, stderr = capsys.readouterr()
print('{}{}{}'.format(colorama.Fore.RED, 'ERROR: foo'.rstrip(),
print("{}{}{}".format(colorama.Fore.RED, "ERROR: foo".rstrip(),
colorama.Style.RESET_ALL))
x, _ = capsys.readouterr()
assert x == stdout
assert x == stderr
def test_warn(capsys, mocker):
settings = mocker.MagicMock()
settings.log_level = getattr(logging, 'WARNING')
warn('foo', settings)
log = logger.get_logger("test_warn")
log.warning("foo")
stdout, _ = capsys.readouterr()
print('{}{}{}'.format(colorama.Fore.YELLOW, 'WARN: foo'.rstrip(),
print("{}{}{}".format(colorama.Fore.YELLOW, "WARNING: foo".rstrip(),
colorama.Style.RESET_ALL))
x, _ = capsys.readouterr()
@ -43,13 +44,11 @@ def test_warn(capsys, mocker):
def test_info(capsys, mocker):
settings = mocker.MagicMock()
settings.log_level = getattr(logging, 'INFO')
info('foo', settings)
log = logger.get_logger("test_info")
log.info("foo")
stdout, _ = capsys.readouterr()
print('{}{}{}'.format(colorama.Fore.BLUE, 'INFO: foo'.rstrip(),
print("{}{}{}".format(colorama.Fore.BLUE, "INFO: foo".rstrip(),
colorama.Style.RESET_ALL))
x, _ = capsys.readouterr()
@ -57,26 +56,26 @@ def test_info(capsys, mocker):
def test_markup_detection_pycolors0(monkeypatch):
monkeypatch.setenv('PY_COLORS', '0')
assert not should_do_markup()
monkeypatch.setenv("PY_COLORS", "0")
assert not logger._should_do_markup()
def test_markup_detection_pycolors1(monkeypatch):
monkeypatch.setenv('PY_COLORS', '1')
assert should_do_markup()
monkeypatch.setenv("PY_COLORS", "1")
assert logger._should_do_markup()
def test_markup_detection_tty_yes(mocker):
mocker.patch('sys.stdout.isatty', return_value=True)
mocker.patch('os.environ', {'TERM': 'xterm'})
assert should_do_markup()
mocker.patch("sys.stdout.isatty", return_value=True)
mocker.patch("os.environ", {"TERM": "xterm"})
assert logger._should_do_markup()
mocker.resetall()
mocker.stopall()
def test_markup_detection_tty_no(mocker):
mocker.patch('os.environ', {})
mocker.patch('sys.stdout.isatty', return_value=False)
assert not should_do_markup()
mocker.patch("os.environ", {})
mocker.patch("sys.stdout.isatty", return_value=False)
assert not logger._should_do_markup()
mocker.resetall()
mocker.stopall()
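The rewritten tests all follow one pattern: obtain a logger from ansiblelater.logger, emit a record, and compare the captured stream with the colorama-wrapped expectation. A stripped-down version of that pattern using plain stdlib logging (logger name and message are illustrative, and the real module routes levels to stdout/stderr differently):

import logging

def test_warning_is_captured(capsys):
    log = logging.getLogger("demo")
    log.addHandler(logging.StreamHandler())  # a plain StreamHandler writes to stderr
    log.warning("foo")
    _, stderr = capsys.readouterr()
    assert "foo" in stderr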

env_27/bin/bandit Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from bandit.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/bandit-baseline Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from bandit.cli.baseline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from bandit.cli.config_generator import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/coverage Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from coverage.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/coverage-2.7 Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from coverage.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/coverage2 Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from coverage.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/isort Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/pbr Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pbr.cmd.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/py.test Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pytest import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/pydocstyle Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pydocstyle.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

env_27/bin/pytest Executable file

@ -0,0 +1,10 @@
#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pytest import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -0,0 +1,31 @@
Metadata-Version: 2.1
Name: GitPython
Version: 2.1.11
Summary: Python Git Library
Home-page: https://github.com/gitpython-developers/GitPython
Author: Sebastian Thiel, Michael Trier
Author-email: byronimo@gmail.com, mtrier@gmail.com
License: BSD License
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Operating System :: POSIX
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Requires: gitdb2 (>=2.0.0)
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
Requires-Dist: gitdb2 (>=2.0.0)
GitPython is a python library used to interact with Git repositories


@ -0,0 +1,202 @@
GitPython-2.1.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
GitPython-2.1.11.dist-info/METADATA,sha256=1hl7XlFNafLSMaQA9lMPWs3HvZNCeLBPxWCOk9DFA8w,1174
GitPython-2.1.11.dist-info/RECORD,,
GitPython-2.1.11.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110
GitPython-2.1.11.dist-info/top_level.txt,sha256=0hzDuIp8obv624V3GmbqsagBWkk8ohtGU-Bc1PmTT0o,4
git/__init__.py,sha256=ADA42qaulal45gMhMwD34EWD1BB2ipRDLAv0lITLhUg,2336
git/__init__.pyc,,
git/cmd.py,sha256=5mmEzjWKKZcTE_NWOt6qAVejTnfB40vfp_r1ojTcrGM,42731
git/cmd.pyc,,
git/compat.py,sha256=iikgGbxQeH2zQdbH6-17aU02YjQLNoScj_IzXHmklIU,9372
git/compat.pyc,,
git/config.py,sha256=F20DpDX1NENCG4hCkwDkOA4wkgfZwG32TrLoNxMbyho,23132
git/config.pyc,,
git/db.py,sha256=pgC0B4wCqRN7xrvLuMQhLwM252ilkZyFL6qq0cPA2j4,1980
git/db.pyc,,
git/diff.py,sha256=Wtj8JQadZj473oXmx5RoMueUp1_P7I1vpRrxt6Nnhwg,18742
git/diff.pyc,,
git/exc.py,sha256=U01S3wgpn9gGfGG8fIvL6p127Uu2LI_Ho7Jcc24xOYI,4878
git/exc.pyc,,
git/index/__init__.py,sha256=Wj5zgJggZkEXueEDXdijxXahzxhextC08k70n0lHRN0,129
git/index/__init__.pyc,,
git/index/base.py,sha256=ACRqf_rarJULDQwl5G8QWCbMv4JmT0E9Dg_uVfaLQdo,52048
git/index/base.pyc,,
git/index/fun.py,sha256=ZptIr1Wr7Zh1RlI1LliRxxGaxxwLsLHd94GSpk9ClGw,14312
git/index/fun.pyc,,
git/index/typ.py,sha256=GLBZbDS3yScHJs0U18CX-heLTDjjGu6fN7T2L_NQr4A,4976
git/index/typ.pyc,,
git/index/util.py,sha256=l6oh9_1KU1v5GQdpxqCOqs6WLt5xN1uWvkVHQqcCToA,2902
git/index/util.pyc,,
git/objects/__init__.py,sha256=6C02LlMygiFwTYtncz3GxEQfzHZr2WvUId0fnJ8HfLo,683
git/objects/__init__.pyc,,
git/objects/base.py,sha256=UZiyzyzx4_OJ3bWnwqb3mqh0LXT7oo0biYaTm-sLuAw,6689
git/objects/base.pyc,,
git/objects/blob.py,sha256=evI3ptPmlln6gLpoQRvbIKjK4v59nT8ipd1vk1dGYtc,927
git/objects/blob.pyc,,
git/objects/commit.py,sha256=KfpGWRrN9Y2QikcfeFThN2gc8NZ68K5pBAPhz8OrN7Q,20791
git/objects/commit.pyc,,
git/objects/fun.py,sha256=kEUFE2Q5kXeoxbjALRNl_jzRP_m9KKqQyxIcW-h6XGM,7415
git/objects/fun.pyc,,
git/objects/submodule/__init__.py,sha256=OsMeiex7cG6ev2f35IaJ5csH-eXchSoNKCt4HXUG5Ws,93
git/objects/submodule/__init__.pyc,,
git/objects/submodule/base.py,sha256=0DLx40Mz-l0QPz3RDXz9gols8LkGkCQlwvCKerNGd1s,53921
git/objects/submodule/base.pyc,,
git/objects/submodule/root.py,sha256=N2i0PjRcw5bNLLIDAkviQjXhf9RvGSfVnbav4FNzkXo,17656
git/objects/submodule/root.pyc,,
git/objects/submodule/util.py,sha256=VdgIG-cBo47b_7JcolAvjWaIMU0X5oImLjJ4wluc_iw,2745
git/objects/submodule/util.pyc,,
git/objects/tag.py,sha256=h2nD3iO4GB6f9yqr2nqJ4mhMj1DDaqEJl2hQm2b8vJ8,3162
git/objects/tag.pyc,,
git/objects/tree.py,sha256=Ta1qAkuwzn7lk54_d7knqF2WL6DOc2MQG1k8mKLel1s,11069
git/objects/tree.pyc,,
git/objects/util.py,sha256=GRIAWLR1gaK2QE3s5TtmNmN_LqOjEBlUkZWiouJMVe8,12355
git/objects/util.pyc,,
git/refs/__init__.py,sha256=3CRfAyE-Z78rJ3kSdKR1PNiXHEjHLw2VkU2JyDviNDU,242
git/refs/__init__.pyc,,
git/refs/head.py,sha256=MbO65f5SU0X3DvzmHpU0dY4h4SyvwqNaW2d8bUu3YSc,8724
git/refs/head.pyc,,
git/refs/log.py,sha256=_4JC--cqZc49Gm0ZLkE66ePsaxwP4TagiCQi53_xI88,10881
git/refs/log.pyc,,
git/refs/reference.py,sha256=OcQMwHJuelR1yKe1EF0IBfxeQZYv2kf0xunNSVwZV-M,4408
git/refs/reference.pyc,,
git/refs/remote.py,sha256=6JOyIurnomM3tNXdKRXfMK_V75gJNgr9_2sdevKU_tI,1670
git/refs/remote.pyc,,
git/refs/symbolic.py,sha256=2YA20sTqEP5IOqZi7TR9AxIBHrwAPRnR62wm9q8k5ko,26858
git/refs/symbolic.pyc,,
git/refs/tag.py,sha256=qoHwJ9suHx8u8NNg-6GvNftK36RnCNkpElRjh2r9wcI,2964
git/refs/tag.pyc,,
git/remote.py,sha256=mRDU9YLiJGpz0nctfE0HNWLbLnqCo5_umcJeMSPFT8M,35554
git/remote.pyc,,
git/repo/__init__.py,sha256=ssUH4IVCoua5shI5h1l46P0X1kp82ydxVcH3PIVCnzg,108
git/repo/__init__.pyc,,
git/repo/base.py,sha256=cAqM1khmzcpKkV7bVsAkZTV65weMPs7i-8b0FZhVl6w,41729
git/repo/base.pyc,,
git/repo/fun.py,sha256=chh3Jp9ZKtKU9dWyEvHuY5vnGQjNyCKLc5fU8K6POyY,11382
git/repo/fun.pyc,,
git/test/__init__.py,sha256=q-WCITGqFKTHnRFjUvJz5hUJBi8SP4InaAZRXZ8qj8k,220
git/test/__init__.pyc,,
git/test/fixtures/blame,sha256=4EDRSXdgbRtxHU_2lASFXC7eNShL2cVq3IU43tLWlD4,3663
git/test/fixtures/blame_binary,sha256=YLzoHqTAuv2Uv8IILh4ndQxJ_A1c09176E-3d5FMQsM,14807
git/test/fixtures/blame_complex_revision,sha256=tPguLsqmLxjuZWg5nRcdZCZeaBi-LOeVQEHfTX6X_B0,7645
git/test/fixtures/blame_incremental,sha256=3VXtrk8LVqfS5f2vsP5DTzFU3opeevUbENQUq22vTdw,982
git/test/fixtures/blame_incremental_2.11.1_plus,sha256=JDA_xCevOrOMDeKW-U8svYeA0E8Pa3sI7G8GALpxOHw,1154
git/test/fixtures/cat_file.py,sha256=7RDIymGyByw8I1OibenXM-DVsZ0_7gpazeYYG4C5GDM,136
git/test/fixtures/cat_file.pyc,,
git/test/fixtures/cat_file_blob,sha256=ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw,11
git/test/fixtures/cat_file_blob_nl,sha256=GJShnIW6FTrL90OsTkP8AEyJFgSyb4xp4eg-oq_HxI8,12
git/test/fixtures/cat_file_blob_size,sha256=JdTyqG3rXiV0uzIQtnuyT8xK-xn5OntloFfaqHSp0Y4,3
git/test/fixtures/commit_invalid_data,sha256=QlV-Pw5mw1Vhp6qivAQY5kcBP_BMJ_OIdLCinmes5Sw,242
git/test/fixtures/commit_with_gpgsig,sha256=3in_tJPkQv2K1wFx-PGqaCZQe40liMnl9cMYOJ8krTA,1387
git/test/fixtures/diff_2,sha256=sxE-xkV5lQrUEbpllp2X_AcFfPUmUr2wvSsc9qkZQLc,1994
git/test/fixtures/diff_2f,sha256=na11T8R1dhJUOKeO-fEeHymOxhXNrjvzzmA_r7x6oJM,732
git/test/fixtures/diff_abbrev-40_full-index_M_raw_no-color,sha256=AW-YEfutyH_RVyaP2nCTPhtjvkfqWi7NVL4s9Ab3Qww,109
git/test/fixtures/diff_change_in_type,sha256=Wo1iCaT1YBfGn5ZSJ40H7iVeqXKm-v-qJnsBUBKrpsI,319
git/test/fixtures/diff_change_in_type_raw,sha256=67KYtwIlQdTSwesABnIYTZxFgiwPhVyBXaDFoPXRFt4,108
git/test/fixtures/diff_f,sha256=sNsG26bYvqU4pK_RwahaO-Lya8O9Xonwlyth8do_ptY,504
git/test/fixtures/diff_file_with_spaces,sha256=BOvQkq4AjQ_cR1e0iLYDQdNq2BLa-P5xhI4Xal7hYcE,216
git/test/fixtures/diff_i,sha256=792rEQvP9Q-MNxZ3_FsvhG5emE_q1nT9jpmQ_A1hFWE,5705
git/test/fixtures/diff_index_patch,sha256=qd9jD_eAQY5I9OLsbqdz3-lm_ncL2ALJhVLyj3enAfk,4598
git/test/fixtures/diff_index_raw,sha256=odNXPZQ4rlBnqYfJvvTKGS8QvfJE33WN_X-lIRMT8NI,101
git/test/fixtures/diff_initial,sha256=1RJTg7QSTdMGlqLDvjFUhKtV0bAV2NFW8rHBgzlVfyg,76
git/test/fixtures/diff_mode_only,sha256=pqDOHBLm09TWZ0orff-S7pCkQktD2sooW5mURG0vqLQ,46005
git/test/fixtures/diff_new_mode,sha256=b70EDNoC_gfq_P_fVFCIqT3WHU_P0l-1jhuR2cSEJFg,546
git/test/fixtures/diff_numstat,sha256=_Ls171vvsERXlRiJ1i1tA5vHyoYCzt3hKorFmic7UyE,22
git/test/fixtures/diff_p,sha256=3YlhR3UNFIPDv90Zn1vCXC46kQCVDuepUZIzwzD8xmk,19273
git/test/fixtures/diff_patch_binary,sha256=CLWigD0x0z3n_fpdh8LlkEyRUy7oDiWM-CJpGrqWPiM,155
git/test/fixtures/diff_patch_unsafe_paths,sha256=jsc2GM8j56puEDnMEhlBHG4jIhziN0uY8cuzGTTtHmw,3145
git/test/fixtures/diff_raw_binary,sha256=-PUPqf5wop8KkmubHnPK6RAVinlJuQf9Lqo4VBff23I,103
git/test/fixtures/diff_rename,sha256=-f4kqw0Zt1lRZZOmt5I0w9Jenbr3PngyTH2QeUQfv8g,415
git/test/fixtures/diff_rename_raw,sha256=VVBUjGEoXWWMYQFq-dyE708DijCnG974Qn79plVT39Q,112
git/test/fixtures/diff_tree_numstat_root,sha256=NbBofQm3wGm-1hyz8XKIoxMtC_bzz4x8TlxxuF8LLDU,63
git/test/fixtures/for_each_ref_with_path_component,sha256=hHVSiVHNEW5PKSPP4zFxxpYs4EYlPSJ9y-yykzkpWjk,84
git/test/fixtures/git_config,sha256=_Igi3In2TsksvwUdn7YcusMv-069ftMdlV1G7ZCs8nU,1517
git/test/fixtures/git_config-inc.cfg,sha256=jYjjNgfYBBkEAXYj5wLy7en-ISXbvVyOOfOmKsURYdc,92
git/test/fixtures/git_config_global,sha256=_tFDHYTW1Hxue2WXqjafVm_b9eM-OjTV6WTD2yZ3aqM,366
git/test/fixtures/git_config_with_comments,sha256=Q9IHrB4KE3l15iXoYD9-4TIMyd_rFczQ1CPAu-CI8bU,3997
git/test/fixtures/git_config_with_empty_value,sha256=686iisjxnex4YeT4qWdjsQh22X8UDw5yzKSerefFSTM,35
git/test/fixtures/git_file,sha256=44Qr9_8TluxWGPiPjDT4dEyF8x3fvnA9W7moDNiFAKo,16
git/test/fixtures/index,sha256=OBeM4XodizcBFgK_7S92fdjNTaitNxGzSBkcHXXWQvs,163616
git/test/fixtures/index_merge,sha256=IdtRRV85gi9dGFC4LNuGrZU2yttGAAANeS0_qvNO85w,9192
git/test/fixtures/issue-301_stderr,sha256=z6QL_UgCKQ1MMviNQNdhM22hOgp00zfJyc5LCm7Jl64,302879
git/test/fixtures/ls_tree_a,sha256=uBvIY8-7HnaBvSsVYigYJdsbeslxrtfeXh-tWXKtOnc,429
git/test/fixtures/ls_tree_b,sha256=pW3aIRcXMA1ZSE36049fJWeiVQl95qk_31U8Eh3Tc1c,119
git/test/fixtures/ls_tree_commit,sha256=cOgzX5Qcqvy4LU4dIBkcc63ccrOPBLab5DsCQPVpz_E,173
git/test/fixtures/ls_tree_empty,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
git/test/fixtures/reflog_HEAD,sha256=8J5zwsJRmdb8xdWyQoRUzJYJnDSaeo0rLa5uthBovyQ,114210
git/test/fixtures/reflog_invalid_date,sha256=VlAYk2kGs3CySphJV0OmWwpWZK_uB9FxICTICZuKwSM,409
git/test/fixtures/reflog_invalid_email,sha256=1OoNdoGKNcNKWVQAv5ZKSgVEt0zSkigvHOTs3MMhVW0,411
git/test/fixtures/reflog_invalid_newsha,sha256=i-xph-C12dZT-dEKWS4VTDtX4AzQVUcCF3KXfMp9Gu0,404
git/test/fixtures/reflog_invalid_oldsha,sha256=guzXH-wQOfz3yQJFMChzhuXcgQ6G6rGTSwlIdBVX8Wg,398
git/test/fixtures/reflog_invalid_sep,sha256=0D9WHWpIGE2tQXD8utDcq-bbxdgVnWWCAMK_vwI3-zA,415
git/test/fixtures/reflog_master,sha256=K1-VX1oQ3gM_23qTjVV-8yQOXeXuRtePgUXAE6D1TVo,31286
git/test/fixtures/rev_list,sha256=pJPFZuJGwLzQ6m4P2d7VNaRLdMefGxxtztgU9fQfCCU,123
git/test/fixtures/rev_list_bisect_all,sha256=r0gnyZwq-IVHxNss4qE6zMv29PEcLyE0t_fV4MKISHc,2172
git/test/fixtures/rev_list_commit_diffs,sha256=n8qhU8FHEqr7Z8z8PvRGEODveuPbFIuaXB8UYGTqTPc,306
git/test/fixtures/rev_list_commit_idabbrev,sha256=W_cHcxor5sFGeS8-nmIpWNim-wtFY7636Hwh04Sfve8,271
git/test/fixtures/rev_list_commit_stats,sha256=1bZgYDN3iqjdIiZtYUuPNZXcyJYlDiusy3dw5utnr3M,244
git/test/fixtures/rev_list_count,sha256=wyBmlaA46bFntXaF6nx28phdDPwTZVW5kJr71pRrmb0,26855
git/test/fixtures/rev_list_delta_a,sha256=ikrcoYkO311vbCS_xoeyKE6myYKlKP5by88KU4oG6qI,328
git/test/fixtures/rev_list_delta_b,sha256=iiTGJRF2nzZrsHLXB1oOcZaoLvnSGAB3B9PLt5acmno,451
git/test/fixtures/rev_list_single,sha256=YqAJowQ_ujS8kUnNfBlm8ibKY7ki5vu2nXc_vt-4nq0,293
git/test/fixtures/rev_parse,sha256=y9iM5H6QPxDLEoGO9D4qSMBuDw4nz196c5VMflC1rak,8
git/test/fixtures/show_empty_commit,sha256=xeKoNCOFUPZcSztV3olKSs6u14fVdHwjnkGYLsEcZn8,252
git/test/fixtures/uncommon_branch_prefix_FETCH_HEAD,sha256=NO36DB4HWl4sOisR6EdFroTDakA-4XOx2kk4lFQIsiQ,603
git/test/fixtures/uncommon_branch_prefix_stderr,sha256=4-rJlXvPu-1ByjZzsUUJXFruPRxan7C5ssNtM7qZbeo,324
git/test/lib/__init__.py,sha256=k2xMRT9FC0m3yX_iMKaDcyuuZe0tGSr95ork3VOaeWk,414
git/test/lib/__init__.pyc,,
git/test/lib/asserts.py,sha256=_9sOUHopeO-3PZOkxMXfTWaTxxPaWwmpnAVaDxpcaWk,2273
git/test/lib/asserts.pyc,,
git/test/lib/helper.py,sha256=TI69pdx0xIMhfzOzBDB3BwqPvPsykp9bUXiyw2B0Xd8,13592
git/test/lib/helper.pyc,,
git/test/performance/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
git/test/performance/__init__.pyc,,
git/test/performance/lib.py,sha256=qSicSiyRI30rP3EFeVoevC_sBDgXDFtZKIFr_Ikz84g,2427
git/test/performance/lib.pyc,,
git/test/performance/test_commit.py,sha256=ws8ORcvg3h0eXkI2G7a4OEl5QFG-9s2Agf0ut_8sUqU,3732
git/test/performance/test_commit.pyc,,
git/test/performance/test_odb.py,sha256=knbDhq2sRagwyGHKQ7uNZLWN8bzYt_VF6bNucoON6dI,2651
git/test/performance/test_odb.pyc,,
git/test/performance/test_streams.py,sha256=w5mqPX6Yjo-j3mAz9GGa0O_pzYCBgGhhdoKCAz-iMD0,5850
git/test/performance/test_streams.pyc,,
git/test/test_actor.py,sha256=1bYmrTwWAYT_Qj9l9chbvuI8nNtHY6yGlDRJDDEq9A0,1242
git/test/test_actor.pyc,,
git/test/test_base.py,sha256=k6I5nG7ZeBCYpXwi3HX_mvURFelgvQFys5pWVQR6kjw,5649
git/test/test_base.pyc,,
git/test/test_blob.py,sha256=Bs4FWke9Sjzx06EJuG9hh1T5qBgJEEz4aBCcr3cW9L0,878
git/test/test_blob.pyc,,
git/test/test_commit.py,sha256=IhFvgXZ05tT0MTdheIHjNCHjVXpWytORBdcgaunJ2HY,15460
git/test/test_commit.pyc,,
git/test/test_config.py,sha256=HaAqpKZyURYV4ggy65FEIO9KAvS7wEaR5rbJvE0_NGQ,11066
git/test/test_config.pyc,,
git/test/test_db.py,sha256=e9UNddyQfoa-kzZo-XyrwVuYiq887NUkYrK8wZkTu9M,939
git/test/test_db.pyc,,
git/test/test_diff.py,sha256=D6JROrJiEmpvju_WZxkiO64ryU4yfA4viUdMN_3eBcg,12598
git/test/test_diff.pyc,,
git/test/test_docs.py,sha256=Au9rFCRPdF6HDq4TosrS6N_tmqo7xUUhdwhqHFArfLI,25357
git/test/test_docs.pyc,,
git/test/test_exc.py,sha256=0DBYNiYVfPVlFKYRzqsoZUJnf0lQiUDmdrRIIHWeSlE,5123
git/test/test_exc.pyc,,
git/test/test_fun.py,sha256=a91XgGk-YPwlgJEc-gy2tI_ilSq29XSQEywwc-kDnG0,10456
git/test/test_fun.pyc,,
git/test/test_git.py,sha256=Z1TnRsgcvxT9vO09t8_5Z6Vcp5kgqw8iyrggb8aJklA,10681
git/test/test_git.pyc,,
git/test/test_index.py,sha256=WiHPJNgD5Y00YKJoklTZkPJH0LE-0XEOTrA_2O9rsRs,37014
git/test/test_index.pyc,,
git/test/test_reflog.py,sha256=vfI-NQCtnGlJEUtYR0_k7Y1Hc4pZQ5F_T4T49hxSnNU,3505
git/test/test_reflog.pyc,,
git/test/test_refs.py,sha256=2rNm9HdJZTWXx775JHG_R9Pd5X022IQ9C2CbP_9vDoE,23357
git/test/test_refs.pyc,,
git/test/test_remote.py,sha256=6HuzDH827CjTVklhEUR0di2z78_N61OxJ0t5ZkD0jAw,26426
git/test/test_remote.pyc,,
git/test/test_repo.py,sha256=-J6Bdz3t0zPNEhjBaN41uZZQAEKZHPMhzNGhih4eYL0,37732
git/test/test_repo.pyc,,
git/test/test_stats.py,sha256=qmF2lL1wW0tEd17E-tkjmpPFVXzjREf7KW5JMCTQ4Zg,971
git/test/test_stats.pyc,,
git/test/test_submodule.py,sha256=aO4WTQjfiyUcus2FFAm2UIb-F53KYhe2TvaRvbloXMo,41434
git/test/test_submodule.pyc,,
git/test/test_tree.py,sha256=nR5OAQZLhv7kISoL3RO8ppkXAbKFYz3XlPAxABU1b4o,4046
git/test/test_tree.pyc,,
git/test/test_util.py,sha256=oh0ZJbOwPtqmMP3xEFkOsjD4G24-3kpZM9fqjZviLqU,8714
git/test/test_util.pyc,,
git/util.py,sha256=1mvwDNVbMu5hNGQZmATcYx9QbMhW6ZFRvnq7LtJGajk,32154
git/util.pyc,,


@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.31.1)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any


@ -0,0 +1,8 @@
__all__ = ["__version__"]
try:
from ._version import version as __version__
except ImportError:
# broken installation, we don't even try
# unknown only works because we do poor mans version compare
__version__ = "unknown"


@ -0,0 +1,109 @@
"""allow bash-completion for argparse with argcomplete if installed
needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
to find the magic string, so _ARGCOMPLETE env. var is never set, and
this does not need special code).
Function try_argcomplete(parser) should be called directly before
the call to ArgumentParser.parse_args().
The filescompleter is what you normally would use on the positional
arguments specification, in order to get "dirname/" after "dirn<TAB>"
instead of the default "dirname ":
optparser.add_argument(Config._file_or_dir, nargs='*'
).completer=filescompleter
Other, application specific, completers should go in the file
doing the add_argument calls as they need to be specified as .completer
attributes as well. (If argcomplete is not installed, the function the
attribute points to will not be used).
SPEEDUP
=======
The generic argcomplete script for bash-completion
(/etc/bash_completion.d/python-argcomplete.sh )
uses a python program to determine startup script generated by pip.
You can speed up completion somewhat by changing this script to include
# PYTHON_ARGCOMPLETE_OK
so that the python-argcomplete-check-easy-install-script does not
need to be called to find the entry point of the code and see if that is
marked with PYTHON_ARGCOMPLETE_OK
INSTALL/DEBUGGING
=================
To include this support in another application that has setup.py generated
scripts:
- add the line:
# PYTHON_ARGCOMPLETE_OK
near the top of the main python entry point
- include in the file calling parse_args():
from _argcomplete import try_argcomplete, filescompleter
, call try_argcomplete just before parse_args(), and optionally add
filescompleter to the positional arguments' add_argument()
If things do not work right away:
- switch on argcomplete debugging with (also helpful when doing custom
completers):
export _ARC_DEBUG=1
- run:
python-argcomplete-check-easy-install-script $(which appname)
echo $?
will echo 0 if the magic line has been found, 1 if not
- sometimes it helps to find early on errors using:
_ARGCOMPLETE=1 _ARC_DEBUG=1 appname
which should throw a KeyError: 'COMPLINE' (which is properly set by the
global argcomplete script).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from glob import glob
class FastFilesCompleter(object):
"Fast file completer class"
def __init__(self, directories=True):
self.directories = directories
def __call__(self, prefix, **kwargs):
"""only called on non option completions"""
if os.path.sep in prefix[1:]:
prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
else:
prefix_dir = 0
completion = []
globbed = []
if "*" not in prefix and "?" not in prefix:
# we are on unix, otherwise no bash
if not prefix or prefix[-1] == os.path.sep:
globbed.extend(glob(prefix + ".*"))
prefix += "*"
globbed.extend(glob(prefix))
for x in sorted(globbed):
if os.path.isdir(x):
x += "/"
# append stripping the prefix (like bash, not like compgen)
completion.append(x[prefix_dir:])
return completion
if os.environ.get("_ARGCOMPLETE"):
try:
import argcomplete.completers
except ImportError:
sys.exit(-1)
filescompleter = FastFilesCompleter()
def try_argcomplete(parser):
argcomplete.autocomplete(parser, always_complete_options=False)
else:
def try_argcomplete(parser):
pass
filescompleter = None
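The module docstring above spells out the integration contract; wired into an argparse-based entry point it would look roughly like this, assuming the module is importable as _argcomplete and with parser/argument names made up for the example:

# PYTHON_ARGCOMPLETE_OK  (marker near the top of the real script)
import argparse
from _argcomplete import try_argcomplete, filescompleter

parser = argparse.ArgumentParser()
parser.add_argument("file_or_dir", nargs="*").completer = filescompleter
try_argcomplete(parser)  # must run directly before parse_args()
args = parser.parse_args()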


@ -0,0 +1,14 @@
""" python inspection/code generation API """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .code import Code # noqa
from .code import ExceptionInfo # noqa
from .code import filter_traceback # noqa
from .code import Frame # noqa
from .code import getrawcode # noqa
from .code import Traceback # noqa
from .source import compile_ as compile # noqa
from .source import getfslineno # noqa
from .source import Source # noqa


@ -0,0 +1,94 @@
# copied from python-2.7.3's traceback.py
# CHANGES:
# - some_str is replaced, trying to create unicode strings
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import types
from six import text_type
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
# An instance should not have a meaningful value parameter, but
# sometimes does, particularly for string exceptions, such as
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubtype(string1, SyntaxError)
# would throw another exception and mask the original problem.
if (
isinstance(etype, BaseException)
or isinstance(etype, types.InstanceType)
or etype is None
or type(etype) is str
):
return [_format_final_exc_line(etype, value)]
stype = etype.__name__
if not issubclass(etype, SyntaxError):
return [_format_final_exc_line(stype, value)]
# It was a syntax error; show exactly where the problem was found.
lines = []
try:
msg, (filename, lineno, offset, badline) = value.args
except Exception:
pass
else:
filename = filename or "<string>"
lines.append(' File "{}", line {}\n'.format(filename, lineno))
if badline is not None:
if isinstance(badline, bytes): # python 2 only
badline = badline.decode("utf-8", "replace")
lines.append(" {}\n".format(badline.strip()))
if offset is not None:
caretspace = badline.rstrip("\n")[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or " ") for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(" {}^\n".format("".join(caretspace)))
value = msg
lines.append(_format_final_exc_line(stype, value))
return lines
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "{}\n".format(etype)
else:
line = "{}: {}\n".format(etype, valuestr)
return line
def _some_str(value):
try:
return text_type(value)
except Exception:
try:
return bytes(value).decode("UTF-8", "replace")
except Exception:
pass
return "<unprintable {} object>".format(type(value).__name__)
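A quick illustration of format_exception_only() above; since this module is the copy of Python 2.7's traceback helpers, the example assumes Python 2, and the exact message text comes from CPython:

try:
    int("not a number")
except ValueError as exc:
    lines = format_exception_only(type(exc), exc)
    # -> ["ValueError: invalid literal for int() with base 10: 'not a number'\n"]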

File diff suppressed because it is too large


@ -0,0 +1,328 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import inspect
import linecache
import sys
import textwrap
import tokenize
import warnings
from ast import PyCF_ONLY_AST as _AST_FLAG
from bisect import bisect_right
import py
import six
class Source(object):
""" an immutable object holding a source code fragment,
possibly deindenting it.
"""
_compilecounter = 0
def __init__(self, *parts, **kwargs):
self.lines = lines = []
de = kwargs.get("deindent", True)
for part in parts:
if not part:
partlines = []
elif isinstance(part, Source):
partlines = part.lines
elif isinstance(part, (tuple, list)):
partlines = [x.rstrip("\n") for x in part]
elif isinstance(part, six.string_types):
partlines = part.split("\n")
else:
partlines = getsource(part, deindent=de).lines
if de:
partlines = deindent(partlines)
lines.extend(partlines)
def __eq__(self, other):
try:
return self.lines == other.lines
except AttributeError:
if isinstance(other, str):
return str(self) == other
return False
__hash__ = None
def __getitem__(self, key):
if isinstance(key, int):
return self.lines[key]
else:
if key.step not in (None, 1):
raise IndexError("cannot slice a Source with a step")
newsource = Source()
newsource.lines = self.lines[key.start : key.stop]
return newsource
def __len__(self):
return len(self.lines)
def strip(self):
""" return new source object with trailing
and leading blank lines removed.
"""
start, end = 0, len(self)
while start < end and not self.lines[start].strip():
start += 1
while end > start and not self.lines[end - 1].strip():
end -= 1
source = Source()
source.lines[:] = self.lines[start:end]
return source
def putaround(self, before="", after="", indent=" " * 4):
""" return a copy of the source object with
'before' and 'after' wrapped around it.
"""
before = Source(before)
after = Source(after)
newsource = Source()
lines = [(indent + line) for line in self.lines]
newsource.lines = before.lines + lines + after.lines
return newsource
def indent(self, indent=" " * 4):
""" return a copy of the source object with
all lines indented by the given indent-string.
"""
newsource = Source()
newsource.lines = [(indent + line) for line in self.lines]
return newsource
def getstatement(self, lineno):
""" return Source statement which contains the
given linenumber (counted from 0).
"""
start, end = self.getstatementrange(lineno)
return self[start:end]
def getstatementrange(self, lineno):
""" return (start, end) tuple which spans the minimal
statement region which containing the given lineno.
"""
if not (0 <= lineno < len(self)):
raise IndexError("lineno out of range")
ast, start, end = getstatementrange_ast(lineno, self)
return start, end
def deindent(self):
"""return a new source object deindented."""
newsource = Source()
newsource.lines[:] = deindent(self.lines)
return newsource
def isparseable(self, deindent=True):
""" return True if source is parseable, heuristically
deindenting it by default.
"""
from parser import suite as syntax_checker
if deindent:
source = str(self.deindent())
else:
source = str(self)
try:
# compile(source+'\n', "x", "exec")
syntax_checker(source + "\n")
except KeyboardInterrupt:
raise
except Exception:
return False
else:
return True
def __str__(self):
return "\n".join(self.lines)
def compile(
self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None
):
""" return compiled code object. if filename is None
invent an artificial filename which displays
the source/line position of the caller frame.
"""
if not filename or py.path.local(filename).check(file=0):
if _genframe is None:
_genframe = sys._getframe(1) # the caller
fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno
base = "<%d-codegen " % self._compilecounter
self.__class__._compilecounter += 1
if not filename:
filename = base + "%s:%d>" % (fn, lineno)
else:
filename = base + "%r %s:%d>" % (filename, fn, lineno)
source = "\n".join(self.lines) + "\n"
try:
co = compile(source, filename, mode, flag)
except SyntaxError:
ex = sys.exc_info()[1]
# re-represent syntax errors from parsing python strings
msglines = self.lines[: ex.lineno]
if ex.offset:
msglines.append(" " * ex.offset + "^")
msglines.append("(code was compiled probably from here: %s)" % filename)
newex = SyntaxError("\n".join(msglines))
newex.offset = ex.offset
newex.lineno = ex.lineno
newex.text = ex.text
raise newex
else:
if flag & _AST_FLAG:
return co
lines = [(x + "\n") for x in self.lines]
linecache.cache[filename] = (1, None, lines, filename)
return co
#
# public API shortcut functions
#
def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0):
""" compile the given source to a raw code object,
and maintain an internal cache which allows later
retrieval of the source code for the code object
and any recursively created code objects.
"""
if isinstance(source, ast.AST):
# XXX should Source support having AST?
return compile(source, filename, mode, flags, dont_inherit)
_genframe = sys._getframe(1) # the caller
s = Source(source)
co = s.compile(filename, mode, flags, _genframe=_genframe)
return co
def getfslineno(obj):
""" Return source location (path, lineno) for the given object.
If the source cannot be determined return ("", -1).
The line number is 0-based.
"""
from .code import Code
try:
code = Code(obj)
except TypeError:
try:
fn = inspect.getsourcefile(obj) or inspect.getfile(obj)
except TypeError:
return "", -1
fspath = fn and py.path.local(fn) or None
lineno = -1
if fspath:
try:
_, lineno = findsource(obj)
except IOError:
pass
else:
fspath = code.path
lineno = code.firstlineno
assert isinstance(lineno, int)
return fspath, lineno
#
# helper functions
#
def findsource(obj):
try:
sourcelines, lineno = inspect.findsource(obj)
except Exception:
return None, -1
source = Source()
source.lines = [line.rstrip() for line in sourcelines]
return source, lineno
def getsource(obj, **kwargs):
from .code import getrawcode
obj = getrawcode(obj)
try:
strsrc = inspect.getsource(obj)
except IndentationError:
strsrc = '"Buggy python version consider upgrading, cannot get source"'
assert isinstance(strsrc, str)
return Source(strsrc, **kwargs)
def deindent(lines):
return textwrap.dedent("\n".join(lines)).splitlines()
def get_statement_startend2(lineno, node):
import ast
# flatten all statements and except handlers into one lineno-list
# AST's line numbers start indexing at 1
values = []
for x in ast.walk(node):
if isinstance(x, (ast.stmt, ast.ExceptHandler)):
values.append(x.lineno - 1)
for name in ("finalbody", "orelse"):
val = getattr(x, name, None)
if val:
# treat the finally/orelse part as its own statement
values.append(val[0].lineno - 1 - 1)
values.sort()
insert_index = bisect_right(values, lineno)
start = values[insert_index - 1]
if insert_index >= len(values):
end = None
else:
end = values[insert_index]
return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
if astnode is None:
content = str(source)
# See #4260:
# don't produce duplicate warnings when compiling source to find ast
with warnings.catch_warnings():
warnings.simplefilter("ignore")
astnode = compile(content, "source", "exec", _AST_FLAG)
start, end = get_statement_startend2(lineno, astnode)
# we need to correct the end:
# - ast-parsing strips comments
# - there might be empty lines
# - we might have lesser indented code blocks at the end
if end is None:
end = len(source.lines)
if end > start + 1:
# make sure we don't span differently indented code blocks
# by using the BlockFinder helper that inspect.getsource() itself uses
block_finder = inspect.BlockFinder()
# if we start with an indented line, put blockfinder to "started" mode
block_finder.started = source.lines[start][0].isspace()
it = ((x + "\n") for x in source.lines[start:end])
try:
for tok in tokenize.generate_tokens(lambda: next(it)):
block_finder.tokeneater(*tok)
except (inspect.EndOfBlock, IndentationError):
end = block_finder.last + start
except Exception:
pass
# the end might still point to a comment or empty line, correct it
while end:
line = source.lines[end - 1].lstrip()
if line.startswith("#") or not line:
end -= 1
else:
break
return astnode, start, end
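A small usage sketch for the Source class above; the values in the comments are what the methods described here should produce for this fragment:

src = Source("if x:\n    pass")
print(str(src))           # the stored fragment, lines joined with newlines
print(len(src))           # 2 -- one entry per line
print(str(src.indent()))  # the same two lines, each prefixed with four spaces
print(src.isparseable())  # True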


@ -0,0 +1,72 @@
import sys
from six.moves import reprlib
class SafeRepr(reprlib.Repr):
"""subclass of repr.Repr that limits the resulting size of repr()
and includes information on exceptions raised during the call.
"""
def repr(self, x):
return self._callhelper(reprlib.Repr.repr, self, x)
def repr_unicode(self, x, level):
# Strictly speaking wrong on narrow builds
def repr(u):
if "'" not in u:
return u"'%s'" % u
elif '"' not in u:
return u'"%s"' % u
else:
return u"'%s'" % u.replace("'", r"\'")
s = repr(x[: self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring - 3) // 2)
j = max(0, self.maxstring - 3 - i)
s = repr(x[:i] + x[len(x) - j :])
s = s[:i] + "..." + s[len(s) - j :]
return s
def repr_instance(self, x, level):
return self._callhelper(repr, x)
def _callhelper(self, call, x, *args):
try:
# Try the vanilla repr and make sure that the result is a string
s = call(x, *args)
except Exception:
cls, e, tb = sys.exc_info()
exc_name = getattr(cls, "__name__", "unknown")
try:
exc_info = str(e)
except Exception:
exc_info = "unknown"
return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
exc_name,
exc_info,
x.__class__.__name__,
id(x),
)
else:
if len(s) > self.maxsize:
i = max(0, (self.maxsize - 3) // 2)
j = max(0, self.maxsize - 3 - i)
s = s[:i] + "..." + s[len(s) - j :]
return s
def saferepr(obj, maxsize=240):
"""return a size-limited safe repr-string for the given object.
Failing __repr__ functions of user instances will be represented
with a short exception info and 'saferepr' generally takes
care to never raise exceptions itself. This function is a wrapper
around the Repr/reprlib functionality of the standard 2.6 lib.
"""
# review exception handling
srepr = SafeRepr()
srepr.maxstring = maxsize
srepr.maxsize = maxsize
srepr.maxother = 160
return srepr.repr(obj)
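To illustrate the helper above: saferepr() never raises, even when the object's own __repr__ does, and it caps the output length. The class below exists only for the demonstration:

class Broken(object):
    def __repr__(self):
        raise RuntimeError("boom")

print(saferepr(Broken()))    # <[RuntimeError("boom") raised in repr()] Broken object at 0x...>
print(saferepr("x" * 1000))  # repr truncated to roughly 240 characters, with "..." in the middle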


@ -0,0 +1,4 @@
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '4.4.0'


@ -0,0 +1,155 @@
"""
support for presenting detailed information in failing assertions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from _pytest.assertion import rewrite
from _pytest.assertion import truncate
from _pytest.assertion import util
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--assert",
action="store",
dest="assertmode",
choices=("rewrite", "plain"),
default="rewrite",
metavar="MODE",
help="""Control assertion debugging tools. 'plain'
performs no assertion debugging. 'rewrite'
(the default) rewrites assert statements in
test modules on import to provide assert
expression information.""",
)
def register_assert_rewrite(*names):
"""Register one or more module names to be rewritten on import.
This function will make sure that this module or all modules inside
the package will get their assert statements rewritten.
Thus you should make sure to call this before the module is
actually imported, usually in your __init__.py if you are a plugin
using a package.
:raise TypeError: if the given module names are not strings.
"""
for name in names:
if not isinstance(name, str):
msg = "expected module names as *args, got {0} instead"
raise TypeError(msg.format(repr(names)))
for hook in sys.meta_path:
if isinstance(hook, rewrite.AssertionRewritingHook):
importhook = hook
break
else:
importhook = DummyRewriteHook()
importhook.mark_rewrite(*names)
class DummyRewriteHook(object):
"""A no-op import hook for when rewriting is disabled."""
def mark_rewrite(self, *names):
pass
class AssertionState(object):
"""State for the assertion plugin."""
def __init__(self, config, mode):
self.mode = mode
self.trace = config.trace.root.get("assertion")
self.hook = None
def install_importhook(config):
"""Try to install the rewrite hook, raise SystemError if it fails."""
# Jython has an AST bug that makes the assertion rewriting hook malfunction.
if sys.platform.startswith("java"):
raise SystemError("rewrite not supported")
config._assertstate = AssertionState(config, "rewrite")
config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config)
sys.meta_path.insert(0, hook)
config._assertstate.trace("installed rewrite import hook")
def undo():
hook = config._assertstate.hook
if hook is not None and hook in sys.meta_path:
sys.meta_path.remove(hook)
config.add_cleanup(undo)
return hook
def pytest_collection(session):
# this hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
# (which does not collect test modules)
assertstate = getattr(session.config, "_assertstate", None)
if assertstate:
if assertstate.hook is not None:
assertstate.hook.set_session(session)
def pytest_runtest_setup(item):
"""Setup the pytest_assertrepr_compare hook
The newinterpret and rewrite modules will use util._reprcompare if
it exists to use custom reporting via the
pytest_assertrepr_compare hook. This sets up this custom
comparison for the test.
"""
def callbinrepr(op, left, right):
"""Call the pytest_assertrepr_compare hook and prepare the result
This uses the first result from the hook and then ensures the
following:
* Overly verbose explanations are truncated unless configured otherwise
(eg. if running in verbose mode).
* Embedded newlines are escaped to help util.format_explanation()
later.
* If the rewrite mode is used embedded %-characters are replaced
to protect later % formatting.
The result can be formatted by util.format_explanation() for
pretty printing.
"""
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right
)
for new_expl in hook_result:
if new_expl:
new_expl = truncate.truncate_if_required(new_expl, item)
new_expl = [line.replace("\n", "\\n") for line in new_expl]
res = six.text_type("\n~").join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
res = res.replace("%", "%%")
return res
util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
util._reprcompare = None
def pytest_sessionfinish(session):
assertstate = getattr(session.config, "_assertstate", None)
if assertstate:
if assertstate.hook is not None:
assertstate.hook.set_session(None)
# Expose this plugin's implementation for the pytest_assertrepr_compare hook
pytest_assertrepr_compare = util.assertrepr_compare
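The register_assert_rewrite() docstring above describes the intended call site; for a plugin shipped as a package, that usually looks like the following, with package and module names invented for the example:

# my_plugin/__init__.py
import pytest

pytest.register_assert_rewrite("my_plugin.helpers")

from my_plugin import helpers  # assert statements in helpers are rewritten when it is imported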

File diff suppressed because it is too large


@ -0,0 +1,101 @@
"""
Utilities for truncating assertion output.
Current default behaviour is to truncate assertion explanations at
~8 terminal lines, unless running in "-vv" mode or running on CI.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
DEFAULT_MAX_LINES = 8
DEFAULT_MAX_CHARS = 8 * 80
USAGE_MSG = "use '-vv' to show"
def truncate_if_required(explanation, item, max_length=None):
"""
Truncate this assertion explanation if the given test item is eligible.
"""
if _should_truncate_item(item):
return _truncate_explanation(explanation)
return explanation
def _should_truncate_item(item):
"""
Whether or not this test item is eligible for truncation.
"""
verbose = item.config.option.verbose
return verbose < 2 and not _running_on_ci()
def _running_on_ci():
"""Check if we're currently running on a CI system."""
env_vars = ["CI", "BUILD_NUMBER"]
return any(var in os.environ for var in env_vars)
def _truncate_explanation(input_lines, max_lines=None, max_chars=None):
"""
Truncate given list of strings that makes up the assertion explanation.
Truncates to either 8 lines, or 640 characters - whichever the input reaches
first. The remaining lines will be replaced by a usage message.
"""
if max_lines is None:
max_lines = DEFAULT_MAX_LINES
if max_chars is None:
max_chars = DEFAULT_MAX_CHARS
# Check if truncation required
input_char_count = len("".join(input_lines))
if len(input_lines) <= max_lines and input_char_count <= max_chars:
return input_lines
# Truncate first to max_lines, and then truncate to max_chars if max_chars
# is exceeded.
truncated_explanation = input_lines[:max_lines]
truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars)
# Add ellipsis to final line
truncated_explanation[-1] = truncated_explanation[-1] + "..."
# Append useful message to explanation
truncated_line_count = len(input_lines) - len(truncated_explanation)
truncated_line_count += 1 # Account for the part-truncated final line
msg = "...Full output truncated"
if truncated_line_count == 1:
msg += " ({} line hidden)".format(truncated_line_count)
else:
msg += " ({} lines hidden)".format(truncated_line_count)
msg += ", {}".format(USAGE_MSG)
truncated_explanation.extend([six.text_type(""), six.text_type(msg)])
return truncated_explanation
def _truncate_by_char_count(input_lines, max_chars):
# Check if truncation required
if len("".join(input_lines)) <= max_chars:
return input_lines
# Find point at which input length exceeds total allowed length
iterated_char_count = 0
for iterated_index, input_line in enumerate(input_lines):
if iterated_char_count + len(input_line) > max_chars:
break
iterated_char_count += len(input_line)
# Create truncated explanation with modified final line
truncated_result = input_lines[:iterated_index]
final_line = input_lines[iterated_index]
if final_line:
final_line_truncate_point = max_chars - iterated_char_count
final_line = final_line[:final_line_truncate_point]
truncated_result.append(final_line)
return truncated_result
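A worked example of the defaults above (8 lines / 640 characters), assuming a 20-line explanation made of short lines:

explanation = ["assert failed"] * 20
truncated = _truncate_explanation(explanation)
# -> the first 8 lines, the 8th ending in "...", followed by an empty entry and
#    "...Full output truncated (13 lines hidden), use '-vv' to show"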


@ -0,0 +1,397 @@
"""Utilities for assertion debugging"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pprint
import six
import _pytest._code
from ..compat import Sequence
from _pytest import outcomes
from _pytest._io.saferepr import saferepr
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
if isinstance(s, bytes):
return s.decode("UTF-8", "replace")
else:
return s
def format_explanation(explanation):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
to cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
explanation = ecu(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u"\n".join(result)
def _split_explanation(explanation):
"""Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or u"").split("\n")
lines = [raw_lines[0]]
for values in raw_lines[1:]:
if values and values[0] in ["{", "}", "~", ">"]:
lines.append(values)
else:
lines[-1] += "\\n" + values
return lines
def _format_lines(lines):
"""Format the individual lines
This will replace the '{', '}' and '~' characters of our mini
formatting language with the proper 'where ...', 'and ...' and ' +
...' text, taking care of indentation along the way.
Return a list of formatted lines.
"""
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith("{"):
if stackcnt[-1]:
s = u"and "
else:
s = u"where "
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(u" +" + u" " * (len(stack) - 1) + s + line[1:])
elif line.startswith("}"):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ["~", ">"]
stack[-1] += 1
indent = len(stack) if line.startswith("~") else len(stack) - 1
result.append(u" " * indent + line[1:])
assert len(stack) == 1
return result
# Provide basestring in python3
try:
basestring = basestring
except NameError:
basestring = str
def issequence(x):
return isinstance(x, Sequence) and not isinstance(x, basestring)
def istext(x):
return isinstance(x, basestring)
def isdict(x):
return isinstance(x, dict)
def isset(x):
return isinstance(x, (set, frozenset))
def isdatacls(obj):
return getattr(obj, "__dataclass_fields__", None) is not None
def isattrs(obj):
return getattr(obj, "__attrs_attrs__", None) is not None
def isiterable(obj):
try:
iter(obj)
return not istext(obj)
except TypeError:
return False
def assertrepr_compare(config, op, left, right):
"""Return specialised explanations for some operators/operands"""
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = saferepr(left, maxsize=int(width // 2))
right_repr = saferepr(right, maxsize=width - len(left_repr))
summary = u"%s %s %s" % (ecu(left_repr), op, ecu(right_repr))
verbose = config.getoption("verbose")
explanation = None
try:
if op == "==":
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
if issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
elif type(left) == type(right) and (isdatacls(left) or isattrs(left)):
type_fn = (isdatacls, isattrs)
explanation = _compare_eq_cls(left, right, verbose, type_fn)
elif verbose > 0:
explanation = _compare_eq_verbose(left, right)
if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)
if explanation is not None:
explanation.extend(expl)
else:
explanation = expl
elif op == "not in":
if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose)
except outcomes.Exit:
raise
except Exception:
explanation = [
u"(pytest_assertion plugin: representation of details failed. "
u"Probably an object has a faulty __repr__.)",
six.text_type(_pytest._code.ExceptionInfo.from_current()),
]
if not explanation:
return None
return [summary] + explanation
def _diff_text(left, right, verbose=0):
"""Return the explanation for the diff between text or bytes.
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
If the input are bytes they will be safely converted to text.
"""
from difflib import ndiff
explanation = []
def escape_for_readable_diff(binary_text):
"""
Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode.
This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape
newlines and carriage returns (#429).
"""
r = six.text_type(repr(binary_text)[1:-1])
r = r.replace(r"\n", "\n")
r = r.replace(r"\r", "\r")
return r
if isinstance(left, bytes):
left = escape_for_readable_diff(left)
if isinstance(right, bytes):
right = escape_for_readable_diff(right)
if verbose < 1:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [
u"Skipping %s identical leading characters in diff, use -v to show" % i
]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [
u"Skipping {} identical trailing "
u"characters in diff, use -v to show".format(i)
]
left = left[:-i]
right = right[:-i]
keepends = True
if left.isspace() or right.isspace():
left = repr(str(left))
right = repr(str(right))
explanation += [u"Strings contain only whitespace, escaping them using repr()"]
explanation += [
line.strip("\n")
for line in ndiff(left.splitlines(keepends), right.splitlines(keepends))
]
return explanation
def _compare_eq_verbose(left, right):
keepends = True
left_lines = repr(left).splitlines(keepends)
right_lines = repr(right).splitlines(keepends)
explanation = []
explanation += [u"-" + line for line in left_lines]
explanation += [u"+" + line for line in right_lines]
return explanation
def _compare_eq_iterable(left, right, verbose=0):
if not verbose:
return [u"Use -v to get the full diff"]
# dynamic import to speedup pytest
import difflib
try:
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = [u"Full diff:"]
except Exception:
# hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
# sorted() on a list would raise. See issue #718.
# As a workaround, the full diff is generated by using the repr() string of each item of each container.
left_formatting = sorted(repr(x) for x in left)
right_formatting = sorted(repr(x) for x in right)
explanation = [u"Full diff (fallback to calling repr on each item):"]
explanation.extend(
line.strip() for line in difflib.ndiff(left_formatting, right_formatting)
)
return explanation
def _compare_eq_sequence(left, right, verbose=0):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
explanation += [u"At index %s diff: %r != %r" % (i, left[i], right[i])]
break
if len(left) > len(right):
explanation += [
u"Left contains more items, first extra item: %s"
% saferepr(left[len(right)])
]
elif len(left) < len(right):
explanation += [
u"Right contains more items, first extra item: %s"
% saferepr(right[len(left)])
]
return explanation
def _compare_eq_set(left, right, verbose=0):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
explanation.append(u"Extra items in the left set:")
for item in diff_left:
explanation.append(saferepr(item))
if diff_right:
explanation.append(u"Extra items in the right set:")
for item in diff_right:
explanation.append(saferepr(item))
return explanation
def _compare_eq_dict(left, right, verbose=0):
explanation = []
common = set(left).intersection(set(right))
same = {k: left[k] for k in common if left[k] == right[k]}
if same and verbose < 2:
explanation += [u"Omitting %s identical items, use -vv to show" % len(same)]
elif same:
explanation += [u"Common items:"]
explanation += pprint.pformat(same).splitlines()
diff = {k for k in common if left[k] != right[k]}
if diff:
explanation += [u"Differing items:"]
for k in diff:
explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})]
extra_left = set(left) - set(right)
if extra_left:
explanation.append(u"Left contains more items:")
explanation.extend(
pprint.pformat({k: left[k] for k in extra_left}).splitlines()
)
extra_right = set(right) - set(left)
if extra_right:
explanation.append(u"Right contains more items:")
explanation.extend(
pprint.pformat({k: right[k] for k in extra_right}).splitlines()
)
return explanation
def _compare_eq_cls(left, right, verbose, type_fns):
isdatacls, isattrs = type_fns
if isdatacls(left):
all_fields = left.__dataclass_fields__
fields_to_check = [field for field, info in all_fields.items() if info.compare]
elif isattrs(left):
all_fields = left.__attrs_attrs__
fields_to_check = [field.name for field in all_fields if field.cmp]
same = []
diff = []
for field in fields_to_check:
if getattr(left, field) == getattr(right, field):
same.append(field)
else:
diff.append(field)
explanation = []
if same and verbose < 2:
explanation.append(u"Omitting %s identical items, use -vv to show" % len(same))
elif same:
explanation += [u"Matching attributes:"]
explanation += pprint.pformat(same).splitlines()
if diff:
explanation += [u"Differing attributes:"]
for field in diff:
explanation += [
(u"%s: %r != %r") % (field, getattr(left, field), getattr(right, field))
]
return explanation
def _notin_text(term, text, verbose=0):
index = text.find(term)
head = text[:index]
tail = text[index + len(term) :]
correct_text = head + tail
diff = _diff_text(correct_text, text, verbose)
newdiff = [u"%s is contained here:" % saferepr(term, maxsize=42)]
for line in diff:
if line.startswith(u"Skipping"):
continue
if line.startswith(u"- "):
continue
if line.startswith(u"+ "):
newdiff.append(u" " + line[2:])
else:
newdiff.append(line)
return newdiff
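# Illustrative sketch: a concrete input makes the explanation builders above
# easier to follow. The sample lists are hypothetical; the expected lines come
# straight from the branches of _compare_eq_sequence.
example_explanation = _compare_eq_sequence([1, 2, 3], [1, 9])
# example_explanation == [u"At index 1 diff: 2 != 9",
#                         u"Left contains more items, first extra item: 3"]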

View File

@@ -0,0 +1,387 @@
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from collections import OrderedDict
import attr
import py
import six
import pytest
from .compat import _PY2 as PY2
from .pathlib import Path
from .pathlib import resolve_from_str
from .pathlib import rmtree
README_CONTENT = u"""\
# pytest cache directory #
This directory contains data from pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
"""
CACHEDIR_TAG_CONTENT = b"""\
Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag created by pytest.
# For information about cache directory tags, see:
# http://www.bford.info/cachedir/spec.html
"""
@attr.s
class Cache(object):
_cachedir = attr.ib(repr=False)
_config = attr.ib(repr=False)
@classmethod
def for_config(cls, config):
cachedir = cls.cache_dir_from_config(config)
if config.getoption("cacheclear") and cachedir.exists():
rmtree(cachedir, force=True)
cachedir.mkdir()
return cls(cachedir, config)
@staticmethod
def cache_dir_from_config(config):
return resolve_from_str(config.getini("cache_dir"), config.rootdir)
def warn(self, fmt, **args):
from _pytest.warnings import _issue_warning_captured
from _pytest.warning_types import PytestWarning
_issue_warning_captured(
PytestWarning(fmt.format(**args) if args else fmt),
self._config.hook,
stacklevel=3,
)
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
name = Path(name)
if len(name.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath("d", name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res)
def _getvaluepath(self, key):
return self._cachedir.joinpath("v", Path(key))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
if path.parent.is_dir():
cache_dir_exists_already = True
else:
cache_dir_exists_already = self._cachedir.exists()
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn("could not create cache path {path}", path=path)
return
if not cache_dir_exists_already:
self._ensure_supporting_files()
try:
f = path.open("wb" if PY2 else "w")
except (IOError, OSError):
self.warn("cache could not write path {path}", path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
def _ensure_supporting_files(self):
"""Create supporting files in the cache dir that are not really part of the cache."""
readme_path = self._cachedir / "README.md"
readme_path.write_text(README_CONTENT)
gitignore_path = self._cachedir.joinpath(".gitignore")
msg = u"# Created by pytest automatically.\n*"
gitignore_path.write_text(msg, encoding="UTF-8")
cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
class LFPlugin(object):
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
def pytest_report_collectionfinish(self):
if self.active and self.config.getoption("verbose") >= 0:
if not self._previously_failed_count:
return None
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
if self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
return
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
elif self._no_failures_behavior == "none":
config.hook.pytest_deselected(items=items)
items[:] = []
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin(object):
""" Plugin which implements the --nf (run new-first) option """
def __init__(self, config):
self.config = config
self.active = config.option.newfirst
self.cached_nodeids = config.cache.get("cache/nodeids", [])
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
new_items = OrderedDict()
other_items = OrderedDict()
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
six.itervalues(new_items)
) + self._get_increasing_order(six.itervalues(other_items))
self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items):
return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/nodeids", self.cached_nodeids)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="store_true",
dest="cacheshow",
help="show cache contents, don't perform collection or tests",
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
cache_dir_default = ".pytest_cache"
if "TOX_ENV_DIR" in os.environ:
cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="change the behavior when no test failed in the last run or no "
"information about the last failures was found in the cache",
)
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache.for_config(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be ``/``-separated values, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
def pytest_report_header(config):
"""Display cachedir with --cache-show and if non-default."""
if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache":
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootdir)
except ValueError:
displaypath = cachedir
return "cachedir: {}".format(displaypath)
def cacheshow(config, session):
from pprint import pformat
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / "v"
tw.sep("-", "cache values")
for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
key = valpath.relative_to(vdir)
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, will be ignored" % key)
else:
tw.line("%s contains:" % key)
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / "d"
if ddir.is_dir():
contents = sorted(ddir.rglob("*"))
tw.sep("-", "cache directories")
for p in contents:
# if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.is_file():
key = p.relative_to(basedir)
tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
return 0
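# Usage sketch for the cache fixture defined above. The key "example/expensive"
# and the computed value are hypothetical; values are persisted as JSON below
# the configured cache_dir and survive between test sessions.
def test_uses_cache(cache):
    data = cache.get("example/expensive", None)
    if data is None:
        data = {"answer": 42}  # stand-in for an expensive computation
        cache.set("example/expensive", data)
    assert data["answer"] == 42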

View File

@@ -0,0 +1,822 @@
"""
per-test stdout/stderr capturing mechanism.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import io
import os
import sys
from io import UnsupportedOperation
from tempfile import TemporaryFile
import six
import pytest
from _pytest.compat import _PY3
from _pytest.compat import CaptureIO
patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"}
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"--capture",
action="store",
default="fd" if hasattr(os, "dup") else "sys",
metavar="method",
choices=["fd", "sys", "no"],
help="per-test capturing method: one of fd|sys|no.",
)
group._addoption(
"-s",
action="store_const",
const="no",
dest="capture",
help="shortcut for --capture=no.",
)
@pytest.hookimpl(hookwrapper=True)
def pytest_load_initial_conftests(early_config, parser, args):
ns = early_config.known_args_namespace
if ns.capture == "fd":
_py36_windowsconsoleio_workaround(sys.stdout)
_colorama_workaround()
_readline_workaround()
pluginmanager = early_config.pluginmanager
capman = CaptureManager(ns.capture)
pluginmanager.register(capman, "capturemanager")
# make sure that capturemanager is properly reset at final shutdown
early_config.add_cleanup(capman.stop_global_capturing)
# make sure logging does not raise exceptions at the end
def silence_logging_at_shutdown():
if "logging" in sys.modules:
sys.modules["logging"].raiseExceptions = False
early_config.add_cleanup(silence_logging_at_shutdown)
# finally trigger conftest loading but while capturing (issue93)
capman.start_global_capturing()
outcome = yield
capman.suspend_global_capture()
if outcome.excinfo is not None:
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
class CaptureManager(object):
"""
Capture plugin which ensures that the appropriate capture method is enabled/disabled during collection and each
test phase (setup, call, teardown). After each of those points, the captured output is obtained and
attached to the collection/runtest report.
There are two levels of capture:
* global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled
during collection and each test phase.
* fixture: when a test function or one of its fixtures depends on the ``capsys`` or ``capfd`` fixtures. In this
case special handling is needed to ensure the fixtures take precedence over the global capture.
"""
def __init__(self, method):
self._method = method
self._global_capturing = None
self._current_item = None
def __repr__(self):
return "<CaptureManager _method=%r _global_capturing=%r _current_item=%r>" % (
self._method,
self._global_capturing,
self._current_item,
)
def _getcapture(self, method):
if method == "fd":
return MultiCapture(out=True, err=True, Capture=FDCapture)
elif method == "sys":
return MultiCapture(out=True, err=True, Capture=SysCapture)
elif method == "no":
return MultiCapture(out=False, err=False, in_=False)
raise ValueError("unknown capturing method: %r" % method) # pragma: no cover
def is_capturing(self):
if self.is_globally_capturing():
return "global"
capture_fixture = getattr(self._current_item, "_capture_fixture", None)
if capture_fixture is not None:
return (
"fixture %s" % self._current_item._capture_fixture.request.fixturename
)
return False
# Global capturing control
def is_globally_capturing(self):
return self._method != "no"
def start_global_capturing(self):
assert self._global_capturing is None
self._global_capturing = self._getcapture(self._method)
self._global_capturing.start_capturing()
def stop_global_capturing(self):
if self._global_capturing is not None:
self._global_capturing.pop_outerr_to_orig()
self._global_capturing.stop_capturing()
self._global_capturing = None
def resume_global_capture(self):
# During teardown of the python process, and on rare occasions, capture
# attributes can be `None` while trying to resume global capture.
if self._global_capturing is not None:
self._global_capturing.resume_capturing()
def suspend_global_capture(self, in_=False):
cap = getattr(self, "_global_capturing", None)
if cap is not None:
cap.suspend_capturing(in_=in_)
def suspend(self, in_=False):
# Need to undo local capsys-et-al if it exists before disabling global capture.
self.suspend_fixture(self._current_item)
self.suspend_global_capture(in_)
def resume(self):
self.resume_global_capture()
self.resume_fixture(self._current_item)
def read_global_capture(self):
return self._global_capturing.readouterr()
# Fixture Control (it's just forwarding, think about removing this later)
def activate_fixture(self, item):
"""If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over
the global capture.
"""
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._start()
def deactivate_fixture(self, item):
"""Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any."""
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture.close()
def suspend_fixture(self, item):
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._suspend()
def resume_fixture(self, item):
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._resume()
# Helper context managers
@contextlib.contextmanager
def global_and_fixture_disabled(self):
"""Context manager to temporarily disable global and current fixture capturing."""
self.suspend()
try:
yield
finally:
self.resume()
@contextlib.contextmanager
def item_capture(self, when, item):
self.resume_global_capture()
self.activate_fixture(item)
try:
yield
finally:
self.deactivate_fixture(item)
self.suspend_global_capture(in_=False)
out, err = self.read_global_capture()
item.add_report_section(when, "stdout", out)
item.add_report_section(when, "stderr", err)
# Hooks
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report(self, collector):
if isinstance(collector, pytest.File):
self.resume_global_capture()
outcome = yield
self.suspend_global_capture()
out, err = self.read_global_capture()
rep = outcome.get_result()
if out:
rep.sections.append(("Captured stdout", out))
if err:
rep.sections.append(("Captured stderr", err))
else:
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(self, item):
self._current_item = item
yield
self._current_item = None
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
with self.item_capture("setup", item):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
with self.item_capture("call", item):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item):
with self.item_capture("teardown", item):
yield
@pytest.hookimpl(tryfirst=True)
def pytest_keyboard_interrupt(self, excinfo):
self.stop_global_capturing()
@pytest.hookimpl(tryfirst=True)
def pytest_internalerror(self, excinfo):
self.stop_global_capturing()
capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"}
def _ensure_only_one_capture_fixture(request, name):
fixtures = set(request.fixturenames) & capture_fixtures - {name}
if fixtures:
fixtures = sorted(fixtures)
fixtures = fixtures[0] if len(fixtures) == 1 else fixtures
raise request.raiseerror(
"cannot use {} and {} at the same time".format(fixtures, name)
)
@pytest.fixture
def capsys(request):
"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
The captured output is made available via ``capsys.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
"""
_ensure_only_one_capture_fixture(request, "capsys")
with _install_capture_fixture_on_item(request, SysCapture) as fixture:
yield fixture
@pytest.fixture
def capsysbinary(request):
"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
The captured output is made available via ``capsysbinary.readouterr()``
method calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``bytes`` objects.
"""
_ensure_only_one_capture_fixture(request, "capsysbinary")
# Currently, the implementation uses the python3 specific `.buffer`
# property of CaptureIO.
if sys.version_info < (3,):
raise request.raiseerror("capsysbinary is only supported on Python 3")
with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture:
yield fixture
@pytest.fixture
def capfd(request):
"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
"""
_ensure_only_one_capture_fixture(request, "capfd")
if not hasattr(os, "dup"):
pytest.skip(
"capfd fixture needs os.dup function which is not available in this system"
)
with _install_capture_fixture_on_item(request, FDCapture) as fixture:
yield fixture
@pytest.fixture
def capfdbinary(request):
"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfdbinary.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``bytes`` objects.
"""
_ensure_only_one_capture_fixture(request, "capfdbinary")
if not hasattr(os, "dup"):
pytest.skip(
"capfdbinary fixture needs os.dup function which is not available in this system"
)
with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture:
yield fixture
@contextlib.contextmanager
def _install_capture_fixture_on_item(request, capture_class):
"""
Context manager which creates a ``CaptureFixture`` instance and "installs" it on
the item/node of the given request. Used by ``capsys`` and ``capfd``.
The CaptureFixture is added as an attribute of the item because it needs to be accessed
by ``CaptureManager`` during its ``pytest_runtest_*`` hooks.
"""
request.node._capture_fixture = fixture = CaptureFixture(capture_class, request)
capmanager = request.config.pluginmanager.getplugin("capturemanager")
# Need to activate this fixture right away in case it is being used by another fixture (setup phase).
# If this fixture is being used only by a test function (call phase), then we wouldn't need this
# activation, but it doesn't hurt.
capmanager.activate_fixture(request.node)
yield fixture
fixture.close()
del request.node._capture_fixture
class CaptureFixture(object):
"""
Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary`
fixtures.
"""
def __init__(self, captureclass, request):
self.captureclass = captureclass
self.request = request
self._capture = None
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
def _start(self):
# Start if not started yet
if getattr(self, "_capture", None) is None:
self._capture = MultiCapture(
out=True, err=True, in_=False, Capture=self.captureclass
)
self._capture.start_capturing()
def close(self):
if self._capture is not None:
out, err = self._capture.pop_outerr_to_orig()
self._captured_out += out
self._captured_err += err
self._capture.stop_capturing()
self._capture = None
def readouterr(self):
"""Read and return the captured output so far, resetting the internal buffer.
:return: captured content as a namedtuple with ``out`` and ``err`` string attributes
"""
captured_out, captured_err = self._captured_out, self._captured_err
if self._capture is not None:
out, err = self._capture.readouterr()
captured_out += out
captured_err += err
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
return CaptureResult(captured_out, captured_err)
def _suspend(self):
"""Suspends this fixture's own capturing temporarily."""
self._capture.suspend_capturing()
def _resume(self):
"""Resumes this fixture's own capturing temporarily."""
self._capture.resume_capturing()
@contextlib.contextmanager
def disabled(self):
"""Temporarily disables capture while inside the 'with' block."""
capmanager = self.request.config.pluginmanager.getplugin("capturemanager")
with capmanager.global_and_fixture_disabled():
yield
def safe_text_dupfile(f, mode, default_encoding="UTF8"):
""" return an open text file object that's a duplicate of f on the
FD-level if possible.
"""
encoding = getattr(f, "encoding", None)
try:
fd = f.fileno()
except Exception:
if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"):
# we seem to have a text stream, let's just use it
return f
else:
newfd = os.dup(fd)
if "b" not in mode:
mode += "b"
f = os.fdopen(newfd, mode, 0) # no buffering
return EncodedFile(f, encoding or default_encoding)
class EncodedFile(object):
errors = "strict" # possibly needed by py3 code (issue555)
def __init__(self, buffer, encoding):
self.buffer = buffer
self.encoding = encoding
def write(self, obj):
if isinstance(obj, six.text_type):
obj = obj.encode(self.encoding, "replace")
elif _PY3:
raise TypeError(
"write() argument must be str, not {}".format(type(obj).__name__)
)
self.buffer.write(obj)
def writelines(self, linelist):
data = "".join(linelist)
self.write(data)
@property
def name(self):
"""Ensure that file.name is a string."""
return repr(self.buffer)
def __getattr__(self, name):
return getattr(object.__getattribute__(self, "buffer"), name)
CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"])
class MultiCapture(object):
out = err = in_ = None
def __init__(self, out=True, err=True, in_=True, Capture=None):
if in_:
self.in_ = Capture(0)
if out:
self.out = Capture(1)
if err:
self.err = Capture(2)
def __repr__(self):
return "<MultiCapture out=%r err=%r in_=%r>" % (self.out, self.err, self.in_)
def start_capturing(self):
if self.in_:
self.in_.start()
if self.out:
self.out.start()
if self.err:
self.err.start()
def pop_outerr_to_orig(self):
""" pop current snapshot out/err capture and flush to orig streams. """
out, err = self.readouterr()
if out:
self.out.writeorg(out)
if err:
self.err.writeorg(err)
return out, err
def suspend_capturing(self, in_=False):
if self.out:
self.out.suspend()
if self.err:
self.err.suspend()
if in_ and self.in_:
self.in_.suspend()
self._in_suspended = True
def resume_capturing(self):
if self.out:
self.out.resume()
if self.err:
self.err.resume()
if hasattr(self, "_in_suspended"):
self.in_.resume()
del self._in_suspended
def stop_capturing(self):
""" stop capturing and reset capturing streams """
if hasattr(self, "_reset"):
raise ValueError("was already stopped")
self._reset = True
if self.out:
self.out.done()
if self.err:
self.err.done()
if self.in_:
self.in_.done()
def readouterr(self):
""" return snapshot unicode value of stdout/stderr capturings. """
return CaptureResult(
self.out.snap() if self.out is not None else "",
self.err.snap() if self.err is not None else "",
)
class NoCapture(object):
EMPTY_BUFFER = None
__init__ = start = done = suspend = resume = lambda *args: None
class FDCaptureBinary(object):
"""Capture IO to/from a given os-level filedescriptor.
snap() produces `bytes`
"""
EMPTY_BUFFER = b""
def __init__(self, targetfd, tmpfile=None):
self.targetfd = targetfd
try:
self.targetfd_save = os.dup(self.targetfd)
except OSError:
self.start = lambda: None
self.done = lambda: None
else:
if targetfd == 0:
assert not tmpfile, "cannot set tmpfile with stdin"
tmpfile = open(os.devnull, "r")
self.syscapture = SysCapture(targetfd)
else:
if tmpfile is None:
f = TemporaryFile()
with f:
tmpfile = safe_text_dupfile(f, mode="wb+")
if targetfd in patchsysdict:
self.syscapture = SysCapture(targetfd, tmpfile)
else:
self.syscapture = NoCapture()
self.tmpfile = tmpfile
self.tmpfile_fd = tmpfile.fileno()
def __repr__(self):
return "<FDCapture %s oldfd=%s>" % (
self.targetfd,
getattr(self, "targetfd_save", None),
)
def start(self):
""" Start capturing on targetfd using memorized tmpfile. """
try:
os.fstat(self.targetfd_save)
except (AttributeError, OSError):
raise ValueError("saved filedescriptor not valid anymore")
os.dup2(self.tmpfile_fd, self.targetfd)
self.syscapture.start()
def snap(self):
self.tmpfile.seek(0)
res = self.tmpfile.read()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def done(self):
""" stop capturing, restore streams, return original capture file,
seeked to position zero. """
targetfd_save = self.__dict__.pop("targetfd_save")
os.dup2(targetfd_save, self.targetfd)
os.close(targetfd_save)
self.syscapture.done()
_attempt_to_close_capture_file(self.tmpfile)
def suspend(self):
self.syscapture.suspend()
os.dup2(self.targetfd_save, self.targetfd)
def resume(self):
self.syscapture.resume()
os.dup2(self.tmpfile_fd, self.targetfd)
def writeorg(self, data):
""" write to original file descriptor. """
if isinstance(data, six.text_type):
data = data.encode("utf8") # XXX use encoding of original stream
os.write(self.targetfd_save, data)
class FDCapture(FDCaptureBinary):
"""Capture IO to/from a given os-level filedescriptor.
snap() produces text
"""
EMPTY_BUFFER = str()
def snap(self):
res = super(FDCapture, self).snap()
enc = getattr(self.tmpfile, "encoding", None)
if enc and isinstance(res, bytes):
res = six.text_type(res, enc, "replace")
return res
class SysCapture(object):
EMPTY_BUFFER = str()
def __init__(self, fd, tmpfile=None):
name = patchsysdict[fd]
self._old = getattr(sys, name)
self.name = name
if tmpfile is None:
if name == "stdin":
tmpfile = DontReadFromInput()
else:
tmpfile = CaptureIO()
self.tmpfile = tmpfile
def start(self):
setattr(sys, self.name, self.tmpfile)
def snap(self):
res = self.tmpfile.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def done(self):
setattr(sys, self.name, self._old)
del self._old
_attempt_to_close_capture_file(self.tmpfile)
def suspend(self):
setattr(sys, self.name, self._old)
def resume(self):
setattr(sys, self.name, self.tmpfile)
def writeorg(self, data):
self._old.write(data)
self._old.flush()
class SysCaptureBinary(SysCapture):
EMPTY_BUFFER = b""
def snap(self):
res = self.tmpfile.buffer.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
class DontReadFromInput(six.Iterator):
"""Temporary stub class. Ideally when stdin is accessed, the
capturing should be turned off, with possibly all data captured
so far sent to the screen. This should be configurable, though,
because in automated test runs it is better to crash than
hang indefinitely.
"""
encoding = None
def read(self, *args):
raise IOError("reading from stdin while output is captured")
readline = read
readlines = read
__next__ = read
def __iter__(self):
return self
def fileno(self):
raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()")
def isatty(self):
return False
def close(self):
pass
@property
def buffer(self):
if sys.version_info >= (3, 0):
return self
else:
raise AttributeError("redirected stdin has no attribute buffer")
def _colorama_workaround():
"""
Ensure colorama is imported so that it attaches to the correct stdio
handles on Windows.
colorama uses the terminal on import time. So if something does the
first import of colorama while I/O capture is active, colorama will
fail in various ways.
"""
if sys.platform.startswith("win32"):
try:
import colorama # noqa: F401
except ImportError:
pass
def _readline_workaround():
"""
Ensure readline is imported so that it attaches to the correct stdio
handles on Windows.
Pdb uses readline support where available--when not running from the Python
prompt, the readline module is not imported until running the pdb REPL. If
running pytest with the --pdb option this means the readline module is not
imported until after I/O capture has been started.
This is a problem for pyreadline, which is often used to implement readline
support on Windows, as it does not attach to the correct handles for stdout
and/or stdin if they have been redirected by the FDCapture mechanism. This
workaround ensures that readline is imported before I/O capture is setup so
that it can attach to the actual stdin/out for the console.
See https://github.com/pytest-dev/pytest/pull/1281
"""
if sys.platform.startswith("win32"):
try:
import readline # noqa: F401
except ImportError:
pass
def _py36_windowsconsoleio_workaround(stream):
"""
Python 3.6 implemented unicode console handling for Windows. This works
by reading/writing to the raw console handle using
``{Read,Write}ConsoleW``.
The problem is that we are going to ``dup2`` over the stdio file
descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the
handles used by Python to write to the console. Though there is still some
weirdness and the console handle seems to only be closed randomly and not
on the first call to ``CloseHandle``, or maybe it gets reopened with the
same handle value when we suspend capturing.
The workaround in this case will reopen stdio with a different fd which
also means a different handle by replicating the logic in
"Py_lifecycle.c:initstdio/create_stdio".
:param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given
here as parameter for unittesting purposes.
See https://github.com/pytest-dev/py/issues/103
"""
if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6):
return
# bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
if not hasattr(stream, "buffer"):
return
buffered = hasattr(stream.buffer, "raw")
raw_stdout = stream.buffer.raw if buffered else stream.buffer
if not isinstance(raw_stdout, io._WindowsConsoleIO):
return
def _reopen_stdio(f, mode):
if not buffered and mode[0] == "w":
buffering = 0
else:
buffering = -1
return io.TextIOWrapper(
open(os.dup(f.fileno()), mode, buffering),
f.encoding,
f.errors,
f.newlines,
f.line_buffering,
)
sys.stdin = _reopen_stdio(sys.stdin, "rb")
sys.stdout = _reopen_stdio(sys.stdout, "wb")
sys.stderr = _reopen_stdio(sys.stderr, "wb")
def _attempt_to_close_capture_file(f):
"""Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)"""
if six.PY2:
try:
f.close()
except IOError:
pass
else:
f.close()
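# Usage sketch for the capsys fixture defined above; the test name and printed
# text are made up.
def test_prints_greeting(capsys):
    print("hello")
    captured = capsys.readouterr()
    assert captured.out == "hello\n"
    with capsys.disabled():
        print("bypasses capturing and goes to the real stdout")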

View File

@@ -0,0 +1,457 @@
"""
python version compatibility code
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import functools
import inspect
import re
import sys
from contextlib import contextmanager
import py
import six
from six import text_type
import _pytest
from _pytest._io.saferepr import saferepr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
if _PY3:
from inspect import signature, Parameter as Parameter
else:
from funcsigs import signature, Parameter as Parameter
NoneType = type(None)
NOTSET = object()
PY35 = sys.version_info[:2] >= (3, 5)
PY36 = sys.version_info[:2] >= (3, 6)
MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError"
if _PY3:
from collections.abc import MutableMapping as MappingMixin
from collections.abc import Iterable, Mapping, Sequence, Sized
else:
# those raise DeprecationWarnings in Python >=3.7
from collections import MutableMapping as MappingMixin # noqa
from collections import Iterable, Mapping, Sequence, Sized # noqa
if sys.version_info >= (3, 4):
from importlib.util import spec_from_file_location
else:
def spec_from_file_location(*_, **__):
return None
def _format_args(func):
return str(signature(func))
isfunction = inspect.isfunction
isclass = inspect.isclass
# used to work around a python2 exception info leak
exc_clear = getattr(sys, "exc_clear", lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(""))
def is_generator(func):
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func):
"""Return True if func is a decorated coroutine function.
Note: copied and modified from Python 3.5's builtin coroutines.py to avoid importing asyncio directly,
which in turn also initializes the "logging" module as a side effect (see issue #8).
"""
return getattr(func, "_is_coroutine", False) or (
hasattr(inspect, "iscoroutinefunction") and inspect.iscoroutinefunction(func)
)
def getlocation(function, curdir):
function = get_real_func(function)
fn = py.path.local(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")]
if any(mock_modules):
sentinels = [m.DEFAULT for m in mock_modules if m is not None]
return len(
[p for p in patchings if not p.attribute_name and p.new in sentinels]
)
return len(patchings)
def getfuncargnames(function, is_method=False, cls=None):
"""Returns the names of a function's mandatory arguments.
This should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not, unless, in the
case of cls, the function is a static method.
@RonnyPfannschmidt: This function should be refactored when we
revisit fixtures. The fixture mechanism should ask the node for
the fixture names, and not try to obtain directly from the
function object well after collection has occurred.
"""
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
@contextmanager
def dummy_context_manager():
"""Context manager that does nothing, useful in situations where you might need an actual context manager or not
depending on some condition. Using this allows keeping the same code"""
yield
def get_default_arg_names(function):
# Note: this code intentionally mirrors the code at the beginning of getfuncargnames,
# to get the arguments which were excluded from its result because they had default values
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: u"\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): u"\\t", ord("\r"): u"\\r", ord("\n"): u"\\n"}
)
def _translate_non_printable(s):
return s.translate(_non_printable_ascii_translate_table)
if _PY3:
STRING_TYPES = bytes, str
UNICODE_TYPES = six.text_type
if PY35:
def _bytes_to_ascii(val):
return val.decode("ascii", "backslashreplace")
else:
def _bytes_to_ascii(val):
if val:
# source: http://goo.gl/bGsnwC
encoded_bytes, _ = codecs.escape_encode(val)
return encoded_bytes.decode("ascii")
else:
# empty bytes crashes codecs.escape_encode (#1087)
return ""
def ascii_escaped(val):
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode("unicode_escape").decode("ascii")
return _translate_non_printable(ret)
else:
STRING_TYPES = six.string_types
UNICODE_TYPES = six.text_type
def ascii_escaped(val):
"""In py2 bytes and str are the same type, so return if it's a bytes
object, return it unchanged if it is a full ascii string,
otherwise escape it into its binary form.
If it's a unicode string, change the unicode characters into
unicode escapes.
"""
if isinstance(val, bytes):
try:
ret = val.decode("ascii")
except UnicodeDecodeError:
ret = val.encode("string-escape").decode("ascii")
else:
ret = val.encode("unicode-escape").decode("ascii")
return _translate_non_printable(ret)
class _PytestWrapper(object):
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object
when we are creating fixtures, because we wrap the function object ourselves with a decorator
to issue warnings when the fixture function is called directly.
"""
def __init__(self, obj):
self.obj = obj
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=saferepr(start_obj), current=saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""
Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
returning a bound method to ``holder`` if the original object was a bound method.
"""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception:
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, "place_as"):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
instead of Exception (for more details check #2707)
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj):
"""Ignore any exception via isinstance on Python 3."""
try:
return isclass(obj)
except Exception:
return False
def _is_unittest_unexpected_success_a_failure():
"""Return if the test suite should fail if an @expectedFailure unittest test PASSES.
From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
Changed in version 3.4: Returns False if there were any
unexpectedSuccesses from tests marked with the expectedFailure() decorator.
"""
return sys.version_info >= (3, 4)
if _PY3:
def safe_str(v):
"""returns v as string"""
return str(v)
else:
def safe_str(v):
"""returns v as string, converting to ascii if necessary"""
try:
return str(v)
except UnicodeError:
if not isinstance(v, text_type):
v = text_type(v)
errors = "replace"
return v.encode("utf-8", errors)
COLLECT_FAKEMODULE_ATTRIBUTES = (
"Collector",
"Module",
"Function",
"Instance",
"Session",
"Item",
"Class",
"File",
"_fillfuncargs",
)
def _setup_collect_fakemodule():
from types import ModuleType
import pytest
pytest.collect = ModuleType("pytest.collect")
pytest.collect.__all__ = [] # used for setns
for attr in COLLECT_FAKEMODULE_ATTRIBUTES:
setattr(pytest.collect, attr, getattr(pytest, attr))
if _PY2:
# Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO.
from py.io import TextIO
class CaptureIO(TextIO):
@property
def encoding(self):
return getattr(self, "_encoding", "UTF-8")
else:
import io
class CaptureIO(io.TextIOWrapper):
def __init__(self):
super(CaptureIO, self).__init__(
io.BytesIO(), encoding="UTF-8", newline="", write_through=True
)
def getvalue(self):
return self.buffer.getvalue().decode("UTF-8")
class FuncargnamesCompatAttr(object):
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
if six.PY2:
def lru_cache(*_, **__):
def dec(fn):
return fn
return dec
else:
from functools import lru_cache # noqa: F401
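# Hedged illustration of getfuncargnames() from above: only mandatory,
# non-default arguments are reported. The sample function is invented.
def _example(request, value, flag=False):
    pass
assert getfuncargnames(_example) == ("request", "value")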

File diff suppressed because it is too large

View File

@@ -0,0 +1,409 @@
import argparse
import warnings
import py
import six
from _pytest.config.exceptions import UsageError
FILE_OR_DIR = "file_or_dir"
class Parser(object):
""" Parser for command line arguments and ini-file values.
:ivar extra_info: dict of generic param -> value to display in case
there's an error processing the command line arguments.
"""
prog = None
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
self._groups = []
self._processopt = processopt
self._usage = usage
self._inidict = {}
self._ininames = []
self.extra_info = {}
def processoption(self, option):
if self._processopt:
if option.dest:
self._processopt(option)
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
:name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
The returned group object has an ``addoption`` method with the same
signature as :py:func:`parser.addoption
<_pytest.config.Parser.addoption>` but will be shown in the
respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
return group
group = OptionGroup(name, description, parser=self)
i = 0
for i, grp in enumerate(self._groups):
if grp.name == after:
break
self._groups.insert(i + 1, group)
return group
def addoption(self, *opts, **attrs):
""" register a command line option.
:opts: option names, can be short or long options.
:attrs: same attributes which the ``add_option()`` function of the
`argparse library
<http://docs.python.org/2/library/argparse.html>`_
accepts.
After command line parsing options are available on the pytest config
object via ``config.option.NAME`` where ``NAME`` is usually set
by passing a ``dest`` attribute, for example
``addoption("--long", dest="NAME", ...)``.
"""
self._anonymous.addoption(*opts, **attrs)
def parse(self, args, namespace=None):
from _pytest._argcomplete import try_argcomplete
self.optparser = self._getparser()
try_argcomplete(self.optparser)
args = [str(x) if isinstance(x, py.path.local) else x for x in args]
return self.optparser.parse_args(args, namespace=namespace)
def _getparser(self):
from _pytest._argcomplete import filescompleter
optparser = MyOptionParser(self, self.extra_info, prog=self.prog)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
arggroup = optparser.add_argument_group(desc)
for option in group.options:
n = option.names()
a = option.attrs()
arggroup.add_argument(*n, **a)
# bash like autocompletion for dirs (appending '/')
optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter
return optparser
def parse_setoption(self, args, option, namespace=None):
parsedoption = self.parse(args, namespace=namespace)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
return getattr(parsedoption, FILE_OR_DIR)
def parse_known_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments at this
point.
"""
return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
def parse_known_and_unknown_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments, and
the remaining arguments unknown at this point.
"""
optparser = self._getparser()
args = [str(x) if isinstance(x, py.path.local) else x for x in args]
return optparser.parse_known_args(args, namespace=namespace)
def addini(self, name, help, type=None, default=None):
""" register an ini-file option.
:name: name of the ini-variable
:type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
or ``bool``.
:default: default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
"""
assert type in (None, "pathlist", "args", "linelist", "bool")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
class ArgumentError(Exception):
"""
Raised if an Argument instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class Argument(object):
"""class that mimics the necessary behaviour of optparse.Option
it's currently a least-effort implementation
that ignores choices and integer prefixes
https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
"""
_typ_map = {"int": int, "string": str, "float": float, "complex": complex}
def __init__(self, *names, **attrs):
"""store parms in private vars for use in add_argument"""
self._attrs = attrs
self._short_opts = []
self._long_opts = []
self.dest = attrs.get("dest")
if "%default" in (attrs.get("help") or ""):
warnings.warn(
'pytest now uses argparse. "%default" should be'
' changed to "%(default)s" ',
DeprecationWarning,
stacklevel=3,
)
try:
typ = attrs["type"]
except KeyError:
pass
else:
# this might raise a keyerror as well, don't want to catch that
if isinstance(typ, six.string_types):
if typ == "choice":
warnings.warn(
"`type` argument to addoption() is the string %r."
" For choices this is optional and can be omitted, "
" but when supplied should be a type (for example `str` or `int`)."
" (options: %s)" % (typ, names),
DeprecationWarning,
stacklevel=4,
)
# argparse expects a type here take it from
# the type of the first element
attrs["type"] = type(attrs["choices"][0])
else:
warnings.warn(
"`type` argument to addoption() is the string %r, "
" but when supplied should be a type (for example `str` or `int`)."
" (options: %s)" % (typ, names),
DeprecationWarning,
stacklevel=4,
)
attrs["type"] = Argument._typ_map[typ]
# used in test_parseopt -> test_parse_defaultgetter
self.type = attrs["type"]
else:
self.type = typ
try:
# attribute existence is tested in Config._processopt
self.default = attrs["default"]
except KeyError:
pass
self._set_opt_strings(names)
if not self.dest:
if self._long_opts:
self.dest = self._long_opts[0][2:].replace("-", "_")
else:
try:
self.dest = self._short_opts[0][1:]
except IndexError:
raise ArgumentError("need a long or short option", self)
def names(self):
return self._short_opts + self._long_opts
def attrs(self):
# update any attributes set by processopt
attrs = "default dest help".split()
if self.dest:
attrs.append(self.dest)
for attr in attrs:
try:
self._attrs[attr] = getattr(self, attr)
except AttributeError:
pass
if self._attrs.get("help"):
a = self._attrs["help"]
a = a.replace("%default", "%(default)s")
# a = a.replace('%prog', '%(prog)s')
self._attrs["help"] = a
return self._attrs
def _set_opt_strings(self, opts):
"""directly from optparse
might not be necessary as this is passed to argparse later on"""
for opt in opts:
if len(opt) < 2:
raise ArgumentError(
"invalid option string %r: "
"must be at least two characters long" % opt,
self,
)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise ArgumentError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self,
)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise ArgumentError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self,
)
self._long_opts.append(opt)
def __repr__(self):
args = []
if self._short_opts:
args += ["_short_opts: " + repr(self._short_opts)]
if self._long_opts:
args += ["_long_opts: " + repr(self._long_opts)]
args += ["dest: " + repr(self.dest)]
if hasattr(self, "type"):
args += ["type: " + repr(self.type)]
if hasattr(self, "default"):
args += ["default: " + repr(self.default)]
return "Argument({})".format(", ".join(args))
class OptionGroup(object):
def __init__(self, name, description="", parser=None):
self.name = name
self.description = description
self.options = []
self.parser = parser
def addoption(self, *optnames, **attrs):
""" add an option to this group.
if a shortened version of a long option is specified it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing '--two-words' only, but --twowords gets
accepted **and** the automatic destination is in args.twowords
"""
conflict = set(optnames).intersection(
name for opt in self.options for name in opt.names()
)
if conflict:
raise ValueError("option names %s already added" % conflict)
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
def _addoption_instance(self, option, shortupper=False):
if not shortupper:
for opt in option._short_opts:
if opt[0] == "-" and opt[1].islower():
raise ValueError("lowercase shortoptions reserved")
if self.parser:
self.parser.processoption(option)
self.options.append(option)
class MyOptionParser(argparse.ArgumentParser):
def __init__(self, parser, extra_info=None, prog=None):
if not extra_info:
extra_info = {}
self._parser = parser
argparse.ArgumentParser.__init__(
self,
prog=prog,
usage=parser._usage,
add_help=False,
formatter_class=DropShorterLongHelpFormatter,
)
# extra_info is a dict of (param -> value) to display if there's
# a usage error to provide more contextual information to the user
self.extra_info = extra_info
def error(self, message):
"""Transform argparse error message into UsageError."""
msg = "%s: error: %s" % (self.prog, message)
if hasattr(self._parser, "_config_source_hint"):
msg = "%s (%s)" % (msg, self._parser._config_source_hint)
raise UsageError(self.format_usage() + msg)
def parse_args(self, args=None, namespace=None):
"""allow splitting of positional arguments"""
args, argv = self.parse_known_args(args, namespace)
if argv:
for arg in argv:
if arg and arg[0] == "-":
lines = ["unrecognized arguments: %s" % (" ".join(argv))]
for k, v in sorted(self.extra_info.items()):
lines.append(" %s: %s" % (k, v))
self.error("\n".join(lines))
getattr(args, FILE_OR_DIR).extend(argv)
return args
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
"""shorten help for long options that differ only in extra hyphens
- collapse **long** options that are the same except for extra hyphens
- special action attribute map_long_option allows suppressing additional
long options
- shortcut if there are only two options and one of them is a short one
- cache result on action object as this is called at least 2 times
"""
def _format_action_invocation(self, action):
orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
if orgstr and orgstr[0] != "-": # only optional arguments
return orgstr
res = getattr(action, "_formatted_action_invocation", None)
if res:
return res
options = orgstr.split(", ")
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
# a shortcut for '-h, --help' or '--abc', '-a'
action._formatted_action_invocation = orgstr
return orgstr
return_list = []
option_map = getattr(action, "map_long_option", {})
if option_map is None:
option_map = {}
short_long = {}
for option in options:
if len(option) == 2 or option[2] == " ":
continue
if not option.startswith("--"):
raise ArgumentError(
'long optional argument without "--": [%s]' % (option), self
)
xxoption = option[2:]
if xxoption.split()[0] not in option_map:
shortened = xxoption.replace("-", "")
if shortened not in short_long or len(short_long[shortened]) < len(
xxoption
):
short_long[shortened] = xxoption
# now short_long has been filled out to the longest with dashes
# **and** we keep the right option ordering from add_argument
for option in options:
if len(option) == 2 or option[2] == " ":
return_list.append(option)
if option[2:] == short_long.get(option.replace("-", "")):
return_list.append(option.replace(" ", "=", 1))
action._formatted_action_invocation = ", ".join(return_list)
return action._formatted_action_invocation
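As described in the addoption docstring above, registering two spellings of the same long option keeps both accepted on the command line while the help output collapses them to the dashed form. A minimal conftest.py sketch (the "--log-dir"/"--logdir" option is made up for illustration):

# conftest.py
def pytest_addoption(parser):
    group = parser.getgroup("general")
    # "--log-dir" is the spelling shown in --help; "--logdir" is still accepted,
    # and the parsed value is available as config.option.logdir.
    group.addoption(
        "--logdir",
        "--log-dir",
        action="store",
        default=None,
        help="directory for extra log files (hypothetical example)",
    )

def pytest_configure(config):
    log_dir = config.getoption("logdir")
    if log_dir:
        print("extra logs go to", log_dir)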

View File

@@ -0,0 +1,9 @@
class UsageError(Exception):
""" error in pytest usage or invocation"""
class PrintHelp(Exception):
"""Raised when pytest should print it's help to skip the rest of the
argument parsing and validation."""
pass

View File

@@ -0,0 +1,148 @@
import os
import py
from .exceptions import UsageError
from _pytest.outcomes import fail
def exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
def getcfg(args, config=None):
"""
Search the list of arguments for a valid ini-file for pytest,
and return a tuple of (rootdir, inifile, cfg-dict).
note: config is optional and used only to issue warnings explicitly (#2891).
"""
from _pytest.deprecated import CFG_PYTEST_SECTION
inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [py.path.local()]
for arg in args:
arg = py.path.local(arg)
for base in arg.parts(reverse=True):
for inibasename in inibasenames:
p = base.join(inibasename)
if exists(p):
iniconfig = py.iniconfig.IniConfig(p)
if (
inibasename == "setup.cfg"
and "tool:pytest" in iniconfig.sections
):
return base, p, iniconfig["tool:pytest"]
elif "pytest" in iniconfig.sections:
if inibasename == "setup.cfg" and config is not None:
fail(
CFG_PYTEST_SECTION.format(filename=inibasename),
pytrace=False,
)
return base, p, iniconfig["pytest"]
elif inibasename == "pytest.ini":
# allowed to be empty
return base, p, {}
return None, None, None
def get_common_ancestor(paths):
common_ancestor = None
for path in paths:
if not path.exists():
continue
if common_ancestor is None:
common_ancestor = path
else:
if path.relto(common_ancestor) or path == common_ancestor:
continue
elif common_ancestor.relto(path):
common_ancestor = path
else:
shared = path.common(common_ancestor)
if shared is not None:
common_ancestor = shared
if common_ancestor is None:
common_ancestor = py.path.local()
elif common_ancestor.isfile():
common_ancestor = common_ancestor.dirpath()
return common_ancestor
def get_dirs_from_args(args):
def is_option(x):
return str(x).startswith("-")
def get_file_part_from_node_id(x):
return str(x).split("::")[0]
def get_dir_from_path(path):
if path.isdir():
return path
return py.path.local(path.dirname)
# These look like paths but may not exist
possible_paths = (
py.path.local(get_file_part_from_node_id(arg))
for arg in args
if not is_option(arg)
)
return [get_dir_from_path(path) for path in possible_paths if path.exists()]
def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None):
dirs = get_dirs_from_args(args)
if inifile:
iniconfig = py.iniconfig.IniConfig(inifile)
is_cfg_file = str(inifile).endswith(".cfg")
sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"]
for section in sections:
try:
inicfg = iniconfig[section]
if is_cfg_file and section == "pytest" and config is not None:
from _pytest.deprecated import CFG_PYTEST_SECTION
fail(
CFG_PYTEST_SECTION.format(filename=str(inifile)), pytrace=False
)
break
except KeyError:
inicfg = None
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inifile, inicfg = getcfg([ancestor], config=config)
if rootdir is None and rootdir_cmd_arg is None:
for possible_rootdir in ancestor.parts(reverse=True):
if possible_rootdir.join("setup.py").exists():
rootdir = possible_rootdir
break
else:
if dirs != [ancestor]:
rootdir, inifile, inicfg = getcfg(dirs, config=config)
if rootdir is None:
if config is not None:
cwd = config.invocation_dir
else:
cwd = py.path.local()
rootdir = get_common_ancestor([cwd, ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir = py.path.local(os.path.expandvars(rootdir_cmd_arg))
if not rootdir.isdir():
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir
)
)
return rootdir, inifile, inicfg or {}
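The getcfg()/determine_setup() logic above walks upwards from the given arguments and picks the first pytest.ini, tox.ini, or setup.cfg that carries the right section. A small sketch of that lookup, assuming the vendored module is importable as _pytest.config.findpaths and using a throw-away temporary directory:

import tempfile

import py

from _pytest.config.findpaths import getcfg

tmp = py.path.local(tempfile.mkdtemp())
tmp.join("pytest.ini").write("[pytest]\naddopts = -q\n")

# getcfg() walks up from each argument until it finds a matching ini file,
# even if the argument itself does not exist yet.
rootdir, inifile, cfg = getcfg([tmp.join("tests").join("test_x.py")])
print(rootdir)          # the temporary directory
print(inifile)          # <tmp>/pytest.ini
print(cfg["addopts"])   # -q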

View File

@@ -0,0 +1,308 @@
""" interactive debugging with PDB, the Python Debugger. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import pdb
import sys
from doctest import UnexpectedException
from _pytest import outcomes
from _pytest.config import hookimpl
def _validate_usepdb_cls(value):
try:
modname, classname = value.split(":")
except ValueError:
raise argparse.ArgumentTypeError(
"{!r} is not in the format 'modname:classname'".format(value)
)
try:
__import__(modname)
mod = sys.modules[modname]
# Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).
parts = classname.split(".")
pdb_cls = getattr(mod, parts[0])
for part in parts[1:]:
pdb_cls = getattr(pdb_cls, part)
return pdb_cls
except Exception as exc:
raise argparse.ArgumentTypeError(
"could not get pdb class for {!r}: {}".format(value, exc)
)
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"--pdb",
dest="usepdb",
action="store_true",
help="start the interactive Python debugger on errors or KeyboardInterrupt.",
)
group._addoption(
"--pdbcls",
dest="usepdb_cls",
metavar="modulename:classname",
type=_validate_usepdb_cls,
help="start a custom interactive Python debugger on errors. "
"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
)
group._addoption(
"--trace",
dest="trace",
action="store_true",
help="Immediately break when running each test.",
)
def pytest_configure(config):
pdb_cls = config.getvalue("usepdb_cls")
if not pdb_cls:
pdb_cls = pdb.Pdb
if config.getvalue("trace"):
config.pluginmanager.register(PdbTrace(), "pdbtrace")
if config.getvalue("usepdb"):
config.pluginmanager.register(PdbInvoke(), "pdbinvoke")
pytestPDB._saved.append(
(pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config, pytestPDB._pdb_cls)
)
pdb.set_trace = pytestPDB.set_trace
pytestPDB._pluginmanager = config.pluginmanager
pytestPDB._config = config
pytestPDB._pdb_cls = pdb_cls
# NOTE: not using pytest_unconfigure, since it might get called although
# pytest_configure was not (if another plugin raises UsageError).
def fin():
(
pdb.set_trace,
pytestPDB._pluginmanager,
pytestPDB._config,
pytestPDB._pdb_cls,
) = pytestPDB._saved.pop()
config._cleanup.append(fin)
class pytestPDB(object):
""" Pseudo PDB that defers to the real pdb. """
_pluginmanager = None
_config = None
_pdb_cls = pdb.Pdb
_saved = []
_recursive_debug = 0
@classmethod
def _is_capturing(cls, capman):
if capman:
return capman.is_capturing()
return False
@classmethod
def _init_pdb(cls, *args, **kwargs):
""" Initialize PDB debugging, dropping any IO capturing. """
import _pytest.config
if cls._pluginmanager is not None:
capman = cls._pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend(in_=True)
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
if cls._recursive_debug == 0:
# Handle header similar to pdb.set_trace in py37+.
header = kwargs.pop("header", None)
if header is not None:
tw.sep(">", header)
else:
capturing = cls._is_capturing(capman)
if capturing:
if capturing == "global":
tw.sep(">", "PDB set_trace (IO-capturing turned off)")
else:
tw.sep(
">",
"PDB set_trace (IO-capturing turned off for %s)"
% capturing,
)
else:
tw.sep(">", "PDB set_trace")
class _PdbWrapper(cls._pdb_cls, object):
_pytest_capman = capman
_continued = False
def do_debug(self, arg):
cls._recursive_debug += 1
ret = super(_PdbWrapper, self).do_debug(arg)
cls._recursive_debug -= 1
return ret
def do_continue(self, arg):
ret = super(_PdbWrapper, self).do_continue(arg)
if cls._recursive_debug == 0:
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
capman = self._pytest_capman
capturing = pytestPDB._is_capturing(capman)
if capturing:
if capturing == "global":
tw.sep(">", "PDB continue (IO-capturing resumed)")
else:
tw.sep(
">",
"PDB continue (IO-capturing resumed for %s)"
% capturing,
)
capman.resume()
else:
tw.sep(">", "PDB continue")
cls._pluginmanager.hook.pytest_leave_pdb(
config=cls._config, pdb=self
)
self._continued = True
return ret
do_c = do_cont = do_continue
def set_quit(self):
"""Raise Exit outcome when quit command is used in pdb.
This is a bit of a hack - it would be better if BdbQuit
could be handled, but this would require wrapping the
whole pytest run and adjusting the report etc.
"""
super(_PdbWrapper, self).set_quit()
if cls._recursive_debug == 0:
outcomes.exit("Quitting debugger")
def setup(self, f, tb):
"""Suspend on setup().
Needed after do_continue has resumed, and another
breakpoint is entered again.
"""
ret = super(_PdbWrapper, self).setup(f, tb)
if not ret and self._continued:
# pdb.setup() returns True if the command wants to exit
# from the interaction: do not suspend capturing then.
if self._pytest_capman:
self._pytest_capman.suspend_global_capture(in_=True)
return ret
_pdb = _PdbWrapper(**kwargs)
cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb)
else:
_pdb = cls._pdb_cls(**kwargs)
return _pdb
@classmethod
def set_trace(cls, *args, **kwargs):
"""Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing."""
frame = sys._getframe().f_back
_pdb = cls._init_pdb(*args, **kwargs)
_pdb.set_trace(frame)
class PdbInvoke(object):
def pytest_exception_interact(self, node, call, report):
capman = node.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stdout.write(err)
_enter_pdb(node, call.excinfo, report)
def pytest_internalerror(self, excrepr, excinfo):
tb = _postmortem_traceback(excinfo)
post_mortem(tb)
class PdbTrace(object):
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(self, pyfuncitem):
_test_pytest_function(pyfuncitem)
yield
def _test_pytest_function(pyfuncitem):
_pdb = pytestPDB._init_pdb()
testfunction = pyfuncitem.obj
pyfuncitem.obj = _pdb.runcall
if "func" in pyfuncitem._fixtureinfo.argnames: # noqa
raise ValueError("--trace can't be used with a fixture named func!")
pyfuncitem.funcargs["func"] = testfunction
new_list = list(pyfuncitem._fixtureinfo.argnames)
new_list.append("func")
pyfuncitem._fixtureinfo.argnames = tuple(new_list)
def _enter_pdb(node, excinfo, rep):
# XXX we re-use the TerminalReporter's terminalwriter
# because this seems to avoid some encoding related troubles
# for not completely clear reasons.
tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
tw.line()
showcapture = node.config.option.showcapture
for sectionname, content in (
("stdout", rep.capstdout),
("stderr", rep.capstderr),
("log", rep.caplog),
):
if showcapture in (sectionname, "all") and content:
tw.sep(">", "captured " + sectionname)
if content[-1:] == "\n":
content = content[:-1]
tw.line(content)
tw.sep(">", "traceback")
rep.toterminal(tw)
tw.sep(">", "entering PDB")
tb = _postmortem_traceback(excinfo)
rep._pdbshown = True
post_mortem(tb)
return rep
def _postmortem_traceback(excinfo):
if isinstance(excinfo.value, UnexpectedException):
# A doctest.UnexpectedException is not useful for post_mortem.
# Use the underlying exception instead:
return excinfo.value.exc_info[2]
else:
return excinfo._excinfo[2]
def _find_last_non_hidden_frame(stack):
i = max(0, len(stack) - 1)
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
i -= 1
return i
def post_mortem(t):
class Pdb(pytestPDB._pdb_cls, object):
def get_stack(self, f, t):
stack, i = super(Pdb, self).get_stack(f, t)
if f is None:
i = _find_last_non_hidden_frame(stack)
return stack, i
p = Pdb()
p.reset()
p.interaction(None, t)
if p.quitting:
outcomes.exit("Quitting debugger")
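The --pdbcls option registered above takes a "modname:classname" string and resolves it to a debugger class via the internal _validate_usepdb_cls() helper shown at the top of this file. A small sketch using only the stdlib pdb module:

from _pytest.debugging import _validate_usepdb_cls

cls = _validate_usepdb_cls("pdb:Pdb")
print(cls)  # <class 'pdb.Pdb'>

try:
    _validate_usepdb_cls("not-a-valid-spec")
except Exception as exc:
    print(exc)  # 'not-a-valid-spec' is not in the format 'modname:classname'

On the command line the same spec is typically given as, for example,
pytest --pdb --pdbcls=IPython.terminal.debugger:TerminalPdb (requires IPython).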

View File

@@ -0,0 +1,95 @@
"""
This module contains deprecation messages and bits of code used elsewhere in the codebase
that is planned to be removed in the next pytest release.
Keeping it in a central location makes it easy to track what is deprecated and should
be removed when the time comes.
All constants defined in this module should be either PytestWarning instances or UnformattedWarning
in case of warnings which need to format their messages.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from _pytest.warning_types import PytestDeprecationWarning
from _pytest.warning_types import RemovedInPytest4Warning
from _pytest.warning_types import UnformattedWarning
YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored"
FIXTURE_FUNCTION_CALL = (
'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
"but are created automatically when test functions request them as parameters.\n"
"See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n"
"https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code."
)
FIXTURE_NAMED_REQUEST = PytestDeprecationWarning(
"'request' is a reserved name for fixtures and will raise an error in future versions"
)
CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead."
GETFUNCARGVALUE = RemovedInPytest4Warning(
"getfuncargvalue is deprecated, use getfixturevalue"
)
RAISES_MESSAGE_PARAMETER = PytestDeprecationWarning(
"The 'message' parameter is deprecated.\n"
"(did you mean to use `match='some regex'` to check the exception message?)\n"
"Please comment on https://github.com/pytest-dev/pytest/issues/3974 "
"if you have concerns about removal of this parameter."
)
RESULT_LOG = PytestDeprecationWarning(
"--result-log is deprecated and scheduled for removal in pytest 5.0.\n"
"See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information."
)
MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning(
"MarkInfo objects are deprecated as they contain merged marks which are hard to deal with correctly.\n"
"Please use node.get_closest_marker(name) or node.iter_markers(name).\n"
"Docs: https://docs.pytest.org/en/latest/mark.html#updating-code"
)
RAISES_EXEC = PytestDeprecationWarning(
"raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\n\n"
"See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec"
)
WARNS_EXEC = PytestDeprecationWarning(
"warns(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly.\n\n"
"See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec"
)
PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = (
"Defining 'pytest_plugins' in a non-top-level conftest is no longer supported "
"because it affects the entire directory tree in a non-explicit way.\n"
" {}\n"
"Please move it to a top level conftest file at the rootdir:\n"
" {}\n"
"For more information, visit:\n"
" https://docs.pytest.org/en/latest/deprecations.html#pytest-plugins-in-non-top-level-conftest-files"
)
PYTEST_CONFIG_GLOBAL = PytestDeprecationWarning(
"the `pytest.config` global is deprecated. Please use `request.config` "
"or `pytest_configure` (if you're a pytest plugin) instead."
)
PYTEST_ENSURETEMP = RemovedInPytest4Warning(
"pytest/tmpdir_factory.ensuretemp is deprecated, \n"
"please use the tmp_path fixture or tmp_path_factory.mktemp"
)
PYTEST_LOGWARNING = PytestDeprecationWarning(
"pytest_logwarning is deprecated, no longer being called, and will be removed soon\n"
"please use pytest_warning_captured instead"
)
PYTEST_WARNS_UNKNOWN_KWARGS = UnformattedWarning(
PytestDeprecationWarning,
"pytest.warns() got unexpected keyword arguments: {args!r}.\n"
"This will be an error in future versions.",
)
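The FIXTURE_FUNCTION_CALL message above is emitted when a fixture function is called directly instead of being requested as a parameter. A minimal sketch of the rejected pattern and the supported one:

import pytest

@pytest.fixture
def numbers():
    return [1, 2, 3]

def test_wrong():
    data = numbers()  # rejected: "Fixture 'numbers' called directly. ..."

def test_right(numbers):  # pytest injects the fixture value instead
    assert numbers == [1, 2, 3]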

View File

@ -0,0 +1,572 @@
""" discover and run doctests in modules and test files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import platform
import sys
import traceback
from contextlib import contextmanager
import pytest
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import TerminalRepr
from _pytest.compat import safe_getattr
from _pytest.fixtures import FixtureRequest
from _pytest.outcomes import Skipped
DOCTEST_REPORT_CHOICE_NONE = "none"
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
DOCTEST_REPORT_CHOICES = (
DOCTEST_REPORT_CHOICE_NONE,
DOCTEST_REPORT_CHOICE_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF,
DOCTEST_REPORT_CHOICE_UDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
)
# Lazy definition of runner class
RUNNER_CLASS = None
def pytest_addoption(parser):
parser.addini(
"doctest_optionflags",
"option flags for doctests",
type="args",
default=["ELLIPSIS"],
)
parser.addini(
"doctest_encoding", "encoding used for doctest files", default="utf-8"
)
group = parser.getgroup("collect")
group.addoption(
"--doctest-modules",
action="store_true",
default=False,
help="run doctests in all .py modules",
dest="doctestmodules",
)
group.addoption(
"--doctest-report",
type=str.lower,
default="udiff",
help="choose another output format for diffs on doctest failure",
choices=DOCTEST_REPORT_CHOICES,
dest="doctestreport",
)
group.addoption(
"--doctest-glob",
action="append",
default=[],
metavar="pat",
help="doctests file matching pattern, default: test*.txt",
dest="doctestglob",
)
group.addoption(
"--doctest-ignore-import-errors",
action="store_true",
default=False,
help="ignore doctest ImportErrors",
dest="doctest_ignore_import_errors",
)
group.addoption(
"--doctest-continue-on-failure",
action="store_true",
default=False,
help="for a given doctest, continue to run after the first failure",
dest="doctest_continue_on_failure",
)
def pytest_collect_file(path, parent):
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules and not _is_setup_py(config, path, parent):
return DoctestModule(path, parent)
elif _is_doctest(config, path, parent):
return DoctestTextfile(path, parent)
def _is_setup_py(config, path, parent):
if path.basename != "setup.py":
return False
contents = path.read()
return "setuptools" in contents or "distutils" in contents
def _is_doctest(config, path, parent):
if path.ext in (".txt", ".rst") and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ["test*.txt"]
for glob in globs:
if path.check(fnmatch=glob):
return True
return False
class ReprFailDoctest(TerminalRepr):
def __init__(self, reprlocation_lines):
# List of (reprlocation, lines) tuples
self.reprlocation_lines = reprlocation_lines
def toterminal(self, tw):
for reprlocation, lines in self.reprlocation_lines:
for line in lines:
tw.line(line)
reprlocation.toterminal(tw)
class MultipleDoctestFailures(Exception):
def __init__(self, failures):
super(MultipleDoctestFailures, self).__init__()
self.failures = failures
def _init_runner_class():
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
"""
Runner to collect failures. Note that the out variable in this case is
a list instead of a stdout-like object
"""
def __init__(
self, checker=None, verbose=None, optionflags=0, continue_on_failure=True
):
doctest.DebugRunner.__init__(
self, checker=checker, verbose=verbose, optionflags=optionflags
)
self.continue_on_failure = continue_on_failure
def report_failure(self, out, test, example, got):
failure = doctest.DocTestFailure(test, example, got)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
def report_unexpected_exception(self, out, test, example, exc_info):
if isinstance(exc_info[1], Skipped):
raise exc_info[1]
failure = doctest.UnexpectedException(test, example, exc_info)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
return PytestDoctestRunner
def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True):
# We need this in order to do a lazy import on doctest
global RUNNER_CLASS
if RUNNER_CLASS is None:
RUNNER_CLASS = _init_runner_class()
return RUNNER_CLASS(
checker=checker,
verbose=verbose,
optionflags=optionflags,
continue_on_failure=continue_on_failure,
)
class DoctestItem(pytest.Item):
def __init__(self, name, parent, runner=None, dtest=None):
super(DoctestItem, self).__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request = None
def setup(self):
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfixturevalue)
for name, value in self.fixture_request.getfixturevalue(
"doctest_namespace"
).items():
globs[name] = value
self.dtest.globs.update(globs)
def runtest(self):
_check_all_skipped(self.dtest)
self._disable_output_capturing_for_darwin()
failures = []
self.runner.run(self.dtest, out=failures)
if failures:
raise MultipleDoctestFailures(failures)
def _disable_output_capturing_for_darwin(self):
"""
Disable output capturing. Otherwise, stdout is lost to doctest (#985)
"""
if platform.system() != "Darwin":
return
capman = self.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
def repr_failure(self, excinfo):
import doctest
failures = None
if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)):
failures = [excinfo.value]
elif excinfo.errisinstance(MultipleDoctestFailures):
failures = excinfo.value.failures
if failures is not None:
reprlocation_lines = []
for failure in failures:
example = failure.example
test = failure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = type(failure).__name__
reprlocation = ReprFileLocation(filename, lineno, message)
checker = _get_checker()
report_choice = _get_report_choice(
self.config.getoption("doctestreport")
)
if lineno is not None:
lines = failure.test.docstring.splitlines(False)
# add line numbers to the left of the error message
lines = [
"%03d %s" % (i + test.lineno + 1, x)
for (i, x) in enumerate(lines)
]
# trim docstring error lines to 10
lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
else:
lines = [
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
]
indent = ">>>"
for line in example.source.splitlines():
lines.append("??? %s %s" % (indent, line))
indent = "..."
if isinstance(failure, doctest.DocTestFailure):
lines += checker.output_difference(
example, failure.got, report_choice
).split("\n")
else:
inner_excinfo = ExceptionInfo(failure.exc_info)
lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
lines += traceback.format_exception(*failure.exc_info)
reprlocation_lines.append((reprlocation, lines))
return ReprFailDoctest(reprlocation_lines)
else:
return super(DoctestItem, self).repr_failure(excinfo)
def reportinfo(self):
return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name
def _get_flag_lookup():
import doctest
return dict(
DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
ELLIPSIS=doctest.ELLIPSIS,
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
ALLOW_UNICODE=_get_allow_unicode_flag(),
ALLOW_BYTES=_get_allow_bytes_flag(),
)
def get_optionflags(parent):
optionflags_str = parent.config.getini("doctest_optionflags")
flag_lookup_table = _get_flag_lookup()
flag_acc = 0
for flag in optionflags_str:
flag_acc |= flag_lookup_table[flag]
return flag_acc
def _get_continue_on_failure(config):
continue_on_failure = config.getvalue("doctest_continue_on_failure")
if continue_on_failure:
# We need to turn off this if we use pdb since we should stop at
# the first failure
if config.getvalue("usepdb"):
continue_on_failure = False
return continue_on_failure
class DoctestTextfile(pytest.Module):
obj = None
def collect(self):
import doctest
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
encoding = self.config.getini("doctest_encoding")
text = self.fspath.read_text(encoding)
filename = str(self.fspath)
name = self.fspath.basename
globs = {"__name__": "__main__"}
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=0,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
_fix_spoof_python2(runner, encoding)
parser = doctest.DocTestParser()
test = parser.get_doctest(text, globs, name, filename, 0)
if test.examples:
yield DoctestItem(test.name, self, runner, test)
def _check_all_skipped(test):
"""raises pytest.skip() if all examples in the given DocTest have the SKIP
option set.
"""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
if all_skipped:
pytest.skip("all tests skipped by +SKIP option")
def _is_mocked(obj):
"""
return True if the object is possibly a mock object, by checking the existence of a highly improbable attribute
"""
return (
safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
is not None
)
@contextmanager
def _patch_unwrap_mock_aware():
"""
contextmanager which replaces ``inspect.unwrap`` with a version
that's aware of mock objects and doesn't recurse on them
"""
real_unwrap = getattr(inspect, "unwrap", None)
if real_unwrap is None:
yield
else:
def _mock_aware_unwrap(obj, stop=None):
if stop is None:
return real_unwrap(obj, stop=_is_mocked)
else:
return real_unwrap(obj, stop=lambda obj: _is_mocked(obj) or stop(obj))
inspect.unwrap = _mock_aware_unwrap
try:
yield
finally:
inspect.unwrap = real_unwrap
class DoctestModule(pytest.Module):
def collect(self):
import doctest
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""
a hackish doctest finder that overrides stdlib internals to fix a stdlib bug
https://github.com/pytest-dev/pytest/issues/3456
https://bugs.python.org/issue25532
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
doctest.DocTestFinder._find(
self, tests, obj, name, module, source_lines, globs, seen
)
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(self.fspath)
else:
try:
module = self.fspath.pyimport()
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
pytest.skip("unable to import module %r" % self.fspath)
else:
raise
# uses internal doctest module parsing mechanism
finder = MockAwareDocTestFinder()
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=0,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests
yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
"""
Used by DoctestTextfile and DoctestItem to set up fixture information.
"""
def func():
pass
doctest_item.funcargs = {}
fm = doctest_item.session._fixturemanager
doctest_item._fixtureinfo = fm.getfixtureinfo(
node=doctest_item, func=func, cls=None, funcargs=False
)
fixture_request = FixtureRequest(doctest_item)
fixture_request._fillfixtures()
return fixture_request
def _get_checker():
"""
Returns a doctest.OutputChecker subclass that takes into account the
ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
to strip b'' prefixes.
Useful when the same doctest should run in Python 2 and Python 3.
An inner class is used to avoid importing "doctest" at the module
level.
"""
if hasattr(_get_checker, "LiteralsOutputChecker"):
return _get_checker.LiteralsOutputChecker()
import doctest
import re
class LiteralsOutputChecker(doctest.OutputChecker):
"""
Copied from doctest_nose_plugin.py from the nltk project:
https://github.com/nltk/nltk
Further extended to also support byte literals.
"""
_unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
if res:
return True
allow_unicode = optionflags & _get_allow_unicode_flag()
allow_bytes = optionflags & _get_allow_bytes_flag()
if not allow_unicode and not allow_bytes:
return False
else: # pragma: no cover
def remove_prefixes(regex, txt):
return re.sub(regex, r"\1\2", txt)
if allow_unicode:
want = remove_prefixes(self._unicode_literal_re, want)
got = remove_prefixes(self._unicode_literal_re, got)
if allow_bytes:
want = remove_prefixes(self._bytes_literal_re, want)
got = remove_prefixes(self._bytes_literal_re, got)
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
return res
_get_checker.LiteralsOutputChecker = LiteralsOutputChecker
return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_UNICODE")
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_BYTES")
def _get_report_choice(key):
"""
This function returns the actual `doctest` module flag value; we want to do it as late as possible to avoid
importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.
"""
import doctest
return {
DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
DOCTEST_REPORT_CHOICE_NONE: 0,
}[key]
def _fix_spoof_python2(runner, encoding):
"""
Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This
should patch only doctests for text files because they don't have a way to declare their
encoding. Doctests in docstrings from Python modules don't have the same problem given that
Python already decoded the strings.
This fixes the problem reported in issue #2434.
"""
from _pytest.compat import _PY2
if not _PY2:
return
from doctest import _SpoofOut
class UnicodeSpoof(_SpoofOut):
def getvalue(self):
result = _SpoofOut.getvalue(self)
if encoding and isinstance(result, bytes):
result = result.decode(encoding)
return result
runner._fakeout = UnicodeSpoof()
@pytest.fixture(scope="session")
def doctest_namespace():
"""
Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
"""
return dict()
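The doctest_namespace fixture defined above lets a conftest.py inject names into every collected doctest. A minimal sketch (file names are made up; run with pytest --doctest-modules):

# conftest.py
import math

import pytest

@pytest.fixture(autouse=True)
def add_math(doctest_namespace):
    # every collected doctest can now use "m" without importing math itself
    doctest_namespace["m"] = math

# mylib.py
def square(x):
    """
    >>> square(m.sqrt(4))
    4.0
    """
    return x * x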

File diff suppressed because it is too large

View File

@@ -0,0 +1,47 @@
"""
Provides a function to report all of pytest's internal modules for use with freezing tools.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def freeze_includes():
"""
Returns a list of module names used by pytest that should be
included by cx_freeze.
"""
import py
import _pytest
result = list(_iter_all_modules(py))
result += list(_iter_all_modules(_pytest))
return result
def _iter_all_modules(package, prefix=""):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
Example:
_iter_all_modules(_pytest) ->
['_pytest.assertion.newinterpret',
'_pytest.capture',
'_pytest.core',
...
]
"""
import os
import pkgutil
if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + "."
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."):
yield prefix + m
else:
yield prefix + name
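freeze_includes() above is meant to be passed to a freezing tool so that pytest's dynamically imported internals end up in the frozen binary. A setup.py sketch, assuming cx_Freeze is installed and "app_main.py" is a made-up entry point:

# setup.py
from cx_Freeze import Executable, setup

from _pytest.freeze_support import freeze_includes  # also exposed as pytest.freeze_includes

setup(
    name="app_main",
    executables=[Executable("app_main.py")],
    options={
        # include all of pytest's (and py's) submodules in the frozen executable
        "build_exe": {"includes": freeze_includes()},
    },
)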

View File

@@ -0,0 +1,221 @@
""" version info, help messages, tracing configuration. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from argparse import Action
import py
import pytest
from _pytest.config import PrintHelp
class HelpAction(Action):
"""This is an argparse Action that will raise an exception in
order to skip the rest of the argument parsing when --help is passed.
This prevents argparse from quitting due to missing required arguments
when any are defined, for example by ``pytest_addoption``.
This is similar to the way that the builtin argparse --help option is
implemented by raising SystemExit.
"""
def __init__(self, option_strings, dest=None, default=False, help=None):
super(HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
nargs=0,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
# We should only skip the rest of the parsing after preparse is done
if getattr(parser._parser, "after_preparse", False):
raise PrintHelp
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--version",
action="store_true",
help="display pytest lib version and import information.",
)
group._addoption(
"-h",
"--help",
action=HelpAction,
dest="help",
help="show help message and configuration info",
)
group._addoption(
"-p",
action="append",
dest="plugins",
default=[],
metavar="name",
help="early-load given plugin module name or entry point (multi-allowed). "
"To avoid loading of plugins, use the `no:` prefix, e.g. "
"`no:doctest`.",
)
group.addoption(
"--traceconfig",
"--trace-config",
action="store_true",
default=False,
help="trace considerations of conftest.py files.",
),
group.addoption(
"--debug",
action="store_true",
dest="debug",
default=False,
help="store internal tracing debug information in 'pytestdebug.log'.",
)
group._addoption(
"-o",
"--override-ini",
dest="override_ini",
action="append",
help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.',
)
@pytest.hookimpl(hookwrapper=True)
def pytest_cmdline_parse():
outcome = yield
config = outcome.get_result()
if config.option.debug:
path = os.path.abspath("pytestdebug.log")
debugfile = open(path, "w")
debugfile.write(
"versions pytest-%s, py-%s, "
"python-%s\ncwd=%s\nargs=%s\n\n"
% (
pytest.__version__,
py.__version__,
".".join(map(str, sys.version_info)),
os.getcwd(),
config._origargs,
)
)
config.trace.root.setwriter(debugfile.write)
undo_tracing = config.pluginmanager.enable_tracing()
sys.stderr.write("writing pytestdebug information to %s\n" % path)
def unset_tracing():
debugfile.close()
sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name)
config.trace.root.setwriter(None)
undo_tracing()
config.add_cleanup(unset_tracing)
def showversion(config):
p = py.path.local(pytest.__file__)
sys.stderr.write(
"This is pytest version %s, imported from %s\n" % (pytest.__version__, p)
)
plugininfo = getpluginversioninfo(config)
if plugininfo:
for line in plugininfo:
sys.stderr.write(line + "\n")
def pytest_cmdline_main(config):
if config.option.version:
showversion(config)
return 0
elif config.option.help:
config._do_configure()
showhelp(config)
config._ensure_unconfigure()
return 0
def showhelp(config):
reporter = config.pluginmanager.get_plugin("terminalreporter")
tw = reporter._tw
tw.write(config._parser.optparser.format_help())
tw.line()
tw.line()
tw.line(
"[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:"
)
tw.line()
for name in config._parser._ininames:
help, type, default = config._parser._inidict[name]
if type is None:
type = "string"
spec = "%s (%s)" % (name, type)
line = " %-24s %s" % (spec, help)
tw.line(line[: tw.fullwidth])
tw.line()
tw.line("environment variables:")
vars = [
("PYTEST_ADDOPTS", "extra command line options"),
("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"),
("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"),
]
for name, help in vars:
tw.line(" %-24s %s" % (name, help))
tw.line()
tw.line()
tw.line("to see available markers type: pytest --markers")
tw.line("to see available fixtures type: pytest --fixtures")
tw.line(
"(shown according to specified file_or_dir or current dir "
"if not specified; fixtures with leading '_' are only shown "
"with the '-v' option"
)
for warningreport in reporter.stats.get("warnings", []):
tw.line("warning : " + warningreport.message, red=True)
return
conftest_options = [("pytest_plugins", "list of plugin names to load")]
def getpluginversioninfo(config):
lines = []
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append("setuptools registered plugins:")
for plugin, dist in plugininfo:
loc = getattr(plugin, "__file__", repr(plugin))
content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
lines.append(" " + content)
return lines
def pytest_report_header(config):
lines = []
if config.option.debug or config.option.traceconfig:
lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__))
verinfo = getpluginversioninfo(config)
if verinfo:
lines.extend(verinfo)
if config.option.traceconfig:
lines.append("active plugins:")
items = config.pluginmanager.list_name_plugin()
for name, plugin in items:
if hasattr(plugin, "__file__"):
r = plugin.__file__
else:
r = repr(plugin)
lines.append(" %-20s: %s" % (name, r))
return lines
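The -p, -o and --traceconfig options registered above can also be passed programmatically through pytest.main(). A small sketch (the tests/ path is made up):

import pytest

# equivalent to: pytest -p no:cacheprovider -o xfail_strict=True --traceconfig tests/
exit_code = pytest.main(
    ["-p", "no:cacheprovider", "-o", "xfail_strict=True", "--traceconfig", "tests/"]
)
print("pytest exited with", exit_code)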

View File

@@ -0,0 +1,635 @@
""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
from pluggy import HookspecMarker
from _pytest.deprecated import PYTEST_LOGWARNING
hookspec = HookspecMarker("pytest")
# -------------------------------------------------------------------------
# Initialization hooks called for every plugin
# -------------------------------------------------------------------------
@hookspec(historic=True)
def pytest_addhooks(pluginmanager):
"""called at plugin registration time to allow adding new hooks via a call to
``pluginmanager.add_hookspecs(module_or_class, prefix)``.
:param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager
.. note::
This hook is incompatible with ``hookwrapper=True``.
"""
@hookspec(historic=True)
def pytest_plugin_registered(plugin, manager):
""" a new pytest plugin got registered.
:param plugin: the plugin module or instance
:param _pytest.config.PytestPluginManager manager: pytest plugin manager
.. note::
This hook is incompatible with ``hookwrapper=True``.
"""
@hookspec(historic=True)
def pytest_addoption(parser):
"""register argparse-style options and ini-style config values,
called once at the beginning of a test run.
.. note::
This function should be implemented only in plugins or ``conftest.py``
files situated at the tests root directory due to how pytest
:ref:`discovers plugins during startup <pluginorder>`.
:arg _pytest.config.Parser parser: To add command line options, call
:py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
To add ini-file values call :py:func:`parser.addini(...)
<_pytest.config.Parser.addini>`.
Options can later be accessed through the
:py:class:`config <_pytest.config.Config>` object, respectively:
- :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
retrieve the value of a command line option.
- :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
a value read from an ini-style file.
The config object is passed around on many internal objects via the ``.config``
attribute or can be retrieved as the ``pytestconfig`` fixture.
.. note::
This hook is incompatible with ``hookwrapper=True``.
"""
@hookspec(historic=True)
def pytest_configure(config):
"""
Allows plugins and conftest files to perform initial configuration.
This hook is called for every plugin and initial conftest file
after command line options have been parsed.
After that, the hook is called for other conftest files as they are
imported.
.. note::
This hook is incompatible with ``hookwrapper=True``.
:arg _pytest.config.Config config: pytest config object
"""
# -------------------------------------------------------------------------
# Bootstrapping hooks called for plugins registered early enough:
# internal and 3rd party plugins.
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_cmdline_parse(pluginmanager, args):
"""return initialized config object, parsing the specified args.
Stops at first non-None result, see :ref:`firstresult`
.. note::
This hook will only be called for plugin classes passed to the ``plugins`` arg when using `pytest.main`_ to
perform an in-process test run.
:param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager
:param list[str] args: list of arguments passed on the command line
"""
def pytest_cmdline_preparse(config, args):
"""(**Deprecated**) modify command line arguments before option parsing.
This hook is considered deprecated and will be removed in a future pytest version. Consider
using :func:`pytest_load_initial_conftests` instead.
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
:param _pytest.config.Config config: pytest config object
:param list[str] args: list of arguments passed on the command line
"""
@hookspec(firstresult=True)
def pytest_cmdline_main(config):
""" called for performing the main command line action. The default
implementation will invoke the configure hooks and runtest_mainloop.
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
Stops at first non-None result, see :ref:`firstresult`
:param _pytest.config.Config config: pytest config object
"""
def pytest_load_initial_conftests(early_config, parser, args):
""" implements the loading of initial conftest files ahead
of command line option parsing.
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
:param _pytest.config.Config early_config: pytest config object
:param list[str] args: list of arguments passed on the command line
:param _pytest.config.Parser parser: to add command line options
"""
# -------------------------------------------------------------------------
# collection hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_collection(session):
"""Perform the collection protocol for the given session.
Stops at first non-None result, see :ref:`firstresult`.
:param _pytest.main.Session session: the pytest session object
"""
def pytest_collection_modifyitems(session, config, items):
""" called after collection has been performed, may filter or re-order
the items in-place.
:param _pytest.main.Session session: the pytest session object
:param _pytest.config.Config config: pytest config object
:param List[_pytest.nodes.Item] items: list of item objects
"""
def pytest_collection_finish(session):
""" called after collection has been performed and modified.
:param _pytest.main.Session session: the pytest session object
"""
@hookspec(firstresult=True)
def pytest_ignore_collect(path, config):
""" return True to prevent considering this path for collection.
This hook is consulted for all files and directories prior to calling
more specific hooks.
Stops at first non-None result, see :ref:`firstresult`
:param str path: the path to analyze
:param _pytest.config.Config config: pytest config object
"""
@hookspec(firstresult=True)
def pytest_collect_directory(path, parent):
""" called before traversing a directory for collection files.
Stops at first non-None result, see :ref:`firstresult`
:param str path: the path to analyze
"""
def pytest_collect_file(path, parent):
""" return collection Node or None for the given path. Any new node
needs to have the specified ``parent`` as a parent.
:param str path: the path to collect
"""
# logging hooks for collection
def pytest_collectstart(collector):
""" collector starts collecting. """
def pytest_itemcollected(item):
""" we just collected a test item. """
def pytest_collectreport(report):
""" collector finished collecting. """
def pytest_deselected(items):
""" called for test items deselected by keyword. """
@hookspec(firstresult=True)
def pytest_make_collect_report(collector):
""" perform ``collector.collect()`` and return a CollectReport.
Stops at first non-None result, see :ref:`firstresult` """
# -------------------------------------------------------------------------
# Python test function related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_pycollect_makemodule(path, parent):
""" return a Module collector or None for the given path.
This hook will be called for each matching test module path.
The pytest_collect_file hook needs to be used if you want to
create test modules for files that do not match as a test module.
Stops at first non-None result, see :ref:`firstresult` """
@hookspec(firstresult=True)
def pytest_pycollect_makeitem(collector, name, obj):
""" return custom item/collector for a python object in a module, or None.
Stops at first non-None result, see :ref:`firstresult` """
@hookspec(firstresult=True)
def pytest_pyfunc_call(pyfuncitem):
""" call underlying test function.
Stops at first non-None result, see :ref:`firstresult` """
def pytest_generate_tests(metafunc):
""" generate (multiple) parametrized calls to a test function."""
@hookspec(firstresult=True)
def pytest_make_parametrize_id(config, val, argname):
"""Return a user-friendly string representation of the given ``val`` that will be used
by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
The parameter name is available as ``argname``, if required.
Stops at first non-None result, see :ref:`firstresult`
:param _pytest.config.Config config: pytest config object
:param val: the parametrized value
:param str argname: the automatic parameter name produced by pytest
"""
# -------------------------------------------------------------------------
# generic runtest related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_runtestloop(session):
""" called for performing the main runtest loop
(after collection finished).
Stops at first non-None result, see :ref:`firstresult`
:param _pytest.main.Session session: the pytest session object
"""
def pytest_itemstart(item, node):
"""(**Deprecated**) use pytest_runtest_logstart. """
@hookspec(firstresult=True)
def pytest_runtest_protocol(item, nextitem):
""" implements the runtest_setup/call/teardown protocol for
the given test item, including capturing exceptions and calling
reporting hooks.
:arg item: test item for which the runtest protocol is performed.
:arg nextitem: the scheduled-to-be-next test item (or None if this
is the end my friend). This argument is passed on to
:py:func:`pytest_runtest_teardown`.
:return boolean: True if no further hook implementations should be invoked.
Stops at first non-None result, see :ref:`firstresult` """
def pytest_runtest_logstart(nodeid, location):
""" signal the start of running a single test item.
This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and
:func:`pytest_runtest_teardown` hooks.
:param str nodeid: full id of the item
:param location: a triple of ``(filename, linenum, testname)``
"""
def pytest_runtest_logfinish(nodeid, location):
""" signal the complete finish of running a single test item.
This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and
:func:`pytest_runtest_teardown` hooks.
:param str nodeid: full id of the item
:param location: a triple of ``(filename, linenum, testname)``
"""
def pytest_runtest_setup(item):
""" called before ``pytest_runtest_call(item)``. """
def pytest_runtest_call(item):
""" called to execute the test ``item``. """
def pytest_runtest_teardown(item, nextitem):
""" called after ``pytest_runtest_call``.
:arg nextitem: the scheduled-to-be-next test item (None if no further
test item is scheduled). This argument can be used to
perform exact teardowns, i.e. calling just enough finalizers
so that nextitem only needs to call setup-functions.
"""
@hookspec(firstresult=True)
def pytest_runtest_makereport(item, call):
""" return a :py:class:`_pytest.runner.TestReport` object
for the given :py:class:`pytest.Item <_pytest.main.Item>` and
:py:class:`_pytest.runner.CallInfo`.
Stops at first non-None result, see :ref:`firstresult` """
def pytest_runtest_logreport(report):
""" process a test setup/call/teardown report relating to
the respective phase of executing a test. """
@hookspec(firstresult=True)
def pytest_report_to_serializable(config, report):
"""
.. warning::
This hook is experimental and subject to change between pytest releases, even
bug fixes.
The intent is for this to be used by plugins maintained by the core-devs, such
as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal
'resultlog' plugin.
In the future it might become part of the public hook API.
Serializes the given report object into a data structure suitable for sending
over the wire, e.g. converted to JSON.
"""
@hookspec(firstresult=True)
def pytest_report_from_serializable(config, data):
"""
.. warning::
This hook is experimental and subject to change between pytest releases, even
bug fixes.
The intent is for this to be used by plugins maintained by the core-devs, such
as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal
'resultlog' plugin.
In the future it might become part of the public hook API.
Restores a report object previously serialized with pytest_report_to_serializable().
"""
# -------------------------------------------------------------------------
# Fixture related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_fixture_setup(fixturedef, request):
""" performs fixture setup execution.
:return: The return value of the call to the fixture function
Stops at first non-None result, see :ref:`firstresult`
.. note::
If the fixture function returns None, other implementations of
this hook function will continue to be called, according to the
behavior of the :ref:`firstresult` option.
"""
def pytest_fixture_post_finalizer(fixturedef, request):
""" called after fixture teardown, but before the cache is cleared so
the fixture result cache ``fixturedef.cached_result`` can
still be accessed."""
# -------------------------------------------------------------------------
# test session related hooks
# -------------------------------------------------------------------------
def pytest_sessionstart(session):
""" called after the ``Session`` object has been created and before performing collection
and entering the run test loop.
:param _pytest.main.Session session: the pytest session object
"""
def pytest_sessionfinish(session, exitstatus):
""" called after whole test run finished, right before returning the exit status to the system.
:param _pytest.main.Session session: the pytest session object
:param int exitstatus: the status which pytest will return to the system
"""
def pytest_unconfigure(config):
""" called before test process is exited.
:param _pytest.config.Config config: pytest config object
"""
# -------------------------------------------------------------------------
# hooks for customizing the assert methods
# -------------------------------------------------------------------------
def pytest_assertrepr_compare(config, op, left, right):
"""return explanation for comparisons in failing assert expressions.
Return None for no custom explanation, otherwise return a list
of strings. The strings will be joined by newlines but any newlines
*in* a string will be escaped. Note that all but the first line will
be indented slightly, the intention is for the first line to be a summary.
:param _pytest.config.Config config: pytest config object
"""
# -------------------------------------------------------------------------
# hooks for influencing reporting (invoked from _pytest_terminal)
# -------------------------------------------------------------------------
def pytest_report_header(config, startdir):
""" return a string or list of strings to be displayed as header info for terminal reporting.
:param _pytest.config.Config config: pytest config object
:param startdir: py.path object with the starting dir
.. note::
This function should be implemented only in plugins or ``conftest.py``
files situated at the tests root directory due to how pytest
:ref:`discovers plugins during startup <pluginorder>`.
"""
def pytest_report_collectionfinish(config, startdir, items):
"""
.. versionadded:: 3.2
return a string or list of strings to be displayed after collection has finished successfully.
These strings will be displayed after the standard "collected X items" message.
:param _pytest.config.Config config: pytest config object
:param startdir: py.path object with the starting dir
:param items: list of pytest items that are going to be executed; this list should not be modified.
"""
@hookspec(firstresult=True)
def pytest_report_teststatus(report, config):
""" return result-category, shortletter and verbose word for reporting.
:param _pytest.config.Config config: pytest config object
Stops at first non-None result, see :ref:`firstresult` """
def pytest_terminal_summary(terminalreporter, exitstatus, config):
"""Add a section to terminal summary reporting.
:param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object
:param int exitstatus: the exit status that will be reported back to the OS
:param _pytest.config.Config config: pytest config object
.. versionadded:: 4.2
The ``config`` parameter.
"""
@hookspec(historic=True, warn_on_impl=PYTEST_LOGWARNING)
def pytest_logwarning(message, code, nodeid, fslocation):
"""
.. deprecated:: 3.8
This hook will stop working in a future release.
pytest no longer triggers this hook, but the
terminal writer still implements it to display warnings issued by
:meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be
an error in future releases.
process a warning specified by a message, a code string,
a nodeid and fslocation (both of which may be None
if the warning is not tied to a particular node/location).
.. note::
This hook is incompatible with ``hookwrapper=True``.
"""
@hookspec(historic=True)
def pytest_warning_captured(warning_message, when, item):
"""
Process a warning captured by the internal pytest warnings plugin.
:param warnings.WarningMessage warning_message:
The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains
the same attributes as the parameters of :py:func:`warnings.showwarning`.
:param str when:
Indicates when the warning was captured. Possible values:
* ``"config"``: during pytest configuration/initialization stage.
* ``"collect"``: during test collection.
* ``"runtest"``: during test execution.
:param pytest.Item|None item:
**DEPRECATED**: This parameter is incompatible with ``pytest-xdist``, and will always receive ``None``
in a future release.
The item being executed if ``when`` is ``"runtest"``, otherwise ``None``.
"""
# -------------------------------------------------------------------------
# doctest hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_doctest_prepare_content(content):
""" return processed content for a given doctest
Stops at first non-None result, see :ref:`firstresult` """
# -------------------------------------------------------------------------
# error handling and internal debugging hooks
# -------------------------------------------------------------------------
def pytest_internalerror(excrepr, excinfo):
""" called for internal errors. """
def pytest_keyboard_interrupt(excinfo):
""" called for keyboard interrupt. """
def pytest_exception_interact(node, call, report):
"""called when an exception was raised which can potentially be
interactively handled.
This hook is only called if an exception was raised
that is not an internal exception like ``skip.Exception``.
"""
def pytest_enter_pdb(config, pdb):
""" called upon pdb.set_trace(), can be used by plugins to take special
action just before the python debugger enters in interactive mode.
:param _pytest.config.Config config: pytest config object
:param pdb.Pdb pdb: Pdb instance
"""
def pytest_leave_pdb(config, pdb):
""" called when leaving pdb (e.g. with continue after pdb.set_trace()).
Can be used by plugins to take special action just after the python
debugger leaves interactive mode.
:param _pytest.config.Config config: pytest config object
:param pdb.Pdb pdb: Pdb instance
"""

View File

@@ -0,0 +1,627 @@
"""
report test results in JUnit-XML format,
for use with Jenkins and build integration servers.
Based on initial code from Ross Lawley.
Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import re
import sys
import time
import py
import six
import pytest
from _pytest import nodes
from _pytest.config import filename_arg
# Python 2.X and 3.X compatibility
if sys.version_info[0] < 3:
from codecs import open
class Junit(py.xml.Namespace):
pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0D)
_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF))
_legal_xml_re = [
u"%s-%s" % (six.unichr(low), six.unichr(high))
for (low, high) in _legal_ranges
if low < sys.maxunicode
]
_legal_xml_re = [six.unichr(x) for x in _legal_chars] + _legal_xml_re
illegal_xml_re = re.compile(u"[^%s]" % u"".join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re
_py_ext_re = re.compile(r"\.py$")
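# For example, bin_xml_escape("\x1b[31mfail") renders the ESC byte as "#x1B",
# yielding "#x1B[31mfail", so stray terminal control characters cannot produce
# invalid XML output.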
def bin_xml_escape(arg):
def repl(matchobj):
i = ord(matchobj.group())
if i <= 0xFF:
return u"#x%02X" % i
else:
return u"#x%04X" % i
return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
def merge_family(left, right):
result = {}
for kl, vl in left.items():
for kr, vr in right.items():
if not isinstance(vl, list):
raise TypeError(type(vl))
result[kl] = vl + vr
left.update(result)
families = {}
families["_base"] = {"testcase": ["classname", "name"]}
families["_base_legacy"] = {"testcase": ["file", "line", "url"]}
# xUnit 1.x inherits legacy attributes
families["xunit1"] = families["_base"].copy()
merge_family(families["xunit1"], families["_base_legacy"])
# xUnit 2.x uses strict base attributes
families["xunit2"] = families["_base"]
class _NodeReporter(object):
def __init__(self, nodeid, xml):
self.id = nodeid
self.xml = xml
self.add_stats = self.xml.add_stats
self.family = self.xml.family
self.duration = 0
self.properties = []
self.nodes = []
self.testcase = None
self.attrs = {}
def append(self, node):
self.xml.add_stats(type(node).__name__)
self.nodes.append(node)
def add_property(self, name, value):
self.properties.append((str(name), bin_xml_escape(value)))
def add_attribute(self, name, value):
self.attrs[str(name)] = bin_xml_escape(value)
def make_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.properties:
return Junit.properties(
[
Junit.property(name=name, value=value)
for name, value in self.properties
]
)
return ""
def record_testreport(self, testreport):
assert not self.testcase
names = mangle_test_address(testreport.nodeid)
existing_attrs = self.attrs
classnames = names[:-1]
if self.xml.prefix:
classnames.insert(0, self.xml.prefix)
attrs = {
"classname": ".".join(classnames),
"name": bin_xml_escape(names[-1]),
"file": testreport.location[0],
}
if testreport.location[1] is not None:
attrs["line"] = testreport.location[1]
if hasattr(testreport, "url"):
attrs["url"] = testreport.url
self.attrs = attrs
self.attrs.update(existing_attrs) # restore any user-defined attributes
# Preserve legacy testcase behavior
if self.family == "xunit1":
return
# Filter out attributes not permitted by this test family.
# Including custom attributes because they are not valid here.
temp_attrs = {}
for key in self.attrs.keys():
if key in families[self.family]["testcase"]:
temp_attrs[key] = self.attrs[key]
self.attrs = temp_attrs
def to_xml(self):
testcase = Junit.testcase(time="%.3f" % self.duration, **self.attrs)
testcase.append(self.make_properties_node())
for node in self.nodes:
testcase.append(node)
return testcase
def _add_simple(self, kind, message, data=None):
data = bin_xml_escape(data)
node = kind(data, message=message)
self.append(node)
def write_captured_output(self, report):
content_out = report.capstdout
content_log = report.caplog
content_err = report.capstderr
if content_log or content_out:
if content_log and self.xml.logging == "system-out":
if content_out:
# syncing stdout and the log-output is not done yet. It's
# probably not worth the effort. Therefore, first the captured
# stdout is shown and then the captured logs.
content = "\n".join(
[
" Captured Stdout ".center(80, "-"),
content_out,
"",
" Captured Log ".center(80, "-"),
content_log,
]
)
else:
content = content_log
else:
content = content_out
if content:
tag = getattr(Junit, "system-out")
self.append(tag(bin_xml_escape(content)))
if content_log or content_err:
if content_log and self.xml.logging == "system-err":
if content_err:
content = "\n".join(
[
" Captured Stderr ".center(80, "-"),
content_err,
"",
" Captured Log ".center(80, "-"),
content_log,
]
)
else:
content = content_log
else:
content = content_err
if content:
tag = getattr(Junit, "system-err")
self.append(tag(bin_xml_escape(content)))
def append_pass(self, report):
self.add_stats("passed")
def append_failure(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly")
else:
if hasattr(report.longrepr, "reprcrash"):
message = report.longrepr.reprcrash.message
elif isinstance(report.longrepr, six.string_types):
message = report.longrepr
else:
message = str(report.longrepr)
message = bin_xml_escape(message)
fail = Junit.failure(message=message)
fail.append(bin_xml_escape(report.longrepr))
self.append(fail)
def append_collect_error(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
self.append(
Junit.error(bin_xml_escape(report.longrepr), message="collection failure")
)
def append_collect_skipped(self, report):
self._add_simple(Junit.skipped, "collection skipped", report.longrepr)
def append_error(self, report):
if report.when == "teardown":
msg = "test teardown failure"
else:
msg = "test setup failure"
self._add_simple(Junit.error, msg, report.longrepr)
def append_skipped(self, report):
if hasattr(report, "wasxfail"):
self._add_simple(Junit.skipped, "expected test failure", report.wasxfail)
else:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = skipreason[9:]
details = "%s:%s: %s" % (filename, lineno, skipreason)
self.append(
Junit.skipped(
bin_xml_escape(details),
type="pytest.skip",
message=bin_xml_escape(skipreason),
)
)
self.write_captured_output(report)
def finalize(self):
data = self.to_xml().unicode(indent=0)
self.__dict__.clear()
self.to_xml = lambda: py.xml.raw(data)
@pytest.fixture
def record_property(request):
"""Add an extra properties the calling test.
User properties become part of the test report and are available to the
configured reporters, like JUnit XML.
The fixture is callable with ``(name, value)``, with value being automatically
xml-encoded.
Example::
def test_function(record_property):
record_property("example_key", 1)
"""
def append_property(name, value):
request.node.user_properties.append((name, value))
return append_property
@pytest.fixture
def record_xml_attribute(request):
"""Add extra xml attributes to the tag for the calling test.
The fixture is callable with ``(name, value)``, with value being
automatically xml-encoded
"""
from _pytest.warning_types import PytestWarning
request.node.warn(PytestWarning("record_xml_attribute is an experimental feature"))
# Declare noop
def add_attr_noop(name, value):
pass
attr_func = add_attr_noop
xml = getattr(request.config, "_xml", None)
if xml is not None and xml.family != "xunit1":
request.node.warn(
PytestWarning(
"record_xml_attribute is incompatible with junit_family: "
"%s (use: legacy|xunit1)" % xml.family
)
)
elif xml is not None:
node_reporter = xml.node_reporter(request.node.nodeid)
attr_func = node_reporter.add_attribute
return attr_func
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group.addoption(
"--junitxml",
"--junit-xml",
action="store",
dest="xmlpath",
metavar="path",
type=functools.partial(filename_arg, optname="--junitxml"),
default=None,
help="create junit-xml style report file at given path.",
)
group.addoption(
"--junitprefix",
"--junit-prefix",
action="store",
metavar="str",
default=None,
help="prepend prefix to classnames in junit-xml output",
)
parser.addini(
"junit_suite_name", "Test suite name for JUnit report", default="pytest"
)
parser.addini(
"junit_logging",
"Write captured log messages to JUnit report: "
"one of no|system-out|system-err",
default="no",
) # choices=['no', 'stdout', 'stderr'])
parser.addini(
"junit_duration_report",
"Duration time to report: one of total|call",
default="total",
) # choices=['total', 'call'])
parser.addini(
"junit_family",
"Emit XML for schema: one of legacy|xunit1|xunit2",
default="xunit1",
)
def pytest_configure(config):
xmlpath = config.option.xmlpath
# prevent opening xmllog on slave nodes (xdist)
if xmlpath and not hasattr(config, "slaveinput"):
config._xml = LogXML(
xmlpath,
config.option.junitprefix,
config.getini("junit_suite_name"),
config.getini("junit_logging"),
config.getini("junit_duration_report"),
config.getini("junit_family"),
)
config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
xml = getattr(config, "_xml", None)
if xml:
del config._xml
config.pluginmanager.unregister(xml)
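# For example, mangle_test_address("tests/test_mod.py::TestClass::test_it[case1]")
# returns ["tests.test_mod", "TestClass", "test_it[case1]"]; record_testreport()
# joins all but the last entry into the JUnit "classname" attribute and uses the
# last entry as "name".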
def mangle_test_address(address):
path, possible_open_bracket, params = address.partition("[")
names = path.split("::")
try:
names.remove("()")
except ValueError:
pass
# convert file path to dotted path
names[0] = names[0].replace(nodes.SEP, ".")
names[0] = _py_ext_re.sub("", names[0])
# put any params back
names[-1] += possible_open_bracket + params
return names
class LogXML(object):
def __init__(
self,
logfile,
prefix,
suite_name="pytest",
logging="no",
report_duration="total",
family="xunit1",
):
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.suite_name = suite_name
self.logging = logging
self.report_duration = report_duration
self.family = family
self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0)
self.node_reporters = {} # nodeid -> _NodeReporter
self.node_reporters_ordered = []
self.global_properties = []
# List of reports that failed on call but teardown is pending.
self.open_reports = []
self.cnt_double_fail_tests = 0
# Replaces convenience family with real family
if self.family == "legacy":
self.family = "xunit1"
def finalize(self, report):
nodeid = getattr(report, "nodeid", report)
# local hack to handle xdist report order
slavenode = getattr(report, "node", None)
reporter = self.node_reporters.pop((nodeid, slavenode))
if reporter is not None:
reporter.finalize()
def node_reporter(self, report):
nodeid = getattr(report, "nodeid", report)
# local hack to handle xdist report order
slavenode = getattr(report, "node", None)
key = nodeid, slavenode
if key in self.node_reporters:
# TODO: breaks for --dist=each
return self.node_reporters[key]
reporter = _NodeReporter(nodeid, self)
self.node_reporters[key] = reporter
self.node_reporters_ordered.append(reporter)
return reporter
def add_stats(self, key):
if key in self.stats:
self.stats[key] += 1
def _opentestcase(self, report):
reporter = self.node_reporter(report)
reporter.record_testreport(report)
return reporter
def pytest_runtest_logreport(self, report):
"""handle a setup/call/teardown report, generating the appropriate
xml tags as necessary.
note: due to plugins like xdist, this hook may be called in interlaced
order with reports from other nodes. for example:
usual call order:
-> setup node1
-> call node1
-> teardown node1
-> setup node2
-> call node2
-> teardown node2
possible call order in xdist:
-> setup node1
-> call node1
-> setup node2
-> call node2
-> teardown node2
-> teardown node1
"""
close_report = None
if report.passed:
if report.when == "call": # ignore setup/teardown
reporter = self._opentestcase(report)
reporter.append_pass(report)
elif report.failed:
if report.when == "teardown":
# The following vars are needed when xdist plugin is used
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
(
rep
for rep in self.open_reports
if (
rep.nodeid == report.nodeid
and getattr(rep, "item_index", None) == report_ii
and getattr(rep, "worker_id", None) == report_wid
)
),
None,
)
if close_report:
# We need to open a new testcase in case we have a failure in
# call and an error in teardown, in order to follow the junit
# schema
self.finalize(close_report)
self.cnt_double_fail_tests += 1
reporter = self._opentestcase(report)
if report.when == "call":
reporter.append_failure(report)
self.open_reports.append(report)
else:
reporter.append_error(report)
elif report.skipped:
reporter = self._opentestcase(report)
reporter.append_skipped(report)
self.update_testcase_duration(report)
if report.when == "teardown":
reporter = self._opentestcase(report)
reporter.write_captured_output(report)
for propname, propvalue in report.user_properties:
reporter.add_property(propname, propvalue)
self.finalize(report)
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
(
rep
for rep in self.open_reports
if (
rep.nodeid == report.nodeid
and getattr(rep, "item_index", None) == report_ii
and getattr(rep, "worker_id", None) == report_wid
)
),
None,
)
if close_report:
self.open_reports.remove(close_report)
def update_testcase_duration(self, report):
"""accumulates total duration for nodeid from given report and updates
the Junit.testcase with the new total if already created.
"""
if self.report_duration == "total" or report.when == self.report_duration:
reporter = self.node_reporter(report)
reporter.duration += getattr(report, "duration", 0.0)
def pytest_collectreport(self, report):
if not report.passed:
reporter = self._opentestcase(report)
if report.failed:
reporter.append_collect_error(report)
else:
reporter.append_collect_skipped(report)
def pytest_internalerror(self, excrepr):
reporter = self.node_reporter("internal")
reporter.attrs.update(classname="pytest", name="internal")
reporter._add_simple(Junit.error, "internal error", excrepr)
def pytest_sessionstart(self):
self.suite_start_time = time.time()
def pytest_sessionfinish(self):
dirname = os.path.dirname(os.path.abspath(self.logfile))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(self.logfile, "w", encoding="utf-8")
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = (
self.stats["passed"]
+ self.stats["failure"]
+ self.stats["skipped"]
+ self.stats["error"]
- self.cnt_double_fail_tests
)
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
logfile.write(
Junit.testsuite(
self._get_global_properties_node(),
[x.to_xml() for x in self.node_reporters_ordered],
name=self.suite_name,
errors=self.stats["error"],
failures=self.stats["failure"],
skipped=self.stats["skipped"],
tests=numtests,
time="%.3f" % suite_time_delta,
).unicode(indent=0)
)
logfile.close()
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
def add_global_property(self, name, value):
self.global_properties.append((str(name), bin_xml_escape(value)))
def _get_global_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.global_properties:
return Junit.properties(
[
Junit.property(name=name, value=value)
for name, value in self.global_properties
]
)
return ""

View File

@@ -0,0 +1,662 @@
""" Access and control log capturing. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import re
from contextlib import contextmanager
import py
import six
import pytest
from _pytest.compat import dummy_context_manager
from _pytest.config import create_terminal_writer
from _pytest.pathlib import Path
DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
class ColoredLevelFormatter(logging.Formatter):
"""
Colorize the %(levelname)..s part of the log format passed to __init__.
"""
LOGLEVEL_COLOROPTS = {
logging.CRITICAL: {"red"},
logging.ERROR: {"red", "bold"},
logging.WARNING: {"yellow"},
logging.WARN: {"yellow"},
logging.INFO: {"green"},
logging.DEBUG: {"purple"},
logging.NOTSET: set(),
}
LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)")
def __init__(self, terminalwriter, *args, **kwargs):
super(ColoredLevelFormatter, self).__init__(*args, **kwargs)
if six.PY2:
self._original_fmt = self._fmt
else:
self._original_fmt = self._style._fmt
self._level_to_fmt_mapping = {}
levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
if not levelname_fmt_match:
return
levelname_fmt = levelname_fmt_match.group()
for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
formatted_levelname = levelname_fmt % {
"levelname": logging.getLevelName(level)
}
# add ANSI escape sequences around the formatted levelname
color_kwargs = {name: True for name in color_opts}
colorized_formatted_levelname = terminalwriter.markup(
formatted_levelname, **color_kwargs
)
self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
colorized_formatted_levelname, self._fmt
)
def format(self, record):
fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)
if six.PY2:
self._fmt = fmt
else:
self._style._fmt = fmt
return super(ColoredLevelFormatter, self).format(record)
def get_option_ini(config, *names):
for name in names:
ret = config.getoption(name) # 'default' arg won't work as expected
if ret is None:
ret = config.getini(name)
if ret:
return ret
def pytest_addoption(parser):
"""Add options to control log capturing."""
group = parser.getgroup("logging")
def add_option_ini(option, dest, default=None, type=None, **kwargs):
parser.addini(
dest, default=default, type=type, help="default value for " + option
)
group.addoption(option, dest=dest, **kwargs)
add_option_ini(
"--no-print-logs",
dest="log_print",
action="store_const",
const=False,
default=True,
type="bool",
help="disable printing caught logs on failed tests.",
)
add_option_ini(
"--log-level",
dest="log_level",
default=None,
help="logging level used by the logging module",
)
add_option_ini(
"--log-format",
dest="log_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-date-format",
dest="log_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
parser.addini(
"log_cli",
default=False,
type="bool",
help='enable log display during test run (also known as "live logging").',
)
add_option_ini(
"--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
)
add_option_ini(
"--log-cli-format",
dest="log_cli_format",
default=None,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-cli-date-format",
dest="log_cli_date_format",
default=None,
help="log date format as used by the logging module.",
)
add_option_ini(
"--log-file",
dest="log_file",
default=None,
help="path to a file when logging will be written to.",
)
add_option_ini(
"--log-file-level",
dest="log_file_level",
default=None,
help="log file logging level.",
)
add_option_ini(
"--log-file-format",
dest="log_file_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-file-date-format",
dest="log_file_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
@contextmanager
def catching_logs(handler, formatter=None, level=None):
"""Context manager that prepares the whole logging machinery properly."""
root_logger = logging.getLogger()
if formatter is not None:
handler.setFormatter(formatter)
if level is not None:
handler.setLevel(level)
# Adding the same handler twice would confuse the logging system.
# Just don't do that.
add_new_handler = handler not in root_logger.handlers
if add_new_handler:
root_logger.addHandler(handler)
if level is not None:
orig_level = root_logger.level
root_logger.setLevel(min(orig_level, level))
try:
yield handler
finally:
if level is not None:
root_logger.setLevel(orig_level)
if add_new_handler:
root_logger.removeHandler(handler)
class LogCaptureHandler(logging.StreamHandler):
"""A logging handler that stores log records and the log text."""
def __init__(self):
"""Creates a new log handler."""
logging.StreamHandler.__init__(self, py.io.TextIO())
self.records = []
def emit(self, record):
"""Keep the log records in a list in addition to the log text."""
self.records.append(record)
logging.StreamHandler.emit(self, record)
def reset(self):
self.records = []
self.stream = py.io.TextIO()
class LogCaptureFixture(object):
"""Provides access and control of log capturing."""
def __init__(self, item):
"""Creates a new funcarg."""
self._item = item
# dict of log name -> log level
self._initial_log_levels = {} # Dict[str, int]
def _finalize(self):
"""Finalizes the fixture.
This restores the log levels changed by :meth:`set_level`.
"""
# restore log levels
for logger_name, level in self._initial_log_levels.items():
logger = logging.getLogger(logger_name)
logger.setLevel(level)
@property
def handler(self):
"""
:rtype: LogCaptureHandler
"""
return self._item.catch_log_handler
def get_records(self, when):
"""
Get the logging records for one of the possible test phases.
:param str when:
Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
:rtype: List[logging.LogRecord]
:return: the list of captured records at the given stage
.. versionadded:: 3.4
"""
handler = self._item.catch_log_handlers.get(when)
if handler:
return handler.records
else:
return []
@property
def text(self):
"""Returns the log text."""
return self.handler.stream.getvalue()
@property
def records(self):
"""Returns the list of log records."""
return self.handler.records
@property
def record_tuples(self):
"""Returns a list of a stripped down version of log records intended
for use in assertion comparison.
The format of the tuple is:
(logger_name, log_level, message)
"""
return [(r.name, r.levelno, r.getMessage()) for r in self.records]
@property
def messages(self):
"""Returns a list of format-interpolated log messages.
Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list
are all interpolated.
Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with
levels, timestamps, etc, making exact comparisons more reliable.
Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments
to the logging functions) is not included, as this is added by the formatter in the handler.
.. versionadded:: 3.7
"""
return [r.getMessage() for r in self.records]
def clear(self):
"""Reset the list of log records and the captured log text."""
self.handler.reset()
def set_level(self, level, logger=None):
"""Sets the level for capturing of logs. The level will be restored to its previous value at the end of
the test.
:param int level: the level to set.
:param str logger: the logger whose level to update. If not given, the root logger level is updated.
.. versionchanged:: 3.4
The levels of the loggers changed by this function will be restored to their initial values at the
end of the test.
"""
logger_name = logger
logger = logging.getLogger(logger_name)
# save the original log-level to restore it during teardown
self._initial_log_levels.setdefault(logger_name, logger.level)
logger.setLevel(level)
@contextmanager
def at_level(self, level, logger=None):
"""Context manager that sets the level for capturing of logs. After the end of the 'with' statement the
level is restored to its original value.
:param int level: the level to set.
:param str logger: the logger whose level to update. If not given, the root logger level is updated.
"""
logger = logging.getLogger(logger)
orig_level = logger.level
logger.setLevel(level)
try:
yield
finally:
logger.setLevel(orig_level)
@pytest.fixture
def caplog(request):
"""Access and control log capturing.
Captured logs are available through the following properties/methods::
* caplog.text -> string containing formatted log output
* caplog.records -> list of logging.LogRecord instances
* caplog.record_tuples -> list of (logger_name, level, message) tuples
* caplog.clear() -> clear captured records and formatted log output string
"""
result = LogCaptureFixture(request.node)
yield result
result._finalize()
def get_actual_log_level(config, *setting_names):
"""Return the actual logging level."""
for setting_name in setting_names:
log_level = config.getoption(setting_name)
if log_level is None:
log_level = config.getini(setting_name)
if log_level:
break
else:
return
if isinstance(log_level, six.string_types):
log_level = log_level.upper()
try:
return int(getattr(logging, log_level, log_level))
except ValueError:
# Python logging does not recognise this as a logging level
raise pytest.UsageError(
"'{}' is not recognized as a logging level name for "
"'{}'. Please consider passing the "
"logging level num instead.".format(log_level, setting_name)
)
# run after terminalreporter/capturemanager are configured
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
class LoggingPlugin(object):
"""Attaches to the logging module and captures log messages for each test.
"""
def __init__(self, config):
"""Creates a new plugin to capture log messages.
The formatter can be safely shared across all handlers so
create a single one for the entire test session here.
"""
self._config = config
# enable verbose output automatically if live logging is enabled
if self._log_cli_enabled() and config.getoption("verbose") < 1:
config.option.verbose = 1
self.print_logs = get_option_ini(config, "log_print")
self.formatter = logging.Formatter(
get_option_ini(config, "log_format"),
get_option_ini(config, "log_date_format"),
)
self.log_level = get_actual_log_level(config, "log_level")
self.log_file_level = get_actual_log_level(config, "log_file_level")
self.log_file_format = get_option_ini(config, "log_file_format", "log_format")
self.log_file_date_format = get_option_ini(
config, "log_file_date_format", "log_date_format"
)
self.log_file_formatter = logging.Formatter(
self.log_file_format, datefmt=self.log_file_date_format
)
log_file = get_option_ini(config, "log_file")
if log_file:
self.log_file_handler = logging.FileHandler(
log_file, mode="w", encoding="UTF-8"
)
self.log_file_handler.setFormatter(self.log_file_formatter)
else:
self.log_file_handler = None
self.log_cli_handler = None
self.live_logs_context = lambda: dummy_context_manager()
# Note that the lambda for the live_logs_context is needed because
# live_logs_context can otherwise not be entered multiple times due
# to limitations of contextlib.contextmanager.
if self._log_cli_enabled():
self._setup_cli_logging()
def _setup_cli_logging(self):
config = self._config
terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
if terminal_reporter is None:
# terminal reporter is disabled e.g. by pytest-xdist.
return
capture_manager = config.pluginmanager.get_plugin("capturemanager")
# if capturemanager plugin is disabled, live logging still works.
log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
log_cli_format = get_option_ini(config, "log_cli_format", "log_format")
log_cli_date_format = get_option_ini(
config, "log_cli_date_format", "log_date_format"
)
if (
config.option.color != "no"
and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format)
):
log_cli_formatter = ColoredLevelFormatter(
create_terminal_writer(config),
log_cli_format,
datefmt=log_cli_date_format,
)
else:
log_cli_formatter = logging.Formatter(
log_cli_format, datefmt=log_cli_date_format
)
log_cli_level = get_actual_log_level(config, "log_cli_level", "log_level")
self.log_cli_handler = log_cli_handler
self.live_logs_context = lambda: catching_logs(
log_cli_handler, formatter=log_cli_formatter, level=log_cli_level
)
def set_log_path(self, fname):
"""Public method, which can set filename parameter for
Logging.FileHandler(). Also creates parent directory if
it does not exist.
.. warning::
Please considered as an experimental API.
"""
fname = Path(fname)
if not fname.is_absolute():
fname = Path(self._config.rootdir, fname)
if not fname.parent.exists():
fname.parent.mkdir(exist_ok=True, parents=True)
self.log_file_handler = logging.FileHandler(
str(fname), mode="w", encoding="UTF-8"
)
self.log_file_handler.setFormatter(self.log_file_formatter)
def _log_cli_enabled(self):
"""Return True if log_cli should be considered enabled, either explicitly
or because --log-cli-level was given in the command-line.
"""
return self._config.getoption(
"--log-cli-level"
) is not None or self._config.getini("log_cli")
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection(self):
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("collection")
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@contextmanager
def _runtest_for(self, item, when):
with self._runtest_for_main(item, when):
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@contextmanager
def _runtest_for_main(self, item, when):
"""Implements the internals of pytest_runtest_xxx() hook."""
with catching_logs(
LogCaptureHandler(), formatter=self.formatter, level=self.log_level
) as log_handler:
if self.log_cli_handler:
self.log_cli_handler.set_when(when)
if item is None:
yield # run the test
return
if not hasattr(item, "catch_log_handlers"):
item.catch_log_handlers = {}
item.catch_log_handlers[when] = log_handler
item.catch_log_handler = log_handler
try:
yield # run test
finally:
if when == "teardown":
del item.catch_log_handler
del item.catch_log_handlers
if self.print_logs:
# Add a captured log section to the report.
log = log_handler.stream.getvalue().strip()
item.add_report_section(when, "log", log)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
with self._runtest_for(item, "setup"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
with self._runtest_for(item, "call"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item):
with self._runtest_for(item, "teardown"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_logstart(self):
if self.log_cli_handler:
self.log_cli_handler.reset()
with self._runtest_for(None, "start"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_logfinish(self):
with self._runtest_for(None, "finish"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_logreport(self):
with self._runtest_for(None, "logreport"):
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionfinish(self):
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("sessionfinish")
if self.log_file_handler is not None:
try:
with catching_logs(
self.log_file_handler, level=self.log_file_level
):
yield
finally:
# Close the FileHandler explicitly.
# (logging.shutdown might have lost the weakref?!)
self.log_file_handler.close()
else:
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionstart(self):
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("sessionstart")
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtestloop(self, session):
"""Runs all collected test items."""
with self.live_logs_context():
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield # run all the tests
else:
yield # run all the tests
class _LiveLoggingStreamHandler(logging.StreamHandler):
"""
Custom StreamHandler used by the live logging feature: it will write a newline before the first log message
in each test.
During live logging we must also explicitly disable stdout/stderr capturing, otherwise it will get captured
and won't appear in the terminal.
"""
def __init__(self, terminal_reporter, capture_manager):
"""
:param _pytest.terminal.TerminalReporter terminal_reporter:
:param _pytest.capture.CaptureManager capture_manager:
"""
logging.StreamHandler.__init__(self, stream=terminal_reporter)
self.capture_manager = capture_manager
self.reset()
self.set_when(None)
self._test_outcome_written = False
def reset(self):
"""Reset the handler; should be called before the start of each test"""
self._first_record_emitted = False
def set_when(self, when):
"""Prepares for the given test phase (setup/call/teardown)"""
self._when = when
self._section_name_shown = False
if when == "start":
self._test_outcome_written = False
def emit(self, record):
ctx_manager = (
self.capture_manager.global_and_fixture_disabled()
if self.capture_manager
else dummy_context_manager()
)
with ctx_manager:
if not self._first_record_emitted:
self.stream.write("\n")
self._first_record_emitted = True
elif self._when in ("teardown", "finish"):
if not self._test_outcome_written:
self._test_outcome_written = True
self.stream.write("\n")
if not self._section_name_shown and self._when:
self.stream.section("live log " + self._when, sep="-", bold=True)
self._section_name_shown = True
logging.StreamHandler.emit(self, record)
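# --- Illustrative sketch, not part of this file: a hypothetical test module ---
# Demonstrates the caplog fixture provided above; the logger name and message
# are invented.
# import logging  (already imported at the top of this file)
def test_warning_is_captured(caplog):
    caplog.set_level(logging.INFO, logger="app.worker")
    logging.getLogger("app.worker").warning("disk almost full")
    # record_tuples yields (logger_name, level, message) triples for easy asserts.
    assert ("app.worker", logging.WARNING, "disk almost full") in caplog.record_tuples
    assert "disk almost full" in caplog.text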

View File

@@ -0,0 +1,773 @@
""" core implementation of testing process: init, session, runtest loop. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import fnmatch
import functools
import os
import pkgutil
import sys
import warnings
import attr
import py
import six
import _pytest._code
from _pytest import nodes
from _pytest.config import directory_arg
from _pytest.config import hookimpl
from _pytest.config import UsageError
from _pytest.deprecated import PYTEST_CONFIG_GLOBAL
from _pytest.outcomes import exit
from _pytest.runner import collect_one_node
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
def pytest_addoption(parser):
parser.addini(
"norecursedirs",
"directory patterns to avoid for recursion",
type="args",
default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"],
)
parser.addini(
"testpaths",
"directories to search for tests when no files or directories are given in the "
"command line.",
type="args",
default=[],
)
# parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
# )
group = parser.getgroup("general", "running and selection options")
group._addoption(
"-x",
"--exitfirst",
action="store_const",
dest="maxfail",
const=1,
help="exit instantly on first error or failed test.",
),
group._addoption(
"--maxfail",
metavar="num",
action="store",
type=int,
dest="maxfail",
default=0,
help="exit after first num failures or errors.",
)
group._addoption(
"--strict",
action="store_true",
help="marks not registered in configuration file raise errors.",
)
group._addoption(
"-c",
metavar="file",
type=str,
dest="inifilename",
help="load configuration from `file` instead of trying to locate one of the implicit "
"configuration files.",
)
group._addoption(
"--continue-on-collection-errors",
action="store_true",
default=False,
dest="continue_on_collection_errors",
help="Force test execution even if collection errors occur.",
)
group._addoption(
"--rootdir",
action="store",
dest="rootdir",
help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
"'$HOME/root_dir'.",
)
group = parser.getgroup("collect", "collection")
group.addoption(
"--collectonly",
"--collect-only",
action="store_true",
help="only collect tests, don't execute them.",
),
group.addoption(
"--pyargs",
action="store_true",
help="try to interpret all arguments as python packages.",
)
group.addoption(
"--ignore",
action="append",
metavar="path",
help="ignore path during collection (multi-allowed).",
)
group.addoption(
"--ignore-glob",
action="append",
metavar="path",
help="ignore path pattern during collection (multi-allowed).",
)
group.addoption(
"--deselect",
action="append",
metavar="nodeid_prefix",
help="deselect item during collection (multi-allowed).",
)
# when changing this to --conf-cut-dir, config.py Conftest.setinitial
# needs upgrading as well
group.addoption(
"--confcutdir",
dest="confcutdir",
default=None,
metavar="dir",
type=functools.partial(directory_arg, optname="--confcutdir"),
help="only load conftest.py's relative to specified dir.",
)
group.addoption(
"--noconftest",
action="store_true",
dest="noconftest",
default=False,
help="Don't load any conftest.py files.",
)
group.addoption(
"--keepduplicates",
"--keep-duplicates",
action="store_true",
dest="keepduplicates",
default=False,
help="Keep duplicate tests.",
)
group.addoption(
"--collect-in-virtualenv",
action="store_true",
dest="collect_in_virtualenv",
default=False,
help="Don't ignore tests in a local virtualenv directory",
)
group = parser.getgroup("debugconfig", "test session debugging and configuration")
group.addoption(
"--basetemp",
dest="basetemp",
default=None,
metavar="dir",
help=(
"base temporary directory for this test run."
"(warning: this directory is removed if it exists)"
),
)
class _ConfigDeprecated(object):
def __init__(self, config):
self.__dict__["_config"] = config
def __getattr__(self, attr):
warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
return getattr(self._config, attr)
def __setattr__(self, attr, val):
warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
return setattr(self._config, attr, val)
def __repr__(self):
return "{}({!r})".format(type(self).__name__, self._config)
def pytest_configure(config):
__import__("pytest").config = _ConfigDeprecated(config) # compatibility
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
session.exitstatus = doit(config, session) or 0
except UsageError:
raise
except Failed:
session.exitstatus = EXIT_TESTSFAILED
except (KeyboardInterrupt, exit.Exception):
excinfo = _pytest._code.ExceptionInfo.from_current()
exitstatus = EXIT_INTERRUPTED
if initstate <= 2 and isinstance(excinfo.value, exit.Exception):
sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg))
if excinfo.value.returncode is not None:
exitstatus = excinfo.value.returncode
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = exitstatus
except: # noqa
excinfo = _pytest._code.ExceptionInfo.from_current()
config.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught unexpected SystemExit!\n")
finally:
excinfo = None # Explicitly break reference cycle.
session.startdir.chdir()
if initstate >= 2:
config.hook.pytest_sessionfinish(
session=session, exitstatus=session.exitstatus
)
config._ensure_unconfigure()
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return EXIT_TESTSFAILED
elif session.testscollected == 0:
return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted("%d errors during collection" % session.testsfailed)
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldfail:
raise session.Failed(session.shouldfail)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def _in_venv(path):
"""Attempts to detect if ``path`` is the root of a Virtual Environment by
checking for the existence of the appropriate activate script"""
bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin")
if not bindir.isdir():
return False
activates = (
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
)
return any([fname.basename in activates for fname in bindir.listdir()])
def pytest_ignore_collect(path, config):
ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath())
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
if py.path.local(path) in ignore_paths:
return True
ignore_globs = config._getconftest_pathlist(
"collect_ignore_glob", path=path.dirpath()
)
ignore_globs = ignore_globs or []
excludeglobopt = config.getoption("ignore_glob")
if excludeglobopt:
ignore_globs.extend([py.path.local(x) for x in excludeglobopt])
if any(
fnmatch.fnmatch(six.text_type(path), six.text_type(glob))
for glob in ignore_globs
):
return True
allow_in_venv = config.getoption("collect_in_virtualenv")
if not allow_in_venv and _in_venv(path):
return True
return False
def pytest_collection_modifyitems(items, config):
deselect_prefixes = tuple(config.getoption("deselect") or [])
if not deselect_prefixes:
return
remaining = []
deselected = []
for colitem in items:
if colitem.nodeid.startswith(deselect_prefixes):
deselected.append(colitem)
else:
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
@contextlib.contextmanager
def _patched_find_module():
"""Patch bug in pkgutil.ImpImporter.find_module
When using pkgutil.find_loader on python<3.4 it removes symlinks
from the path due to a call to os.path.realpath. This is not consistent
with actually doing the import (in these versions, pkgutil and __import__
did not share the same underlying code). This can break conftest
discovery for pytest where symlinks are involved.
The only python<3.4 version supported by pytest is python 2.7.
"""
if six.PY2: # python 3.4+ uses importlib instead
def find_module_patched(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
# original: path = [os.path.realpath(self.path)]
path = [self.path]
try:
file, filename, etc = pkgutil.imp.find_module(subname, path)
except ImportError:
return None
return pkgutil.ImpLoader(fullname, file, filename, etc)
old_find_module = pkgutil.ImpImporter.find_module
pkgutil.ImpImporter.find_module = find_module_patched
try:
yield
finally:
pkgutil.ImpImporter.find_module = old_find_module
else:
yield
class FSHookProxy(object):
def __init__(self, fspath, pm, remove_mods):
self.fspath = fspath
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
class NoMatch(Exception):
""" raised if matching cannot locate a matching names. """
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = "builtins" # for py3
class Failed(Exception):
""" signals a stop as failed test run. """
@attr.s
class _bestrelpath_cache(dict):
path = attr.ib()
def __missing__(self, path):
r = self.path.bestrelpath(path)
self[path] = r
return r
class Session(nodes.FSCollector):
Interrupted = Interrupted
Failed = Failed
def __init__(self, config):
nodes.FSCollector.__init__(
self, config.rootdir, parent=None, config=config, session=self, nodeid=""
)
self.testsfailed = 0
self.testscollected = 0
self.shouldstop = False
self.shouldfail = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
self.startdir = py.path.local()
self._initialpaths = frozenset()
# Keep track of any collected nodes in here, so we don't duplicate fixtures
self._node_cache = {}
self._bestrelpathcache = _bestrelpath_cache(config.rootdir)
# Dirnames of pkgs with dunder-init files.
self._pkg_roots = {}
self.config.pluginmanager.register(self, name="session")
def __repr__(self):
return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % (
self.__class__.__name__,
self.name,
getattr(self, "exitstatus", "<UNSET>"),
self.testsfailed,
self.testscollected,
)
def _node_location_to_relpath(self, node_path):
# bestrelpath is quite a slow function
return self._bestrelpathcache[node_path]
@hookimpl(tryfirst=True)
def pytest_collectstart(self):
if self.shouldfail:
raise self.Failed(self.shouldfail)
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
@hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report):
if report.failed and not hasattr(report, "wasxfail"):
self.testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self.testsfailed >= maxfail:
self.shouldfail = "stopping after %d failures" % (self.testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
# check if we have the common case of running
# hooks with all conftest.py files
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(fspath)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# one or more conftests are not in use at this fspath
proxy = FSHookProxy(fspath, pm, remove_mods)
else:
# all plugins are active for this fspath
proxy = self.config.hook
return proxy
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
self.config.pluginmanager.check_pending()
hook.pytest_collection_modifyitems(
session=self, config=self.config, items=items
)
finally:
hook.pytest_collection_finish(session=self)
self.testscollected = len(items)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
initialpaths = []
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
initialpaths.append(parts[0])
self._initialpaths = frozenset(initialpaths)
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
errors.append("not found: %s\n%s" % (arg, line))
# XXX: test this
raise UsageError(*errors)
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for initialpart in self._initialparts:
arg = "::".join(map(str, initialpart))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
def _collect(self, arg):
from _pytest.python import Package
names = self._parsearg(arg)
argpath = names.pop(0)
# Start with a Session root, and delve to argpath item (dir or file)
# and stack all Packages found on the way.
# No point in finding packages when collecting doctests
if not self.config.getoption("doctestmodules", False):
pm = self.config.pluginmanager
for parent in reversed(argpath.parts()):
if pm._confcutdir and pm._confcutdir.relto(parent):
break
if parent.isdir():
pkginit = parent.join("__init__.py")
if pkginit.isfile():
if pkginit not in self._node_cache:
col = self._collectfile(pkginit, handle_dupes=False)
if col:
if isinstance(col[0], Package):
self._pkg_roots[parent] = col[0]
# always store a list in the cache, matchnodes expects it
self._node_cache[col[0].fspath] = [col[0]]
# If it's a directory argument, recurse and look for any Subpackages.
# Let the Package collector deal with subnodes, don't collect here.
if argpath.check(dir=1):
assert not names, "invalid arg %r" % (arg,)
seen_dirs = set()
for path in argpath.visit(
fil=self._visit_filter, rec=self._recurse, bf=True, sort=True
):
dirpath = path.dirpath()
if dirpath not in seen_dirs:
# Collect packages first.
seen_dirs.add(dirpath)
pkginit = dirpath.join("__init__.py")
if pkginit.exists():
for x in self._collectfile(pkginit):
yield x
if isinstance(x, Package):
self._pkg_roots[dirpath] = x
if dirpath in self._pkg_roots:
# Do not collect packages here.
continue
for x in self._collectfile(path):
key = (type(x), x.fspath)
if key in self._node_cache:
yield self._node_cache[key]
else:
self._node_cache[key] = x
yield x
else:
assert argpath.check(file=1)
if argpath in self._node_cache:
col = self._node_cache[argpath]
else:
collect_root = self._pkg_roots.get(argpath.dirname, self)
col = collect_root._collectfile(argpath, handle_dupes=False)
if col:
self._node_cache[argpath] = col
m = self.matchnodes(col, names)
# If __init__.py was the only file requested, then the matched node will be
# the corresponding Package, and the first yielded item will be the __init__
# Module itself, so just use that. If this special case isn't taken, then all
# the files in the package will be yielded.
if argpath.basename == "__init__.py":
yield next(m[0].collect())
return
for y in m:
yield y
def _collectfile(self, path, handle_dupes=True):
assert path.isfile(), "%r is not a file (isdir=%r, exists=%r, islink=%r)" % (
path,
path.isdir(),
path.exists(),
path.islink(),
)
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
if handle_dupes:
keepduplicates = self.config.getoption("keepduplicates")
if not keepduplicates:
duplicate_paths = self.config.pluginmanager._duplicatepaths
if path in duplicate_paths:
return ()
else:
duplicate_paths.add(path)
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, dirpath):
if dirpath.basename == "__pycache__":
return False
ihook = self.gethookproxy(dirpath.dirpath())
if ihook.pytest_ignore_collect(path=dirpath, config=self.config):
return False
for pat in self._norecursepatterns:
if dirpath.check(fnmatch=pat):
return False
ihook = self.gethookproxy(dirpath)
ihook.pytest_collect_directory(path=dirpath, parent=self)
return True
if six.PY2:
@staticmethod
def _visit_filter(f):
return f.check(file=1) and not f.strpath.endswith("*.pyc")
else:
@staticmethod
def _visit_filter(f):
return f.check(file=1)
def _tryconvertpyarg(self, x):
"""Convert a dotted module name to path."""
try:
with _patched_find_module():
loader = pkgutil.find_loader(x)
except ImportError:
return x
if loader is None:
return x
# This method is sometimes invoked when AssertionRewritingHook, which
# does not define a get_filename method, is already in place:
try:
with _patched_find_module():
path = loader.get_filename(x)
except AttributeError:
# Retrieve path from AssertionRewritingHook:
path = loader.modules[x][0].co_filename
if loader.is_package(x):
path = os.path.dirname(path)
return path
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
parts = str(arg).split("::")
if self.config.option.pyargs:
parts[0] = self._tryconvertpyarg(parts[0])
relpath = parts[0].replace("/", os.sep)
path = self.config.invocation_dir.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
raise UsageError(
"file or package not found: " + arg + " (missing __init__.py?)"
)
raise UsageError("file not found: " + arg)
parts[0] = path.realpath()
return parts
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, nodes.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, nodes.Collector)
key = (type(node), node.nodeid)
if key in self._node_cache:
rep = self._node_cache[key]
else:
rep = collect_one_node(node)
self._node_cache[key] = rep
if rep.passed:
has_matched = False
for x in rep.result:
# TODO: remove parametrized workaround once collection structure contains parametrization
if x.name == name or x.name.split("[")[0] == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
else:
# report collection failures here to avoid failing to run some test
# specified in the command line because the module could not be
# imported (#134)
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, nodes.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, nodes.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
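# --- Illustrative sketch, not part of this file: a hypothetical driver script ---
# run_smoke.py -- shows that the EXIT_* constants defined at the top of this
# module are what a programmatic run returns; the test path is invented.
#
#     import sys
#     import pytest
#     exit_code = pytest.main(["tests/smoke", "--maxfail=1"])
#     # 0=EXIT_OK, 1=EXIT_TESTSFAILED, 2=EXIT_INTERRUPTED, 5=EXIT_NOTESTSCOLLECTED
#     sys.exit(exit_code)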

View File

@@ -0,0 +1,163 @@
""" generic mechanism for marking and selecting python functions. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .legacy import matchkeyword
from .legacy import matchmark
from .structures import EMPTY_PARAMETERSET_OPTION
from .structures import get_empty_parameterset_mark
from .structures import Mark
from .structures import MARK_GEN
from .structures import MarkDecorator
from .structures import MarkGenerator
from .structures import ParameterSet
from _pytest.config import UsageError
__all__ = ["Mark", "MarkDecorator", "MarkGenerator", "get_empty_parameterset_mark"]
def param(*values, **kw):
"""Specify a parameter in `pytest.mark.parametrize`_ calls or
:ref:`parametrized fixtures <fixture-parametrize-marks>`.
.. code-block:: python
@pytest.mark.parametrize("test_input,expected", [
("3+5", 8),
pytest.param("6*9", 42, marks=pytest.mark.xfail),
])
def test_eval(test_input, expected):
assert eval(test_input) == expected
:param values: variable args of the values of the parameter set, in order.
:keyword marks: a single mark or a list of marks to be applied to this parameter set.
:keyword str id: the id to attribute to this parameter set.
"""
return ParameterSet.param(*values, **kw)
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"-k",
action="store",
dest="keyword",
default="",
metavar="EXPRESSION",
help="only run tests which match the given substring expression. "
"An expression is a python evaluatable expression "
"where all names are substring-matched against test names "
"and their parent classes. Example: -k 'test_method or test_"
"other' matches all test functions and classes whose name "
"contains 'test_method' or 'test_other', while -k 'not test_method' "
"matches those that don't contain 'test_method' in their names. "
"-k 'not test_method and not test_other' will eliminate the matches. "
"Additionally keywords are matched to classes and functions "
"containing extra names in their 'extra_keyword_matches' set, "
"as well as functions which have names assigned directly to them.",
)
group._addoption(
"-m",
action="store",
dest="markexpr",
default="",
metavar="MARKEXPR",
help="only run tests matching given mark expression. "
"example: -m 'mark1 and not mark2'.",
)
group.addoption(
"--markers",
action="store_true",
help="show markers (builtin, plugin and per-project ones).",
)
parser.addini("markers", "markers for test functions", "linelist")
parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets")
def pytest_cmdline_main(config):
import _pytest.config
if config.option.markers:
config._do_configure()
tw = _pytest.config.create_terminal_writer(config)
for line in config.getini("markers"):
parts = line.split(":", 1)
name = parts[0]
rest = parts[1] if len(parts) == 2 else ""
tw.write("@pytest.mark.%s:" % name, bold=True)
tw.line(rest)
tw.line()
config._ensure_unconfigure()
return 0
pytest_cmdline_main.tryfirst = True
def deselect_by_keyword(items, config):
keywordexpr = config.option.keyword.lstrip()
if keywordexpr.startswith("-"):
keywordexpr = "not " + keywordexpr[1:]
selectuntil = False
if keywordexpr[-1:] == ":":
selectuntil = True
keywordexpr = keywordexpr[:-1]
remaining = []
deselected = []
for colitem in items:
if keywordexpr and not matchkeyword(colitem, keywordexpr):
deselected.append(colitem)
else:
if selectuntil:
keywordexpr = None
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
def deselect_by_mark(items, config):
matchexpr = config.option.markexpr
if not matchexpr:
return
remaining = []
deselected = []
for item in items:
if matchmark(item, matchexpr):
remaining.append(item)
else:
deselected.append(item)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
def pytest_collection_modifyitems(items, config):
deselect_by_keyword(items, config)
deselect_by_mark(items, config)
def pytest_configure(config):
config._old_mark_config = MARK_GEN._config
if config.option.strict:
MARK_GEN._config = config
empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""):
raise UsageError(
"{!s} must be one of skip, xfail or fail_at_collect"
" but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)
)
def pytest_unconfigure(config):
MARK_GEN._config = getattr(config, "_old_mark_config", None)
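# Illustrative usage sketch, not part of the vendored module: the hooks above
# implement "-m" and "-k" selection for marked tests such as these. The marker
# name "slow" is a hypothetical example that would be registered through the
# "markers" ini option declared in pytest_addoption() above.
import pytest

@pytest.mark.slow
def test_big_computation():
    assert sum(range(1000)) == 499500

def test_quick():
    assert 1 + 1 == 2

# "pytest -m 'not slow'" deselects test_big_computation via deselect_by_mark();
# "pytest -k quick" keeps only test_quick via deselect_by_keyword().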


@ -0,0 +1,125 @@
import os
import platform
import sys
import traceback
import six
from ..outcomes import fail
from ..outcomes import TEST_OUTCOME
def cached_eval(config, expr, d):
if not hasattr(config, "_evalcache"):
config._evalcache = {}
try:
return config._evalcache[expr]
except KeyError:
import _pytest._code
exprcode = _pytest._code.compile(expr, mode="eval")
config._evalcache[expr] = x = eval(exprcode, d)
return x
class MarkEvaluator(object):
def __init__(self, item, name):
self.item = item
self._marks = None
self._mark = None
self._mark_name = name
def __bool__(self):
# don't cache here to prevent staleness
return bool(self._get_marks())
__nonzero__ = __bool__
def wasvalid(self):
return not hasattr(self, "exc")
def _get_marks(self):
return list(self.item.iter_markers(name=self._mark_name))
def invalidraise(self, exc):
raises = self.get("raises")
if not raises:
return
return not isinstance(exc, raises)
def istrue(self):
try:
return self._istrue()
except TEST_OUTCOME:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
msg = [" " * (self.exc[1].offset + 4) + "^"]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
fail(
"Error evaluating %r expression\n"
" %s\n"
"%s" % (self._mark_name, self.expr, "\n".join(msg)),
pytrace=False,
)
def _getglobals(self):
d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config}
if hasattr(self.item, "obj"):
d.update(self.item.obj.__globals__)
return d
def _istrue(self):
if hasattr(self, "result"):
return self.result
self._marks = self._get_marks()
if self._marks:
self.result = False
for mark in self._marks:
self._mark = mark
if "condition" in mark.kwargs:
args = (mark.kwargs["condition"],)
else:
args = mark.args
for expr in args:
self.expr = expr
if isinstance(expr, six.string_types):
d = self._getglobals()
result = cached_eval(self.item.config, expr, d)
else:
if "reason" not in mark.kwargs:
# XXX better be checked at collection time
msg = (
"you need to specify reason=STRING "
"when using booleans as conditions."
)
fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = mark.kwargs.get("reason", None)
self.expr = expr
return self.result
if not args:
self.result = True
self.reason = mark.kwargs.get("reason", None)
return self.result
return False
def get(self, attr, default=None):
if self._mark is None:
return default
return self._mark.kwargs.get(attr, default)
def getexplanation(self):
expl = getattr(self, "reason", None) or self.get("reason", None)
if not expl:
if not hasattr(self, "expr"):
return ""
else:
return "condition: " + str(self.expr)
return expl
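# Illustrative usage sketch, not part of the vendored module: MarkEvaluator is
# what evaluates skipif/xfail conditions. String conditions like the one below
# are eval()'d by cached_eval() with "os", "sys", "platform" and "config"
# available through _getglobals().
import sys
import pytest

@pytest.mark.skipif("sys.platform == 'win32'", reason="does not run on Windows")
def test_unix_only():
    assert sys.platform != "win32"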


@ -0,0 +1,102 @@
"""
this is a place where we put datastructures used by legacy apis
we hope to remove
"""
import keyword
import attr
from _pytest.config import UsageError
@attr.s
class MarkMapping(object):
"""Provides a local mapping for markers where item access
resolves to True if the marker is present. """
own_mark_names = attr.ib()
@classmethod
def from_item(cls, item):
mark_names = {mark.name for mark in item.iter_markers()}
return cls(mark_names)
def __getitem__(self, name):
return name in self.own_mark_names
class KeywordMapping(object):
"""Provides a local mapping for keywords.
Given a list of names, map any substring of one of these names to True.
"""
def __init__(self, names):
self._names = names
@classmethod
def from_item(cls, item):
mapped_names = set()
# Add the names of the current item and any parent items
import pytest
for item in item.listchain():
if not isinstance(item, pytest.Instance):
mapped_names.add(item.name)
# Add the names added as extra keywords to current or parent items
mapped_names.update(item.listextrakeywords())
# Add the names attached to the current function through direct assignment
if hasattr(item, "function"):
mapped_names.update(item.function.__dict__)
# add the markers to the keywords as we no longer handle them correctly
mapped_names.update(mark.name for mark in item.iter_markers())
return cls(mapped_names)
def __getitem__(self, subname):
for name in self._names:
if subname in name:
return True
return False
python_keywords_allowed_list = ["or", "and", "not"]
def matchmark(colitem, markexpr):
"""Tries to match on any marker names, attached to the given colitem."""
try:
return eval(markexpr, {}, MarkMapping.from_item(colitem))
except SyntaxError as e:
raise SyntaxError(str(e) + "\nMarker expression must be valid Python!")
def matchkeyword(colitem, keywordexpr):
"""Tries to match given keyword expression to given collector item.
Will match on the name of colitem, including the names of its parents.
Only matches names of items which are either a :class:`Class` or a
:class:`Function`.
Additionally, matches on names in the 'extra_keyword_matches' set of
any item, as well as names directly assigned to test functions.
"""
mapping = KeywordMapping.from_item(colitem)
if " " not in keywordexpr:
# special case to allow for simple "-k pass" and "-k 1.3"
return mapping[keywordexpr]
elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
return not mapping[keywordexpr[4:]]
for kwd in keywordexpr.split():
if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list:
raise UsageError(
"Python keyword '{}' not accepted in expressions passed to '-k'".format(
kwd
)
)
try:
return eval(keywordexpr, {}, mapping)
except SyntaxError:
raise UsageError("Wrong expression passed to '-k': {}".format(keywordexpr))


@ -0,0 +1,378 @@
import inspect
import warnings
from collections import namedtuple
from operator import attrgetter
import attr
import six
from ..compat import ascii_escaped
from ..compat import getfslineno
from ..compat import MappingMixin
from ..compat import NOTSET
from _pytest.outcomes import fail
EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark"
def alias(name, warning=None):
getter = attrgetter(name)
def warned(self):
warnings.warn(warning, stacklevel=2)
return getter(self)
return property(getter if warning is None else warned, doc="alias for " + name)
def istestfunc(func):
return (
hasattr(func, "__call__")
and getattr(func, "__name__", "<lambda>") != "<lambda>"
)
def get_empty_parameterset_mark(config, argnames, func):
from ..nodes import Collector
requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
if requested_mark in ("", None, "skip"):
mark = MARK_GEN.skip
elif requested_mark == "xfail":
mark = MARK_GEN.xfail(run=False)
elif requested_mark == "fail_at_collect":
f_name = func.__name__
_, lineno = getfslineno(func)
raise Collector.CollectError(
"Empty parameter set in '%s' at line %d" % (f_name, lineno + 1)
)
else:
raise LookupError(requested_mark)
fs, lineno = getfslineno(func)
reason = "got empty parameter set %r, function %s at %s:%d" % (
argnames,
func.__name__,
fs,
lineno,
)
return mark(reason=reason)
class ParameterSet(namedtuple("ParameterSet", "values, marks, id")):
@classmethod
def param(cls, *values, **kw):
marks = kw.pop("marks", ())
if isinstance(marks, MarkDecorator):
marks = (marks,)
else:
assert isinstance(marks, (tuple, list, set))
id_ = kw.pop("id", None)
if id_ is not None:
if not isinstance(id_, six.string_types):
raise TypeError(
"Expected id to be a string, got {}: {!r}".format(type(id_), id_)
)
id_ = ascii_escaped(id_)
return cls(values, marks, id_)
@classmethod
def extract_from(cls, parameterset, force_tuple=False):
"""
:param parameterset:
a legacy style parameterset that may or may not be a tuple,
and may or may not be wrapped into a mess of mark objects
:param force_tuple:
enforce tuple wrapping so single argument tuple values
don't get decomposed and break tests
"""
if isinstance(parameterset, cls):
return parameterset
if force_tuple:
return cls.param(parameterset)
else:
return cls(parameterset, marks=[], id=None)
@classmethod
def _for_parametrize(cls, argnames, argvalues, func, config, function_definition):
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
force_tuple = len(argnames) == 1
else:
force_tuple = False
parameters = [
ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues
]
del argvalues
if parameters:
# check all parameter sets have the correct number of values
for param in parameters:
if len(param.values) != len(argnames):
msg = (
'{nodeid}: in "parametrize" the number of names ({names_len}):\n'
" {names}\n"
"must be equal to the number of values ({values_len}):\n"
" {values}"
)
fail(
msg.format(
nodeid=function_definition.nodeid,
values=param.values,
names=argnames,
names_len=len(argnames),
values_len=len(param.values),
),
pytrace=False,
)
else:
# empty parameter set (likely computed at runtime): create a single
# parameter set with NOSET values, with the "empty parameter set" mark applied to it
mark = get_empty_parameterset_mark(config, argnames, func)
parameters.append(
ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None)
)
return argnames, parameters
@attr.s(frozen=True)
class Mark(object):
#: name of the mark
name = attr.ib(type=str)
#: positional arguments of the mark decorator
args = attr.ib() # List[object]
#: keyword arguments of the mark decorator
kwargs = attr.ib() # Dict[str, object]
def combined_with(self, other):
"""
:param other: the mark to combine with
:type other: Mark
:rtype: Mark
combines by appending args and merging the mappings
"""
assert self.name == other.name
return Mark(
self.name, self.args + other.args, dict(self.kwargs, **other.kwargs)
)
@attr.s
class MarkDecorator(object):
""" A decorator for test functions and test classes. When applied
it will create :class:`MarkInfo` objects which may be
:ref:`retrieved by hooks as item keywords <excontrolskip>`.
MarkDecorator instances are often created like this::
mark1 = pytest.mark.NAME # simple MarkDecorator
mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
and can then be applied as decorators to test functions::
@mark2
def test_function():
pass
When a MarkDecorator instance is called it does the following:
1. If called with a single class as its only positional argument and no
additional keyword arguments, it attaches itself to the class so it
gets applied automatically to all test cases found in that class.
2. If called with a single function as its only positional argument and
no additional keyword arguments, it attaches a MarkInfo object to the
function, containing all the arguments already stored internally in
the MarkDecorator.
3. When called in any other case, it performs a 'fake construction' call,
i.e. it returns a new MarkDecorator instance with the original
MarkDecorator's content updated with the arguments passed to this
call.
Note: The rules above prevent MarkDecorator objects from storing only a
single function or class reference as their positional argument with no
additional keyword or positional arguments.
"""
mark = attr.ib(validator=attr.validators.instance_of(Mark))
name = alias("mark.name")
args = alias("mark.args")
kwargs = alias("mark.kwargs")
@property
def markname(self):
return self.name # for backward-compat (2.4.1 had this attr)
def __eq__(self, other):
return self.mark == other.mark if isinstance(other, MarkDecorator) else False
def __repr__(self):
return "<MarkDecorator %r>" % (self.mark,)
def with_args(self, *args, **kwargs):
""" return a MarkDecorator with extra arguments added
unlike call this can be used even if the sole argument is a callable/class
:return: MarkDecorator
"""
mark = Mark(self.name, args, kwargs)
return self.__class__(self.mark.combined_with(mark))
def __call__(self, *args, **kwargs):
""" if passed a single callable argument: decorate it with mark info.
otherwise add *args/**kwargs in-place to mark information. """
if args and not kwargs:
func = args[0]
is_class = inspect.isclass(func)
if len(args) == 1 and (istestfunc(func) or is_class):
store_mark(func, self.mark)
return func
return self.with_args(*args, **kwargs)
def get_unpacked_marks(obj):
"""
obtain the unpacked marks that are stored on an object
"""
mark_list = getattr(obj, "pytestmark", [])
if not isinstance(mark_list, list):
mark_list = [mark_list]
return normalize_mark_list(mark_list)
def normalize_mark_list(mark_list):
"""
normalizes marker decorating helpers to mark objects
:type mark_list: List[Union[Mark, Markdecorator]]
:rtype: List[Mark]
"""
extracted = [
getattr(mark, "mark", mark) for mark in mark_list
] # unpack MarkDecorator
for mark in extracted:
if not isinstance(mark, Mark):
raise TypeError("got {!r} instead of Mark".format(mark))
return [x for x in extracted if isinstance(x, Mark)]
def store_mark(obj, mark):
"""store a Mark on an object
this is used to implement the Mark declarations/decorators correctly
"""
assert isinstance(mark, Mark), mark
# always reassign name to avoid updating pytestmark
# in a reference that was only borrowed
obj.pytestmark = get_unpacked_marks(obj) + [mark]
class MarkGenerator(object):
""" Factory for :class:`MarkDecorator` objects - exposed as
a ``pytest.mark`` singleton instance. Example::
import pytest
@pytest.mark.slowtest
def test_function():
pass
will set a 'slowtest' :class:`MarkInfo` object
on the ``test_function`` object. """
_config = None
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError("Marker name must NOT start with underscore")
if self._config is not None:
self._check(name)
return MarkDecorator(Mark(name, (), {}))
def _check(self, name):
try:
if name in self._markers:
return
except AttributeError:
pass
self._markers = values = set()
for line in self._config.getini("markers"):
marker = line.split(":", 1)[0]
marker = marker.rstrip()
x = marker.split("(", 1)[0]
values.add(x)
if name not in self._markers:
fail("{!r} not a registered marker".format(name), pytrace=False)
MARK_GEN = MarkGenerator()
class NodeKeywords(MappingMixin):
def __init__(self, node):
self.node = node
self.parent = node.parent
self._markers = {node.name: True}
def __getitem__(self, key):
try:
return self._markers[key]
except KeyError:
if self.parent is None:
raise
return self.parent.keywords[key]
def __setitem__(self, key, value):
self._markers[key] = value
def __delitem__(self, key):
raise ValueError("cannot delete key in keywords dict")
def __iter__(self):
seen = self._seen()
return iter(seen)
def _seen(self):
seen = set(self._markers)
if self.parent is not None:
seen.update(self.parent.keywords)
return seen
def __len__(self):
return len(self._seen())
def __repr__(self):
return "<NodeKeywords for node %s>" % (self.node,)
@attr.s(cmp=False, hash=False)
class NodeMarkers(object):
"""
internal structure for storing marks belonging to a node
.. warning::
unstable api
"""
own_markers = attr.ib(default=attr.Factory(list))
def update(self, add_markers):
"""update the own markers
"""
self.own_markers.extend(add_markers)
def find(self, name):
"""
find markers in own nodes or parent nodes
needs a better place
"""
for mark in self.own_markers:
if mark.name == name:
yield mark
def __iter__(self):
return iter(self.own_markers)


@ -0,0 +1,323 @@
""" monkeypatching and mocking functionality. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import warnings
from contextlib import contextmanager
import six
import pytest
from _pytest.fixtures import fixture
from _pytest.pathlib import Path
RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$")
@fixture
def monkeypatch():
"""The returned ``monkeypatch`` fixture provides these
helper methods to modify objects, dictionaries or os.environ::
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
monkeypatch.setitem(mapping, name, value)
monkeypatch.delitem(obj, name, raising=True)
monkeypatch.setenv(name, value, prepend=False)
monkeypatch.delenv(name, raising=True)
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
All modifications will be undone after the requesting
test function or fixture has finished. The ``raising``
parameter determines if a KeyError or AttributeError
will be raised if the set/deletion operation has no target.
"""
mpatch = MonkeyPatch()
yield mpatch
mpatch.undo()
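# Illustrative usage sketch, not part of the vendored module: a test function
# using the fixture defined above. The patched values are hypothetical examples;
# every change is undone automatically when the test finishes.
import os

def test_fake_cwd(monkeypatch):
    monkeypatch.setattr(os, "getcwd", lambda: "/abc")
    monkeypatch.setenv("APP_HOME", "/abc")
    assert os.getcwd() == "/abc"
    assert os.environ["APP_HOME"] == "/abc"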
def resolve(name):
# simplified from zope.dottedname
parts = name.split(".")
used = parts.pop(0)
found = __import__(used)
for part in parts:
used += "." + part
try:
found = getattr(found, part)
except AttributeError:
pass
else:
continue
# we use explicit un-nesting of the handling block in order
# to avoid nested exceptions on python 3
try:
__import__(used)
except ImportError as ex:
# str is used for py2 vs py3
expected = str(ex).split()[-1]
if expected == used:
raise
else:
raise ImportError("import error in %s: %s" % (used, ex))
found = annotated_getattr(found, part, used)
return found
def annotated_getattr(obj, name, ann):
try:
obj = getattr(obj, name)
except AttributeError:
raise AttributeError(
"%r object at %s has no attribute %r" % (type(obj).__name__, ann, name)
)
return obj
def derive_importpath(import_path, raising):
if not isinstance(import_path, six.string_types) or "." not in import_path:
raise TypeError("must be absolute import path string, not %r" % (import_path,))
module, attr = import_path.rsplit(".", 1)
target = resolve(module)
if raising:
annotated_getattr(target, attr, ann=module)
return attr, target
class Notset(object):
def __repr__(self):
return "<notset>"
notset = Notset()
class MonkeyPatch(object):
""" Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
"""
def __init__(self):
self._setattr = []
self._setitem = []
self._cwd = None
self._savesyspath = None
@contextmanager
def context(self):
"""
Context manager that returns a new :class:`MonkeyPatch` object which
undoes any patching done inside the ``with`` block upon exit:
.. code-block:: python
import functools
def test_partial(monkeypatch):
with monkeypatch.context() as m:
m.setattr(functools, "partial", 3)
Useful in situations where it is desired to undo some patches before the test ends,
such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_).
"""
m = MonkeyPatch()
try:
yield m
finally:
m.undo()
def setattr(self, target, name, value=notset, raising=True):
""" Set attribute value on target, memorizing the old value.
By default raise AttributeError if the attribute did not exist.
For convenience you can specify a string as ``target`` which
will be interpreted as a dotted import path, with the last part
being the attribute name. Example:
``monkeypatch.setattr("os.getcwd", lambda: "/")``
would set the ``getcwd`` function of the ``os`` module.
The ``raising`` value determines if the setattr should fail
if the attribute is not already present (defaults to True
which means it will raise).
"""
__tracebackhide__ = True
import inspect
if value is notset:
if not isinstance(target, six.string_types):
raise TypeError(
"use setattr(target, name, value) or "
"setattr(target, value) with target being a dotted "
"import string"
)
value = name
name, target = derive_importpath(target, raising)
oldval = getattr(target, name, notset)
if raising and oldval is notset:
raise AttributeError("%r has no attribute %r" % (target, name))
# avoid class descriptors like staticmethod/classmethod
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
setattr(target, name, value)
def delattr(self, target, name=notset, raising=True):
""" Delete attribute ``name`` from ``target``, by default raise
AttributeError if the attribute did not previously exist.
If no ``name`` is specified and ``target`` is a string
it will be interpreted as a dotted import path with the
last part being the attribute name.
If ``raising`` is set to False, no exception will be raised if the
attribute is missing.
"""
__tracebackhide__ = True
import inspect
if name is notset:
if not isinstance(target, six.string_types):
raise TypeError(
"use delattr(target, name) or "
"delattr(target) with target being a dotted "
"import string"
)
name, target = derive_importpath(target, raising)
if not hasattr(target, name):
if raising:
raise AttributeError(name)
else:
oldval = getattr(target, name, notset)
# Avoid class descriptors like staticmethod/classmethod.
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
delattr(target, name)
def setitem(self, dic, name, value):
""" Set dictionary entry ``name`` to value. """
self._setitem.append((dic, name, dic.get(name, notset)))
dic[name] = value
def delitem(self, dic, name, raising=True):
""" Delete ``name`` from dict. Raise KeyError if it doesn't exist.
If ``raising`` is set to False, no exception will be raised if the
key is missing.
"""
if name not in dic:
if raising:
raise KeyError(name)
else:
self._setitem.append((dic, name, dic.get(name, notset)))
del dic[name]
def _warn_if_env_name_is_not_str(self, name):
"""On Python 2, warn if the given environment variable name is not a native str (#4056)"""
if six.PY2 and not isinstance(name, str):
warnings.warn(
pytest.PytestWarning(
"Environment variable name {!r} should be str".format(name)
)
)
def setenv(self, name, value, prepend=None):
""" Set environment variable ``name`` to ``value``. If ``prepend``
is a character, read the current environment variable value
and prepend the ``value`` adjoined with the ``prepend`` character."""
if not isinstance(value, str):
warnings.warn(
pytest.PytestWarning(
"Value of environment variable {name} type should be str, but got "
"{value!r} (type: {type}); converted to str implicitly".format(
name=name, value=value, type=type(value).__name__
)
),
stacklevel=2,
)
value = str(value)
if prepend and name in os.environ:
value = value + prepend + os.environ[name]
self._warn_if_env_name_is_not_str(name)
self.setitem(os.environ, name, value)
def delenv(self, name, raising=True):
""" Delete ``name`` from the environment. Raise KeyError if it does
not exist.
If ``raising`` is set to False, no exception will be raised if the
environment variable is missing.
"""
self._warn_if_env_name_is_not_str(name)
self.delitem(os.environ, name, raising=raising)
def syspath_prepend(self, path):
""" Prepend ``path`` to ``sys.path`` list of import locations. """
from pkg_resources import fixup_namespace_packages
if self._savesyspath is None:
self._savesyspath = sys.path[:]
sys.path.insert(0, str(path))
# https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
fixup_namespace_packages(str(path))
def chdir(self, path):
""" Change the current working directory to the specified path.
Path can be a string or a py.path.local object.
"""
if self._cwd is None:
self._cwd = os.getcwd()
if hasattr(path, "chdir"):
path.chdir()
elif isinstance(path, Path):
# modern python uses the fspath protocol here LEGACY
os.chdir(str(path))
else:
os.chdir(path)
def undo(self):
""" Undo previous changes. This call consumes the
undo stack. Calling it a second time has no effect unless
you do more monkeypatching after the undo call.
There is generally no need to call `undo()`, since it is
called automatically during tear-down.
Note that the same `monkeypatch` fixture is used across a
single test function invocation. If `monkeypatch` is used both by
the test function itself and one of the test fixtures,
calling `undo()` will undo all of the changes made in
both functions.
"""
for obj, name, value in reversed(self._setattr):
if value is not notset:
setattr(obj, name, value)
else:
delattr(obj, name)
self._setattr[:] = []
for dictionary, name, value in reversed(self._setitem):
if value is notset:
try:
del dictionary[name]
except KeyError:
pass # was already deleted, so we have the desired state
else:
dictionary[name] = value
self._setitem[:] = []
if self._savesyspath is not None:
sys.path[:] = self._savesyspath
self._savesyspath = None
if self._cwd is not None:
os.chdir(self._cwd)
self._cwd = None


@ -0,0 +1,428 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
import py
import six
import _pytest._code
from _pytest.compat import getfslineno
from _pytest.mark.structures import NodeKeywords
from _pytest.outcomes import fail
SEP = "/"
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
def _splitnode(nodeid):
"""Split a nodeid into constituent 'parts'.
Node IDs are strings, and can be things like:
''
'testing/code'
'testing/code/test_excinfo.py'
'testing/code/test_excinfo.py::TestFormattedExcinfo'
Return values are lists e.g.
[]
['testing', 'code']
['testing', 'code', 'test_excinfo.py']
['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()']
"""
if nodeid == "":
# If there is no root node at all, return an empty list so the caller's logic can remain sane
return []
parts = nodeid.split(SEP)
# Replace single last element 'test_foo.py::Bar' with multiple elements 'test_foo.py', 'Bar'
parts[-1:] = parts[-1].split("::")
return parts
def ischildnode(baseid, nodeid):
"""Return True if the nodeid is a child node of the baseid.
E.g. 'foo/bar::Baz' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp'
"""
base_parts = _splitnode(baseid)
node_parts = _splitnode(nodeid)
if len(node_parts) < len(base_parts):
return False
return node_parts[: len(base_parts)] == base_parts
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(
self, name, parent=None, config=None, session=None, fspath=None, nodeid=None
):
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = fspath or getattr(parent, "fspath", None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#: the marker objects belonging to this node
self.own_markers = []
#: allow adding of extra keywords to use for matching
self.extra_keyword_matches = set()
# used for storing artificial fixturedefs for direct parametrization
self._name2pseudofixturedef = {}
if nodeid is not None:
assert "::()" not in nodeid
self._nodeid = nodeid
else:
self._nodeid = self.parent.nodeid
if self.name != "()":
self._nodeid += "::" + self.name
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, getattr(self, "name", None))
def warn(self, warning):
"""Issue a warning for this item.
Warnings will be displayed after the test session, unless explicitly suppressed
:param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning.
:raise ValueError: if ``warning`` instance is not a subclass of PytestWarning.
Example usage:
.. code-block:: python
node.warn(PytestWarning("some message"))
"""
from _pytest.warning_types import PytestWarning
if not isinstance(warning, PytestWarning):
raise ValueError(
"warning must be an instance of PytestWarning or subclass, got {!r}".format(
warning
)
)
path, lineno = get_fslocation_from_item(self)
warnings.warn_explicit(
warning,
category=None,
filename=str(path),
lineno=lineno + 1 if lineno is not None else None,
)
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
return self._nodeid
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker, append=True):
"""dynamically add a marker object to the node.
:type marker: ``str`` or ``pytest.mark.*`` object
:param marker:
``append=True`` whether to append the marker,
if ``False`` insert at position ``0``.
"""
from _pytest.mark import MarkDecorator, MARK_GEN
if isinstance(marker, six.string_types):
marker = getattr(MARK_GEN, marker)
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
if append:
self.own_markers.append(marker.mark)
else:
self.own_markers.insert(0, marker.mark)
def iter_markers(self, name=None):
"""
:param name: if given, filter the results by the name attribute
iterate over all markers of the node
"""
return (x[1] for x in self.iter_markers_with_node(name=name))
def iter_markers_with_node(self, name=None):
"""
:param name: if given, filter the results by the name attribute
iterate over all markers of the node
returns sequence of tuples (node, mark)
"""
for node in reversed(self.listchain()):
for mark in node.own_markers:
if name is None or getattr(mark, "name", None) == name:
yield node, mark
def get_closest_marker(self, name, default=None):
"""return the first marker matching the name, from closest (for example function) to farther level (for example
module level).
:param default: fallback return value if no marker was found
:param name: name to filter by
"""
return next(self.iter_markers(name=name), default)
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set()
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
if excinfo.errisinstance(fail.Exception):
if not excinfo.value.pytrace:
return six.text_type(excinfo.value)
fm = self.session._fixturemanager
if excinfo.errisinstance(fm.FixtureLookupError):
return excinfo.value.formatrepr()
tbfilter = True
if self.config.option.fulltrace:
style = "long"
else:
tb = _pytest._code.Traceback([excinfo.traceback[-1]])
self._prunetraceback(excinfo)
if len(excinfo.traceback) == 0:
excinfo.traceback = tb
tbfilter = False # prunetraceback already does it
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
if self.config.option.verbose > 1:
truncate_locals = False
else:
truncate_locals = True
try:
os.getcwd()
abspath = False
except OSError:
abspath = True
return excinfo.getrepr(
funcargs=True,
abspath=abspath,
showlocals=self.config.option.showlocals,
style=style,
tbfilter=tbfilter,
truncate_locals=truncate_locals,
)
repr_failure = _repr_failure_py
def get_fslocation_from_item(item):
"""Tries to extract the actual location from an item, depending on available attributes:
* "fslocation": a pair (path, lineno)
* "obj": a Python object that the item wraps.
* "fspath": just a path
:rtype: a tuple of (str|LocalPath, int) with filename and line number.
"""
result = getattr(item, "location", None)
if result is not None:
return result[:2]
obj = getattr(item, "obj", None)
if obj is not None:
return getfslineno(obj)
return getattr(item, "fspath", "unknown location"), -1
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
""" represent a collection failure. """
if excinfo.errisinstance(self.CollectError):
exc = excinfo.value
return str(exc.args[0])
# Respect explicit tbstyle option, but default to "short"
# (Node._repr_failure_py defaults to "long" without "fulltrace" option).
tbstyle = self.config.getoption("tbstyle")
if tbstyle == "auto":
tbstyle = "short"
return self._repr_failure_py(excinfo, style=tbstyle)
def _prunetraceback(self, excinfo):
if hasattr(self, "fspath"):
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
def _check_initialpaths_for_relpath(session, fspath):
for initial_path in session._initialpaths:
if fspath.common(initial_path) == initial_path:
return fspath.relto(initial_path)
class FSCollector(Collector):
def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, SEP)
self.fspath = fspath
session = session or parent.session
if nodeid is None:
nodeid = self.fspath.relto(session.config.rootdir)
if not nodeid:
nodeid = _check_initialpaths_for_relpath(session, fspath)
if nodeid and os.sep != SEP:
nodeid = nodeid.replace(os.sep, SEP)
super(FSCollector, self).__init__(
name, parent, config, session, nodeid=nodeid, fspath=fspath
)
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def __init__(self, name, parent=None, config=None, session=None, nodeid=None):
super(Item, self).__init__(name, parent, config, session, nodeid=nodeid)
self._report_sections = []
#: user properties is a list of tuples (name, value) that holds user
#: defined properties for this test.
self.user_properties = []
def add_report_section(self, when, key, content):
"""
Adds a new report section, similar to what's done internally to add stdout and
stderr captured output::
item.add_report_section("call", "stdout", "report section contents")
:param str when:
One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
:param str key:
Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
``"stderr"`` internally.
:param str content:
The full contents as a string.
"""
if content:
self._report_sections.append((when, key, content))
def reportinfo(self):
return self.fspath, None, ""
@property
def location(self):
try:
return self._location
except AttributeError:
location = self.reportinfo()
fspath = self.session._node_location_to_relpath(location[0])
location = (fspath, location[1], str(location[2]))
self._location = location
return location


@ -0,0 +1,68 @@
""" run test suites written for nose. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from _pytest import python
from _pytest import runner
from _pytest import unittest
from _pytest.config import hookimpl
def get_skip_exceptions():
skip_classes = set()
for module_name in ("unittest", "unittest2", "nose"):
mod = sys.modules.get(module_name)
if hasattr(mod, "SkipTest"):
skip_classes.add(mod.SkipTest)
return tuple(skip_classes)
def pytest_runtest_makereport(item, call):
if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
# let's substitute the excinfo with a pytest.skip one
call2 = runner.CallInfo.from_call(
lambda: runner.skip(six.text_type(call.excinfo.value)), call.when
)
call.excinfo = call2.excinfo
@hookimpl(trylast=True)
def pytest_runtest_setup(item):
if is_potential_nosetest(item):
if not call_optional(item.obj, "setup"):
# call module level setup if there is no object level one
call_optional(item.parent.obj, "setup")
# XXX this implies we only call teardown when setup worked
item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
def teardown_nose(item):
if is_potential_nosetest(item):
if not call_optional(item.obj, "teardown"):
call_optional(item.parent.obj, "teardown")
# if hasattr(item.parent, '_nosegensetup'):
# #call_optional(item._nosegensetup, 'teardown')
# del item.parent._nosegensetup
def is_potential_nosetest(item):
# extra check needed since we do not do nose style setup/teardown
# on direct unittest style classes
return isinstance(item, python.Function) and not isinstance(
item, unittest.TestCaseFunction
)
def call_optional(obj, name):
method = getattr(obj, name, None)
isfixture = hasattr(method, "_pytestfixturefunction")
if method is not None and not isfixture and callable(method):
# If there's any problems allow the exception to raise rather than
# silently ignoring them
method()
return True


@ -0,0 +1,193 @@
"""
exception classes and constants handling test outcomes
as well as functions creating them
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
class OutcomeException(BaseException):
""" OutcomeException and its subclass instances indicate and
contain info about test and collection outcomes.
"""
def __init__(self, msg=None, pytrace=True):
BaseException.__init__(self, msg)
self.msg = msg
self.pytrace = pytrace
def __repr__(self):
if self.msg:
val = self.msg
if isinstance(val, bytes):
val = val.decode("UTF-8", errors="replace")
return val
return "<%s instance>" % (self.__class__.__name__,)
__str__ = __repr__
TEST_OUTCOME = (OutcomeException, Exception)
class Skipped(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = "builtins"
def __init__(self, msg=None, pytrace=True, allow_module_level=False):
OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
self.allow_module_level = allow_module_level
class Failed(OutcomeException):
""" raised from an explicit call to pytest.fail() """
__module__ = "builtins"
class Exit(Exception):
""" raised for immediate program exits (no tracebacks/summaries)"""
def __init__(self, msg="unknown reason", returncode=None):
self.msg = msg
self.returncode = returncode
super(Exit, self).__init__(msg)
# exposed helper methods
def exit(msg, returncode=None):
"""
Exit testing process.
:param str msg: message to display upon exit.
:param int returncode: return code to be used when exiting pytest.
"""
__tracebackhide__ = True
raise Exit(msg, returncode)
exit.Exception = Exit
def skip(msg="", **kwargs):
"""
Skip an executing test with the given message.
This function should be called only during testing (setup, call or teardown) or
during collection by using the ``allow_module_level`` flag. This function can
be called in doctests as well.
:kwarg bool allow_module_level: allows this function to be called at
module level, skipping the rest of the module. Defaults to False.
.. note::
It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies.
Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP
<https://docs.python.org/3/library/doctest.html#doctest.SKIP>`_)
to skip a doctest statically.
"""
__tracebackhide__ = True
allow_module_level = kwargs.pop("allow_module_level", False)
if kwargs:
keys = [k for k in kwargs.keys()]
raise TypeError("unexpected keyword arguments: {}".format(keys))
raise Skipped(msg=msg, allow_module_level=allow_module_level)
skip.Exception = Skipped
def fail(msg="", pytrace=True):
"""
Explicitly fail an executing test with the given message.
:param str msg: the message to show the user as reason for the failure.
:param bool pytrace: if false the msg represents the full failure information and no
python traceback will be reported.
"""
__tracebackhide__ = True
raise Failed(msg=msg, pytrace=pytrace)
fail.Exception = Failed
class XFailed(fail.Exception):
""" raised from an explicit call to pytest.xfail() """
def xfail(reason=""):
"""
Imperatively xfail an executing test or setup functions with the given reason.
This function should be called only during testing (setup, call or teardown).
.. note::
It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be
xfailed under certain conditions like known bugs or missing features.
"""
__tracebackhide__ = True
raise XFailed(reason)
xfail.Exception = XFailed
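# Illustrative usage sketch, not part of the vendored module: the helpers above
# (exposed to user code as pytest.skip, pytest.fail and pytest.xfail) raise the
# outcome exceptions imperatively from inside a running test. The conditions
# below are hypothetical examples.
def test_imperative_outcomes():
    import sys
    if sys.platform == "win32":
        skip("not supported on Windows")       # raises Skipped
    if sys.maxsize < 2 ** 32:
        xfail("known to overflow on 32-bit")   # raises XFailed
    assert isinstance(2 ** 62, int)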
def importorskip(modname, minversion=None, reason=None):
"""Imports and returns the requested module ``modname``, or skip the current test
if the module cannot be imported.
:param str modname: the name of the module to import
:param str minversion: if given, the imported module ``__version__`` attribute must be
at least this minimal version, otherwise the test is still skipped.
:param str reason: if given, this reason is shown as the message when the module
cannot be imported.
"""
import warnings
__tracebackhide__ = True
compile(modname, "", "eval") # to catch syntaxerrors
should_skip = False
with warnings.catch_warnings():
# make sure to ignore ImportWarnings that might happen because
# of existing directories with the same name we're trying to
# import but without a __init__.py file
warnings.simplefilter("ignore")
try:
__import__(modname)
except ImportError:
# Do not raise chained exception here(#1485)
should_skip = True
if should_skip:
if reason is None:
reason = "could not import %r" % (modname,)
raise Skipped(reason, allow_module_level=True)
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, "__version__", None)
if minversion is not None:
try:
from pkg_resources import parse_version as pv
except ImportError:
raise Skipped(
"we have a required version for %r but can not import "
"pkg_resources to parse version strings." % (modname,),
allow_module_level=True,
)
if verattr is None or pv(verattr) < pv(minversion):
raise Skipped(
"module %r has __version__ %r, required is: %r"
% (modname, verattr, minversion),
allow_module_level=True,
)
return mod
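# Illustrative usage sketch, not part of the vendored module (user code calls
# this as pytest.importorskip): skip the whole test module when an optional
# dependency is missing or too old. "docutils" and the version are hypothetical
# examples.
docutils = importorskip("docutils", minversion="0.3")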


@ -0,0 +1,113 @@
""" submit failure or test session information to a pastebin service. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tempfile
import six
import pytest
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group._addoption(
"--pastebin",
metavar="mode",
action="store",
dest="pastebin",
default=None,
choices=["failed", "all"],
help="send failed|all info to bpaste.net pastebin service.",
)
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
if config.option.pastebin == "all":
tr = config.pluginmanager.getplugin("terminalreporter")
# if no terminal reporter plugin is present, nothing we can do here;
# this can happen when this function executes in a slave node
# when using pytest-xdist, for example
if tr is not None:
# pastebin file will be utf-8 encoded binary file
config._pastebinfile = tempfile.TemporaryFile("w+b")
oldwrite = tr._tw.write
def tee_write(s, **kwargs):
oldwrite(s, **kwargs)
if isinstance(s, six.text_type):
s = s.encode("utf-8")
config._pastebinfile.write(s)
tr._tw.write = tee_write
def pytest_unconfigure(config):
if hasattr(config, "_pastebinfile"):
# get terminal contents and delete file
config._pastebinfile.seek(0)
sessionlog = config._pastebinfile.read()
config._pastebinfile.close()
del config._pastebinfile
# undo our patching in the terminal reporter
tr = config.pluginmanager.getplugin("terminalreporter")
del tr._tw.__dict__["write"]
# write summary
tr.write_sep("=", "Sending information to Paste Service")
pastebinurl = create_new_paste(sessionlog)
tr.write_line("pastebin session-log: %s\n" % pastebinurl)
def create_new_paste(contents):
"""
Creates a new paste using bpaste.net service.
:contents: paste contents as utf-8 encoded bytes
:returns: url to the pasted contents
"""
import re
if sys.version_info < (3, 0):
from urllib import urlopen, urlencode
else:
from urllib.request import urlopen
from urllib.parse import urlencode
params = {
"code": contents,
"lexer": "python3" if sys.version_info[0] == 3 else "python",
"expiry": "1week",
}
url = "https://bpaste.net"
response = urlopen(url, data=urlencode(params).encode("ascii")).read()
m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8"))
if m:
return "%s/show/%s" % (url, m.group(1))
else:
return "bad response: " + response
def pytest_terminal_summary(terminalreporter):
import _pytest.config
if terminalreporter.config.option.pastebin != "failed":
return
tr = terminalreporter
if "failed" in tr.stats:
terminalreporter.write_sep("=", "Sending information to Paste Service")
for rep in terminalreporter.stats.get("failed"):
try:
msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
except AttributeError:
msg = tr._getfailureheadline(rep)
tw = _pytest.config.create_terminal_writer(
terminalreporter.config, stringio=True
)
rep.toterminal(tw)
s = tw.stringio.getvalue()
assert len(s)
pastebinurl = create_new_paste(s)
tr.write_line("%s --> %s" % (msg, pastebinurl))


@ -0,0 +1,319 @@
import atexit
import errno
import fnmatch
import itertools
import operator
import os
import shutil
import sys
import uuid
from functools import reduce
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
from posixpath import sep as posix_sep
import six
from six.moves import map
from .compat import PY36
if PY36:
from pathlib import Path, PurePath
else:
from pathlib2 import Path, PurePath
__all__ = ["Path", "PurePath"]
LOCK_TIMEOUT = 60 * 60 * 3
get_lock_path = operator.methodcaller("joinpath", ".lock")
def ensure_reset_dir(path):
"""
ensures the given path is an empty directory
"""
if path.exists():
rmtree(path, force=True)
path.mkdir()
def rmtree(path, force=False):
if force:
# NOTE: ignore_errors might leave dead folders around.
# Python needs a rm -rf as a followup.
shutil.rmtree(str(path), ignore_errors=True)
else:
shutil.rmtree(str(path))
def find_prefixed(root, prefix):
"""finds all elements in root that begin with the prefix, case insensitive"""
l_prefix = prefix.lower()
for x in root.iterdir():
if x.name.lower().startswith(l_prefix):
yield x
def extract_suffixes(iter, prefix):
"""
:param iter: iterator over path names
:param prefix: expected prefix of the path names
:returns: the parts of the paths following the prefix
"""
p_len = len(prefix)
for p in iter:
yield p.name[p_len:]
def find_suffixes(root, prefix):
"""combines find_prefixes and extract_suffixes
"""
return extract_suffixes(find_prefixed(root, prefix), prefix)
def parse_num(maybe_num):
"""parses number path suffixes, returns -1 on error"""
try:
return int(maybe_num)
except ValueError:
return -1
if six.PY2:
def _max(iterable, default):
"""needed due to python2.7 lacking the default argument for max"""
return reduce(max, iterable, default)
else:
_max = max
def _force_symlink(root, target, link_to):
"""helper to create the current symlink
it's full of race conditions that are reasonably ok to ignore
for the context of best effort linking to the latest testrun
the presumption being that in case of much parallelism
the inaccuracy is going to be acceptable
"""
current_symlink = root.joinpath(target)
try:
current_symlink.unlink()
except OSError:
pass
try:
current_symlink.symlink_to(link_to)
except Exception:
pass
def make_numbered_dir(root, prefix):
"""create a directory with an increased number as suffix for the given prefix"""
for i in range(10):
# try up to 10 times to create the folder
max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)
new_number = max_existing + 1
new_path = root.joinpath("{}{}".format(prefix, new_number))
try:
new_path.mkdir()
except Exception:
pass
else:
_force_symlink(root, prefix + "current", new_path)
return new_path
else:
raise EnvironmentError(
"could not create numbered dir with prefix "
"{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
)
def create_cleanup_lock(p):
"""crates a lock to prevent premature folder cleanup"""
lock_path = get_lock_path(p)
try:
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
except OSError as e:
if e.errno == errno.EEXIST:
six.raise_from(
EnvironmentError("cannot create lockfile in {path}".format(path=p)), e
)
else:
raise
else:
pid = os.getpid()
spid = str(pid)
if not isinstance(spid, bytes):
spid = spid.encode("ascii")
os.write(fd, spid)
os.close(fd)
if not lock_path.is_file():
raise EnvironmentError("lock path got renamed after successful creation")
return lock_path
def register_cleanup_lock_removal(lock_path, register=atexit.register):
"""registers a cleanup function for removing a lock, by default on atexit"""
pid = os.getpid()
def cleanup_on_exit(lock_path=lock_path, original_pid=pid):
current_pid = os.getpid()
if current_pid != original_pid:
# fork
return
try:
lock_path.unlink()
except (OSError, IOError):
pass
return register(cleanup_on_exit)
def maybe_delete_a_numbered_dir(path):
"""removes a numbered directory if its lock can be obtained and it does not seem to be in use"""
lock_path = None
try:
lock_path = create_cleanup_lock(path)
parent = path.parent
garbage = parent.joinpath("garbage-{}".format(uuid.uuid4()))
path.rename(garbage)
rmtree(garbage, force=True)
except (OSError, EnvironmentError):
# known races:
# * other process did a cleanup at the same time
# * deletable folder was found
# * process cwd (Windows)
return
finally:
# if we created the lock, ensure we remove it even if we failed
# to properly remove the numbered dir
if lock_path is not None:
try:
lock_path.unlink()
except (OSError, IOError):
pass
def ensure_deletable(path, consider_lock_dead_if_created_before):
"""checks if a lock exists and breaks it if its considered dead"""
if path.is_symlink():
return False
lock = get_lock_path(path)
if not lock.exists():
return True
try:
lock_time = lock.stat().st_mtime
except Exception:
return False
else:
if lock_time < consider_lock_dead_if_created_before:
lock.unlink()
return True
else:
return False
def try_cleanup(path, consider_lock_dead_if_created_before):
"""tries to cleanup a folder if we can ensure it's deletable"""
if ensure_deletable(path, consider_lock_dead_if_created_before):
maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root, prefix, keep):
"""lists candidates for numbered directories to be removed - follows py.path"""
max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)
max_delete = max_existing - keep
paths = find_prefixed(root, prefix)
paths, paths2 = itertools.tee(paths)
numbers = map(parse_num, extract_suffixes(paths2, prefix))
for path, number in zip(paths, numbers):
if number <= max_delete:
yield path
def cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before):
"""cleanup for lock driven numbered directories"""
for path in cleanup_candidates(root, prefix, keep):
try_cleanup(path, consider_lock_dead_if_created_before)
for path in root.glob("garbage-*"):
try_cleanup(path, consider_lock_dead_if_created_before)
def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout):
"""creates a numbered dir with a cleanup lock and removes old ones"""
e = None
for i in range(10):
try:
p = make_numbered_dir(root, prefix)
lock_path = create_cleanup_lock(p)
register_cleanup_lock_removal(lock_path)
except Exception as exc:
e = exc
else:
consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
cleanup_numbered_dir(
root=root,
prefix=prefix,
keep=keep,
consider_lock_dead_if_created_before=consider_lock_dead_if_created_before,
)
return p
assert e is not None
raise e
def resolve_from_str(input, root):
assert not isinstance(input, Path), "would break on py2"
root = Path(root)
input = expanduser(input)
input = expandvars(input)
if isabs(input):
return Path(input)
else:
return root.joinpath(input)
def fnmatch_ex(pattern, path):
"""FNMatcher port from py.path.common which works with PurePath() instances.
The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions
for each part of the path, while this algorithm uses the whole path instead.
For example:
"tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with
PurePath.match().
This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according to
this logic.
References:
* https://bugs.python.org/issue29249
* https://bugs.python.org/issue34731
"""
path = PurePath(path)
iswin32 = sys.platform.startswith("win")
if iswin32 and sep not in pattern and posix_sep in pattern:
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posix_sep, sep)
if sep not in pattern:
name = path.name
else:
name = six.text_type(path)
return fnmatch.fnmatch(name, pattern)
def parts(s):
parts = s.split(sep)
return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))}
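# Illustrative sketch, not part of the vendored module: unlike PurePath.match(),
# fnmatch_ex() applies "**" patterns to the whole path, as described in its
# docstring. The paths below are hypothetical examples.
assert fnmatch_ex("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py")
assert not fnmatch_ex("tests/**/doc/test*.py", "tests/foo/bar/test_foo.py")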

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,736 @@
from __future__ import absolute_import
import math
import pprint
import sys
import warnings
from decimal import Decimal
from numbers import Number
import six
from more_itertools.more import always_iterable
from six.moves import filterfalse
from six.moves import zip
import _pytest._code
from _pytest import deprecated
from _pytest.compat import isclass
from _pytest.compat import Iterable
from _pytest.compat import Mapping
from _pytest.compat import Sized
from _pytest.compat import STRING_TYPES
from _pytest.outcomes import fail
BASE_TYPE = (type, STRING_TYPES)
def _cmp_raises_type_error(self, other):
"""__cmp__ implementation which raises TypeError. Used
by Approx base classes to implement only == and != and raise a
TypeError for other comparisons.
Needed in Python 2 only; in Python 3 it is enough to simply not implement the
other operators at all.
"""
__tracebackhide__ = True
raise TypeError(
"Comparison operators other than == and != not supported by approx objects"
)
def _non_numeric_type_error(value, at):
at_str = " at {}".format(at) if at else ""
return TypeError(
"cannot make approximate comparisons to non-numeric values: {!r} {}".format(
value, at_str
)
)
# builtin pytest.approx helper
class ApproxBase(object):
"""
Provide shared utilities for making approximate comparisons between numbers
or sequences of numbers.
"""
# Tell numpy to use our `__eq__` operator instead of its own.
__array_ufunc__ = None
__array_priority__ = 100
def __init__(self, expected, rel=None, abs=None, nan_ok=False):
__tracebackhide__ = True
self.expected = expected
self.abs = abs
self.rel = rel
self.nan_ok = nan_ok
self._check_type()
def __repr__(self):
raise NotImplementedError
def __eq__(self, actual):
return all(
a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual)
)
__hash__ = None
def __ne__(self, actual):
return not (actual == self)
if sys.version_info[0] == 2:
__cmp__ = _cmp_raises_type_error
def _approx_scalar(self, x):
return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
def _yield_comparisons(self, actual):
"""
Yield all the pairs of numbers to be compared. This is used to
implement the `__eq__` method.
"""
raise NotImplementedError
def _check_type(self):
"""
Raise a TypeError if the expected value is not a valid type.
"""
# This is only a concern if the expected value is a sequence. In every
# other case, the approx() function ensures that the expected value has
# a numeric type. For this reason, the default is to do nothing. The
# classes that deal with sequences should reimplement this method to
# raise if there are any non-numeric elements in the sequence.
pass
def _recursive_list_map(f, x):
if isinstance(x, list):
return list(_recursive_list_map(f, xi) for xi in x)
else:
return f(x)
class ApproxNumpy(ApproxBase):
"""
Perform approximate comparisons where the expected value is a numpy array.
"""
def __repr__(self):
list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
return "approx({!r})".format(list_scalars)
if sys.version_info[0] == 2:
__cmp__ = _cmp_raises_type_error
def __eq__(self, actual):
import numpy as np
# self.expected is supposed to always be an array here
if not np.isscalar(actual):
try:
actual = np.asarray(actual)
except: # noqa
raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual))
if not np.isscalar(actual) and actual.shape != self.expected.shape:
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
import numpy as np
# `actual` can either be a numpy array or a scalar, it is treated in
# `__eq__` before being passed to `ApproxBase.__eq__`, which is the
# only method that calls this one.
if np.isscalar(actual):
for i in np.ndindex(self.expected.shape):
yield actual, self.expected[i].item()
else:
for i in np.ndindex(self.expected.shape):
yield actual[i].item(), self.expected[i].item()
class ApproxMapping(ApproxBase):
"""
Perform approximate comparisons where the expected value is a mapping with
numeric values (the keys can be anything).
"""
def __repr__(self):
return "approx({!r})".format(
{k: self._approx_scalar(v) for k, v in self.expected.items()}
)
def __eq__(self, actual):
if set(actual.keys()) != set(self.expected.keys()):
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
for k in self.expected.keys():
yield actual[k], self.expected[k]
def _check_type(self):
__tracebackhide__ = True
for key, value in self.expected.items():
if isinstance(value, type(self.expected)):
msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
elif not isinstance(value, Number):
raise _non_numeric_type_error(self.expected, at="key={!r}".format(key))
class ApproxSequencelike(ApproxBase):
"""
Perform approximate comparisons where the expected value is a sequence of
numbers.
"""
def __repr__(self):
seq_type = type(self.expected)
if seq_type not in (tuple, list, set):
seq_type = list
return "approx({!r})".format(
seq_type(self._approx_scalar(x) for x in self.expected)
)
def __eq__(self, actual):
if len(actual) != len(self.expected):
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
return zip(actual, self.expected)
def _check_type(self):
__tracebackhide__ = True
for index, x in enumerate(self.expected):
if isinstance(x, type(self.expected)):
msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}"
raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
elif not isinstance(x, Number):
raise _non_numeric_type_error(
self.expected, at="index {}".format(index)
)
class ApproxScalar(ApproxBase):
"""
Perform approximate comparisons where the expected value is a single number.
"""
DEFAULT_ABSOLUTE_TOLERANCE = 1e-12
DEFAULT_RELATIVE_TOLERANCE = 1e-6
def __repr__(self):
"""
Return a string communicating both the expected value and the tolerance
for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode
plus/minus symbol if this is python3 (it's too hard to get right for
python2).
"""
if isinstance(self.expected, complex):
return str(self.expected)
# Infinities aren't compared using tolerances, so don't show a
# tolerance.
if math.isinf(self.expected):
return str(self.expected)
# If a sensible tolerance can't be calculated, self.tolerance will
# raise a ValueError. In this case, display '???'.
try:
vetted_tolerance = "{:.1e}".format(self.tolerance)
except ValueError:
vetted_tolerance = "???"
if sys.version_info[0] == 2:
return "{} +- {}".format(self.expected, vetted_tolerance)
else:
return u"{} \u00b1 {}".format(self.expected, vetted_tolerance)
def __eq__(self, actual):
"""
Return true if the given value is equal to the expected value within
the pre-specified tolerance.
"""
if _is_numpy_array(actual):
# Call ``__eq__()`` manually to prevent infinite-recursion with
# numpy<1.13. See #3748.
return all(self.__eq__(a) for a in actual.flat)
# Short-circuit exact equality.
if actual == self.expected:
return True
# Allow the user to control whether NaNs are considered equal to each
# other or not. The abs() calls are for compatibility with complex
# numbers.
if math.isnan(abs(self.expected)):
return self.nan_ok and math.isnan(abs(actual))
# Infinity shouldn't be approximately equal to anything but itself, but
# if there's a relative tolerance, it will be infinite and infinity
# will seem approximately equal to everything. The equal-to-itself
# case would have been short circuited above, so here we can just
# return false if the expected value is infinite. The abs() call is
# for compatibility with complex numbers.
if math.isinf(abs(self.expected)):
return False
# Return true if the two numbers are within the tolerance.
return abs(self.expected - actual) <= self.tolerance
__hash__ = None
@property
def tolerance(self):
"""
Return the tolerance for the comparison. This could be either an
absolute tolerance or a relative tolerance, depending on what the user
specified or which would be larger.
"""
def set_default(x, default):
return x if x is not None else default
# Figure out what the absolute tolerance should be. ``self.abs`` is
# either None or a value specified by the user.
absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
if absolute_tolerance < 0:
raise ValueError(
"absolute tolerance can't be negative: {}".format(absolute_tolerance)
)
if math.isnan(absolute_tolerance):
raise ValueError("absolute tolerance can't be NaN.")
# If the user specified an absolute tolerance but not a relative one,
# just return the absolute tolerance.
if self.rel is None:
if self.abs is not None:
return absolute_tolerance
# Figure out what the relative tolerance should be. ``self.rel`` is
# either None or a value specified by the user. This is done after
# we've made sure the user didn't ask for an absolute tolerance only,
# because we don't want to raise errors about the relative tolerance if
# we aren't even going to use it.
relative_tolerance = set_default(
self.rel, self.DEFAULT_RELATIVE_TOLERANCE
) * abs(self.expected)
if relative_tolerance < 0:
raise ValueError(
"relative tolerance can't be negative: {}".format(absolute_tolerance)
)
if math.isnan(relative_tolerance):
raise ValueError("relative tolerance can't be NaN.")
# Return the larger of the relative and absolute tolerances.
return max(relative_tolerance, absolute_tolerance)
class ApproxDecimal(ApproxScalar):
"""
Perform approximate comparisons where the expected value is a decimal.
"""
DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
def approx(expected, rel=None, abs=None, nan_ok=False):
"""
Assert that two numbers (or two sets of numbers) are equal to each other
within some tolerance.
Due to the `intricacies of floating-point arithmetic`__, numbers that we
would intuitively expect to be equal are not always so::
>>> 0.1 + 0.2 == 0.3
False
__ https://docs.python.org/3/tutorial/floatingpoint.html
This problem is commonly encountered when writing tests, e.g. when making
sure that floating-point values are what you expect them to be. One way to
deal with this problem is to assert that two floating-point numbers are
equal to within some appropriate tolerance::
>>> abs((0.1 + 0.2) - 0.3) < 1e-6
True
However, comparisons like this are tedious to write and difficult to
understand. Furthermore, absolute comparisons like the one above are
usually discouraged because there's no tolerance that works well for all
situations. ``1e-6`` is good for numbers around ``1``, but too small for
very big numbers and too big for very small ones. It's better to express
the tolerance as a fraction of the expected value, but relative comparisons
like that are even more difficult to write correctly and concisely.
The ``approx`` class performs floating-point comparisons using a syntax
that's as intuitive as possible::
>>> from pytest import approx
>>> 0.1 + 0.2 == approx(0.3)
True
The same syntax also works for sequences of numbers::
>>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
True
Dictionary *values*::
>>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
True
``numpy`` arrays::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
True
And for a ``numpy`` array against a scalar::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
True
By default, ``approx`` considers numbers within a relative tolerance of
``1e-6`` (i.e. one part in a million) of its expected value to be equal.
This treatment would lead to surprising results if the expected value was
``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
To handle this case less surprisingly, ``approx`` also considers numbers
within an absolute tolerance of ``1e-12`` of its expected value to be
equal. Infinity and NaN are special cases. Infinity is only considered
equal to itself, regardless of the relative tolerance. NaN is not
considered equal to anything by default, but you can make it be equal to
itself by setting the ``nan_ok`` argument to True. (This is meant to
facilitate comparing arrays that use NaN to mean "no data".)
Both the relative and absolute tolerances can be changed by passing
arguments to the ``approx`` constructor::
>>> 1.0001 == approx(1)
False
>>> 1.0001 == approx(1, rel=1e-3)
True
>>> 1.0001 == approx(1, abs=1e-3)
True
If you specify ``abs`` but not ``rel``, the comparison will not consider
the relative tolerance at all. In other words, two numbers that are within
the default relative tolerance of ``1e-6`` will still be considered unequal
if they exceed the specified absolute tolerance. If you specify both
``abs`` and ``rel``, the numbers will be considered equal if either
tolerance is met::
>>> 1 + 1e-8 == approx(1)
True
>>> 1 + 1e-8 == approx(1, abs=1e-12)
False
>>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
True
If you're thinking about using ``approx``, then you might want to know how
it compares to other good ways of comparing floating-point numbers. All of
these algorithms are based on relative and absolute tolerances and should
agree for the most part, but they do have meaningful differences:
- ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
tolerance is met. Because the relative tolerance is calculated w.r.t.
both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
``b`` is a "reference value"). You have to specify an absolute tolerance
if you want to compare to ``0.0`` because there is no tolerance by
default. Only available in python>=3.5. `More information...`__
__ https://docs.python.org/3/library/math.html#math.isclose
- ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
between ``a`` and ``b`` is less than the sum of the relative tolerance
w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
is only calculated w.r.t. ``b``, this test is asymmetric and you can
think of ``b`` as the reference value. Support for comparing sequences
is provided by ``numpy.allclose``. `More information...`__
__ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
- ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
are within an absolute tolerance of ``1e-7``. No relative tolerance is
considered and the absolute tolerance cannot be changed, so this function
is not appropriate for very large or very small numbers. Also, it's only
available in subclasses of ``unittest.TestCase`` and it's ugly because it
doesn't follow PEP8. `More information...`__
__ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
- ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
Because the relative tolerance is only calculated w.r.t. ``b``, this test
is asymmetric and you can think of ``b`` as the reference value. In the
special case that you explicitly specify an absolute tolerance but not a
relative tolerance, only the absolute tolerance is considered.
.. warning::
.. versionchanged:: 3.2
In order to avoid inconsistent behavior, ``TypeError`` is
raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
The example below illustrates the problem::
assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)
In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used for
the comparison. This is because the call hierarchy of rich comparisons
follows a fixed behavior. `More information...`__
__ https://docs.python.org/3/reference/datamodel.html#object.__ge__
"""
# Delegate the comparison to a class that knows how to deal with the type
# of the expected value (e.g. int, float, list, dict, numpy.array, etc).
#
# The primary responsibility of these classes is to implement ``__eq__()``
# and ``__repr__()``. The former is used to actually check if some
# "actual" value is equivalent to the given expected value within the
# allowed tolerance. The latter is used to show the user the expected
# value and tolerance, in the case that a test failed.
#
# The actual logic for making approximate comparisons can be found in
# ApproxScalar, which is used to compare individual numbers. All of the
# other Approx classes eventually delegate to this class. The ApproxBase
# class provides some convenient methods and overloads, but isn't really
# essential.
__tracebackhide__ = True
if isinstance(expected, Decimal):
cls = ApproxDecimal
elif isinstance(expected, Number):
cls = ApproxScalar
elif isinstance(expected, Mapping):
cls = ApproxMapping
elif _is_numpy_array(expected):
cls = ApproxNumpy
elif (
isinstance(expected, Iterable)
and isinstance(expected, Sized)
and not isinstance(expected, STRING_TYPES)
):
cls = ApproxSequencelike
else:
raise _non_numeric_type_error(expected, at=None)
return cls(expected, rel, abs, nan_ok)
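# A minimal sketch of the ordering restriction mentioned in the warning above:
# only == and != are supported, other comparisons raise TypeError
# (illustrative values):
#
#     >>> import pytest
#     >>> 0.1 + 0.2 == pytest.approx(0.3)
#     True
#     >>> pytest.approx(0.3) < 0.4
#     Traceback (most recent call last):
#         ...
#     TypeError: ...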
def _is_numpy_array(obj):
"""
Return true if the given object is a numpy array. Make a special effort to
avoid importing numpy unless it's really necessary.
"""
import sys
np = sys.modules.get("numpy")
if np is not None:
return isinstance(obj, np.ndarray)
return False
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
r"""
Assert that a code block/function call raises ``expected_exception``
or raise a failure exception otherwise.
:kwparam match: if specified, asserts that the exception matches a text or regex
:kwparam message: **(deprecated since 4.1)** if specified, provides a custom failure message
if the exception is not raised
.. currentmodule:: _pytest._code
Use ``pytest.raises`` as a context manager, which will capture the exception of the given
type::
>>> with raises(ZeroDivisionError):
... 1/0
If the code block does not raise the expected exception (``ZeroDivisionError`` in the example
above), or no exception at all, the check will fail instead.
You can also use the keyword argument ``match`` to assert that the
exception matches a text or regex::
>>> with raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
>>> with raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
details of the captured exception::
>>> with raises(ValueError) as exc_info:
... raise ValueError("value must be 42")
>>> assert exc_info.type is ValueError
>>> assert exc_info.value.args[0] == "value must be 42"
.. deprecated:: 4.1
In the context manager form you may use the keyword argument
``message`` to specify a custom failure message that will be displayed
in case the ``pytest.raises`` check fails. This has been deprecated as it
is considered error prone as users often mean to use ``match`` instead.
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager, will
not be executed. For example::
>>> value = 15
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type is ValueError # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
>>> assert exc_info.type is ValueError
**Using with** ``pytest.mark.parametrize``
When using :ref:`pytest.mark.parametrize ref`
it is possible to parametrize tests such that
some runs raise an exception and others do not.
See :ref:`parametrizing_conditional_raising` for an example.
**Legacy form**
It is possible to specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
The form above is fully supported but discouraged for new code because the
context manager form is regarded as more readable and less error-prone.
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)):
msg = (
"exceptions must be old-style classes or"
" derived from BaseException, not %s"
)
raise TypeError(msg % type(exc))
message = "DID NOT RAISE {}".format(expected_exception)
match_expr = None
if not args:
if "message" in kwargs:
message = kwargs.pop("message")
warnings.warn(deprecated.RAISES_MESSAGE_PARAMETER, stacklevel=2)
if "match" in kwargs:
match_expr = kwargs.pop("match")
if kwargs:
msg = "Unexpected keyword arguments passed to pytest.raises: "
msg += ", ".join(kwargs.keys())
raise TypeError(msg)
return RaisesContext(expected_exception, message, match_expr)
elif isinstance(args[0], str):
warnings.warn(deprecated.RAISES_EXEC, stacklevel=2)
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
# print "raises frame scope: %r" % frame.f_locals
try:
code = _pytest._code.Source(code).compile(_genframe=frame)
six.exec_(code, frame.f_globals, loc)
# XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return _pytest._code.ExceptionInfo.from_current()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return _pytest._code.ExceptionInfo.from_current()
fail(message)
raises.Exception = fail.Exception
class RaisesContext(object):
def __init__(self, expected_exception, message, match_expr):
self.expected_exception = expected_exception
self.message = message
self.match_expr = match_expr
self.excinfo = None
def __enter__(self):
self.excinfo = _pytest._code.ExceptionInfo.for_later()
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
fail(self.message)
self.excinfo.__init__(tp)
suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
if sys.version_info[0] == 2 and suppress_exception:
sys.exc_clear()
if self.match_expr is not None and suppress_exception:
self.excinfo.match(self.match_expr)
return suppress_exception

View File

@ -0,0 +1,250 @@
""" recording warnings during test function execution. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import re
import sys
import warnings
import six
import _pytest._code
from _pytest.deprecated import PYTEST_WARNS_UNKNOWN_KWARGS
from _pytest.deprecated import WARNS_EXEC
from _pytest.fixtures import yield_fixture
from _pytest.outcomes import fail
@yield_fixture
def recwarn():
"""Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.
See http://docs.python.org/library/warnings.html for information
on warning categories.
"""
wrec = WarningsRecorder()
with wrec:
warnings.simplefilter("default")
yield wrec
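# A minimal sketch of using the fixture above in a test (hypothetical test
# module, not taken from this file):
#
#     import warnings
#
#     def test_records_warnings(recwarn):
#         warnings.warn("stored in the recorder", UserWarning)
#         assert len(recwarn) == 1
#         w = recwarn.pop(UserWarning)
#         assert "stored in the recorder" in str(w.message)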
def deprecated_call(func=None, *args, **kwargs):
"""context manager that can be used to ensure a block of code triggers a
``DeprecationWarning`` or ``PendingDeprecationWarning``::
>>> import warnings
>>> def api_call_v2():
... warnings.warn('use v3 of this api', DeprecationWarning)
... return 200
>>> with deprecated_call():
... assert api_call_v2() == 200
``deprecated_call`` can also be used by passing a function and ``*args`` and ``**kwargs``,
in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings
types above.
"""
__tracebackhide__ = True
if func is not None:
args = (func,) + args
return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs)
def warns(expected_warning, *args, **kwargs):
r"""Assert that code raises a particular class of warning.
Specifically, the parameter ``expected_warning`` can be a warning class or
sequence of warning classes, and the code inside the ``with`` block must issue a warning of that class or
classes.
This helper produces a list of :class:`warnings.WarningMessage` objects,
one for each warning raised.
This function can be used as a context manager, or any of the other ways
``pytest.raises`` can be used::
>>> with warns(RuntimeWarning):
... warnings.warn("my warning", RuntimeWarning)
In the context manager form you may use the keyword argument ``match`` to assert
that the exception matches a text or regex::
>>> with warns(UserWarning, match='must be 0 or None'):
... warnings.warn("value must be 0 or None", UserWarning)
>>> with warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("value must be 42", UserWarning)
>>> with warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("this is not here", UserWarning)
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted...
"""
__tracebackhide__ = True
if not args:
match_expr = kwargs.pop("match", None)
if kwargs:
warnings.warn(
PYTEST_WARNS_UNKNOWN_KWARGS.format(args=sorted(kwargs)), stacklevel=2
)
return WarningsChecker(expected_warning, match_expr=match_expr)
elif isinstance(args[0], str):
warnings.warn(WARNS_EXEC, stacklevel=2)
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
with WarningsChecker(expected_warning):
code = _pytest._code.Source(code).compile()
six.exec_(code, frame.f_globals, loc)
else:
func = args[0]
with WarningsChecker(expected_warning):
return func(*args[1:], **kwargs)
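# An illustrative sketch: besides the context-manager form shown in the
# docstring, ``warns`` also accepts a callable (hypothetical example):
#
#     >>> import warnings
#     >>> def emit():
#     ...     warnings.warn("deprecated thing", DeprecationWarning)
#     ...     return 42
#     >>> warns(DeprecationWarning, emit)
#     42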
class WarningsRecorder(warnings.catch_warnings):
"""A context manager to record raised warnings.
Adapted from `warnings.catch_warnings`.
"""
def __init__(self):
super(WarningsRecorder, self).__init__(record=True)
self._entered = False
self._list = []
@property
def list(self):
"""The list of recorded warnings."""
return self._list
def __getitem__(self, i):
"""Get a recorded warning by index."""
return self._list[i]
def __iter__(self):
"""Iterate through the recorded warnings."""
return iter(self._list)
def __len__(self):
"""The number of recorded warnings."""
return len(self._list)
def pop(self, cls=Warning):
"""Pop the first recorded warning, raise exception if not exists."""
for i, w in enumerate(self._list):
if issubclass(w.category, cls):
return self._list.pop(i)
__tracebackhide__ = True
raise AssertionError("%r not found in warning list" % cls)
def clear(self):
"""Clear the list of recorded warnings."""
self._list[:] = []
def __enter__(self):
if self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot enter %r twice" % self)
self._list = super(WarningsRecorder, self).__enter__()
warnings.simplefilter("always")
# python3 keeps track of a "filter version"; when the filters are
# updated, previously seen warnings can be re-warned. python2 has no
# concept of this, so we must reset the warnings registry manually.
# trivial patching of `warnings.warn` seems to be enough somehow?
if six.PY2:
def warn(message, category=None, stacklevel=1):
# duplicate the stdlib logic due to
# bad handling in the C version of warnings
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# emulate resetting the warn registry
f_globals = sys._getframe(stacklevel).f_globals
if "__warningregistry__" in f_globals:
orig = f_globals["__warningregistry__"]
f_globals["__warningregistry__"] = None
try:
return self._saved_warn(message, category, stacklevel + 1)
finally:
f_globals["__warningregistry__"] = orig
else:
return self._saved_warn(message, category, stacklevel + 1)
warnings.warn, self._saved_warn = warn, warnings.warn
return self
def __exit__(self, *exc_info):
if not self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot exit %r without entering first" % self)
# see above where `self._saved_warn` is assigned
if six.PY2:
warnings.warn = self._saved_warn
super(WarningsRecorder, self).__exit__(*exc_info)
# Built-in catch_warnings does not reset entered state so we do it
# manually here for this context manager to become reusable.
self._entered = False
class WarningsChecker(WarningsRecorder):
def __init__(self, expected_warning=None, match_expr=None):
super(WarningsChecker, self).__init__()
msg = "exceptions must be old-style classes or derived from Warning, not %s"
if isinstance(expected_warning, tuple):
for exc in expected_warning:
if not inspect.isclass(exc):
raise TypeError(msg % type(exc))
elif inspect.isclass(expected_warning):
expected_warning = (expected_warning,)
elif expected_warning is not None:
raise TypeError(msg % type(expected_warning))
self.expected_warning = expected_warning
self.match_expr = match_expr
def __exit__(self, *exc_info):
super(WarningsChecker, self).__exit__(*exc_info)
__tracebackhide__ = True
# only check if we're not currently handling an exception
if all(a is None for a in exc_info):
if self.expected_warning is not None:
if not any(issubclass(r.category, self.expected_warning) for r in self):
__tracebackhide__ = True
fail(
"DID NOT WARN. No warnings of type {} was emitted. "
"The list of emitted warnings is: {}.".format(
self.expected_warning, [each.message for each in self]
)
)
elif self.match_expr is not None:
for r in self:
if issubclass(r.category, self.expected_warning):
if re.compile(self.match_expr).search(str(r.message)):
break
else:
fail(
"DID NOT WARN. No warnings of type {} matching"
" ('{}') was emitted. The list of emitted warnings"
" is: {}.".format(
self.expected_warning,
self.match_expr,
[each.message for each in self],
)
)

View File

@ -0,0 +1,427 @@
from pprint import pprint
import py
import six
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprEntry
from _pytest._code.code import ReprEntryNative
from _pytest._code.code import ReprExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import ReprFuncArgs
from _pytest._code.code import ReprLocals
from _pytest._code.code import ReprTraceback
from _pytest._code.code import TerminalRepr
from _pytest.outcomes import skip
from _pytest.pathlib import Path
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d["version_info"][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d["id"],
d["sysplatform"],
ver,
d["executable"],
)
return s
class BaseReport(object):
when = None
location = None
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, "node"):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, "toterminal"):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
@property
def longreprtext(self):
"""
Read-only property that returns the full string representation
of ``longrepr``.
.. versionadded:: 3.0
"""
tw = py.io.TerminalWriter(stringio=True)
tw.hasmarkup = False
self.toterminal(tw)
exc = tw.stringio.getvalue()
return exc.strip()
@property
def caplog(self):
"""Return captured log lines, if log capturing is enabled
.. versionadded:: 3.5
"""
return "\n".join(
content for (prefix, content) in self.get_sections("Captured log")
)
@property
def capstdout(self):
"""Return captured text from stdout, if capturing is enabled
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stdout")
)
@property
def capstderr(self):
"""Return captured text from stderr, if capturing is enabled
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stderr")
)
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
@property
def count_towards_summary(self):
"""
**Experimental**
Returns True if this report should be counted towards the totals shown at the end of the
test session: "1 passed, 1 failure, etc".
.. note::
This function is considered **experimental**, so beware that it is subject to changes
even in patch releases.
"""
return True
@property
def head_line(self):
"""
**Experimental**
Returns the head line shown with longrepr output for this report, more commonly during
traceback representation during failures::
________ Test.foo ________
In the example above, the head_line is "Test.foo".
.. note::
This function is considered **experimental**, so beware that it is subject to changes
even in patch releases.
"""
if self.location is not None:
fspath, lineno, domain = self.location
return domain
def _to_json(self):
"""
This was originally the serialize_report() function from xdist (ca03269).
Returns the contents of this report as a dict of builtin entries, suitable for
serialization.
Experimental method.
"""
def disassembled_report(rep):
reprtraceback = rep.longrepr.reprtraceback.__dict__.copy()
reprcrash = rep.longrepr.reprcrash.__dict__.copy()
new_entries = []
for entry in reprtraceback["reprentries"]:
entry_data = {
"type": type(entry).__name__,
"data": entry.__dict__.copy(),
}
for key, value in entry_data["data"].items():
if hasattr(value, "__dict__"):
entry_data["data"][key] = value.__dict__.copy()
new_entries.append(entry_data)
reprtraceback["reprentries"] = new_entries
return {
"reprcrash": reprcrash,
"reprtraceback": reprtraceback,
"sections": rep.longrepr.sections,
}
d = self.__dict__.copy()
if hasattr(self.longrepr, "toterminal"):
if hasattr(self.longrepr, "reprtraceback") and hasattr(
self.longrepr, "reprcrash"
):
d["longrepr"] = disassembled_report(self)
else:
d["longrepr"] = six.text_type(self.longrepr)
else:
d["longrepr"] = self.longrepr
for name in d:
if isinstance(d[name], (py.path.local, Path)):
d[name] = str(d[name])
elif name == "result":
d[name] = None # for now
return d
@classmethod
def _from_json(cls, reportdict):
"""
This was originally the serialize_report() function from xdist (ca03269).
Factory method that returns either a TestReport or CollectReport, depending on the calling
class. It's the caller's responsibility to know which class to pass here.
Experimental method.
"""
if reportdict["longrepr"]:
if (
"reprcrash" in reportdict["longrepr"]
and "reprtraceback" in reportdict["longrepr"]
):
reprtraceback = reportdict["longrepr"]["reprtraceback"]
reprcrash = reportdict["longrepr"]["reprcrash"]
unserialized_entries = []
reprentry = None
for entry_data in reprtraceback["reprentries"]:
data = entry_data["data"]
entry_type = entry_data["type"]
if entry_type == "ReprEntry":
reprfuncargs = None
reprfileloc = None
reprlocals = None
if data["reprfuncargs"]:
reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
if data["reprfileloc"]:
reprfileloc = ReprFileLocation(**data["reprfileloc"])
if data["reprlocals"]:
reprlocals = ReprLocals(data["reprlocals"]["lines"])
reprentry = ReprEntry(
lines=data["lines"],
reprfuncargs=reprfuncargs,
reprlocals=reprlocals,
filelocrepr=reprfileloc,
style=data["style"],
)
elif entry_type == "ReprEntryNative":
reprentry = ReprEntryNative(data["lines"])
else:
_report_unserialization_failure(entry_type, cls, reportdict)
unserialized_entries.append(reprentry)
reprtraceback["reprentries"] = unserialized_entries
exception_info = ReprExceptionInfo(
reprtraceback=ReprTraceback(**reprtraceback),
reprcrash=ReprFileLocation(**reprcrash),
)
for section in reportdict["longrepr"]["sections"]:
exception_info.addsection(*section)
reportdict["longrepr"] = exception_info
return cls(**reportdict)
def _report_unserialization_failure(type_name, report_class, reportdict):
url = "https://github.com/pytest-dev/pytest/issues"
stream = py.io.TextIO()
pprint("-" * 100, stream=stream)
pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream)
pprint("report_name: %s" % report_class, stream=stream)
pprint(reportdict, stream=stream)
pprint("Please report this bug at %s" % url, stream=stream)
pprint("-" * 100, stream=stream)
raise RuntimeError(stream.getvalue())
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
__test__ = False
def __init__(
self,
nodeid,
location,
keywords,
outcome,
longrepr,
when,
sections=(),
duration=0,
user_properties=None,
**extra
):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
#: user properties is a list of tuples (name, value) that holds user
#: defined properties of the test
self.user_properties = list(user_properties or [])
#: list of pairs ``(str, str)`` of extra information which needs to
#: be marshallable. Used by pytest to add captured text
#: from ``stdout`` and ``stderr``, but may be used by other plugins
#: to add arbitrary information to reports.
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid,
self.when,
self.outcome,
)
@classmethod
def from_item_and_call(cls, item, call):
"""
Factory method to create and fill a TestReport with standard item and call info.
"""
when = call.when
duration = call.stop - call.start
keywords = {x: 1 for x in item.keywords}
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(
excinfo, style=item.config.option.tbstyle
)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" % (key, rwhen), content))
return cls(
item.nodeid,
item.location,
keywords,
outcome,
longrepr,
when,
sections,
duration,
user_properties=item.user_properties,
)
class CollectReport(BaseReport):
when = "collect"
def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid,
len(self.result),
self.outcome,
)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
def pytest_report_to_serializable(report):
if isinstance(report, (TestReport, CollectReport)):
data = report._to_json()
data["_report_type"] = report.__class__.__name__
return data
def pytest_report_from_serializable(data):
if "_report_type" in data:
if data["_report_type"] == "TestReport":
return TestReport._from_json(data)
elif data["_report_type"] == "CollectReport":
return CollectReport._from_json(data)
assert False, "Unknown report_type unserialize data: {}".format(
data["_report_type"]
)
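# An illustrative round-trip sketch for the two hooks above (``report`` is
# assumed to be a TestReport produced by pytest, e.g. inside a plugin's
# ``pytest_runtest_logreport`` hook):
#
#     data = pytest_report_to_serializable(report)     # dict of builtins only
#     restored = pytest_report_from_serializable(data)
#     assert restored.nodeid == report.nodeid
#     assert restored.outcome == report.outcome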

View File

@ -0,0 +1,101 @@
""" log machine-parseable test session result information in a plain
text file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import py
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "resultlog plugin options")
group.addoption(
"--resultlog",
"--result-log",
action="store",
metavar="path",
default=None,
help="DEPRECATED path for machine-readable result log.",
)
def pytest_configure(config):
resultlog = config.option.resultlog
# prevent opening resultlog on slave nodes (xdist)
if resultlog and not hasattr(config, "slaveinput"):
dirname = os.path.dirname(os.path.abspath(resultlog))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(resultlog, "w", 1) # line buffered
config._resultlog = ResultLog(config, logfile)
config.pluginmanager.register(config._resultlog)
from _pytest.deprecated import RESULT_LOG
from _pytest.warnings import _issue_warning_captured
_issue_warning_captured(RESULT_LOG, config.hook, stacklevel=2)
def pytest_unconfigure(config):
resultlog = getattr(config, "_resultlog", None)
if resultlog:
resultlog.logfile.close()
del config._resultlog
config.pluginmanager.unregister(resultlog)
class ResultLog(object):
def __init__(self, config, logfile):
self.config = config
self.logfile = logfile # preferably line buffered
def write_log_entry(self, testpath, lettercode, longrepr):
print("%s %s" % (lettercode, testpath), file=self.logfile)
for line in longrepr.splitlines():
print(" %s" % line, file=self.logfile)
def log_outcome(self, report, lettercode, longrepr):
testpath = getattr(report, "nodeid", None)
if testpath is None:
testpath = report.fspath
self.write_log_entry(testpath, lettercode, longrepr)
def pytest_runtest_logreport(self, report):
if report.when != "call" and report.passed:
return
res = self.config.hook.pytest_report_teststatus(
report=report, config=self.config
)
code = res[1]
if code == "x":
longrepr = str(report.longrepr)
elif code == "X":
longrepr = ""
elif report.passed:
longrepr = ""
elif report.failed:
longrepr = str(report.longrepr)
elif report.skipped:
longrepr = str(report.longrepr[2])
self.log_outcome(report, code, longrepr)
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
code = "F"
longrepr = str(report.longrepr)
else:
assert report.skipped
code = "S"
longrepr = "%s:%d: %s" % report.longrepr
self.log_outcome(report, code, longrepr)
def pytest_internalerror(self, excrepr):
reprcrash = getattr(excrepr, "reprcrash", None)
path = getattr(reprcrash, "path", None)
if path is None:
path = "cwd:%s" % py.path.local()
self.write_log_entry(path, "!", str(excrepr))

View File

@ -0,0 +1,375 @@
""" basic collect and runtest protocol implementations """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bdb
import os
import sys
from time import time
import attr
import six
from .reports import CollectErrorRepr
from .reports import CollectReport
from .reports import TestReport
from _pytest._code.code import ExceptionInfo
from _pytest.outcomes import Exit
from _pytest.outcomes import skip
from _pytest.outcomes import Skipped
from _pytest.outcomes import TEST_OUTCOME
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption(
"--durations",
action="store",
type=int,
default=None,
metavar="N",
help="show N slowest setup/test durations (N=0 for all).",
)
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
verbose = terminalreporter.config.getvalue("verbose")
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
if verbose < 2 and rep.duration < 0.005:
tr.write_line("")
tr.write_line("(0.00 durations hidden. Use -vv to show these durations.)")
break
tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, rep.nodeid))
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
runtestprotocol(item, nextitem=nextitem)
item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
if item.config.getoption("setupshow", False):
show_test_item(item)
if not item.config.getoption("setuponly", False):
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
def show_test_item(item):
"""Show test function, parameters and the fixtures of the test item."""
tw = item.config.get_terminal_writer()
tw.line()
tw.write(" " * 8)
tw.write(item._nodeid)
used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
if used_fixtures:
tw.write(" (fixtures used: {})".format(", ".join(used_fixtures)))
def pytest_runtest_setup(item):
_update_current_test_var(item, "setup")
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
_update_current_test_var(item, "call")
sys.last_type, sys.last_value, sys.last_traceback = (None, None, None)
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del type, value, tb # Get rid of these in this frame
raise
def pytest_runtest_teardown(item, nextitem):
_update_current_test_var(item, "teardown")
item.session._setupstate.teardown_exact(item, nextitem)
_update_current_test_var(item, None)
def _update_current_test_var(item, when):
"""
Update PYTEST_CURRENT_TEST to reflect the current item and stage.
If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.
"""
var_name = "PYTEST_CURRENT_TEST"
if when:
value = "{} ({})".format(item.nodeid, when)
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace("\x00", "(null)")
os.environ[var_name] = value
else:
os.environ.pop(var_name)
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail")
or call.excinfo.errisinstance(skip.Exception)
or call.excinfo.errisinstance(bdb.BdbQuit)
)
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
reraise = (Exit,)
if not item.config.getoption("usepdb", False):
reraise += (KeyboardInterrupt,)
return CallInfo.from_call(
lambda: ihook(item=item, **kwds), when=when, reraise=reraise
)
@attr.s(repr=False)
class CallInfo(object):
""" Result/Exception info a function invocation. """
_result = attr.ib()
# Optional[ExceptionInfo]
excinfo = attr.ib()
start = attr.ib()
stop = attr.ib()
when = attr.ib()
@property
def result(self):
if self.excinfo is not None:
raise AttributeError("{!r} has no valid result".format(self))
return self._result
@classmethod
def from_call(cls, func, when, reraise=None):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
start = time()
excinfo = None
try:
result = func()
except: # noqa
excinfo = ExceptionInfo.from_current()
if reraise is not None and excinfo.errisinstance(reraise):
raise
result = None
stop = time()
return cls(start=start, stop=stop, when=when, result=result, excinfo=excinfo)
def __repr__(self):
if self.excinfo is not None:
status = "exception"
value = self.excinfo.value
else:
# TODO: investigate unification
value = repr(self._result)
status = "result"
return "<CallInfo when={when!r} {status}: {value}>".format(
when=self.when, value=value, status=status
)
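# An illustrative sketch of ``CallInfo.from_call`` (example values are
# assumptions):
#
#     ok = CallInfo.from_call(lambda: 42, when="call")
#     assert ok.excinfo is None and ok.result == 42
#
#     bad = CallInfo.from_call(lambda: 1 / 0, when="call")
#     assert bad.excinfo is not None    # the ZeroDivisionError was captured
#     # accessing bad.result would raise AttributeError, see the property above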
def pytest_runtest_makereport(item, call):
return TestReport.from_item_and_call(item, call)
def pytest_make_collect_report(collector):
call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
rep = CollectReport(
collector.nodeid, outcome, longrepr, getattr(call, "result", None)
)
rep.call = call # see collect_one_node
return rep
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert callable(finalizer)
# assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except TEST_OUTCOME:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
six.reraise(*exc)
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert (
colitem is None or colitem in self.stack or isinstance(colitem, tuple)
)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
exc = None
while self.stack:
if self.stack == needed_collectors[: len(self.stack)]:
break
try:
self._pop_and_teardown()
except TEST_OUTCOME:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
six.reraise(*exc)
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, "_prepare_exc"):
six.reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack) :]:
self.stack.append(col)
try:
col.setup()
except TEST_OUTCOME:
col._prepare_exc = sys.exc_info()
raise
def collect_one_node(collector):
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep

View File

@ -0,0 +1,88 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import pytest
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--setuponly",
"--setup-only",
action="store_true",
help="only setup fixtures, do not execute tests.",
)
group.addoption(
"--setupshow",
"--setup-show",
action="store_true",
help="show setup of fixtures while executing tests.",
)
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef, request):
yield
config = request.config
if config.option.setupshow:
if hasattr(request, "param"):
# Save the fixture parameter so ._show_fixture_action() can
# display it now and during the teardown (in .finish()).
if fixturedef.ids:
if callable(fixturedef.ids):
fixturedef.cached_param = fixturedef.ids(request.param)
else:
fixturedef.cached_param = fixturedef.ids[request.param_index]
else:
fixturedef.cached_param = request.param
_show_fixture_action(fixturedef, "SETUP")
def pytest_fixture_post_finalizer(fixturedef):
if hasattr(fixturedef, "cached_result"):
config = fixturedef._fixturemanager.config
if config.option.setupshow:
_show_fixture_action(fixturedef, "TEARDOWN")
if hasattr(fixturedef, "cached_param"):
del fixturedef.cached_param
def _show_fixture_action(fixturedef, msg):
config = fixturedef._fixturemanager.config
capman = config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture()
out, err = capman.read_global_capture()
tw = config.get_terminal_writer()
tw.line()
tw.write(" " * 2 * fixturedef.scopenum)
tw.write(
"{step} {scope} {fixture}".format(
step=msg.ljust(8), # align the output to TEARDOWN
scope=fixturedef.scope[0].upper(),
fixture=fixturedef.argname,
)
)
if msg == "SETUP":
deps = sorted(arg for arg in fixturedef.argnames if arg != "request")
if deps:
tw.write(" (fixtures used: {})".format(", ".join(deps)))
if hasattr(fixturedef, "cached_param"):
tw.write("[{}]".format(fixturedef.cached_param))
if capman:
capman.resume_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
if config.option.setuponly:
config.option.setupshow = True

View File

@ -0,0 +1,31 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--setupplan",
"--setup-plan",
action="store_true",
help="show what fixtures and tests would be executed but "
"don't execute anything.",
)
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
# Will return a dummy fixture if the setupplan option is provided.
if request.config.option.setupplan:
fixturedef.cached_result = (None, None, None)
return fixturedef.cached_result
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
if config.option.setupplan:
config.option.setuponly = True
config.option.setupshow = True
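# Typical command-line usage of the setup-debugging options above
# (a summarised sketch of the behaviour implemented by the two plugins):
#
#     pytest --setup-only   # run fixture setup/teardown but skip test bodies
#     pytest --setup-show   # run tests and print SETUP/TEARDOWN lines
#     pytest --setup-plan   # only show what would be set up, execute nothing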

View File

@ -0,0 +1,310 @@
""" support for skip/xfail functions and markers. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--runxfail",
action="store_true",
dest="runxfail",
default=False,
help="run tests even if they are marked xfail",
)
parser.addini(
"xfail_strict",
"default for the strict parameter of xfail "
"markers when not given explicitly (default: False)",
default=False,
type="bool",
)
def pytest_configure(config):
if config.option.runxfail:
# yay a hack
import pytest
old = pytest.xfail
config._cleanup.append(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
nop.Exception = xfail.Exception
setattr(pytest, "xfail", nop)
config.addinivalue_line(
"markers",
"skip(reason=None): skip the given test function with an optional reason. "
'Example: skip(reason="no way of currently testing this") skips the '
"test.",
)
config.addinivalue_line(
"markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"https://docs.pytest.org/en/latest/skipping.html",
)
config.addinivalue_line(
"markers",
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
"mark the test function as an expected failure if eval(condition) "
"has a True value. Optionally specify a reason for better reporting "
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See https://docs.pytest.org/en/latest/skipping.html",
)
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
# Check if skip or skipif are specified as pytest marks
item._skipped_by_mark = False
eval_skipif = MarkEvaluator(item, "skipif")
if eval_skipif.istrue():
item._skipped_by_mark = True
skip(eval_skipif.getexplanation())
for skip_info in item.iter_markers(name="skip"):
item._skipped_by_mark = True
if "reason" in skip_info.kwargs:
skip(skip_info.kwargs["reason"])
elif skip_info.args:
skip(skip_info.args[0])
else:
skip("unconditional skip")
item._evalxfail = MarkEvaluator(item, "xfail")
check_xfail_no_run(item)
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
check_xfail_no_run(pyfuncitem)
outcome = yield
passed = outcome.excinfo is None
if passed:
check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
"""check xfail(run=False)"""
if not item.config.option.runxfail:
evalxfail = item._evalxfail
if evalxfail.istrue():
if not evalxfail.get("run", True):
xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
"""check xfail(strict=True) for the given PASSING test"""
evalxfail = pyfuncitem._evalxfail
if evalxfail.istrue():
strict_default = pyfuncitem.config.getini("xfail_strict")
is_strict_xfail = evalxfail.get("strict", strict_default)
if is_strict_xfail:
del pyfuncitem._evalxfail
explanation = evalxfail.getexplanation()
fail("[XPASS(strict)] " + explanation, pytrace=False)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
evalxfail = getattr(item, "_evalxfail", None)
# unittest special case, see setting of _unexpectedsuccess
if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
from _pytest.compat import _is_unittest_unexpected_success_a_failure
if item._unexpectedsuccess:
rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
else:
rep.longrepr = "Unexpected success"
if _is_unittest_unexpected_success_a_failure():
rep.outcome = "failed"
else:
rep.outcome = "passed"
rep.wasxfail = rep.longrepr
elif item.config.option.runxfail:
pass  # don't interfere
elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = evalxfail.getexplanation()
elif call.when == "call":
strict_default = item.config.getini("xfail_strict")
is_strict_xfail = evalxfail.get("strict", strict_default)
explanation = evalxfail.getexplanation()
if is_strict_xfail:
rep.outcome = "failed"
rep.longrepr = "[XPASS(strict)] {}".format(explanation)
else:
rep.outcome = "passed"
rep.wasxfail = explanation
elif (
getattr(item, "_skipped_by_mark", False)
and rep.skipped
and type(rep.longrepr) is tuple
):
# skipped by mark.skipif; change the location of the failure
# to point to the item definition, otherwise it will display
# the location of where the skip exception was raised within pytest
filename, line, reason = rep.longrepr
filename, line = item.location[:2]
rep.longrepr = filename, line, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
if hasattr(report, "wasxfail"):
if report.skipped:
return "xfailed", "x", "XFAIL"
elif report.passed:
return "xpassed", "X", "XPASS"
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
if not tr.reportchars:
return
lines = []
for char in tr.reportchars:
action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None)
action(terminalreporter, lines)
if lines:
tr._tw.sep("=", "short test summary info")
for line in lines:
tr._tw.line(line)
def show_simple(terminalreporter, lines, stat):
failed = terminalreporter.stats.get(stat)
if failed:
config = terminalreporter.config
for rep in failed:
verbose_word = _get_report_str(config, rep)
pos = _get_pos(config, rep)
lines.append("%s %s" % (verbose_word, pos))
def show_xfailed(terminalreporter, lines):
xfailed = terminalreporter.stats.get("xfailed")
if xfailed:
config = terminalreporter.config
for rep in xfailed:
verbose_word = _get_report_str(config, rep)
pos = _get_pos(config, rep)
lines.append("%s %s" % (verbose_word, pos))
reason = rep.wasxfail
if reason:
lines.append(" " + str(reason))
def show_xpassed(terminalreporter, lines):
xpassed = terminalreporter.stats.get("xpassed")
if xpassed:
config = terminalreporter.config
for rep in xpassed:
verbose_word = _get_report_str(config, rep)
pos = _get_pos(config, rep)
reason = rep.wasxfail
lines.append("%s %s %s" % (verbose_word, pos, reason))
def folded_skips(skipped):
d = {}
for event in skipped:
key = event.longrepr
assert len(key) == 3, (event, key)
keywords = getattr(event, "keywords", {})
# folding reports with global pytestmark variable
# this is workaround, because for now we cannot identify the scope of a skip marker
# TODO: revisit after marks scope would be fixed
if (
event.when == "setup"
and "skip" in keywords
and "pytestmark" not in keywords
):
key = (key[0], None, key[2])
d.setdefault(key, []).append(event)
values = []
for key, events in d.items():
values.append((len(events),) + key)
return values
def show_skipped(terminalreporter, lines):
tr = terminalreporter
skipped = tr.stats.get("skipped", [])
if skipped:
fskips = folded_skips(skipped)
if fskips:
verbose_word = _get_report_str(terminalreporter.config, report=skipped[0])
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
if lineno is not None:
lines.append(
"%s [%d] %s:%d: %s"
% (verbose_word, num, fspath, lineno + 1, reason)
)
else:
lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))
def shower(stat):
def show_(terminalreporter, lines):
return show_simple(terminalreporter, lines, stat)
return show_
def _get_report_str(config, report):
_category, _short, verbose = config.hook.pytest_report_teststatus(
report=report, config=config
)
return verbose
def _get_pos(config, rep):
nodeid = config.cwd_relative_nodeid(rep.nodeid)
return nodeid
REPORTCHAR_ACTIONS = {
"x": show_xfailed,
"X": show_xpassed,
"f": shower("failed"),
"F": shower("failed"),
"s": show_skipped,
"S": show_skipped,
"p": shower("passed"),
"E": shower("error"),
}
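The markers registered in pytest_configure above are the user-facing side of this module; pytest_runtest_setup and pytest_runtest_makereport then translate them into skipped/xfailed/xpassed outcomes, and the show_* helpers render them in the "-r" short summary. A hypothetical test module exercising all three markers might look like this:

    # test_outcomes.py (hypothetical example)
    import sys
    import pytest

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_unconditional_skip():
        assert False  # never executed

    @pytest.mark.skipif(sys.platform == "win32", reason="does not run on win32")
    def test_conditional_skip():
        assert True

    @pytest.mark.xfail(raises=ZeroDivisionError, strict=True)
    def test_expected_failure():
        1 / 0

Running it with "pytest -rsxX" lists the skipped, xfailed and xpassed entries in the short test summary.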

View File

@@ -0,0 +1,114 @@
import pytest
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--sw",
"--stepwise",
action="store_true",
dest="stepwise",
help="exit on test failure and continue from last failing test next time",
)
group.addoption(
"--stepwise-skip",
action="store_true",
dest="stepwise_skip",
help="ignore the first failing test but stop on the next failing test",
)
@pytest.hookimpl
def pytest_configure(config):
config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin")
class StepwisePlugin:
def __init__(self, config):
self.config = config
self.active = config.getvalue("stepwise")
self.session = None
if self.active:
self.lastfailed = config.cache.get("cache/stepwise", None)
self.skip = config.getvalue("stepwise_skip")
def pytest_sessionstart(self, session):
self.session = session
def pytest_collection_modifyitems(self, session, config, items):
if not self.active:
return
if not self.lastfailed:
self.report_status = "no previously failed tests, not skipping."
return
already_passed = []
found = False
# Make a list of all tests that have been run before the last failing one.
for item in items:
if item.nodeid == self.lastfailed:
found = True
break
else:
already_passed.append(item)
# If the previously failed test was not found among the test items,
# do not skip any tests.
if not found:
self.report_status = "previously failed test not found, not skipping."
already_passed = []
else:
self.report_status = "skipping {} already passed items.".format(
len(already_passed)
)
for item in already_passed:
items.remove(item)
config.hook.pytest_deselected(items=already_passed)
def pytest_collectreport(self, report):
if self.active and report.failed:
self.session.shouldstop = (
"Error when collecting test, stopping test execution."
)
def pytest_runtest_logreport(self, report):
# Skip this hook if plugin is not active or the test is xfailed.
if not self.active or "xfail" in report.keywords:
return
if report.failed:
if self.skip:
# Remove test from the failed ones (if it exists) and unset the skip option
# to make sure the following tests will not be skipped.
if report.nodeid == self.lastfailed:
self.lastfailed = None
self.skip = False
else:
# Mark test as the last failing and interrupt the test session.
self.lastfailed = report.nodeid
self.session.shouldstop = (
"Test failed, continuing from this test next run."
)
else:
# If the test was actually run and did pass.
if report.when == "call":
# Remove test from the failed ones, if exists.
if report.nodeid == self.lastfailed:
self.lastfailed = None
def pytest_report_collectionfinish(self):
if self.active and self.config.getoption("verbose") >= 0:
return "stepwise: %s" % self.report_status
def pytest_sessionfinish(self, session):
if self.active:
self.config.cache.set("cache/stepwise", self.lastfailed)
else:
# Clear the list of failing tests if the plugin is not active.
self.config.cache.set("cache/stepwise", [])
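In practice the plugin is driven entirely from the command line: the first "pytest --sw" run stops at the first failure and stores its nodeid under the cache key "cache/stepwise"; the next "--sw" run deselects everything collected before that test and resumes from it, and "--stepwise-skip" ignores one failure before stopping. A hypothetical module showing the resume point:

    # test_stepwise_demo.py (hypothetical example)
    def test_first():
        assert True

    def test_second():
        assert False  # stepwise stops here and resumes from here next run

    def test_third():
        assert True  # only reached once test_second passes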

View File

@@ -0,0 +1,924 @@
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import platform
import sys
import time
import attr
import pluggy
import py
import six
from more_itertools import collapse
import pytest
from _pytest import nodes
from _pytest.main import EXIT_INTERRUPTED
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import EXIT_OK
from _pytest.main import EXIT_TESTSFAILED
from _pytest.main import EXIT_USAGEERROR
REPORT_COLLECTING_RESOLUTION = 0.5
class MoreQuietAction(argparse.Action):
"""
A modified copy of the argparse count action which counts down and updates
the legacy quiet attribute at the same time.
Used to unify verbosity handling.
"""
def __init__(self, option_strings, dest, default=None, required=False, help=None):
super(MoreQuietAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
new_count = getattr(namespace, self.dest, 0) - 1
setattr(namespace, self.dest, new_count)
# todo Deprecate config.quiet
namespace.quiet = getattr(namespace, "quiet", 0) + 1
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
"-v",
"--verbose",
action="count",
default=0,
dest="verbose",
help="increase verbosity.",
),
group._addoption(
"-q",
"--quiet",
action=MoreQuietAction,
default=0,
dest="verbose",
help="decrease verbosity.",
),
group._addoption(
"--verbosity", dest="verbose", type=int, default=0, help="set verbosity"
)
group._addoption(
"-r",
action="store",
dest="reportchars",
default="",
metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed, "
"(p)passed, (P)passed with output, (a)all except pP. "
"Warnings are displayed at all times except when "
"--disable-warnings is set",
)
group._addoption(
"--disable-warnings",
"--disable-pytest-warnings",
default=False,
dest="disable_warnings",
action="store_true",
help="disable warnings summary",
)
group._addoption(
"-l",
"--showlocals",
action="store_true",
dest="showlocals",
default=False,
help="show locals in tracebacks (disabled by default).",
)
group._addoption(
"--tb",
metavar="style",
action="store",
dest="tbstyle",
default="auto",
choices=["auto", "long", "short", "no", "line", "native"],
help="traceback print mode (auto/long/short/line/native/no).",
)
group._addoption(
"--show-capture",
action="store",
dest="showcapture",
choices=["no", "stdout", "stderr", "log", "all"],
default="all",
help="Controls how captured stdout/stderr/log is shown on failed tests. "
"Default is 'all'.",
)
group._addoption(
"--fulltrace",
"--full-trace",
action="store_true",
default=False,
help="don't cut any tracebacks (default is to cut).",
)
group._addoption(
"--color",
metavar="color",
action="store",
dest="color",
default="auto",
choices=["yes", "no", "auto"],
help="color terminal output (yes/no/auto).",
)
parser.addini(
"console_output_style",
help="console output: classic or with additional progress information (classic|progress).",
default="progress",
)
def pytest_configure(config):
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, "terminalreporter")
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
reportopts = ""
reportchars = config.option.reportchars
if not config.option.disable_warnings and "w" not in reportchars:
reportchars += "w"
elif config.option.disable_warnings and "w" in reportchars:
reportchars = reportchars.replace("w", "")
if reportchars:
for char in reportchars:
if char not in reportopts and char != "a":
reportopts += char
elif char == "a":
reportopts = "sxXwEf"
return reportopts
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
@attr.s
class WarningReport(object):
"""
Simple structure to hold warnings information captured by ``pytest_warning_captured``.
:ivar str message: user friendly message about the warning
:ivar str|None nodeid: node id that generated the warning (see ``get_location``).
:ivar tuple|py.path.local fslocation:
file system location of the source of the warning (see ``get_location``).
"""
message = attr.ib()
nodeid = attr.ib(default=None)
fslocation = attr.ib(default=None)
count_towards_summary = True
def get_location(self, config):
"""
Returns the more user-friendly information about the location
of a warning, or None.
"""
if self.nodeid:
return self.nodeid
if self.fslocation:
if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
filename, linenum = self.fslocation[:2]
relpath = py.path.local(filename).relto(config.invocation_dir)
if not relpath:
relpath = str(filename)
return "%s:%s" % (relpath, linenum)
else:
return str(self.fslocation)
return None
class TerminalReporter(object):
def __init__(self, config, file=None):
import _pytest.config
self.config = config
self._numcollected = 0
self._session = None
self._showfspath = None
self.stats = {}
self.startdir = py.path.local()
if file is None:
file = sys.stdout
self._tw = _pytest.config.create_terminal_writer(config, file)
# self.writer will be deprecated in pytest-3.4
self.writer = self._tw
self._screen_width = self._tw.fullwidth
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
self._progress_nodeids_reported = set()
self._show_progress_info = self._determine_show_progress_info()
self._collect_report_last_write = None
def _determine_show_progress_info(self):
"""Return True if we should display progress information based on the current config"""
# do not show progress if we are not capturing output (#3038)
if self.config.getoption("capture", "no") == "no":
return False
# do not show progress if we are showing fixture setup/teardown
if self.config.getoption("setupshow", False):
return False
return self.config.getini("console_output_style") in ("progress", "count")
@property
def verbosity(self):
return self.config.option.verbose
@property
def showheader(self):
return self.verbosity >= 0
@property
def showfspath(self):
if self._showfspath is None:
return self.verbosity >= 0
return self._showfspath
@showfspath.setter
def showfspath(self, value):
self._showfspath = value
@property
def showlongtestinfo(self):
return self.verbosity > 0
def hasopt(self, char):
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid, res, **markup):
fspath = self.config.rootdir.join(nodeid.split("::")[0])
# NOTE: explicitly check for None to work around py bug, and for less
# overhead in general (https://github.com/pytest-dev/py/pull/207).
if self.currentfspath is None or fspath != self.currentfspath:
if self.currentfspath is not None and self._show_progress_info:
self._write_progress_information_filling_space()
self.currentfspath = fspath
fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
self._tw.write(fspath + " ")
self._tw.write(res, **markup)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not isinstance(line, six.text_type):
line = six.text_type(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
"""
Rewinds the terminal cursor to the beginning and writes the given line.
:kwarg erase: if True, will also add spaces until the full terminal width to ensure
previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
"""
erase = markup.pop("erase", False)
if erase:
fill_count = self._tw.fullwidth - len(line) - 1
fill = " " * fill_count
else:
fill = ""
line = str(line)
self._tw.write("\r" + line + fill, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in six.text_type(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_warning_captured(self, warning_message, item):
# from _pytest.nodes import get_fslocation_from_item
from _pytest.warnings import warning_record_to_str
warnings = self.stats.setdefault("warnings", [])
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)
nodeid = item.nodeid if item is not None else ""
warning_report = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid
)
warnings.append(warning_report)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault("deselected", []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
fsid = nodeid.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
self._tests_ran = True
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
category, letter, word = res
if isinstance(word, tuple):
word, markup = word
else:
markup = None
self.stats.setdefault(category, []).append(rep)
if not letter and not word:
# probably passed setup/teardown
return
running_xdist = hasattr(rep, "node")
if markup is None:
was_xfail = hasattr(report, "wasxfail")
if rep.passed and not was_xfail:
markup = {"green": True}
elif rep.passed and was_xfail:
markup = {"yellow": True}
elif rep.failed:
markup = {"red": True}
elif rep.skipped:
markup = {"yellow": True}
else:
markup = {}
if self.verbosity <= 0:
if not running_xdist and self.showfspath:
self.write_fspath_result(rep.nodeid, letter, **markup)
else:
self._tw.write(letter, **markup)
else:
self._progress_nodeids_reported.add(rep.nodeid)
line = self._locationline(rep.nodeid, *rep.location)
if not running_xdist:
self.write_ensure_prefix(line, word, **markup)
if self._show_progress_info:
self._write_progress_information_filling_space()
else:
self.ensure_newline()
self._tw.write("[%s]" % rep.node.gateway.id)
if self._show_progress_info:
self._tw.write(
self._get_progress_information_message() + " ", cyan=True
)
else:
self._tw.write(" ")
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_runtest_logfinish(self, nodeid):
if self.config.getini("console_output_style") == "count":
num_tests = self._session.testscollected
progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests)))
else:
progress_length = len(" [100%]")
if self.verbosity <= 0 and self._show_progress_info:
self._progress_nodeids_reported.add(nodeid)
last_item = (
len(self._progress_nodeids_reported) == self._session.testscollected
)
if last_item:
self._write_progress_information_filling_space()
else:
w = self._width_of_current_line
past_edge = w + progress_length + 1 >= self._screen_width
if past_edge:
msg = self._get_progress_information_message()
self._tw.write(msg + "\n", cyan=True)
def _get_progress_information_message(self):
collected = self._session.testscollected
if self.config.getini("console_output_style") == "count":
if collected:
progress = self._progress_nodeids_reported
counter_format = "{{:{}d}}".format(len(str(collected)))
format_string = " [{}/{{}}]".format(counter_format)
return format_string.format(len(progress), collected)
return " [ {} / {} ]".format(collected, collected)
else:
if collected:
progress = len(self._progress_nodeids_reported) * 100 // collected
return " [{:3d}%]".format(progress)
return " [100%]"
def _write_progress_information_filling_space(self):
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
self.write(msg.rjust(fill), cyan=True)
@property
def _width_of_current_line(self):
"""Return the width of current line, using the superior implementation of py-1.6 when available"""
try:
return self._tw.width_of_current_line
except AttributeError:
# py < 1.6.0
return self._tw.chars_on_current_line
def pytest_collection(self):
if self.isatty:
if self.config.option.verbose >= 0:
self.write("collecting ... ", bold=True)
self._collect_report_last_write = time.time()
elif self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
if not final:
# Only write "collecting" report every 0.5s.
t = time.time()
if (
self._collect_report_last_write is not None
and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION
):
return
self._collect_report_last_write = t
errors = len(self.stats.get("error", []))
skipped = len(self.stats.get("skipped", []))
deselected = len(self.stats.get("deselected", []))
selected = self._numcollected - errors - skipped - deselected
if final:
line = "collected "
else:
line = "collecting "
line += (
str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
)
if errors:
line += " / %d errors" % errors
if deselected:
line += " / %d deselected" % deselected
if skipped:
line += " / %d skipped" % skipped
if self._numcollected > selected > 0:
line += " / %d selected" % selected
if self.isatty:
self.rewrite(line, bold=True, erase=True)
if final:
self.write("\n")
else:
self.write_line(line)
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session):
self._session = session
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, "pypy_version_info"):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += ", pytest-%s, py-%s, pluggy-%s" % (
pytest.__version__,
py.__version__,
pluggy.__version__,
)
if (
self.verbosity > 0
or self.config.option.debug
or getattr(self.config.option, "pastebin", None)
):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir
)
self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(self, lines):
lines.reverse()
for line in collapse(lines):
self.write_line(line)
def pytest_report_header(self, config):
line = "rootdir: %s" % config.rootdir
if config.inifile:
line += ", inifile: " + config.rootdir.bestrelpath(config.inifile)
testpaths = config.getini("testpaths")
if testpaths and config.args == testpaths:
rel_paths = [config.rootdir.bestrelpath(x) for x in testpaths]
line += ", testpaths: {}".format(", ".join(rel_paths))
result = [line]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return result
def pytest_collection_finish(self, session):
if self.config.getoption("collectonly"):
self._printcollecteditems(session.items)
lines = self.config.hook.pytest_report_collectionfinish(
config=self.config, startdir=self.startdir, items=session.items
)
self._write_report_lines_from_hooks(lines)
if self.config.getoption("collectonly"):
if self.stats.get("failed"):
self._tw.sep("!", "collection failures")
for rep in self.stats.get("failed"):
rep.toterminal(self._tw)
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split("::", 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
self._tw.line(item.nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[: len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack) :]:
stack.append(col)
if col.name == "()": # Skip Instances.
continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
if self.config.option.verbose >= 1:
if hasattr(col, "_obj") and col._obj.__doc__:
for line in col._obj.__doc__.strip().splitlines():
self._tw.line("%s%s" % (indent + " ", line.strip()))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
EXIT_OK,
EXIT_TESTSFAILED,
EXIT_INTERRUPTED,
EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED,
)
if exitstatus in summary_exit_codes:
self.config.hook.pytest_terminal_summary(
terminalreporter=self, exitstatus=exitstatus, config=self.config
)
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_stats()
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(self):
self.summary_errors()
self.summary_failures()
self.summary_warnings()
yield
self.summary_passes()
# Display any extra warnings from teardown here (if any).
self.summary_warnings()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, "_keyboardinterrupt_memo"):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
self._tw.line(
"(to show a full traceback on KeyboardInterrupt use --fulltrace)",
yellow=True,
)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[: -len(domain)]
values = domain.split("[")
values[0] = values[0].replace(".", "::") # don't replace '.' in params
line += "[".join(values)
return line
# collect_fspath comes from testid which has a "/"-normalized path
if fspath:
res = mkrel(nodeid)
if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
"\\", nodes.SEP
):
res += " <- " + self.startdir.bestrelpath(fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
if rep.head_line:
return rep.head_line
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
values = []
for x in self.stats.get(name, []):
if not hasattr(x, "_pdbshown"):
values.append(x)
return values
def summary_warnings(self):
if self.hasopt("w"):
all_warnings = self.stats.get("warnings")
if not all_warnings:
return
final = hasattr(self, "_already_displayed_warnings")
if final:
warning_reports = all_warnings[self._already_displayed_warnings :]
else:
warning_reports = all_warnings
self._already_displayed_warnings = len(warning_reports)
if not warning_reports:
return
reports_grouped_by_message = collections.OrderedDict()
for wr in warning_reports:
reports_grouped_by_message.setdefault(wr.message, []).append(wr)
title = "warnings summary (final)" if final else "warnings summary"
self.write_sep("=", title, yellow=True, bold=False)
for message, warning_reports in reports_grouped_by_message.items():
has_any_location = False
for w in warning_reports:
location = w.get_location(self.config)
if location:
self._tw.line(str(location))
has_any_location = True
if has_any_location:
lines = message.splitlines()
indented = "\n".join(" " + x for x in lines)
message = indented.rstrip()
else:
message = message.rstrip()
self._tw.line(message)
self._tw.line()
self._tw.line("-- Docs: https://docs.pytest.org/en/latest/warnings.html")
def summary_passes(self):
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports = self.getreports("passed")
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
if rep.sections:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def print_teardown_sections(self, rep):
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
if "teardown" in secname:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports("failed")
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
for report in self.getreports(""):
if report.nodeid == rep.nodeid and report.when == "teardown":
self.print_teardown_sections(report)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports("error")
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats["error"]:
msg = self._getfailureheadline(rep)
if rep.when == "collect":
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = time.time() - self._sessionstarttime
(line, color) = build_summary_stats_line(self.stats)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {color: True, "bold": True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def build_summary_stats_line(stats):
known_types = (
"failed passed skipped deselected xfailed xpassed warnings error".split()
)
unknown_type_seen = False
for found_type in stats:
if found_type not in known_types:
if found_type: # setup/teardown reports have an empty key, ignore them
known_types.append(found_type)
unknown_type_seen = True
parts = []
for key in known_types:
reports = stats.get(key, None)
if reports:
count = sum(
1 for rep in reports if getattr(rep, "count_towards_summary", True)
)
parts.append("%d %s" % (count, key))
if parts:
line = ", ".join(parts)
else:
line = "no tests ran"
if "failed" in stats or "error" in stats:
color = "red"
elif "warnings" in stats or unknown_type_seen:
color = "yellow"
elif "passed" in stats:
color = "green"
else:
color = "yellow"
return line, color
def _plugin_nameversions(plugininfo):
values = []
for plugin, dist in plugininfo:
# gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
# questionable convenience, but it keeps things short
if name.startswith("pytest-"):
name = name[7:]
# we decided to print python package names
# they can have more than one plugin
if name not in values:
values.append(name)
return values
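Most of this module needs a live terminal session, but build_summary_stats_line is a pure function and shows how the final summary line and its colour are derived from the stats dictionary. A minimal sketch, assuming the module is importable as _pytest.terminal (as in a regular pytest installation) and using a made-up report stand-in:

    from _pytest.terminal import build_summary_stats_line

    class FakeReport(object):
        count_towards_summary = True

    stats = {"failed": [FakeReport()], "passed": [FakeReport(), FakeReport()]}
    line, color = build_summary_stats_line(stats)
    assert line == "1 failed, 2 passed"
    assert color == "red"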

View File

@@ -0,0 +1,196 @@
""" support for providing temporary directories to test functions. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import tempfile
import warnings
import attr
import py
import six
import pytest
from .pathlib import ensure_reset_dir
from .pathlib import LOCK_TIMEOUT
from .pathlib import make_numbered_dir
from .pathlib import make_numbered_dir_with_cleanup
from .pathlib import Path
from _pytest.monkeypatch import MonkeyPatch
@attr.s
class TempPathFactory(object):
"""Factory for temporary directories under the common base temp directory.
The base directory can be configured using the ``--basetemp`` option."""
_given_basetemp = attr.ib(
# using os.path.abspath() to get absolute path instead of resolve() as it
# does not work the same in all platforms (see #4427)
# Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012)
converter=attr.converters.optional(
lambda p: Path(os.path.abspath(six.text_type(p)))
)
)
_trace = attr.ib()
_basetemp = attr.ib(default=None)
@classmethod
def from_config(cls, config):
"""
:param config: a pytest configuration
"""
return cls(
given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir")
)
def mktemp(self, basename, numbered=True):
"""makes a temporary directory managed by the factory"""
if not numbered:
p = self.getbasetemp().joinpath(basename)
p.mkdir()
else:
p = make_numbered_dir(root=self.getbasetemp(), prefix=basename)
self._trace("mktemp", p)
return p
def getbasetemp(self):
""" return base temporary directory. """
if self._basetemp is None:
if self._given_basetemp is not None:
basetemp = self._given_basetemp
ensure_reset_dir(basetemp)
basetemp = basetemp.resolve()
else:
from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
temproot = Path(from_env or tempfile.gettempdir()).resolve()
user = get_user() or "unknown"
# use a sub-directory in the temproot to speed-up
# make_numbered_dir() call
rootdir = temproot.joinpath("pytest-of-{}".format(user))
rootdir.mkdir(exist_ok=True)
basetemp = make_numbered_dir_with_cleanup(
prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT
)
assert basetemp is not None
self._basetemp = t = basetemp
self._trace("new basetemp", t)
return t
else:
return self._basetemp
@attr.s
class TempdirFactory(object):
"""
backward compatibility wrapper that implements
:class:``py.path.local`` for :class:``TempPathFactory``
"""
_tmppath_factory = attr.ib()
def ensuretemp(self, string, dir=1):
""" (deprecated) return temporary directory path with
the given string as the trailing part. It is usually
better to use the 'tmpdir' function argument which
provides an empty unique-per-test-invocation directory
and is guaranteed to be empty.
"""
# py.log._apiwarn(">1.1", "use tmpdir function argument")
from .deprecated import PYTEST_ENSURETEMP
warnings.warn(PYTEST_ENSURETEMP, stacklevel=2)
return self.getbasetemp().ensure(string, dir=dir)
def mktemp(self, basename, numbered=True):
"""Create a subdirectory of the base temporary directory and return it.
If ``numbered``, ensure the directory is unique by adding a number
suffix greater than any existing one.
"""
return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve())
def getbasetemp(self):
"""backward compat wrapper for ``_tmppath_factory.getbasetemp``"""
return py.path.local(self._tmppath_factory.getbasetemp().resolve())
def get_user():
"""Return the current user name, or None if getuser() does not work
in the current environment (see #1010).
"""
import getpass
try:
return getpass.getuser()
except (ImportError, KeyError):
return None
def pytest_configure(config):
"""Create a TempdirFactory and attach it to the config object.
This is to comply with existing plugins which expect the handler to be
available at pytest_configure time, but ideally should be moved entirely
to the tmpdir_factory session fixture.
"""
mp = MonkeyPatch()
tmppath_handler = TempPathFactory.from_config(config)
t = TempdirFactory(tmppath_handler)
config._cleanup.append(mp.undo)
mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False)
mp.setattr(config, "_tmpdirhandler", t, raising=False)
mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False)
@pytest.fixture(scope="session")
def tmpdir_factory(request):
"""Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.
"""
return request.config._tmpdirhandler
@pytest.fixture(scope="session")
def tmp_path_factory(request):
"""Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.
"""
return request.config._tmp_path_factory
def _mk_tmp(request, factory):
name = request.node.name
name = re.sub(r"[\W]", "_", name)
MAXVAL = 30
name = name[:MAXVAL]
return factory.mktemp(name, numbered=True)
@pytest.fixture
def tmpdir(tmp_path):
"""Return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a `py.path.local`_
path object.
.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
"""
return py.path.local(tmp_path)
@pytest.fixture
def tmp_path(request, tmp_path_factory):
"""Return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a :class:`pathlib.Path`
object.
.. note::
in python < 3.6 this is a pathlib2.Path
"""
return _mk_tmp(request, tmp_path_factory)
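Both fixtures hand out a fresh directory per test invocation; tmp_path returns a pathlib.Path while tmpdir wraps the same location as a py.path.local. A hypothetical test module using them:

    # test_tmp_fixtures.py (hypothetical example)
    def test_with_tmp_path(tmp_path):
        target = tmp_path / "hello.txt"  # pathlib.Path API
        target.write_text(u"hello")
        assert target.read_text() == u"hello"

    def test_with_tmpdir(tmpdir):
        target = tmpdir.join("hello.txt")  # py.path.local API
        target.write("hello")
        assert target.read() == "hello"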

View File

@@ -0,0 +1,288 @@
""" discovery and running of std-library "unittest" style tests. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import traceback
import _pytest._code
import pytest
from _pytest.compat import getimfunc
from _pytest.config import hookimpl
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.python import Class
from _pytest.python import Function
def pytest_pycollect_makeitem(collector, name, obj):
# has unittest been imported and is obj a subclass of its TestCase?
try:
if not issubclass(obj, sys.modules["unittest"].TestCase):
return
except Exception:
return
# yes, so let's collect it
return UnitTestCase(name, parent=collector)
class UnitTestCase(Class):
# marker for fixturemanager.getfixtureinfo()
# to declare that our children do not support funcargs
nofuncargs = True
def collect(self):
from unittest import TestLoader
cls = self.obj
if not getattr(cls, "__test__", True):
return
skipped = getattr(cls, "__unittest_skip__", False)
if not skipped:
self._inject_setup_teardown_fixtures(cls)
self._inject_setup_class_fixture()
self.session._fixturemanager.parsefactories(self, unittest=True)
loader = TestLoader()
foundsomething = False
for name in loader.getTestCaseNames(self.obj):
x = getattr(self.obj, name)
if not getattr(x, "__test__", True):
continue
funcobj = getimfunc(x)
yield TestCaseFunction(name, parent=self, callobj=funcobj)
foundsomething = True
if not foundsomething:
runtest = getattr(self.obj, "runTest", None)
if runtest is not None:
ut = sys.modules.get("twisted.trial.unittest", None)
if ut is None or runtest != ut.TestCase.runTest:
yield TestCaseFunction("runTest", parent=self)
def _inject_setup_teardown_fixtures(self, cls):
"""Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding
teardown functions (#517)"""
class_fixture = _make_xunit_fixture(
cls, "setUpClass", "tearDownClass", scope="class", pass_self=False
)
if class_fixture:
cls.__pytest_class_setup = class_fixture
method_fixture = _make_xunit_fixture(
cls, "setup_method", "teardown_method", scope="function", pass_self=True
)
if method_fixture:
cls.__pytest_method_setup = method_fixture
def _make_xunit_fixture(obj, setup_name, teardown_name, scope, pass_self):
setup = getattr(obj, setup_name, None)
teardown = getattr(obj, teardown_name, None)
if setup is None and teardown is None:
return None
@pytest.fixture(scope=scope, autouse=True)
def fixture(self, request):
if getattr(self, "__unittest_skip__", None):
reason = self.__unittest_skip_why__
pytest.skip(reason)
if setup is not None:
if pass_self:
setup(self, request.function)
else:
setup()
yield
if teardown is not None:
if pass_self:
teardown(self, request.function)
else:
teardown()
return fixture
class TestCaseFunction(Function):
nofuncargs = True
_excinfo = None
_testcase = None
def setup(self):
self._testcase = self.parent.obj(self.name)
self._fix_unittest_skip_decorator()
if hasattr(self, "_request"):
self._request._fillfixtures()
def _fix_unittest_skip_decorator(self):
"""
The @unittest.skip decorator calls functools.wraps(self._testcase)
The call to functools.wraps() fails unless self._testcase
has a __name__ attribute. This is usually automatically supplied
if the test is a function or method, but we need to add it manually
here.
See issue #1169
"""
if sys.version_info[0] == 2:
setattr(self._testcase, "__name__", self.name)
def teardown(self):
self._testcase = None
def startTest(self, testcase):
pass
def _addexcinfo(self, rawexcinfo):
# unwrap potential exception info (see twisted trial support below)
rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
try:
excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
# invoke the attributes to trigger storing the traceback
# trial causes some issue there
excinfo.value
excinfo.traceback
except TypeError:
try:
try:
values = traceback.format_exception(*rawexcinfo)
values.insert(
0,
"NOTE: Incompatible Exception Representation, "
"displaying natively:\n\n",
)
fail("".join(values), pytrace=False)
except (fail.Exception, KeyboardInterrupt):
raise
except: # noqa
fail(
"ERROR: Unknown Incompatible Exception "
"representation:\n%r" % (rawexcinfo,),
pytrace=False,
)
except KeyboardInterrupt:
raise
except fail.Exception:
excinfo = _pytest._code.ExceptionInfo.from_current()
self.__dict__.setdefault("_excinfo", []).append(excinfo)
def addError(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addFailure(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addSkip(self, testcase, reason):
try:
skip(reason)
except skip.Exception:
self._skipped_by_mark = True
self._addexcinfo(sys.exc_info())
def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
try:
xfail(str(reason))
except xfail.Exception:
self._addexcinfo(sys.exc_info())
def addUnexpectedSuccess(self, testcase, reason=""):
self._unexpectedsuccess = reason
def addSuccess(self, testcase):
pass
def stopTest(self, testcase):
pass
def _handle_skip(self):
# implements the skipping machinery (see #2137)
# analog to pythons Lib/unittest/case.py:run
testMethod = getattr(self._testcase, self._testcase._testMethodName)
if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
):
# If the class or method was skipped.
skip_why = getattr(
self._testcase.__class__, "__unittest_skip_why__", ""
) or getattr(testMethod, "__unittest_skip_why__", "")
try: # PY3, unittest2 on PY2
self._testcase._addSkip(self, self._testcase, skip_why)
except TypeError: # PY2
if sys.version_info[0] != 2:
raise
self._testcase._addSkip(self, skip_why)
return True
return False
def runtest(self):
if self.config.pluginmanager.get_plugin("pdbinvoke") is None:
self._testcase(result=self)
else:
# disables tearDown and cleanups for post mortem debugging (see #1890)
if self._handle_skip():
return
self._testcase.debug()
def _prunetraceback(self, excinfo):
Function._prunetraceback(self, excinfo)
traceback = excinfo.traceback.filter(
lambda x: not x.frame.f_globals.get("__unittest")
)
if traceback:
excinfo.traceback = traceback
@hookimpl(tryfirst=True)
def pytest_runtest_makereport(item, call):
if isinstance(item, TestCaseFunction):
if item._excinfo:
call.excinfo = item._excinfo.pop(0)
try:
del call.result
except AttributeError:
pass
# twisted trial support
@hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item):
if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
ut = sys.modules["twisted.python.failure"]
Failure__init__ = ut.Failure.__init__
check_testcase_implements_trial_reporter()
def excstore(
self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None
):
if exc_value is None:
self._rawexcinfo = sys.exc_info()
else:
if exc_type is None:
exc_type = type(exc_value)
self._rawexcinfo = (exc_type, exc_value, exc_tb)
try:
Failure__init__(
self, exc_value, exc_type, exc_tb, captureVars=captureVars
)
except TypeError:
Failure__init__(self, exc_value, exc_type, exc_tb)
ut.Failure.__init__ = excstore
yield
ut.Failure.__init__ = Failure__init__
else:
yield
def check_testcase_implements_trial_reporter(done=[]):
if done:
return
from zope.interface import classImplements
from twisted.trial.itrial import IReporter
classImplements(TestCaseFunction, IReporter)
done.append(1)
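Collection works top down: pytest_pycollect_makeitem recognises unittest.TestCase subclasses, UnitTestCase.collect yields one TestCaseFunction per test method, and results reported through addError/addFailure/addSkip end up as normal pytest outcomes. A hypothetical test case collected by this module:

    # test_unittest_style.py (hypothetical example)
    import unittest

    class TestUpper(unittest.TestCase):
        def setUp(self):
            self.value = "pytest"

        def test_upper(self):
            self.assertEqual(self.value.upper(), "PYTEST")

        @unittest.skip("routed through addSkip above")
        def test_skipped(self):
            self.fail("never runs")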

View File

@@ -0,0 +1,60 @@
import attr
class PytestWarning(UserWarning):
"""
Bases: :class:`UserWarning`.
Base class for all warnings emitted by pytest.
"""
class PytestDeprecationWarning(PytestWarning, DeprecationWarning):
"""
Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`.
Warning class for features that will be removed in a future version.
"""
class RemovedInPytest4Warning(PytestDeprecationWarning):
"""
Bases: :class:`pytest.PytestDeprecationWarning`.
Warning class for features scheduled to be removed in pytest 4.0.
"""
class PytestExperimentalApiWarning(PytestWarning, FutureWarning):
"""
Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`.
Warning category used to denote experiments in pytest. Use sparingly as the API might change or even be
removed completely in a future version
"""
@classmethod
def simple(cls, apiname):
return cls(
"{apiname} is an experimental api that may change over time".format(
apiname=apiname
)
)
@attr.s
class UnformattedWarning(object):
"""Used to hold warnings that need to format their message at runtime, as opposed to a direct message.
Using this class avoids keeping all the warning types and messages in this module, avoiding misuse.
"""
category = attr.ib()
template = attr.ib()
def format(self, **kwargs):
"""Returns an instance of the warning category, formatted with given kwargs"""
return self.category(self.template.format(**kwargs))
PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example")
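UnformattedWarning defers message formatting to the call site, so a single constant can produce differently worded warnings. A minimal sketch, assuming the module is importable as _pytest.warning_types; the SOME_FEATURE_DEPRECATED constant is made up for illustration:

    from _pytest.warning_types import PytestWarning, UnformattedWarning

    SOME_FEATURE_DEPRECATED = UnformattedWarning(
        PytestWarning, "{name} is deprecated, use {replacement} instead"
    )
    warning = SOME_FEATURE_DEPRECATED.format(name="foo", replacement="bar")
    assert isinstance(warning, PytestWarning)
    assert str(warning) == "foo is deprecated, use bar instead"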

View File

@@ -0,0 +1,179 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import warnings
from contextlib import contextmanager
import pytest
from _pytest import compat
SHOW_PYTEST_WARNINGS_ARG = "-Walways::pytest.RemovedInPytest4Warning"
def _setoption(wmod, arg):
"""
Copy of the warning._setoption function but does not escape arguments.
"""
parts = arg.split(":")
if len(parts) > 5:
raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append("")
action, message, category, module, lineno = [s.strip() for s in parts]
action = wmod._getaction(action)
category = wmod._getcategory(category)
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise wmod._OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
wmod.filterwarnings(action, message, category, module, lineno)
def pytest_addoption(parser):
group = parser.getgroup("pytest-warnings")
group.addoption(
"-W",
"--pythonwarnings",
action="append",
help="set which warnings to report, see -W option of python itself.",
)
parser.addini(
"filterwarnings",
type="linelist",
help="Each line specifies a pattern for "
"warnings.filterwarnings. "
"Processed after -W and --pythonwarnings.",
)
def pytest_configure(config):
config.addinivalue_line(
"markers",
"filterwarnings(warning): add a warning filter to the given test. "
"see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings ",
)
@contextmanager
def catch_warnings_for_item(config, ihook, when, item):
"""
Context manager that catches warnings generated in the contained execution block.
``item`` can be None if we are not in the context of an item execution.
Each warning captured triggers the ``pytest_warning_captured`` hook.
"""
cmdline_filters = config.getoption("pythonwarnings") or []
inifilters = config.getini("filterwarnings")
with warnings.catch_warnings(record=True) as log:
if not sys.warnoptions:
# if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908)
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.filterwarnings("always", category=PendingDeprecationWarning)
warnings.filterwarnings("error", category=pytest.RemovedInPytest4Warning)
# filters should have this precedence: mark, cmdline options, ini
# filters should be applied in the inverse order of precedence
for arg in inifilters:
_setoption(warnings, arg)
for arg in cmdline_filters:
warnings._setoption(arg)
if item is not None:
for mark in item.iter_markers(name="filterwarnings"):
for arg in mark.args:
_setoption(warnings, arg)
yield
for warning_message in log:
ihook.pytest_warning_captured.call_historic(
kwargs=dict(warning_message=warning_message, when=when, item=item)
)
def warning_record_to_str(warning_message):
"""Convert a warnings.WarningMessage to a string.
This takes a lot of unicode shenanigans into account for Python 2.
When Python 2 support is dropped this function can be greatly simplified.
"""
warn_msg = warning_message.message
unicode_warning = False
if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):
new_args = []
for m in warn_msg.args:
new_args.append(
compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m
)
unicode_warning = list(warn_msg.args) != new_args
warn_msg.args = new_args
msg = warnings.formatwarning(
warn_msg,
warning_message.category,
warning_message.filename,
warning_message.lineno,
warning_message.line,
)
if unicode_warning:
warnings.warn(
"Warning is using unicode non convertible to ascii, "
"converting to a safe representation:\n {!r}".format(compat.safe_str(msg)),
UnicodeWarning,
)
return msg
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(item):
with catch_warnings_for_item(
config=item.config, ihook=item.ihook, when="runtest", item=item
):
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection(session):
config = session.config
with catch_warnings_for_item(
config=config, ihook=config.hook, when="collect", item=None
):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter):
config = terminalreporter.config
with catch_warnings_for_item(
config=config, ihook=config.hook, when="config", item=None
):
yield
def _issue_warning_captured(warning, hook, stacklevel):
"""
This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage:
at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured
hook so we can display this warnings in the terminal. This is a hack until we can sort out #2891.
:param warning: the warning instance.
:param hook: the hook caller
:param stacklevel: stacklevel forwarded to warnings.warn
"""
with warnings.catch_warnings(record=True) as records:
warnings.simplefilter("always", type(warning))
warnings.warn(warning, stacklevel=stacklevel)
hook.pytest_warning_captured.call_historic(
kwargs=dict(warning_message=records[0], when="config", item=None)
)
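Because marks are applied after the ini and command line filters inside catch_warnings_for_item, a per-test filterwarnings mark wins over a project-wide setting. A hypothetical example, with the ini file shown as a comment:

    # pytest.ini (hypothetical project config)
    #   [pytest]
    #   filterwarnings =
    #       error::DeprecationWarning

    # test_warning_filters.py (hypothetical example)
    import warnings
    import pytest

    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_legacy_call():
        # the mark is applied last, so it overrides the ini "error" filter
        warnings.warn("old api", DeprecationWarning)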

View File

@@ -0,0 +1,4 @@
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '1.10.3'

View File

@@ -0,0 +1,144 @@
Metadata-Version: 2.1
Name: atomicwrites
Version: 1.3.0
Summary: Atomic file writes.
Home-page: https://github.com/untitaker/python-atomicwrites
Author: Markus Unterwaditzer
Author-email: markus@unterwaditzer.net
License: MIT
Platform: UNKNOWN
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: Implementation :: CPython
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
===================
python-atomicwrites
===================
.. image:: https://travis-ci.org/untitaker/python-atomicwrites.svg?branch=master
:target: https://travis-ci.org/untitaker/python-atomicwrites
.. image:: https://ci.appveyor.com/api/projects/status/vadc4le3c27to59x/branch/master?svg=true
:target: https://ci.appveyor.com/project/untitaker/python-atomicwrites/branch/master
Atomic file writes.
.. code-block:: python
from atomicwrites import atomic_write
with atomic_write('foo.txt', overwrite=True) as f:
f.write('Hello world.')
# "foo.txt" doesn't exist yet.
# Now it does.
Features that distinguish it from other similar libraries (see `Alternatives and Credit`_):
- Race-free assertion that the target file doesn't yet exist. This can be
controlled with the ``overwrite`` parameter.
- Windows support, although not well-tested. The MSDN resources are not very
explicit about which operations are atomic. I'm basing my assumptions off `a
comment
<https://social.msdn.microsoft.com/Forums/windowsdesktop/en-US/449bb49d-8acc-48dc-a46f-0760ceddbfc3/movefileexmovefilereplaceexisting-ntfs-same-volume-atomic?forum=windowssdk#a239bc26-eaf0-4920-9f21-440bd2be9cc8>`_
by `Doug Crook
<https://social.msdn.microsoft.com/Profile/doug%20e.%20cook>`_, who appears
to be a Microsoft employee:
FAQ: Is MoveFileEx atomic
Frequently asked question: Is MoveFileEx atomic if the existing and new
files are both on the same drive?
The simple answer is "usually, but in some cases it will silently fall-back
to a non-atomic method, so don't count on it".
The implementation of MoveFileEx looks something like this: [...]
The problem is if the rename fails, you might end up with a CopyFile, which
is definitely not atomic.
If you really need atomic-or-nothing, you can try calling
NtSetInformationFile, which is unsupported but is much more likely to be
atomic.
- Simple high-level API that wraps a very flexible class-based API (a brief sketch follows this list).
- Consistent error handling across platforms.
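For instance, custom behaviour can be layered on top by subclassing the writer
class and handing the subclass to ``atomic_write`` via its ``writer_cls``
argument. The snippet below is an illustrative sketch only (``NoFsyncWriter``
is not part of the library); it trades crash-durability for speed by skipping
the fsync step:

.. code-block:: python

    from atomicwrites import AtomicWriter, atomic_write

    class NoFsyncWriter(AtomicWriter):
        """Skip fsync: faster, but not crash-safe."""
        def sync(self, f):
            f.flush()  # flush Python's buffers, but do not force data to disk

    with atomic_write('cache.json', writer_cls=NoFsyncWriter, overwrite=True) as f:
        f.write('{}')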
How it works
============
It uses a temporary file in the same directory as the given path. This ensures
that the temporary file resides on the same filesystem.
The temporary file will then be atomically moved to the target location: On
POSIX, it will use ``rename`` if files should be overwritten, otherwise a
combination of ``link`` and ``unlink``. On Windows, it uses MoveFileEx_ through
stdlib's ``ctypes`` with the appropriate flags.
Note that with ``link`` and ``unlink``, there's a time window where the file
might be available under two entries in the filesystem: The name of the
temporary file, and the name of the target file.
Also note that the permissions of the target file may change this way. In some
situations a ``chmod`` can be issued without any concurrency problems, but
since that is not always the case, this library doesn't do it by itself.
.. _MoveFileEx: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365240%28v=vs.85%29.aspx
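The POSIX overwrite path can be pictured with a few lines of standard-library
code. This is an editorial sketch of the idea described above, not the
library's implementation: it assumes POSIX semantics and omits the Windows
code path, the no-overwrite (``link``/``unlink``) variant and the macOS
``F_FULLFSYNC`` detail covered below.

.. code-block:: python

    import os
    import tempfile

    def sketch_atomic_overwrite(path, data):
        """Write ``data`` to ``path`` without readers ever seeing a partial file."""
        directory = os.path.dirname(path) or '.'
        # Temporary file in the same directory, so both names share a filesystem.
        fd, tmp = tempfile.mkstemp(dir=directory)
        try:
            with os.fdopen(fd, 'w') as f:
                f.write(data)
                f.flush()
                os.fsync(f.fileno())  # flush file content and metadata
            os.rename(tmp, path)      # atomic replace on POSIX
        except BaseException:
            os.unlink(tmp)
            raise
        dir_fd = os.open(directory, 0)
        try:
            os.fsync(dir_fd)          # flush the directory entry (the new name)
        finally:
            os.close(dir_fd)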
fsync
-----
On POSIX, ``fsync`` is invoked on the temporary file after it is written (to
flush file content and metadata), and on the parent directory after the file is
moved (to flush filename).
``fsync`` does not take care of disks' internal buffers, but there don't seem
to be any standard POSIX APIs for that. On OS X, ``fcntl`` is used with
``F_FULLFSYNC`` instead of ``fsync`` for that reason.
On Windows, `_commit <https://msdn.microsoft.com/en-us/library/17618685.aspx>`_
is used, but there are no guarantees about disk internal buffers.
Alternatives and Credit
=======================
Atomicwrites is directly inspired by the following libraries (and shares a
minimal amount of code):
- The Trac project's `utility functions
<http://www.edgewall.org/docs/tags-trac-0.11.7/epydoc/trac.util-pysrc.html>`_,
also used in `Werkzeug <http://werkzeug.pocoo.org/>`_ and
`mitsuhiko/python-atomicfile
<https://github.com/mitsuhiko/python-atomicfile>`_. The idea to use
``ctypes`` instead of ``PyWin32`` originated there.
- `abarnert/fatomic <https://github.com/abarnert/fatomic>`_. Windows support
(based on ``PyWin32``) was originally taken from there.
Other alternatives to atomicwrites include:
- `sashka/atomicfile <https://github.com/sashka/atomicfile>`_. Originally I
considered using that, but at the time it was lacking a lot of features I
needed (Windows support, overwrite-parameter, overriding behavior through
subclassing).
- The `Boltons library collection <https://github.com/mahmoud/boltons>`_
features a class for atomic file writes, which seems to have a very similar
``overwrite`` parameter. It is lacking Windows support though.
License
=======
Licensed under the MIT, see ``LICENSE``.

View File

@@ -0,0 +1,7 @@
atomicwrites-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
atomicwrites-1.3.0.dist-info/METADATA,sha256=T9BgDww1IuYBG4de40vAgvpSbNBrRtNjTBNcukt1HZU,5535
atomicwrites-1.3.0.dist-info/RECORD,,
atomicwrites-1.3.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110
atomicwrites-1.3.0.dist-info/top_level.txt,sha256=ks64zKVUkrl2ZrrP046CsytXlSGf8gLG-IcoXpNyeoc,13
atomicwrites/__init__.py,sha256=00DapdQb04-k79KZjzzfwnI1Q8nfsiF5TPeVcqbGVw0,6562
atomicwrites/__init__.pyc,,

View File

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.31.1)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View File

@@ -0,0 +1 @@
atomicwrites

View File

@@ -0,0 +1,216 @@
import contextlib
import io
import os
import sys
import tempfile
try:
import fcntl
except ImportError:
fcntl = None
__version__ = '1.3.0'
PY2 = sys.version_info[0] == 2
text_type = unicode if PY2 else str # noqa
def _path_to_unicode(x):
if not isinstance(x, text_type):
return x.decode(sys.getfilesystemencoding())
return x
DEFAULT_MODE = "wb" if PY2 else "w"
_proper_fsync = os.fsync
if sys.platform != 'win32':
if hasattr(fcntl, 'F_FULLFSYNC'):
def _proper_fsync(fd):
# https://lists.apple.com/archives/darwin-dev/2005/Feb/msg00072.html
# https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/fsync.2.html
# https://github.com/untitaker/python-atomicwrites/issues/6
fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
def _sync_directory(directory):
# Ensure that filenames are written to disk
fd = os.open(directory, 0)
try:
_proper_fsync(fd)
finally:
os.close(fd)
def _replace_atomic(src, dst):
os.rename(src, dst)
_sync_directory(os.path.normpath(os.path.dirname(dst)))
def _move_atomic(src, dst):
os.link(src, dst)
os.unlink(src)
src_dir = os.path.normpath(os.path.dirname(src))
dst_dir = os.path.normpath(os.path.dirname(dst))
_sync_directory(dst_dir)
if src_dir != dst_dir:
_sync_directory(src_dir)
else:
from ctypes import windll, WinError
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_windows_default_flags = _MOVEFILE_WRITE_THROUGH
def _handle_errors(rv):
if not rv:
raise WinError()
def _replace_atomic(src, dst):
_handle_errors(windll.kernel32.MoveFileExW(
_path_to_unicode(src), _path_to_unicode(dst),
_windows_default_flags | _MOVEFILE_REPLACE_EXISTING
))
def _move_atomic(src, dst):
_handle_errors(windll.kernel32.MoveFileExW(
_path_to_unicode(src), _path_to_unicode(dst),
_windows_default_flags
))
def replace_atomic(src, dst):
'''
Move ``src`` to ``dst``. If ``dst`` exists, it will be silently
overwritten.
Both paths must reside on the same filesystem for the operation to be
atomic.
'''
return _replace_atomic(src, dst)
def move_atomic(src, dst):
'''
Move ``src`` to ``dst``. There might be a time window where both filesystem
entries exist. If ``dst`` already exists, :py:exc:`FileExistsError` will be
raised.
Both paths must reside on the same filesystem for the operation to be
atomic.
'''
return _move_atomic(src, dst)
class AtomicWriter(object):
'''
A helper class for performing atomic writes. Usage::
with AtomicWriter(path).open() as f:
f.write(...)
:param path: The destination filepath. May or may not exist.
:param mode: The filemode for the temporary file. This defaults to `wb` in
Python 2 and `w` in Python 3.
:param overwrite: If set to false, an error is raised if ``path`` exists.
Errors are only raised after the file has been written to. Either way,
the operation is atomic.
If you need further control over the exact behavior, you are encouraged to
subclass.
'''
def __init__(self, path, mode=DEFAULT_MODE, overwrite=False,
**open_kwargs):
if 'a' in mode:
raise ValueError(
'Appending to an existing file is not supported, because that '
'would involve an expensive `copy`-operation to a temporary '
'file. Open the file in normal `w`-mode and copy explicitly '
'if that\'s what you\'re after.'
)
if 'x' in mode:
raise ValueError('Use the `overwrite`-parameter instead.')
if 'w' not in mode:
raise ValueError('AtomicWriters can only be written to.')
self._path = path
self._mode = mode
self._overwrite = overwrite
self._open_kwargs = open_kwargs
def open(self):
'''
Open the temporary file.
'''
return self._open(self.get_fileobject)
@contextlib.contextmanager
def _open(self, get_fileobject):
f = None # make sure f exists even if get_fileobject() fails
try:
success = False
with get_fileobject(**self._open_kwargs) as f:
yield f
self.sync(f)
self.commit(f)
success = True
finally:
if not success:
try:
self.rollback(f)
except Exception:
pass
def get_fileobject(self, suffix="", prefix=tempfile.template, dir=None,
**kwargs):
'''Return the temporary file to use.'''
if dir is None:
dir = os.path.normpath(os.path.dirname(self._path))
descriptor, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
dir=dir)
# io.open() will take either the descriptor or the name, but we need
# the name later for commit()/replace_atomic() and couldn't find a way
# to get the filename from the descriptor.
os.close(descriptor)
kwargs['mode'] = self._mode
kwargs['file'] = name
return io.open(**kwargs)
def sync(self, f):
'''responsible for clearing as many file caches as possible before
commit'''
f.flush()
_proper_fsync(f.fileno())
def commit(self, f):
'''Move the temporary file to the target location.'''
if self._overwrite:
replace_atomic(f.name, self._path)
else:
move_atomic(f.name, self._path)
def rollback(self, f):
'''Clean up all temporary resources.'''
os.unlink(f.name)
def atomic_write(path, writer_cls=AtomicWriter, **cls_kwargs):
'''
Simple atomic writes. This wraps :py:class:`AtomicWriter`::
with atomic_write(path) as f:
f.write(...)
:param path: The target path to write to.
:param writer_cls: The writer class to use. This parameter is useful if you
subclassed :py:class:`AtomicWriter` to change some behavior and want to
use that new subclass.
Additional keyword arguments are passed to the writer class. See
:py:class:`AtomicWriter`.
'''
return writer_cls(path, **cls_kwargs).open()

View File

@@ -0,0 +1,475 @@
Metadata-Version: 2.1
Name: bandit
Version: 1.5.1
Summary: Security oriented static analyser for python code.
Home-page: https://bandit.readthedocs.io/en/latest/
Author: PyCQA
Author-email: code-quality@python.org
License: UNKNOWN
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Console
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Topic :: Security
Requires-Dist: GitPython (>=1.0.1)
Requires-Dist: PyYAML (>=3.12)
Requires-Dist: six (>=1.10.0)
Requires-Dist: stevedore (>=1.20.0)
.. image:: https://github.com/PyCQA/bandit/blob/master/logo/logotype-sm.png
:alt: Bandit
======
.. image:: https://travis-ci.org/PyCQA/bandit.svg?branch=master
:target: https://travis-ci.org/PyCQA/bandit/
:alt: Build Status
.. image:: https://img.shields.io/pypi/v/bandit.svg
:target: https://pypi.org/project/bandit/
:alt: Latest Version
.. image:: https://img.shields.io/pypi/pyversions/bandit.svg
:target: https://pypi.org/project/bandit/
:alt: Python Versions
.. image:: https://img.shields.io/pypi/format/bandit.svg
:target: https://pypi.org/project/bandit/
:alt: Format
.. image:: https://img.shields.io/badge/license-Apache%202-blue.svg
:target: https://github.com/PyCQA/bandit/blob/master/LICENSE
:alt: License
A security linter from PyCQA
* Free software: Apache license
* Documentation: https://bandit.readthedocs.io/en/latest/
* Source: https://github.com/PyCQA/bandit
* Bugs: https://github.com/PyCQA/bandit/issues
Overview
--------
Bandit is a tool designed to find common security issues in Python code. To do
this, Bandit processes each file, builds an AST from it, and runs appropriate
plugins against the AST nodes. Once Bandit has finished scanning all the files
it generates a report.
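The core idea can be pictured with the standard library's ``ast`` module. The
following is an illustrative toy scanner written for this description, not
Bandit's actual code: it parses one file and flags calls to the builtin
``eval``::

    import ast
    import sys

    class EvalFinder(ast.NodeVisitor):
        """Toy check: report calls to the builtin eval()."""
        def visit_Call(self, node):
            if isinstance(node.func, ast.Name) and node.func.id == 'eval':
                print('%s:%d: use of eval()' % (sys.argv[1], node.lineno))
            self.generic_visit(node)

    with open(sys.argv[1]) as source:
        EvalFinder().visit(ast.parse(source.read()))

Bandit generalizes this pattern: plugins register for particular node types,
findings carry severity and confidence levels, and results are collected into
a report.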
Bandit was originally developed within the OpenStack Security Project and
later rehomed to PyCQA.
Installation
------------
Bandit is distributed on PyPI. The best way to install it is with pip:
Create a virtual environment (optional)::
virtualenv bandit-env
Install Bandit::
pip install bandit
# Or if you're working with a Python 3 project
pip3 install bandit
Run Bandit::
bandit -r path/to/your/code
Bandit can also be installed from source. To do so, download the source tarball
from PyPI, then install it::
python setup.py install
Usage
-----
Example usage across a code tree::
bandit -r ~/your_repos/project
Example usage across the ``examples/`` directory, showing three lines of
context and only reporting on the high-severity issues::
bandit examples/*.py -n 3 -lll
Bandit can be run with profiles. To run Bandit against the examples directory
using only the plugins listed in the ``ShellInjection`` profile::
bandit examples/*.py -p ShellInjection
Bandit also supports passing lines of code to scan using standard input. To
run Bandit with standard input::
cat examples/imports.py | bandit -
Usage::
$ bandit -h
usage: bandit [-h] [-r] [-a {file,vuln}] [-n CONTEXT_LINES] [-c CONFIG_FILE]
[-p PROFILE] [-t TESTS] [-s SKIPS] [-l] [-i]
[-f {csv,custom,html,json,screen,txt,xml,yaml}]
[--msg-template MSG_TEMPLATE] [-o [OUTPUT_FILE]] [-v] [-d]
[--ignore-nosec] [-x EXCLUDED_PATHS] [-b BASELINE]
[--ini INI_PATH] [--version]
[targets [targets ...]]
Bandit - a Python source code security analyzer
positional arguments:
targets source file(s) or directory(s) to be tested
optional arguments:
-h, --help show this help message and exit
-r, --recursive find and process files in subdirectories
-a {file,vuln}, --aggregate {file,vuln}
aggregate output by vulnerability (default) or by
filename
-n CONTEXT_LINES, --number CONTEXT_LINES
maximum number of code lines to output for each issue
-c CONFIG_FILE, --configfile CONFIG_FILE
optional config file to use for selecting plugins and
overriding defaults
-p PROFILE, --profile PROFILE
profile to use (defaults to executing all tests)
-t TESTS, --tests TESTS
comma-separated list of test IDs to run
-s SKIPS, --skip SKIPS
comma-separated list of test IDs to skip
-l, --level report only issues of a given severity level or higher
(-l for LOW, -ll for MEDIUM, -lll for HIGH)
-i, --confidence report only issues of a given confidence level or
higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)
-f {csv,custom,html,json,screen,txt,xml,yaml}, --format {csv,custom,html,json,screen,txt,xml,yaml}
specify output format
--msg-template MSG_TEMPLATE
specify output message template (only usable with
--format custom), see CUSTOM FORMAT section for list
of available values
-o [OUTPUT_FILE], --output [OUTPUT_FILE]
write report to filename
-v, --verbose output extra information like excluded and included
files
-d, --debug turn on debug mode
--ignore-nosec do not skip lines with # nosec comments
-x EXCLUDED_PATHS, --exclude EXCLUDED_PATHS
comma-separated list of paths to exclude from scan
(note that these are in addition to the excluded paths
provided in the config file)
-b BASELINE, --baseline BASELINE
path of a baseline report to compare against (only
JSON-formatted files are accepted)
--ini INI_PATH path to a .bandit file that supplies command line
arguments
--version show program's version number and exit
CUSTOM FORMATTING
-----------------
Available tags:
{abspath}, {relpath}, {line}, {test_id},
{severity}, {msg}, {confidence}, {range}
Example usage:
Default template:
bandit -r examples/ --format custom --msg-template \
"{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"
Provides same output as:
bandit -r examples/ --format custom
Tags can also be formatted in python string.format() style:
bandit -r examples/ --format custom --msg-template \
"{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"
See python documentation for more information about formatting style:
https://docs.python.org/3.4/library/string.html
The following tests were discovered and loaded:
-----------------------------------------------
B101 assert_used
B102 exec_used
B103 set_bad_file_permissions
B104 hardcoded_bind_all_interfaces
B105 hardcoded_password_string
B106 hardcoded_password_funcarg
B107 hardcoded_password_default
B108 hardcoded_tmp_directory
B110 try_except_pass
B112 try_except_continue
B201 flask_debug_true
B301 pickle
B302 marshal
B303 md5
B304 ciphers
B305 cipher_modes
B306 mktemp_q
B307 eval
B308 mark_safe
B309 httpsconnection
B310 urllib_urlopen
B311 random
B312 telnetlib
B313 xml_bad_cElementTree
B314 xml_bad_ElementTree
B315 xml_bad_expatreader
B316 xml_bad_expatbuilder
B317 xml_bad_sax
B318 xml_bad_minidom
B319 xml_bad_pulldom
B320 xml_bad_etree
B321 ftplib
B322 input
B323 unverified_context
B324 hashlib_new_insecure_functions
B325 tempnam
B401 import_telnetlib
B402 import_ftplib
B403 import_pickle
B404 import_subprocess
B405 import_xml_etree
B406 import_xml_sax
B407 import_xml_expat
B408 import_xml_minidom
B409 import_xml_pulldom
B410 import_lxml
B411 import_xmlrpclib
B412 import_httpoxy
B413 import_pycrypto
B414 import_pycryptodome
B501 request_with_no_cert_validation
B502 ssl_with_bad_version
B503 ssl_with_bad_defaults
B504 ssl_with_no_version
B505 weak_cryptographic_key
B506 yaml_load
B507 ssh_no_host_key_verification
B601 paramiko_calls
B602 subprocess_popen_with_shell_equals_true
B603 subprocess_without_shell_equals_true
B604 any_other_function_with_shell_equals_true
B605 start_process_with_a_shell
B606 start_process_with_no_shell
B607 start_process_with_partial_path
B608 hardcoded_sql_expressions
B609 linux_commands_wildcard_injection
B610 django_extra_used
B611 django_rawsql_used
B701 jinja2_autoescape_false
B702 use_of_mako_templates
B703 django_mark_safe
Configuration
-------------
An optional config file may be supplied and may include the following (a sketch of such a file is shown after this list):
- lists of tests which should or shouldn't be run
- exclude_dirs - sections of the path that, if matched, will be excluded from
scanning
- overridden plugin settings - may provide different settings for some
plugins
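A hypothetical sketch of such a config file is shown below. Bandit reads its
config as YAML; the key names here mirror the descriptions above but are
illustrative, so verify them against the file produced by
``bandit-config-generator`` before relying on them::

    # bandit.yaml -- illustrative only
    exclude_dirs: ['/test']      # path sections to leave out of the scan
    skips: ['B101', 'B601']      # test IDs that should not be run
    # tests: ['B301', 'B302']    # ...or list only the tests that should run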
Per Project Command Line Args
-----------------------------
Projects may include a `.bandit` file that specifies command line arguments
that should be supplied for that project. The currently supported arguments
are:
- targets: comma separated list of target dirs/files to run bandit on
- exclude: comma separated list of excluded paths
- skips: comma separated list of tests to skip
- tests: comma separated list of tests to run
To use this, put a .bandit file in your project's directory. For example:
::
[bandit]
exclude: /test
::
[bandit]
tests: B101,B102,B301
Exclusions
----------
In the event that a line of code triggers a Bandit issue, but the line
has been reviewed and the issue is a false positive or acceptable for some
other reason, the line can be marked with a ``# nosec`` and any results
associated with it will not be reported.
For example, although this line may cause Bandit to report a potential
security issue, it will not be reported::
self.process = subprocess.Popen('/bin/echo', shell=True) # nosec
Vulnerability Tests
-------------------
Vulnerability tests or "plugins" are defined in files in the plugins directory.
Tests are written in Python and are autodiscovered from the plugins directory.
Each test can examine one or more types of Python statements. Tests are marked
with the types of Python statements they examine (for example: function call,
string, import, etc).
Tests are executed by the ``BanditNodeVisitor`` object as it visits each node
in the AST.
Test results are maintained in the ``BanditResultStore`` and aggregated for
output at the completion of a test run.
Writing Tests
-------------
To write a test:
- Identify a vulnerability to build a test for, and create a new file in
examples/ that contains one or more cases of that vulnerability.
- Consider the vulnerability you're testing for, and mark the function with one
or more of the appropriate decorators:
- @checks('Call')
- @checks('Import', 'ImportFrom')
- @checks('Str')
- Create a new Python source file to contain your test; you can reference
existing tests for examples.
- The function that you create should take a parameter "context", which is
an instance of the context class that you can query for information about the
current element being examined. You can also get the raw AST node for
more advanced use cases. Please see the context.py file for more.
- Extend your Bandit configuration file as needed to support your new test.
- Execute Bandit against the test file you defined in examples/ and ensure
that it detects the vulnerability. Consider variations on how this
vulnerability might present itself and extend the example file and the test
function accordingly.
Extending Bandit
----------------
Bandit allows users to write and register extensions for checks and formatters.
Bandit will load plugins from two entry-points:
- `bandit.formatters`
- `bandit.plugins`
Formatters need to accept 4 things:
- `result_store`: An instance of `bandit.core.BanditResultStore`
- `file_list`: The list of files which were inspected in the scope
- `scores`: The scores awarded to each file in the scope
- `excluded_files`: The list of files that were excluded from the scope
Plugins tend to take advantage of the `bandit.checks` decorator which allows
the author to register a check for a particular type of AST node. For example
::
@bandit.checks('Call')
def prohibit_unsafe_deserialization(context):
if 'unsafe_load' in context.call_function_name_qual:
return bandit.Issue(
severity=bandit.HIGH,
confidence=bandit.HIGH,
text="Unsafe deserialization detected."
)
To register your plugin, you have two options:
1. If you're using setuptools directly, add something like the following to
your ``setup`` call::
# If you have an imaginary bson formatter in the bandit_bson module
# and a function called `formatter`.
entry_points={'bandit.formatters': ['bson = bandit_bson:formatter']}
# Or a check for using mako templates in the bandit_mako module
entry_points={'bandit.plugins': ['mako = bandit_mako']}
2. If you're using pbr, add something like the following to your `setup.cfg`
file::
[entry_points]
bandit.formatters =
bson = bandit_bson:formatter
bandit.plugins =
mako = bandit_mako
Contributing
------------
Contributions to Bandit are always welcome!
The best way to get started with Bandit is to grab the source::
git clone https://github.com/PyCQA/bandit.git
You can test any changes with tox::
pip install tox
tox -e pep8
tox -e py27
tox -e py35
tox -e docs
tox -e cover
Please make pull requests using your own branch, and not master::
git checkout -b mychange
git push origin mychange
Reporting Bugs
--------------
Bugs should be reported on github. To file a bug against Bandit, visit:
https://github.com/PyCQA/bandit/issues
Under Which Version of Python Should I Install Bandit?
------------------------------------------------------
The answer to this question depends on the project(s) you will be running
Bandit against. If your project is only compatible with Python 2.7, you
should install Bandit to run under Python 2.7. If your project is only
compatible with Python 3.5, then install Bandit to run under Python 3.5. If your
project supports both, you *could* run Bandit with both versions, but you don't
have to.
Bandit uses the `ast` module from Python's standard library in order to
analyze your Python code. The `ast` module is only able to parse Python code
that is valid in the version of the interpreter from which it is imported. In
other words, if you try to use Python 2.7's `ast` module to parse code written
for 3.5 that uses, for example, `yield from` with asyncio, then you'll have
syntax errors that will prevent Bandit from working properly. Alternatively,
if you are relying on 2.7's octal notation of `0777` then you'll have a syntax
error if you run Bandit on 3.x.
References
==========
Bandit docs: https://bandit.readthedocs.io/en/latest/
Python AST module documentation: https://docs.python.org/2/library/ast.html
Green Tree Snakes - the missing Python AST docs:
https://greentreesnakes.readthedocs.org/en/latest/
Documentation of the various types of AST nodes that Bandit currently covers
or could be extended to cover:
https://greentreesnakes.readthedocs.org/en/latest/nodes.html

View File

@@ -0,0 +1,130 @@
../../../bin/bandit,sha256=104PWCu_-KCIB55nCkgEcwG3GmFncbcuKSUpCQL9kg4,269
../../../bin/bandit-baseline,sha256=A_Co1__iRCL58cQKsqlSvxXhlIdHWDk-R5-SnI6nkd0,273
../../../bin/bandit-config-generator,sha256=Hv8hGDst-2fCSlxcchM_K7bWWMjOsw4lqzfKDfiSRh4,281
bandit-1.5.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
bandit-1.5.1.dist-info/METADATA,sha256=77zxQ-vXQNa1vq_h1wyIqnx5F4Ja_i55o6KP1rAvEUU,16686
bandit-1.5.1.dist-info/RECORD,,
bandit-1.5.1.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110
bandit-1.5.1.dist-info/entry_points.txt,sha256=Yv0HmDCLhsqWQ2-Hr5vkyTuHd6k0obe3_53wnIQi0zI,3384
bandit-1.5.1.dist-info/pbr.json,sha256=nSZiV4vxHdaFvBn4mjRTK5N74aSZVZtoP9B7f8V27Zg,47
bandit-1.5.1.dist-info/top_level.txt,sha256=SVJ-U-In_cpe2PQq5ZOlxjEnlAV5MfjvfFuGzg8wgdg,7
bandit/__init__.py,sha256=BoWeY00rKrG-kKxrEAbq0O5Uan19b_c-OvTiu2kWIjw,1175
bandit/__init__.pyc,,
bandit/__main__.py,sha256=6K6Vee3y7MPC6ywhBQaTZrw26xJhnxEz_L2ayqgLa4I,62
bandit/__main__.pyc,,
bandit/blacklists/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
bandit/blacklists/__init__.pyc,,
bandit/blacklists/calls.py,sha256=Xzdo3kj31zKvCNuSL614ogPjJ7vp_lVKpfEu89xxO34,26560
bandit/blacklists/calls.pyc,,
bandit/blacklists/imports.py,sha256=HbuFwutXjHL4YSQtOPbPaOinJ7RxX5gnYHsPHBW2BgY,15796
bandit/blacklists/imports.pyc,,
bandit/blacklists/utils.py,sha256=O_AS3BPDHMeFRMAUGgqSHtOPz63TxVlmUCcZBVTI3t0,870
bandit/blacklists/utils.pyc,,
bandit/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
bandit/cli/__init__.pyc,,
bandit/cli/baseline.py,sha256=MhdDLYxDt65ZCEC-JfTcxxGqWlbOGGguYSBWQIUtk8k,7645
bandit/cli/baseline.pyc,,
bandit/cli/config_generator.py,sha256=ftBdIrXxzZzSvJWtP7m1qn6ejqQu2UCeG3k9m-mtu4I,6323
bandit/cli/config_generator.pyc,,
bandit/cli/main.py,sha256=phN5rVIb0hu3_fP6MRv8L2bqQsotxGhbr4zyBTQ510M,14610
bandit/cli/main.pyc,,
bandit/core/__init__.py,sha256=t3kUM7_udVcitNfjCOEIG9w8iqcZ4VLpR6GsdH-zL6o,1089
bandit/core/__init__.pyc,,
bandit/core/blacklisting.py,sha256=tzELQvilIIRr9cafqgFzfY0IjiVqkYbHdfQQf6IQ2s0,3045
bandit/core/blacklisting.pyc,,
bandit/core/config.py,sha256=vIVk923NQdw7-mXlQeyF5eDd7ppto_LxRxRPMLF83qc,9488
bandit/core/config.pyc,,
bandit/core/constants.py,sha256=pJ9kCKhQXYMnHiNKhKY5Ip_4bH_4oXXIBG6hkysrsQo,1639
bandit/core/constants.pyc,,
bandit/core/context.py,sha256=qJRYDHlxfPe-jdo3Fm8ae5zWGL3CG6hpg62UUhSRvZE,11612
bandit/core/context.pyc,,
bandit/core/docs_utils.py,sha256=A-ybwi1ddgX-Qc7JFYQ7zo6C6bXSCSKJjcp92xsu--k,2114
bandit/core/docs_utils.pyc,,
bandit/core/extension_loader.py,sha256=hQqRi6_HBcAfdJxub6z6L8VqEXubhhdps1VGLwtyqoM,4481
bandit/core/extension_loader.pyc,,
bandit/core/issue.py,sha256=BscF0Uk4u3-HmmanSLNFKYdwRevwIvrGK51Vq9fWoI0,4850
bandit/core/issue.pyc,,
bandit/core/manager.py,sha256=seYETNpM-Pi9ihMxPG3ggwA45WK3euXIR0EGqLgHGh4,15313
bandit/core/manager.pyc,,
bandit/core/meta_ast.py,sha256=3s4EI5dzUMpf5BMsycVNcU9Z3urZtZvAa5pTbWDlf7E,1659
bandit/core/meta_ast.pyc,,
bandit/core/metrics.py,sha256=R-3cfJJqzzndiGaTeLdi6QWobQfYtjhtDxRnAi_6T80,3609
bandit/core/metrics.pyc,,
bandit/core/node_visitor.py,sha256=S7kpy4n3_Q4sR8foFaUhQh5fV7f8P83FbE_QXwxtE54,10361
bandit/core/node_visitor.pyc,,
bandit/core/test_properties.py,sha256=C5Trw_91ymVhRDngPkUFRjhL3U24MabhuhzcVYo4RTo,2518
bandit/core/test_properties.pyc,,
bandit/core/test_set.py,sha256=5zEfjvAzGRHYwIxrRaXMG3J2dNGVYsUf80UCvUTfS2E,4569
bandit/core/test_set.pyc,,
bandit/core/tester.py,sha256=1Ir-WQLbnpuf6zdR9K3dbf_s9y-kimzohsEvMlukwHM,4119
bandit/core/tester.pyc,,
bandit/core/utils.py,sha256=BNIr3TW18LgkJw_UDWD2g9WRzr71zs_DwsR4441ZYvg,10957
bandit/core/utils.pyc,,
bandit/formatters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
bandit/formatters/__init__.pyc,,
bandit/formatters/csv.py,sha256=IJhqMszLEP1Vbr_PN9i6NCswYmqsC50K4q1wLY_SUDM,2716
bandit/formatters/csv.pyc,,
bandit/formatters/custom.py,sha256=7uZOdlmkQcKgWD0a47_RmMoerxL_kZoZ_bftlCePzeE,5841
bandit/formatters/custom.pyc,,
bandit/formatters/html.py,sha256=xnd5rB8JJKVsY1mzLkkU9Zp_hqZZ1zBB6BEc_SD6-lY,9137
bandit/formatters/html.pyc,,
bandit/formatters/json.py,sha256=4a0iL2ETzQWYAvaWM_Wwv47diE4SIsSX9zZx9HYvTiI,4643
bandit/formatters/json.pyc,,
bandit/formatters/screen.py,sha256=hpve2zCl196aW9dfQkkwqcoIQL07XznzLmYA1z1SeDk,6172
bandit/formatters/screen.pyc,,
bandit/formatters/text.py,sha256=CfVZvSyqDVF1_4H8tSPPmaZJ6OP3KIVp-Aa2yzncBK0,5764
bandit/formatters/text.pyc,,
bandit/formatters/utils.py,sha256=I5bz-jR51v0maJTvZqIQO9BGTxylJVvMhhu-7yECra0,1589
bandit/formatters/utils.pyc,,
bandit/formatters/xml.py,sha256=IX-pta4yKMUwJvytXN6FcrC1GuwyLL0Y_8l2cpJtI8k,3156
bandit/formatters/xml.pyc,,
bandit/formatters/yaml.py,sha256=iMA6AFDFZ_2QBC9ppD5d4ysN3eVSqxHetC6MZkc6XBU,3977
bandit/formatters/yaml.pyc,,
bandit/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
bandit/plugins/__init__.pyc,,
bandit/plugins/app_debug.py,sha256=FXPfM5qIuhI1uDLeziwaaXwCqUVdRVXo7OIKSqMQj3M,2538
bandit/plugins/app_debug.pyc,,
bandit/plugins/asserts.py,sha256=o3ugmGqXL_98fVDgLZSh4UpJ0IFSce-hiitYLl-Kf40,2095
bandit/plugins/asserts.pyc,,
bandit/plugins/crypto_request_no_cert_validation.py,sha256=zFay8uLWftFoNTzT8IAEB0gk7b26r5HLR21cXWH5UaU,2675
bandit/plugins/crypto_request_no_cert_validation.pyc,,
bandit/plugins/django_sql_injection.py,sha256=VboocL9oP-RZ71Peedv-0Cii5hJzR4LyoTOtoecqNGs,3712
bandit/plugins/django_sql_injection.pyc,,
bandit/plugins/django_xss.py,sha256=fR9JQ1NoZ_Qn2SYz5cRBCzRrBfSkoxIu7CNo67Mavy8,10937
bandit/plugins/django_xss.pyc,,
bandit/plugins/exec.py,sha256=mmV8cQhIwEKgAfan2Ze3dTTmRy5csafqgHR726RAadw,1811
bandit/plugins/exec.pyc,,
bandit/plugins/general_bad_file_permissions.py,sha256=JTGal1m1LdN-UrXJrpxVt3PeEpMyzmmSjUqP3yPr1-I,3250
bandit/plugins/general_bad_file_permissions.pyc,,
bandit/plugins/general_bind_all_interfaces.py,sha256=95BRu5MJO3VruW45ic3Ja90QJdZAcSXSh4Bq8MBHfxk,1792
bandit/plugins/general_bind_all_interfaces.pyc,,
bandit/plugins/general_hardcoded_password.py,sha256=6fw3jfc_HMtlSkXKr1TI5MSz6UNywgl4lIy-AUFudWs,6396
bandit/plugins/general_hardcoded_password.pyc,,
bandit/plugins/general_hardcoded_tmp.py,sha256=0Qn-xNv9GWm5VXBvdRScwCCBIW4QGdh5YOnGonwVKro,2553
bandit/plugins/general_hardcoded_tmp.pyc,,
bandit/plugins/hashlib_new_insecure_functions.py,sha256=LdHnVXFZ2TdLLU7SerW30XyYxW7HeEtOAy-Uxxv-Q-8,2371
bandit/plugins/hashlib_new_insecure_functions.pyc,,
bandit/plugins/injection_paramiko.py,sha256=F3dmc6S2bW9KRydJEiNBJIXLjRL1nO-aOPDOYWxhXXY,2643
bandit/plugins/injection_paramiko.pyc,,
bandit/plugins/injection_shell.py,sha256=F02FWh8jR7bxMVEBQNt4sz4LYRny0cfqNeIqhVZBctg,24971
bandit/plugins/injection_shell.pyc,,
bandit/plugins/injection_sql.py,sha256=joQaczwfUW9CC0s1bouy4Yd5iPjYN4ghJxgCsE1i338,3677
bandit/plugins/injection_sql.pyc,,
bandit/plugins/injection_wildcard.py,sha256=nrodof4DKcfR6lvm3Yh0VxWwOX9aaKRKwkWAJ4M-MbA,5242
bandit/plugins/injection_wildcard.pyc,,
bandit/plugins/insecure_ssl_tls.py,sha256=kdkCmm9UEvSxaKkzsMww-HZyzqXl_N_v9-ISBdA3NHk,10153
bandit/plugins/insecure_ssl_tls.pyc,,
bandit/plugins/jinja2_templates.py,sha256=WeIdyCYMqmkdL8XI4xrCGrpyItofWXGyPGdhN8vZlDc,5755
bandit/plugins/jinja2_templates.pyc,,
bandit/plugins/mako_templates.py,sha256=Cl-6Vmvpshx1kq-cwLy1LmMVA3rK3pTYsQ_-9klmLlk,2904
bandit/plugins/mako_templates.pyc,,
bandit/plugins/ssh_no_host_key_verification.py,sha256=pniaO6sIJDxnV5pVrq4VmG7WmDz6WDIjp5K7o-xskbo,2424
bandit/plugins/ssh_no_host_key_verification.pyc,,
bandit/plugins/try_except_continue.py,sha256=bNeh6vPLYGWxj6AsZSsXPFci64WVHyfh2rSUbZhMB08,3294
bandit/plugins/try_except_continue.pyc,,
bandit/plugins/try_except_pass.py,sha256=MsFF49zxF_Wd22MR1TukRzvVPDnf1lHgQGg_-HcE2Go,3162
bandit/plugins/try_except_pass.pyc,,
bandit/plugins/weak_cryptographic_key.py,sha256=yuxz3L9rqcITGSuvqYAW6U6UPFZdBSy66Ra179-Jwng,5146
bandit/plugins/weak_cryptographic_key.pyc,,
bandit/plugins/yaml_load.py,sha256=VREUsWCB2BnkFqF0LpgZNyqlmUDIxyvUYJTDBoDUn5A,2452
bandit/plugins/yaml_load.pyc,,

View File

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.31.1)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View File

@@ -0,0 +1,54 @@
[bandit.blacklists]
calls = bandit.blacklists.calls:gen_blacklist
imports = bandit.blacklists.imports:gen_blacklist
[bandit.formatters]
csv = bandit.formatters.csv:report
custom = bandit.formatters.custom:report
html = bandit.formatters.html:report
json = bandit.formatters.json:report
screen = bandit.formatters.screen:report
txt = bandit.formatters.text:report
xml = bandit.formatters.xml:report
yaml = bandit.formatters.yaml:report
[bandit.plugins]
any_other_function_with_shell_equals_true = bandit.plugins.injection_shell:any_other_function_with_shell_equals_true
assert_used = bandit.plugins.asserts:assert_used
django_extra_used = bandit.plugins.django_sql_injection:django_extra_used
django_mark_safe = bandit.plugins.django_xss:django_mark_safe
django_rawsql_used = bandit.plugins.django_sql_injection:django_rawsql_used
exec_used = bandit.plugins.exec:exec_used
flask_debug_true = bandit.plugins.app_debug:flask_debug_true
hardcoded_bind_all_interfaces = bandit.plugins.general_bind_all_interfaces:hardcoded_bind_all_interfaces
hardcoded_password_default = bandit.plugins.general_hardcoded_password:hardcoded_password_default
hardcoded_password_funcarg = bandit.plugins.general_hardcoded_password:hardcoded_password_funcarg
hardcoded_password_string = bandit.plugins.general_hardcoded_password:hardcoded_password_string
hardcoded_sql_expressions = bandit.plugins.injection_sql:hardcoded_sql_expressions
hardcoded_tmp_directory = bandit.plugins.general_hardcoded_tmp:hardcoded_tmp_directory
hashlib_new_insecure_functions = bandit.plugins.hashlib_new_insecure_functions:hashlib_new
jinja2_autoescape_false = bandit.plugins.jinja2_templates:jinja2_autoescape_false
linux_commands_wildcard_injection = bandit.plugins.injection_wildcard:linux_commands_wildcard_injection
paramiko_calls = bandit.plugins.injection_paramiko:paramiko_calls
request_with_no_cert_validation = bandit.plugins.crypto_request_no_cert_validation:request_with_no_cert_validation
set_bad_file_permissions = bandit.plugins.general_bad_file_permissions:set_bad_file_permissions
ssh_no_host_key_verification = bandit.plugins.ssh_no_host_key_verification:ssh_no_host_key_verification
ssl_with_bad_defaults = bandit.plugins.insecure_ssl_tls:ssl_with_bad_defaults
ssl_with_bad_version = bandit.plugins.insecure_ssl_tls:ssl_with_bad_version
ssl_with_no_version = bandit.plugins.insecure_ssl_tls:ssl_with_no_version
start_process_with_a_shell = bandit.plugins.injection_shell:start_process_with_a_shell
start_process_with_no_shell = bandit.plugins.injection_shell:start_process_with_no_shell
start_process_with_partial_path = bandit.plugins.injection_shell:start_process_with_partial_path
subprocess_popen_with_shell_equals_true = bandit.plugins.injection_shell:subprocess_popen_with_shell_equals_true
subprocess_without_shell_equals_true = bandit.plugins.injection_shell:subprocess_without_shell_equals_true
try_except_continue = bandit.plugins.try_except_continue:try_except_continue
try_except_pass = bandit.plugins.try_except_pass:try_except_pass
use_of_mako_templates = bandit.plugins.mako_templates:use_of_mako_templates
weak_cryptographic_key = bandit.plugins.weak_cryptographic_key:weak_cryptographic_key
yaml_load = bandit.plugins.yaml_load:yaml_load
[console_scripts]
bandit = bandit.cli.main:main
bandit-baseline = bandit.cli.baseline:main
bandit-config-generator = bandit.cli.config_generator:main

View File

@@ -0,0 +1 @@
{"git_version": "2421df6", "is_release": false}

View File

@@ -0,0 +1,31 @@
# -*- coding:utf-8 -*-
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from bandit.core import config # noqa
from bandit.core import context # noqa
from bandit.core import manager # noqa
from bandit.core import meta_ast # noqa
from bandit.core import node_visitor # noqa
from bandit.core import test_set # noqa
from bandit.core import tester # noqa
from bandit.core import utils # noqa
from bandit.core.constants import * # noqa
from bandit.core.issue import * # noqa
from bandit.core.test_properties import * # noqa
__version__ = pbr.version.VersionInfo('bandit').version_string()

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env python
from bandit.cli import main
main.main()

View File

@@ -0,0 +1,577 @@
# -*- coding:utf-8 -*-
#
# Copyright 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
r"""
====================================================
Blacklist various Python calls known to be dangerous
====================================================
This blacklist data checks for a number of Python calls known to have possible
security implications. The following blacklist tests are run against any
function calls encountered in the scanned code base, triggered by encountering
ast.Call nodes.
B301: pickle
------------
Pickle and modules that wrap it can be unsafe when used to
deserialize untrusted data, possible security issue.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B301 | pickle | - pickle.loads | Medium |
| | | - pickle.load | |
| | | - pickle.Unpickler | |
| | | - cPickle.loads | |
| | | - cPickle.load | |
| | | - cPickle.Unpickler | |
| | | - dill.loads | |
| | | - dill.load | |
| | | - dill.Unpickler | |
+------+---------------------+------------------------------------+-----------+
B302: marshal
-------------
Deserialization with the marshal module is possibly dangerous.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B302 | marshal | - marshal.load | Medium |
| | | - marshal.loads | |
+------+---------------------+------------------------------------+-----------+
B303: md5
---------
Use of insecure MD2, MD4, MD5, or SHA1 hash function.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B303 | md5 | - hashlib.md5 | Medium |
| | | - hashlib.sha1 | |
| | | - Crypto.Hash.MD2.new | |
| | | - Crypto.Hash.MD4.new | |
| | | - Crypto.Hash.MD5.new | |
| | | - Crypto.Hash.SHA.new | |
| | | - Cryptodome.Hash.MD2.new | |
| | | - Cryptodome.Hash.MD4.new | |
| | | - Cryptodome.Hash.MD5.new | |
| | | - Cryptodome.Hash.SHA.new | |
| | | - cryptography.hazmat.primitives | |
| | | .hashes.MD5 | |
| | | - cryptography.hazmat.primitives | |
| | | .hashes.SHA1 | |
+------+---------------------+------------------------------------+-----------+
B304 - B305: ciphers and modes
------------------------------
Use of insecure cipher or cipher mode. Replace with a known secure cipher such
as AES.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B304 | ciphers | - Crypto.Cipher.ARC2.new | High |
| | | - Crypto.Cipher.ARC4.new | |
| | | - Crypto.Cipher.Blowfish.new | |
| | | - Crypto.Cipher.DES.new | |
| | | - Crypto.Cipher.XOR.new | |
| | | - Cryptodome.Cipher.ARC2.new | |
| | | - Cryptodome.Cipher.ARC4.new | |
| | | - Cryptodome.Cipher.Blowfish.new | |
| | | - Cryptodome.Cipher.DES.new | |
| | | - Cryptodome.Cipher.XOR.new | |
| | | - cryptography.hazmat.primitives | |
| | | .ciphers.algorithms.ARC4 | |
| | | - cryptography.hazmat.primitives | |
| | | .ciphers.algorithms.Blowfish | |
| | | - cryptography.hazmat.primitives | |
| | | .ciphers.algorithms.IDEA | |
+------+---------------------+------------------------------------+-----------+
| B305 | cipher_modes | - cryptography.hazmat.primitives | Medium |
| | | .ciphers.modes.ECB | |
+------+---------------------+------------------------------------+-----------+
B306: mktemp_q
--------------
Use of insecure and deprecated function (mktemp).
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B306 | mktemp_q | - tempfile.mktemp | Medium |
+------+---------------------+------------------------------------+-----------+
B307: eval
----------
Use of possibly insecure function - consider using safer ast.literal_eval.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B307 | eval | - eval | Medium |
+------+---------------------+------------------------------------+-----------+
B308: mark_safe
---------------
Use of mark_safe() may expose cross-site scripting vulnerabilities and should
be reviewed.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B308 | mark_safe | - django.utils.safestring.mark_safe| Medium |
+------+---------------------+------------------------------------+-----------+
B309: httpsconnection
---------------------
Use of HTTPSConnection on older versions of Python prior to 2.7.9 and 3.4.3 does
not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B309 | httpsconnection | - httplib.HTTPSConnection | Medium |
| | | - http.client.HTTPSConnection | |
| | | - six.moves.http_client | |
| | | .HTTPSConnection | |
+------+---------------------+------------------------------------+-----------+
B310: urllib_urlopen
--------------------
Audit url open for permitted schemes. Allowing use of 'file:' or custom
schemes is often unexpected.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B310 | urllib_urlopen | - urllib.urlopen | Medium |
| | | - urllib.request.urlopen | |
| | | - urllib.urlretrieve | |
| | | - urllib.request.urlretrieve | |
| | | - urllib.URLopener | |
| | | - urllib.request.URLopener | |
| | | - urllib.FancyURLopener | |
| | | - urllib.request.FancyURLopener | |
| | | - urllib2.urlopen | |
| | | - urllib2.Request | |
| | | - six.moves.urllib.request.urlopen | |
| | | - six.moves.urllib.request | |
| | | .urlretrieve | |
| | | - six.moves.urllib.request | |
| | | .URLopener | |
| | | - six.moves.urllib.request | |
| | | .FancyURLopener | |
+------+---------------------+------------------------------------+-----------+
B311: random
------------
Standard pseudo-random generators are not suitable for security/cryptographic
purposes.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B311 | random | - random.random | Low |
| | | - random.randrange | |
| | | - random.randint | |
| | | - random.choice | |
| | | - random.uniform | |
| | | - random.triangular | |
+------+---------------------+------------------------------------+-----------+
B312: telnetlib
---------------
Telnet-related functions are being called. Telnet is considered insecure. Use
SSH or some other encrypted protocol.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B312 | telnetlib | - telnetlib.\* | High |
+------+---------------------+------------------------------------+-----------+
B313 - B320: XML
----------------
Most of this is based off of Christian Heimes' work on defusedxml:
https://pypi.org/project/defusedxml/#defusedxml-sax
Using various XML methods to parse untrusted XML data is known to be vulnerable
to XML attacks. Methods should be replaced with their defusedxml equivalents.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B313 | xml_bad_cElementTree| - xml.etree.cElementTree.parse | Medium |
| | | - xml.etree.cElementTree.iterparse | |
| | | - xml.etree.cElementTree.fromstring| |
| | | - xml.etree.cElementTree.XMLParser | |
+------+---------------------+------------------------------------+-----------+
| B314 | xml_bad_ElementTree | - xml.etree.ElementTree.parse | Medium |
| | | - xml.etree.ElementTree.iterparse | |
| | | - xml.etree.ElementTree.fromstring | |
| | | - xml.etree.ElementTree.XMLParser | |
+------+---------------------+------------------------------------+-----------+
| B315 | xml_bad_expatreader | - xml.sax.expatreader.create_parser| Medium |
+------+---------------------+------------------------------------+-----------+
| B316 | xml_bad_expatbuilder| - xml.dom.expatbuilder.parse | Medium |
| | | - xml.dom.expatbuilder.parseString | |
+------+---------------------+------------------------------------+-----------+
| B317 | xml_bad_sax | - xml.sax.parse | Medium |
| | | - xml.sax.parseString | |
| | | - xml.sax.make_parser | |
+------+---------------------+------------------------------------+-----------+
| B318 | xml_bad_minidom | - xml.dom.minidom.parse | Medium |
| | | - xml.dom.minidom.parseString | |
+------+---------------------+------------------------------------+-----------+
| B319 | xml_bad_pulldom | - xml.dom.pulldom.parse | Medium |
| | | - xml.dom.pulldom.parseString | |
+------+---------------------+------------------------------------+-----------+
| B320 | xml_bad_etree | - lxml.etree.parse | Medium |
| | | - lxml.etree.fromstring | |
| | | - lxml.etree.RestrictedElement | |
| | | - lxml.etree.GlobalParserTLS | |
| | | - lxml.etree.getDefaultParser | |
| | | - lxml.etree.check_docinfo | |
+------+---------------------+------------------------------------+-----------+
B321: ftplib
------------
FTP-related functions are being called. FTP is considered insecure. Use
SSH/SFTP/SCP or some other encrypted protocol.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B321 | ftplib | - ftplib.\* | High |
+------+---------------------+------------------------------------+-----------+
B322: input
------------
The input method in Python 2 will read from standard input, evaluate and
run the resulting string as python source code. This is similar, though in
many ways worse, than using eval. On Python 2, use raw_input instead; input
is safe in Python 3.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B322 | input | - input | High |
+------+---------------------+------------------------------------+-----------+
B323: unverified_context
------------------------
By default, Python will create a secure, verified ssl context for use in such
classes as HTTPSConnection. However, it still allows using an insecure
context via the _create_unverified_context that reverts to the previous
behavior that does not validate certificates or perform hostname checks.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B323 | unverified_context | - ssl._create_unverified_context | Medium |
+------+---------------------+------------------------------------+-----------+
B325: tempnam
--------------
Use of os.tempnam() and os.tmpnam() is vulnerable to symlink attacks. Consider
using tmpfile() instead.
For further information:
https://docs.python.org/2.7/library/os.html#os.tempnam
https://bugs.python.org/issue17880
+------+---------------------+------------------------------------+-----------+
| ID | Name | Calls | Severity |
+======+=====================+====================================+===========+
| B325 | tempnam | - os.tempnam | Medium |
| | | - os.tmpnam | |
+------+---------------------+------------------------------------+-----------+
"""
from bandit.blacklists import utils
def gen_blacklist():
"""Generate a list of items to blacklist.
Methods of this type, "bandit.blacklist" plugins, are used to build a list
of items that bandit's built in blacklisting tests will use to trigger
issues. They replace the older blacklist* test plugins and allow
blacklisted items to have a unique bandit ID for filtering and profile
usage.
:return: a dictionary mapping node types to a list of blacklist data
"""
sets = []
sets.append(utils.build_conf_dict(
'pickle', 'B301',
['pickle.loads',
'pickle.load',
'pickle.Unpickler',
'cPickle.loads',
'cPickle.load',
'cPickle.Unpickler',
'dill.loads',
'dill.load',
'dill.Unpickler'],
'Pickle and modules that wrap it can be unsafe when used to '
'deserialize untrusted data, possible security issue.'
))
sets.append(utils.build_conf_dict(
'marshal', 'B302', ['marshal.load', 'marshal.loads'],
'Deserialization with the marshal module is possibly dangerous.'
))
sets.append(utils.build_conf_dict(
'md5', 'B303',
['hashlib.md5',
'hashlib.sha1',
'Crypto.Hash.MD2.new',
'Crypto.Hash.MD4.new',
'Crypto.Hash.MD5.new',
'Crypto.Hash.SHA.new',
'Cryptodome.Hash.MD2.new',
'Cryptodome.Hash.MD4.new',
'Cryptodome.Hash.MD5.new',
'Cryptodome.Hash.SHA.new',
'cryptography.hazmat.primitives.hashes.MD5',
'cryptography.hazmat.primitives.hashes.SHA1'],
'Use of insecure MD2, MD4, MD5, or SHA1 hash function.'
))
sets.append(utils.build_conf_dict(
'ciphers', 'B304',
['Crypto.Cipher.ARC2.new',
'Crypto.Cipher.ARC4.new',
'Crypto.Cipher.Blowfish.new',
'Crypto.Cipher.DES.new',
'Crypto.Cipher.XOR.new',
'Cryptodome.Cipher.ARC2.new',
'Cryptodome.Cipher.ARC4.new',
'Cryptodome.Cipher.Blowfish.new',
'Cryptodome.Cipher.DES.new',
'Cryptodome.Cipher.XOR.new',
'cryptography.hazmat.primitives.ciphers.algorithms.ARC4',
'cryptography.hazmat.primitives.ciphers.algorithms.Blowfish',
'cryptography.hazmat.primitives.ciphers.algorithms.IDEA'],
'Use of insecure cipher {name}. Replace with a known secure'
' cipher such as AES.',
'HIGH'
))
sets.append(utils.build_conf_dict(
'cipher_modes', 'B305',
['cryptography.hazmat.primitives.ciphers.modes.ECB'],
'Use of insecure cipher mode {name}.'
))
sets.append(utils.build_conf_dict(
'mktemp_q', 'B306', ['tempfile.mktemp'],
'Use of insecure and deprecated function (mktemp).'
))
sets.append(utils.build_conf_dict(
'eval', 'B307', ['eval'],
'Use of possibly insecure function - consider using safer '
'ast.literal_eval.'
))
sets.append(utils.build_conf_dict(
'mark_safe', 'B308', ['django.utils.safestring.mark_safe'],
'Use of mark_safe() may expose cross-site scripting '
'vulnerabilities and should be reviewed.'
))
sets.append(utils.build_conf_dict(
'httpsconnection', 'B309',
['httplib.HTTPSConnection',
'http.client.HTTPSConnection',
'six.moves.http_client.HTTPSConnection'],
'Use of HTTPSConnection on older versions of Python prior to 2.7.9 '
'and 3.4.3 does not provide security, see '
'https://wiki.openstack.org/wiki/OSSN/OSSN-0033'
))
sets.append(utils.build_conf_dict(
'urllib_urlopen', 'B310',
['urllib.urlopen',
'urllib.request.urlopen',
'urllib.urlretrieve',
'urllib.request.urlretrieve',
'urllib.URLopener',
'urllib.request.URLopener',
'urllib.FancyURLopener',
'urllib.request.FancyURLopener',
'urllib2.urlopen',
'urllib2.Request',
'six.moves.urllib.request.urlopen',
'six.moves.urllib.request.urlretrieve',
'six.moves.urllib.request.URLopener',
'six.moves.urllib.request.FancyURLopener'],
'Audit url open for permitted schemes. Allowing use of file:/ or '
'custom schemes is often unexpected.'
))
sets.append(utils.build_conf_dict(
'random', 'B311',
['random.random',
'random.randrange',
'random.randint',
'random.choice',
'random.uniform',
'random.triangular'],
'Standard pseudo-random generators are not suitable for '
'security/cryptographic purposes.',
'LOW'
))
sets.append(utils.build_conf_dict(
'telnetlib', 'B312', ['telnetlib.*'],
'Telnet-related functions are being called. Telnet is considered '
'insecure. Use SSH or some other encrypted protocol.',
'HIGH'
))
# Most of this is based off of Christian Heimes' work on defusedxml:
# https://pypi.org/project/defusedxml/#defusedxml-sax
xml_msg = ('Using {name} to parse untrusted XML data is known to be '
'vulnerable to XML attacks. Replace {name} with its '
'defusedxml equivalent function or make sure '
'defusedxml.defuse_stdlib() is called')
sets.append(utils.build_conf_dict(
'xml_bad_cElementTree', 'B313',
['xml.etree.cElementTree.parse',
'xml.etree.cElementTree.iterparse',
'xml.etree.cElementTree.fromstring',
'xml.etree.cElementTree.XMLParser'],
xml_msg
))
sets.append(utils.build_conf_dict(
'xml_bad_ElementTree', 'B314',
['xml.etree.ElementTree.parse',
'xml.etree.ElementTree.iterparse',
'xml.etree.ElementTree.fromstring',
'xml.etree.ElementTree.XMLParser'],
xml_msg
))
sets.append(utils.build_conf_dict(
'xml_bad_expatreader', 'B315', ['xml.sax.expatreader.create_parser'],
xml_msg
))
sets.append(utils.build_conf_dict(
'xml_bad_expatbuilder', 'B316',
['xml.dom.expatbuilder.parse',
'xml.dom.expatbuilder.parseString'],
xml_msg
))
sets.append(utils.build_conf_dict(
'xml_bad_sax', 'B317',
['xml.sax.parse',
'xml.sax.parseString',
'xml.sax.make_parser'],
xml_msg
))
sets.append(utils.build_conf_dict(
'xml_bad_minidom', 'B318',
['xml.dom.minidom.parse',
'xml.dom.minidom.parseString'],
xml_msg
))
sets.append(utils.build_conf_dict(
'xml_bad_pulldom', 'B319',
['xml.dom.pulldom.parse',
'xml.dom.pulldom.parseString'],
xml_msg
))
sets.append(utils.build_conf_dict(
'xml_bad_etree', 'B320',
['lxml.etree.parse',
'lxml.etree.fromstring',
'lxml.etree.RestrictedElement',
'lxml.etree.GlobalParserTLS',
'lxml.etree.getDefaultParser',
'lxml.etree.check_docinfo'],
('Using {name} to parse untrusted XML data is known to be '
'vulnerable to XML attacks. Replace {name} with its '
'defusedxml equivalent function.')
))
# end of XML tests
sets.append(utils.build_conf_dict(
'ftplib', 'B321', ['ftplib.*'],
'FTP-related functions are being called. FTP is considered '
'insecure. Use SSH/SFTP/SCP or some other encrypted protocol.',
'HIGH'
))
sets.append(utils.build_conf_dict(
'input', 'B322', ['input'],
'The input method in Python 2 will read from standard input, '
'evaluate and run the resulting string as python source code. This '
'is similar, though in many ways worse, than using eval. On Python '
'2, use raw_input instead; input is safe in Python 3.',
'HIGH'
))
sets.append(utils.build_conf_dict(
'unverified_context', 'B323', ['ssl._create_unverified_context'],
'By default, Python will create a secure, verified ssl context for '
'use in such classes as HTTPSConnection. However, it still allows '
'using an insecure context via the _create_unverified_context that '
'reverts to the previous behavior that does not validate certificates '
'or perform hostname checks.'
))
# skipped B324 (used in bandit/plugins/hashlib_new_insecure_functions.py)
sets.append(utils.build_conf_dict(
'tempnam', 'B325', ['os.tempnam', 'os.tmpnam'],
'Use of os.tempnam() and os.tmpnam() is vulnerable to symlink '
'attacks. Consider using tmpfile() instead.'
))
return {'Call': sets}
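# Illustrative sketch (not part of bandit): the shape of a single entry in the
# mapping returned above. The field names come from utils.build_conf_dict();
# the values are copied from the B312 telnetlib entry for concreteness.
_example_call_blacklist = {
    'Call': [{
        'name': 'telnetlib',
        'id': 'B312',
        'qualnames': ['telnetlib.*'],
        'message': 'Telnet-related functions are being called. Telnet is '
                   'considered insecure. Use SSH or some other encrypted '
                   'protocol.',
        'level': 'HIGH',
    }],
}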

View File

@@ -0,0 +1,340 @@
# -*- coding:utf-8 -*-
#
# Copyright 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
r"""
======================================================
Blacklist various Python imports known to be dangerous
======================================================
This blacklist data checks for a number of Python modules known to have
possible security implications. The following blacklist tests are run against
any import statements or calls encountered in the scanned code base.
Note that the XML rules listed here are mostly based off of Christian Heimes'
work on defusedxml: https://pypi.org/project/defusedxml/
B401: import_telnetlib
----------------------
A telnet-related module is being imported. Telnet is considered insecure. Use
SSH or some other encrypted protocol.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B401 | import_telnetlib | - telnetlib | high |
+------+---------------------+------------------------------------+-----------+
B402: import_ftplib
-------------------
An FTP-related module is being imported. FTP is considered insecure. Use
SSH/SFTP/SCP or some other encrypted protocol.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B402 | import_ftplib       | - ftplib                           | high      |
+------+---------------------+------------------------------------+-----------+
B403: import_pickle
-------------------
Consider possible security implications associated with these modules.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B403 | import_pickle | - pickle | low |
| | | - cPickle | |
| | | - dill | |
+------+---------------------+------------------------------------+-----------+
B404: import_subprocess
-----------------------
Consider possible security implications associated with these modules.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B404 | import_subprocess | - subprocess | low |
+------+---------------------+------------------------------------+-----------+
B405: import_xml_etree
----------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B405 | import_xml_etree | - xml.etree.cElementTree | low |
| | | - xml.etree.ElementTree | |
+------+---------------------+------------------------------------+-----------+
B406: import_xml_sax
--------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B406 | import_xml_sax | - xml.sax | low |
+------+---------------------+------------------------------------+-----------+
B407: import_xml_expat
----------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B407 | import_xml_expat | - xml.dom.expatbuilder | low |
+------+---------------------+------------------------------------+-----------+
B408: import_xml_minidom
------------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B408 | import_xml_minidom | - xml.dom.minidom | low |
+------+---------------------+------------------------------------+-----------+
B409: import_xml_pulldom
------------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B409 | import_xml_pulldom | - xml.dom.pulldom | low |
+------+---------------------+------------------------------------+-----------+
B410: import_lxml
-----------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B410 | import_lxml | - lxml | low |
+------+---------------------+------------------------------------+-----------+
B411: import_xmlrpclib
----------------------
XMLRPC is particularly dangerous as it is also concerned with communicating
data over a network. Use defused.xmlrpc.monkey_patch() function to monkey-patch
xmlrpclib and mitigate remote XML attacks.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B411 | import_xmlrpclib | - xmlrpclib | high |
+------+---------------------+------------------------------------+-----------+
B412: import_httpoxy
--------------------
httpoxy is a set of vulnerabilities that affect application code running in
CGI, or CGI-like environments. The use of CGI for web applications should be
avoided to prevent this class of attack. More details are available
at https://httpoxy.org/.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B412 | import_httpoxy | - wsgiref.handlers.CGIHandler | high |
| | | - twisted.web.twcgi.CGIScript | |
+------+---------------------+------------------------------------+-----------+
B413: import_pycrypto
---------------------
The pycrypto library is known to have a publicly disclosed buffer overflow
vulnerability (https://github.com/dlitz/pycrypto/issues/176). It is no longer
actively maintained and has been deprecated in favor of the pyca/cryptography
library.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B413 | import_pycrypto | - Crypto.Cipher | high |
| | | - Crypto.Hash | |
| | | - Crypto.IO | |
| | | - Crypto.Protocol | |
| | | - Crypto.PublicKey | |
| | | - Crypto.Random | |
| | | - Crypto.Signature | |
| | | - Crypto.Util | |
+------+---------------------+------------------------------------+-----------+
B414: import_pycryptodome
-------------------------
pycryptodome is a direct fork of pycrypto that has not fully addressed
the issues inherent in PyCrypto. It seems to exist, mainly, as an API
compatible continuation of pycrypto and should be deprecated in favor
of pyca/cryptography which has more support among the Python community.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B414 | import_pycryptodome | - Cryptodome.Cipher | high |
| | | - Cryptodome.Hash | |
| | | - Cryptodome.IO | |
| | | - Cryptodome.Protocol | |
| | | - Cryptodome.PublicKey | |
| | | - Cryptodome.Random | |
| | | - Cryptodome.Signature | |
| | | - Cryptodome.Util | |
+------+---------------------+------------------------------------+-----------+
"""
from bandit.blacklists import utils
def gen_blacklist():
"""Generate a list of items to blacklist.
Methods of this type, "bandit.blacklist" plugins, are used to build a list
of items that bandit's built in blacklisting tests will use to trigger
issues. They replace the older blacklist* test plugins and allow
blacklisted items to have a unique bandit ID for filtering and profile
usage.
:return: a dictionary mapping node types to a list of blacklist data
"""
sets = []
sets.append(utils.build_conf_dict(
'import_telnetlib', 'B401', ['telnetlib'],
'A telnet-related module is being imported. Telnet is '
'considered insecure. Use SSH or some other encrypted protocol.',
'HIGH'
))
sets.append(utils.build_conf_dict(
'import_ftplib', 'B402', ['ftplib'],
'An FTP-related module is being imported. FTP is considered '
'insecure. Use SSH/SFTP/SCP or some other encrypted protocol.',
'HIGH'
))
sets.append(utils.build_conf_dict(
'import_pickle', 'B403', ['pickle', 'cPickle', 'dill'],
'Consider possible security implications associated with '
'{name} module.', 'LOW'
))
sets.append(utils.build_conf_dict(
'import_subprocess', 'B404', ['subprocess'],
'Consider possible security implications associated with '
'{name} module.', 'LOW'
))
# Most of this is based off of Christian Heimes' work on defusedxml:
# https://pypi.org/project/defusedxml/#defusedxml-sax
xml_msg = ('Using {name} to parse untrusted XML data is known to be '
'vulnerable to XML attacks. Replace {name} with the equivalent '
'defusedxml package, or make sure defusedxml.defuse_stdlib() '
'is called.')
lxml_msg = ('Using {name} to parse untrusted XML data is known to be '
'vulnerable to XML attacks. Replace {name} with the '
'equivalent defusedxml package.')
sets.append(utils.build_conf_dict(
'import_xml_etree', 'B405',
['xml.etree.cElementTree', 'xml.etree.ElementTree'], xml_msg, 'LOW'))
sets.append(utils.build_conf_dict(
'import_xml_sax', 'B406', ['xml.sax'], xml_msg, 'LOW'))
sets.append(utils.build_conf_dict(
'import_xml_expat', 'B407', ['xml.dom.expatbuilder'], xml_msg, 'LOW'))
sets.append(utils.build_conf_dict(
'import_xml_minidom', 'B408', ['xml.dom.minidom'], xml_msg, 'LOW'))
sets.append(utils.build_conf_dict(
'import_xml_pulldom', 'B409', ['xml.dom.pulldom'], xml_msg, 'LOW'))
sets.append(utils.build_conf_dict(
'import_lxml', 'B410', ['lxml'], lxml_msg, 'LOW'))
sets.append(utils.build_conf_dict(
'import_xmlrpclib', 'B411', ['xmlrpclib'],
'Using {name} to parse untrusted XML data is known to be '
'vulnerable to XML attacks. Use defused.xmlrpc.monkey_patch() '
'function to monkey-patch xmlrpclib and mitigate XML '
'vulnerabilities.', 'HIGH'))
sets.append(utils.build_conf_dict(
'import_httpoxy', 'B412',
['wsgiref.handlers.CGIHandler', 'twisted.web.twcgi.CGIScript',
'twisted.web.twcgi.CGIDirectory'],
'Consider possible security implications associated with '
'{name} module.', 'HIGH'
))
sets.append(utils.build_conf_dict(
'import_pycrypto', 'B413',
['Crypto.Cipher',
'Crypto.Hash',
'Crypto.IO',
'Crypto.Protocol',
'Crypto.PublicKey',
'Crypto.Random',
'Crypto.Signature',
'Crypto.Util'],
'The pyCrypto library and its module {name} are no longer actively '
'maintained and have been deprecated. '
'Consider using pyca/cryptography library.', 'HIGH'))
sets.append(utils.build_conf_dict(
'import_pycryptodome', 'B414',
['Cryptodome.Cipher',
'Cryptodome.Hash',
'Cryptodome.IO',
'Cryptodome.Protocol',
'Cryptodome.PublicKey',
'Cryptodome.Random',
'Cryptodome.Signature',
'Cryptodome.Util'],
'The pycryptodome library is not considered a secure alternative '
'to pycrypto. '
'Consider using pyca/cryptography library.', 'HIGH'))
return {'Import': sets, 'ImportFrom': sets, 'Call': sets}
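# Illustrative sketch (not part of bandit): the kind of rewrite the XML
# messages above push toward. Parsing untrusted XML with the stdlib parser
# (flagged by B405/B313) is replaced by the defusedxml equivalent; the file
# name is a placeholder.
def _example_safe_xml_parse(path='untrusted.xml'):
    import defusedxml.ElementTree as SafeET

    # flagged: xml.etree.ElementTree.parse(path)
    return SafeET.parse(path)  # defusedxml refuses entity-expansion tricks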

View File

@@ -0,0 +1,22 @@
# -*- coding:utf-8 -*-
#
# Copyright 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def build_conf_dict(name, bid, qualnames, message, level='MEDIUM'):
"""Build and return a blacklist configuration dict."""
return {'name': name, 'id': bid, 'message': message,
'qualnames': qualnames, 'level': level}
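# Usage sketch (illustrative, not part of bandit): a blacklist plugin builds
# one entry per check and groups entries by the AST node type they apply to,
# as the gen_blacklist() functions above do. Values mirror the B401 entry.
_example_entry = build_conf_dict(
    'import_telnetlib', 'B401', ['telnetlib'],
    'A telnet-related module is being imported.', 'HIGH')
_example_blacklist = {'Import': [_example_entry], 'ImportFrom': [_example_entry]}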

View File

@@ -0,0 +1,224 @@
# -*- coding:utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Enterprise
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# #############################################################################
# Bandit Baseline is a tool that runs Bandit against a Git commit, and compares
# the current commit findings to the parent commit findings.
# To do this it checks out the parent commit, runs Bandit (with any provided
# filters or profiles), checks out the current commit, runs Bandit, and then
# reports on any new findings.
# #############################################################################
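# Illustrative sketch (assumed workflow, not part of this script): the manual
# sequence that main() below automates. The 'HEAD~1' ref, target path and
# baseline file name are placeholders; the bandit flags (-r, -f, -o, -b) are
# the same ones the steps in main() pass through.
def _manual_baseline_example(target='.'):
    import subprocess
    # 1. run Bandit on the parent commit and keep a JSON baseline
    subprocess.check_call(['git', 'checkout', 'HEAD~1'])
    subprocess.call(['bandit', '-r', target, '-f', 'json', '-o', 'parent.json'])
    # 2. return to the current commit and report only findings not in the baseline
    subprocess.check_call(['git', 'checkout', '-'])
    return subprocess.call(['bandit', '-r', target, '-b', 'parent.json'])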
import argparse
import contextlib
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import git
bandit_args = sys.argv[1:]
baseline_tmp_file = '_bandit_baseline_run.json_'
current_commit = None
default_output_format = 'terminal'
LOG = logging.getLogger(__name__)
repo = None
report_basename = 'bandit_baseline_result'
valid_baseline_formats = ['txt', 'html', 'json']
def main():
# our cleanup function needs this and can't be passed arguments
global current_commit
global repo
parent_commit = None
output_format = None
repo = None
report_fname = None
init_logger()
output_format, repo, report_fname = initialize()
if not repo:
sys.exit(2)
# #################### Find current and parent commits ####################
try:
commit = repo.commit()
current_commit = commit.hexsha
LOG.info('Got current commit: [%s]', commit.name_rev)
commit = commit.parents[0]
parent_commit = commit.hexsha
LOG.info('Got parent commit: [%s]', commit.name_rev)
except git.GitCommandError:
LOG.error("Unable to get current or parent commit")
sys.exit(2)
except IndexError:
LOG.error("Parent commit not available")
sys.exit(2)
# #################### Run Bandit against both commits ####################
output_type = (['-f', 'txt'] if output_format == default_output_format
else ['-o', report_fname])
with baseline_setup() as t:
bandit_tmpfile = "{}/{}".format(t, baseline_tmp_file)
steps = [{'message': 'Getting Bandit baseline results',
'commit': parent_commit,
'args': bandit_args + ['-f', 'json', '-o', bandit_tmpfile]},
{'message': 'Comparing Bandit results to baseline',
'commit': current_commit,
'args': bandit_args + ['-b', bandit_tmpfile] + output_type}]
return_code = None
for step in steps:
repo.head.reset(commit=step['commit'], working_tree=True)
LOG.info(step['message'])
bandit_command = ['bandit'] + step['args']
try:
output = subprocess.check_output(bandit_command)
except subprocess.CalledProcessError as e:
output = e.output
return_code = e.returncode
else:
return_code = 0
output = output.decode('utf-8') # subprocess returns bytes
if return_code not in [0, 1]:
LOG.error("Error running command: %s\nOutput: %s\n",
bandit_args, output)
# #################### Output and exit ####################################
# print output or display message about written report
if output_format == default_output_format:
print(output)
else:
LOG.info("Successfully wrote %s", report_fname)
# exit with the code the last Bandit run returned
sys.exit(return_code)
# #################### Clean up before exit ###################################
@contextlib.contextmanager
def baseline_setup():
d = tempfile.mkdtemp()
yield d
shutil.rmtree(d, True)
if repo:
repo.head.reset(commit=current_commit, working_tree=True)
# #################### Setup logging ##########################################
def init_logger():
LOG.handlers = []
log_level = logging.INFO
log_format_string = "[%(levelname)7s ] %(message)s"
logging.captureWarnings(True)
LOG.setLevel(log_level)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(log_format_string))
LOG.addHandler(handler)
# #################### Perform initialization and validate assumptions ########
def initialize():
valid = True
# #################### Parse Args #########################################
parser = argparse.ArgumentParser(
description='Bandit Baseline - Generates Bandit results compared to '
'a baseline',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='Additional Bandit arguments such as severity filtering (-ll) '
'can be added and will be passed to Bandit.'
)
parser.add_argument('targets', metavar='targets', type=str, nargs='+',
help='source file(s) or directory(s) to be tested')
parser.add_argument('-f', dest='output_format', action='store',
default='terminal', help='specify output format',
choices=valid_baseline_formats)
args, unknown = parser.parse_known_args()
# #################### Setup Output #######################################
# set the output format, or use a default if not provided
output_format = (args.output_format if args.output_format
else default_output_format)
if output_format == default_output_format:
LOG.info("No output format specified, using %s", default_output_format)
# set the report name based on the output format
report_fname = "{}.{}".format(report_basename, output_format)
# #################### Check Requirements #################################
try:
repo = git.Repo(os.getcwd())
except git.exc.InvalidGitRepositoryError:
LOG.error("Bandit baseline must be called from a git project root")
valid = False
except git.exc.GitCommandNotFound:
LOG.error("Git command not found")
valid = False
else:
if repo.is_dirty():
LOG.error("Current working directory is dirty and must be "
"resolved")
valid = False
# if output format is specified, we need to be able to write the report
if output_format != default_output_format and os.path.exists(report_fname):
LOG.error("File %s already exists, aborting", report_fname)
valid = False
# Bandit needs to be able to create this temp file
if os.path.exists(baseline_tmp_file):
LOG.error("Temporary file %s needs to be removed prior to running",
baseline_tmp_file)
valid = False
# we must validate -o is not provided, as it will mess up Bandit baseline
if '-o' in bandit_args:
LOG.error("Bandit baseline must not be called with the -o option")
valid = False
return (output_format, repo, report_fname) if valid else (None, None, None)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,186 @@
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import importlib
import logging
import os
import sys
import yaml
from bandit.core import extension_loader
PROG_NAME = 'bandit_conf_generator'
LOG = logging.getLogger(__name__)
template = """
### Bandit config file generated from:
# '{cli}'
### This config may optionally select a subset of tests to run or skip by
### filling out the 'tests' and 'skips' lists given below. If no tests are
### specified for inclusion then it is assumed all tests are desired. The skips
### set will remove specific tests from the include set. This can be controlled
### using the -t/-s CLI options. Note that the same test ID should not appear
### in both 'tests' and 'skips'; this would be nonsensical and is detected by
### Bandit at runtime.
# Available tests:
{test_list}
# (optional) list included test IDs here, eg '[B101, B406]':
{test}
# (optional) list skipped test IDs here, eg '[B101, B406]':
{skip}
### (optional) plugin settings - some test plugins require configuration data
### that may be given here, per-plugin. All bandit test plugins have a built in
### set of sensible defaults and these will be used if no configuration is
### provided. It is not necessary to provide settings for every (or any) plugin
### if the defaults are acceptable.
{settings}
"""
def init_logger():
LOG.handlers = []
log_level = logging.INFO
log_format_string = "[%(levelname)5s]: %(message)s"
logging.captureWarnings(True)
LOG.setLevel(log_level)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(log_format_string))
LOG.addHandler(handler)
def parse_args():
help_description = """Bandit Config Generator
This tool is used to generate an optional profile. The profile may be used
to include or skip tests and override values for plugins.
When used to store an output profile, this tool will output a template that
includes all plugins and their default settings. Any settings which aren't
being overridden can be safely removed from the profile and default values
will be used. Bandit will prefer settings from the profile over the built
in values."""
parser = argparse.ArgumentParser(
description=help_description,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--show-defaults', dest='show_defaults',
action='store_true',
help='show the default settings values for each '
'plugin but do not output a profile')
parser.add_argument('-o', '--out', dest='output_file',
action='store',
help='output file to save profile')
parser.add_argument(
'-t', '--tests', dest='tests',
action='store', default=None, type=str,
help='list of test names to run')
parser.add_argument(
'-s', '--skip', dest='skips',
action='store', default=None, type=str,
help='list of test names to skip')
args = parser.parse_args()
if not args.output_file and not args.show_defaults:
parser.print_help()
parser.exit(1)
return args
def get_config_settings():
config = {}
for plugin in extension_loader.MANAGER.plugins:
fn_name = plugin.name
function = plugin.plugin
# if a function takes config...
if hasattr(function, '_takes_config'):
fn_module = importlib.import_module(function.__module__)
# call the config generator if it exists
if hasattr(fn_module, 'gen_config'):
config[fn_name] = fn_module.gen_config(function._takes_config)
return yaml.safe_dump(config, default_flow_style=False)
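# Illustrative sketch (assumed plugin shape, not part of this script): what
# get_config_settings() above looks for. A plugin whose check function was
# registered as taking config exposes a module-level gen_config() returning
# its default settings; the name and values below only mirror the pattern of
# bandit's hardcoded_tmp plugin and are examples.
def _example_plugin_gen_config(name):
    if name == 'hardcoded_tmp_directory':
        return {'tmp_dirs': ['/tmp', '/var/tmp', '/dev/shm']}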
def main():
init_logger()
args = parse_args()
yaml_settings = get_config_settings()
if args.show_defaults:
print(yaml_settings)
if args.output_file:
if os.path.exists(os.path.abspath(args.output_file)):
LOG.error("File %s already exists, exiting", args.output_file)
sys.exit(2)
try:
with open(args.output_file, 'w') as f:
skips = args.skips.split(',') if args.skips else []
tests = args.tests.split(',') if args.tests else []
for skip in skips:
if not extension_loader.MANAGER.check_id(skip):
raise RuntimeError('unknown ID in skips: %s' % skip)
for test in tests:
if not extension_loader.MANAGER.check_id(test):
raise RuntimeError('unknown ID in tests: %s' % test)
tpl = "# {0} : {1}"
test_list = [tpl.format(t.plugin._test_id, t.name)
for t in extension_loader.MANAGER.plugins]
others = [tpl.format(k, v['name']) for k, v in (
extension_loader.MANAGER.blacklist_by_id.items())]
test_list.extend(others)
test_list.sort()
contents = template.format(
cli=" ".join(sys.argv),
settings=yaml_settings,
test_list="\n".join(test_list),
skip='skips: ' + str(skips) if skips else 'skips:',
test='tests: ' + str(tests) if tests else 'tests:')
f.write(contents)
except IOError:
LOG.error("Unable to open %s for writing", args.output_file)
except Exception as e:
LOG.error("Error: %s", e)
else:
LOG.info("Successfully wrote profile: %s", args.output_file)
return 0
if __name__ == '__main__':
sys.exit(main())

View File

@@ -0,0 +1,404 @@
# -*- coding:utf-8 -*-
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import fnmatch
import logging
import os
import sys
import textwrap
import bandit
from bandit.core import config as b_config
from bandit.core import constants
from bandit.core import manager as b_manager
from bandit.core import utils
BASE_CONFIG = 'bandit.yaml'
LOG = logging.getLogger()
def _init_logger(debug=False, log_format=None):
'''Initialize the logger
:param debug: Whether to enable debug mode
:return: An instantiated logging instance
'''
LOG.handlers = []
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
if not log_format:
# default log format
log_format_string = constants.log_format_string
else:
log_format_string = log_format
logging.captureWarnings(True)
LOG.setLevel(log_level)
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter(log_format_string))
LOG.addHandler(handler)
LOG.debug("logging initialized")
def _get_options_from_ini(ini_path, target):
"""Return a dictionary of config options or None if we can't load any."""
ini_file = None
if ini_path:
ini_file = ini_path
else:
bandit_files = []
for t in target:
for root, dirnames, filenames in os.walk(t):
for filename in fnmatch.filter(filenames, '.bandit'):
bandit_files.append(os.path.join(root, filename))
if len(bandit_files) > 1:
LOG.error('Multiple .bandit files found - scan separately or '
'choose one with --ini\n\t%s', ', '.join(bandit_files))
sys.exit(2)
elif len(bandit_files) == 1:
ini_file = bandit_files[0]
LOG.info('Found project level .bandit file: %s', bandit_files[0])
if ini_file:
return utils.parse_ini_file(ini_file)
else:
return None
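# Illustrative sketch (assumed contents): a project-level .bandit file that
# the discovery above would pick up. The option names match the keys read
# from ini_options in main() below; the section header and values are
# placeholders, not taken from this repository.
_EXAMPLE_BANDIT_INI = """\
[bandit]
targets: src
exclude: /src/tests
tests: B101,B301
skips: B404
"""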
def _init_extensions():
from bandit.core import extension_loader as ext_loader
return ext_loader.MANAGER
def _log_option_source(arg_val, ini_val, option_name):
"""It's useful to show the source of each option."""
if arg_val:
LOG.info("Using command line arg for %s", option_name)
return arg_val
elif ini_val:
LOG.info("Using ini file for %s", option_name)
return ini_val
else:
return None
def _running_under_virtualenv():
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, 'base_prefix', sys.prefix):
return True
def _get_profile(config, profile_name, config_path):
profile = {}
if profile_name:
profiles = config.get_option('profiles') or {}
profile = profiles.get(profile_name)
if profile is None:
raise utils.ProfileNotFound(config_path, profile_name)
LOG.debug("read in legacy profile '%s': %s", profile_name, profile)
else:
profile['include'] = set(config.get_option('tests') or [])
profile['exclude'] = set(config.get_option('skips') or [])
return profile
def _log_info(args, profile):
inc = ",".join([t for t in profile['include']]) or "None"
exc = ",".join([t for t in profile['exclude']]) or "None"
LOG.info("profile include tests: %s", inc)
LOG.info("profile exclude tests: %s", exc)
LOG.info("cli include tests: %s", args.tests)
LOG.info("cli exclude tests: %s", args.skips)
def main():
# bring our logging stuff up as early as possible
debug = ('-d' in sys.argv or '--debug' in sys.argv)
_init_logger(debug)
extension_mgr = _init_extensions()
baseline_formatters = [f.name for f in filter(lambda x:
hasattr(x.plugin,
'_accepts_baseline'),
extension_mgr.formatters)]
# now do normal startup
parser = argparse.ArgumentParser(
description='Bandit - a Python source code security analyzer',
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'targets', metavar='targets', type=str, nargs='*',
help='source file(s) or directory(s) to be tested'
)
parser.add_argument(
'-r', '--recursive', dest='recursive',
action='store_true', help='find and process files in subdirectories'
)
parser.add_argument(
'-a', '--aggregate', dest='agg_type',
action='store', default='file', type=str,
choices=['file', 'vuln'],
help='aggregate output by vulnerability (default) or by filename'
)
parser.add_argument(
'-n', '--number', dest='context_lines',
action='store', default=3, type=int,
help='maximum number of code lines to output for each issue'
)
parser.add_argument(
'-c', '--configfile', dest='config_file',
action='store', default=None, type=str,
help='optional config file to use for selecting plugins and '
'overriding defaults'
)
parser.add_argument(
'-p', '--profile', dest='profile',
action='store', default=None, type=str,
help='profile to use (defaults to executing all tests)'
)
parser.add_argument(
'-t', '--tests', dest='tests',
action='store', default=None, type=str,
help='comma-separated list of test IDs to run'
)
parser.add_argument(
'-s', '--skip', dest='skips',
action='store', default=None, type=str,
help='comma-separated list of test IDs to skip'
)
parser.add_argument(
'-l', '--level', dest='severity', action='count',
default=1, help='report only issues of a given severity level or '
'higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)'
)
parser.add_argument(
'-i', '--confidence', dest='confidence', action='count',
default=1, help='report only issues of a given confidence level or '
'higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)'
)
output_format = 'screen' if sys.stdout.isatty() else 'txt'
parser.add_argument(
'-f', '--format', dest='output_format', action='store',
default=output_format, help='specify output format',
choices=sorted(extension_mgr.formatter_names)
)
parser.add_argument(
'--msg-template', action='store',
default=None, help='specify output message template'
' (only usable with --format custom),'
' see CUSTOM FORMAT section'
' for list of available values',
)
parser.add_argument(
'-o', '--output', dest='output_file', action='store', nargs='?',
type=argparse.FileType('w'), default=sys.stdout,
help='write report to filename'
)
parser.add_argument(
'-v', '--verbose', dest='verbose', action='store_true',
help='output extra information like excluded and included files'
)
parser.add_argument(
'-d', '--debug', dest='debug', action='store_true',
help='turn on debug mode'
)
parser.add_argument(
'--ignore-nosec', dest='ignore_nosec', action='store_true',
help='do not skip lines with # nosec comments'
)
parser.add_argument(
'-x', '--exclude', dest='excluded_paths', action='store',
default='', help='comma-separated list of paths to exclude from scan '
'(note that these are in addition to the excluded '
'paths provided in the config file)'
)
parser.add_argument(
'-b', '--baseline', dest='baseline', action='store',
default=None, help='path of a baseline report to compare against '
'(only JSON-formatted files are accepted)'
)
parser.add_argument(
'--ini', dest='ini_path', action='store', default=None,
help='path to a .bandit file that supplies command line arguments'
)
python_ver = sys.version.replace('\n', '')
parser.add_argument(
'--version', action='version',
version='%(prog)s {version}\n python version = {python}'.format(
version=bandit.__version__, python=python_ver)
)
parser.set_defaults(debug=False)
parser.set_defaults(verbose=False)
parser.set_defaults(ignore_nosec=False)
plugin_info = ["%s\t%s" % (a[0], a[1].name) for a in
extension_mgr.plugins_by_id.items()]
blacklist_info = []
for a in extension_mgr.blacklist.items():
for b in a[1]:
blacklist_info.append('%s\t%s' % (b['id'], b['name']))
plugin_list = '\n\t'.join(sorted(set(plugin_info + blacklist_info)))
dedent_text = textwrap.dedent('''
CUSTOM FORMATTING
-----------------
Available tags:
{abspath}, {relpath}, {line}, {test_id},
{severity}, {msg}, {confidence}, {range}
Example usage:
Default template:
bandit -r examples/ --format custom --msg-template \\
"{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"
Provides same output as:
bandit -r examples/ --format custom
Tags can also be formatted in python string.format() style:
bandit -r examples/ --format custom --msg-template \\
"{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"
See python documentation for more information about formatting style:
https://docs.python.org/3.4/library/string.html
The following tests were discovered and loaded:
-----------------------------------------------
''')
parser.epilog = dedent_text + "\t{0}".format(plugin_list)
# setup work - parse arguments, and initialize BanditManager
args = parser.parse_args()
# Check that `--msg-template` is only used together with the custom formatter
if args.output_format != 'custom' and args.msg_template is not None:
parser.error("--msg-template can only be used with --format=custom")
try:
b_conf = b_config.BanditConfig(config_file=args.config_file)
except utils.ConfigError as e:
LOG.error(e)
sys.exit(2)
# Handle .bandit files in projects to pass cmdline args from file
ini_options = _get_options_from_ini(args.ini_path, args.targets)
if ini_options:
# prefer command line, then ini file
args.excluded_paths = _log_option_source(args.excluded_paths,
ini_options.get('exclude'),
'excluded paths')
args.skips = _log_option_source(args.skips, ini_options.get('skips'),
'skipped tests')
args.tests = _log_option_source(args.tests, ini_options.get('tests'),
'selected tests')
ini_targets = ini_options.get('targets')
if ini_targets:
ini_targets = ini_targets.split(',')
args.targets = _log_option_source(args.targets, ini_targets,
'selected targets')
# TODO(tmcpeak): any other useful options to pass from .bandit?
if not args.targets:
LOG.error("No targets found in CLI or ini files, exiting.")
sys.exit(2)
# if the log format string was set in the options, reinitialize
if b_conf.get_option('log_format'):
log_format = b_conf.get_option('log_format')
_init_logger(debug, log_format=log_format)
try:
profile = _get_profile(b_conf, args.profile, args.config_file)
_log_info(args, profile)
profile['include'].update(args.tests.split(',') if args.tests else [])
profile['exclude'].update(args.skips.split(',') if args.skips else [])
extension_mgr.validate_profile(profile)
except (utils.ProfileNotFound, ValueError) as e:
LOG.error(e)
sys.exit(2)
b_mgr = b_manager.BanditManager(b_conf, args.agg_type, args.debug,
profile=profile, verbose=args.verbose,
ignore_nosec=args.ignore_nosec)
if args.baseline is not None:
try:
with open(args.baseline) as bl:
data = bl.read()
b_mgr.populate_baseline(data)
except IOError:
LOG.warning("Could not open baseline report: %s", args.baseline)
sys.exit(2)
if args.output_format not in baseline_formatters:
LOG.warning('Baseline must be used with one of the following '
'formats: ' + str(baseline_formatters))
sys.exit(2)
if args.output_format != "json":
if args.config_file:
LOG.info("using config: %s", args.config_file)
LOG.info("running on Python %d.%d.%d", sys.version_info.major,
sys.version_info.minor, sys.version_info.micro)
# initiate file discovery step within Bandit Manager
b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)
if not b_mgr.b_ts.tests:
LOG.error('No tests would be run, please check the profile.')
sys.exit(2)
# initiate execution of tests within Bandit Manager
b_mgr.run_tests()
LOG.debug(b_mgr.b_ma)
LOG.debug(b_mgr.metrics)
# trigger output of results by Bandit Manager
sev_level = constants.RANKING[args.severity - 1]
conf_level = constants.RANKING[args.confidence - 1]
b_mgr.output_results(args.context_lines,
sev_level,
conf_level,
args.output_file,
args.output_format,
args.msg_template)
# return an exit code of 1 if there are results, 0 otherwise
if b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0:
sys.exit(1)
else:
sys.exit(0)
if __name__ == '__main__':
main()
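# Illustrative sketch (not part of bandit): relying on the exit-code contract
# implemented above (1 when findings pass the severity/confidence filters,
# 0 otherwise). The target path and the -ll/-ii filter flags are example
# choices, not defaults.
def _example_ci_gate(path='src'):
    import subprocess
    rc = subprocess.call(['bandit', '-r', path, '-ll', '-ii'])
    return rc == 0  # True only when no MEDIUM-or-higher findings remain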

View File

@@ -0,0 +1,27 @@
# -*- coding:utf-8 -*-
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from bandit.core import config # noqa
from bandit.core import context # noqa
from bandit.core import manager # noqa
from bandit.core import meta_ast # noqa
from bandit.core import node_visitor # noqa
from bandit.core import test_set # noqa
from bandit.core import tester # noqa
from bandit.core import utils # noqa
from bandit.core.constants import * # noqa
from bandit.core.issue import * # noqa
from bandit.core.test_properties import * # noqa

Some files were not shown because too many files have changed in this diff.